From 9767e08b39cf97765bd6b004059ca18ed1d8c6a9 Mon Sep 17 00:00:00 2001 From: ashmrtn <3891298+ashmrtn@users.noreply.github.com> Date: Thu, 28 Sep 2023 11:19:14 -0700 Subject: [PATCH 01/26] Add recommendations on when to run maintenance (#4398) #### Does this PR need a docs update or release note? - [ ] :white_check_mark: Yes, it's included - [ ] :clock1: Yes, but in a later PR - [x] :no_entry: No #### Type of change - [ ] :sunflower: Feature - [ ] :bug: Bugfix - [x] :world_map: Documentation - [ ] :robot: Supportability/Tests - [ ] :computer: CI/Deployment - [ ] :broom: Tech Debt/Cleanup #### Test Plan - [x] :muscle: Manual - [ ] :zap: Unit test - [ ] :green_heart: E2E --- website/docs/setup/maintenance.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/website/docs/setup/maintenance.md b/website/docs/setup/maintenance.md index a2ec6b1ed..51563d492 100644 --- a/website/docs/setup/maintenance.md +++ b/website/docs/setup/maintenance.md @@ -43,3 +43,15 @@ may not result in a reduction of objects in the storage service Corso is backing Deletion of old objects in the storage service depends on both wall-clock time and running maintenance. Later maintenance runs on the repository will remove the data. + +## Maintenance guidelines + +For the best experience, the recommendation is to run metadata maintenance every +20–30 backups. Complete maintenance should be run every 1–2 weeks +depending on how many backups are deleted from the repo. More backup deletions +means that complete maintenance should be run more often so that unneeded blobs +in storage get deleted. + +Not running maintenance exactly according to the recommendations won't impact +the correctness of the data in the repo, but could result in decreased +performance. From c3f94fd7f76f377e4728c715abbb8c7846e9fb25 Mon Sep 17 00:00:00 2001 From: Keepers Date: Thu, 28 Sep 2023 15:23:26 -0600 Subject: [PATCH 02/26] move drive pagers to pager pattern (#4316) Drive pager usage currently showcases strong coupling between two layers: drive collection logic processing and the drive API. This PR separates that coupling by moving the full item enumeration process into the API, and letting the collection logic process the results. This acts as both a simplification of complex code and a clearer separation of ownership between the two layers. A detrimental side effect of this change is that drive item enumeration has moved from page-streaming (ie: each page is fully processed before moving on to the next) to batch processing (ie: all items are stored in memory and processed in a single pass). Because this is an unacceptable regression, a follow-up PR will appear shortly with better handling for stream-processing enumeration from the API layer as a standard part of the pattern for all pager implementations. --- #### Does this PR need a docs update or release note? 
- [x] :no_entry: No #### Type of change - [x] :broom: Tech Debt/Cleanup #### Test Plan - [x] :zap: Unit test - [x] :green_heart: E2E --- src/cmd/purge/scripts/onedrivePurge.ps1 | 2 +- .../common/prefixmatcher/mock/mock.go | 2 +- .../m365/collection/drive/collections.go | 208 +++++---- .../m365/collection/drive/collections_test.go | 416 +++++------------- .../m365/collection/drive/handlers.go | 14 +- .../m365/collection/drive/item_collector.go | 142 ------ .../m365/collection/drive/item_handler.go | 14 +- .../m365/collection/drive/item_test.go | 97 +--- .../m365/collection/drive/library_handler.go | 14 +- src/internal/m365/collection/drive/restore.go | 6 +- .../m365/collection/drive/url_cache.go | 61 +-- .../m365/collection/drive/url_cache_test.go | 194 +++----- .../m365/collection/groups/backup_test.go | 5 - .../m365/service/onedrive/mock/handlers.go | 75 +++- .../m365/service/sharepoint/backup_test.go | 12 +- src/pkg/fault/fault.go | 12 +- src/pkg/selectors/exchange.go | 2 +- src/pkg/selectors/groups.go | 2 +- src/pkg/selectors/onedrive.go | 2 +- src/pkg/selectors/scopes.go | 4 +- src/pkg/selectors/scopes_test.go | 12 +- src/pkg/selectors/sharepoint.go | 2 +- src/pkg/services/m365/api/config.go | 2 +- src/pkg/services/m365/api/delta.go | 11 - src/pkg/services/m365/api/drive.go | 18 + src/pkg/services/m365/api/drive_pager.go | 75 ++-- src/pkg/services/m365/api/drive_pager_test.go | 15 + src/pkg/services/m365/api/drive_test.go | 27 +- src/pkg/services/m365/api/item_pager.go | 14 + src/pkg/services/m365/api/mock/pager.go | 9 +- 30 files changed, 551 insertions(+), 918 deletions(-) delete mode 100644 src/internal/m365/collection/drive/item_collector.go delete mode 100644 src/pkg/services/m365/api/delta.go diff --git a/src/cmd/purge/scripts/onedrivePurge.ps1 b/src/cmd/purge/scripts/onedrivePurge.ps1 index e8f258b95..4204d5596 100644 --- a/src/cmd/purge/scripts/onedrivePurge.ps1 +++ b/src/cmd/purge/scripts/onedrivePurge.ps1 @@ -229,7 +229,7 @@ elseif (![string]::IsNullOrEmpty($Site)) { } } else { - Write-Host "User (for OneDrvie) or Site (for Sharpeoint) is required" + Write-Host "User (for OneDrive) or Site (for Sharepoint) is required" Exit } diff --git a/src/internal/common/prefixmatcher/mock/mock.go b/src/internal/common/prefixmatcher/mock/mock.go index ad4568114..4516f8665 100644 --- a/src/internal/common/prefixmatcher/mock/mock.go +++ b/src/internal/common/prefixmatcher/mock/mock.go @@ -27,7 +27,7 @@ func NewPrefixMap(m map[string]map[string]struct{}) *PrefixMap { func (pm PrefixMap) AssertEqual(t *testing.T, r prefixmatcher.StringSetReader) { if pm.Empty() { - require.True(t, r.Empty(), "both prefix maps are empty") + require.True(t, r.Empty(), "result prefixMap should be empty but contains keys: %+v", r.Keys()) return } diff --git a/src/internal/m365/collection/drive/collections.go b/src/internal/m365/collection/drive/collections.go index 40d4d7cd6..2f54b0429 100644 --- a/src/internal/m365/collection/drive/collections.go +++ b/src/internal/m365/collection/drive/collections.go @@ -230,16 +230,16 @@ func (c *Collections) Get( ssmb *prefixmatcher.StringSetMatchBuilder, errs *fault.Bus, ) ([]data.BackupCollection, bool, error) { - prevDeltas, oldPathsByDriveID, canUsePreviousBackup, err := deserializeMetadata(ctx, prevMetadata) + prevDriveIDToDelta, oldPrevPathsByDriveID, canUsePrevBackup, err := deserializeMetadata(ctx, prevMetadata) if err != nil { return nil, false, err } - ctx = clues.Add(ctx, "can_use_previous_backup", canUsePreviousBackup) + ctx = clues.Add(ctx, 
"can_use_previous_backup", canUsePrevBackup) driveTombstones := map[string]struct{}{} - for driveID := range oldPathsByDriveID { + for driveID := range oldPrevPathsByDriveID { driveTombstones[driveID] = struct{}{} } @@ -257,76 +257,88 @@ func (c *Collections) Get( } var ( - // Drive ID -> delta URL for drive - deltaURLs = map[string]string{} - // Drive ID -> folder ID -> folder path - folderPaths = map[string]map[string]string{} - numPrevItems = 0 + driveIDToDeltaLink = map[string]string{} + driveIDToPrevPaths = map[string]map[string]string{} + numPrevItems = 0 ) for _, d := range drives { var ( - driveID = ptr.Val(d.GetId()) - driveName = ptr.Val(d.GetName()) - prevDelta = prevDeltas[driveID] - oldPaths = oldPathsByDriveID[driveID] - numOldDelta = 0 - ictx = clues.Add(ctx, "drive_id", driveID, "drive_name", driveName) + driveID = ptr.Val(d.GetId()) + driveName = ptr.Val(d.GetName()) + ictx = clues.Add( + ctx, + "drive_id", driveID, + "drive_name", clues.Hide(driveName)) + + excludedItemIDs = map[string]struct{}{} + oldPrevPaths = oldPrevPathsByDriveID[driveID] + prevDeltaLink = prevDriveIDToDelta[driveID] + + // itemCollection is used to identify which collection a + // file belongs to. This is useful to delete a file from the + // collection it was previously in, in case it was moved to a + // different collection within the same delta query + // item ID -> item ID + itemCollection = map[string]string{} ) delete(driveTombstones, driveID) + if _, ok := driveIDToPrevPaths[driveID]; !ok { + driveIDToPrevPaths[driveID] = map[string]string{} + } + if _, ok := c.CollectionMap[driveID]; !ok { c.CollectionMap[driveID] = map[string]*Collection{} } - if len(prevDelta) > 0 { - numOldDelta++ - } - logger.Ctx(ictx).Infow( "previous metadata for drive", - "num_paths_entries", len(oldPaths), - "num_deltas_entries", numOldDelta) + "num_paths_entries", len(oldPrevPaths)) - delta, paths, excluded, err := collectItems( + items, du, err := c.handler.EnumerateDriveItemsDelta( ictx, - c.handler.NewItemPager(driveID, "", api.DriveItemSelectDefault()), driveID, - driveName, - c.UpdateCollections, - oldPaths, - prevDelta, - errs) + prevDeltaLink) if err != nil { return nil, false, err } - // Used for logging below. - numDeltas := 0 - // It's alright to have an empty folders map (i.e. no folders found) but not // an empty delta token. This is because when deserializing the metadata we // remove entries for which there is no corresponding delta token/folder. If // we leave empty delta tokens then we may end up setting the State field // for collections when not actually getting delta results. - if len(delta.URL) > 0 { - deltaURLs[driveID] = delta.URL - numDeltas++ + if len(du.URL) > 0 { + driveIDToDeltaLink[driveID] = du.URL + } + + newPrevPaths, err := c.UpdateCollections( + ctx, + driveID, + driveName, + items, + oldPrevPaths, + itemCollection, + excludedItemIDs, + du.Reset, + errs) + if err != nil { + return nil, false, clues.Stack(err) } // Avoid the edge case where there's no paths but we do have a valid delta // token. We can accomplish this by adding an empty paths map for this // drive. If we don't have this then the next backup won't use the delta // token because it thinks the folder paths weren't persisted. 
- folderPaths[driveID] = map[string]string{} - maps.Copy(folderPaths[driveID], paths) + driveIDToPrevPaths[driveID] = map[string]string{} + maps.Copy(driveIDToPrevPaths[driveID], newPrevPaths) logger.Ctx(ictx).Infow( "persisted metadata for drive", - "num_paths_entries", len(paths), - "num_deltas_entries", numDeltas, - "delta_reset", delta.Reset) + "num_new_paths_entries", len(newPrevPaths), + "delta_reset", du.Reset) numDriveItems := c.NumItems - numPrevItems numPrevItems = c.NumItems @@ -338,7 +350,7 @@ func (c *Collections) Get( err = c.addURLCacheToDriveCollections( ictx, driveID, - prevDelta, + prevDeltaLink, errs) if err != nil { return nil, false, err @@ -347,8 +359,8 @@ func (c *Collections) Get( // For both cases we don't need to do set difference on folder map if the // delta token was valid because we should see all the changes. - if !delta.Reset { - if len(excluded) == 0 { + if !du.Reset { + if len(excludedItemIDs) == 0 { continue } @@ -357,7 +369,7 @@ func (c *Collections) Get( return nil, false, clues.Wrap(err, "making exclude prefix").WithClues(ictx) } - ssmb.Add(p.String(), excluded) + ssmb.Add(p.String(), excludedItemIDs) continue } @@ -372,13 +384,11 @@ func (c *Collections) Get( foundFolders[id] = struct{}{} } - for fldID, p := range oldPaths { + for fldID, p := range oldPrevPaths { if _, ok := foundFolders[fldID]; ok { continue } - delete(paths, fldID) - prevPath, err := path.FromDataLayerPath(p, false) if err != nil { err = clues.Wrap(err, "invalid previous path").WithClues(ictx).With("deleted_path", p) @@ -446,14 +456,14 @@ func (c *Collections) Get( // empty/missing and default to a full backup. logger.CtxErr(ctx, err).Info("making metadata collection path prefixes") - return collections, canUsePreviousBackup, nil + return collections, canUsePrevBackup, nil } md, err := graph.MakeMetadataCollection( pathPrefix, []graph.MetadataCollectionEntry{ - graph.NewMetadataEntry(bupMD.PreviousPathFileName, folderPaths), - graph.NewMetadataEntry(bupMD.DeltaURLsFileName, deltaURLs), + graph.NewMetadataEntry(bupMD.PreviousPathFileName, driveIDToPrevPaths), + graph.NewMetadataEntry(bupMD.DeltaURLsFileName, driveIDToDeltaLink), }, c.statusUpdater) @@ -466,7 +476,7 @@ func (c *Collections) Get( collections = append(collections, md) } - return collections, canUsePreviousBackup, nil + return collections, canUsePrevBackup, nil } // addURLCacheToDriveCollections adds an URL cache to all collections belonging to @@ -480,7 +490,7 @@ func (c *Collections) addURLCacheToDriveCollections( driveID, prevDelta, urlCacheRefreshInterval, - c.handler.NewItemPager(driveID, "", api.DriveItemSelectURLCache()), + c.handler, errs) if err != nil { return err @@ -536,22 +546,21 @@ func updateCollectionPaths( func (c *Collections) handleDelete( itemID, driveID string, - oldPaths, newPaths map[string]string, + oldPrevPaths, currPrevPaths, newPrevPaths map[string]string, isFolder bool, excluded map[string]struct{}, - itemCollection map[string]map[string]string, invalidPrevDelta bool, ) error { if !isFolder { // Try to remove the item from the Collection if an entry exists for this // item. This handles cases where an item was created and deleted during the // same delta query. 
- if parentID, ok := itemCollection[driveID][itemID]; ok { + if parentID, ok := currPrevPaths[itemID]; ok { if col := c.CollectionMap[driveID][parentID]; col != nil { col.Remove(itemID) } - delete(itemCollection[driveID], itemID) + delete(currPrevPaths, itemID) } // Don't need to add to exclude list if the delta is invalid since the @@ -572,7 +581,7 @@ func (c *Collections) handleDelete( var prevPath path.Path - prevPathStr, ok := oldPaths[itemID] + prevPathStr, ok := oldPrevPaths[itemID] if ok { var err error @@ -589,7 +598,7 @@ func (c *Collections) handleDelete( // Nested folders also return deleted delta results so we don't have to // worry about doing a prefix search in the map to remove the subtree of // the deleted folder/package. - delete(newPaths, itemID) + delete(newPrevPaths, itemID) if prevPath == nil || invalidPrevDelta { // It is possible that an item was created and deleted between two delta @@ -679,21 +688,29 @@ func (c *Collections) getCollectionPath( // UpdateCollections initializes and adds the provided drive items to Collections // A new collection is created for every drive folder (or package). -// oldPaths is the unchanged data that was loaded from the metadata file. -// newPaths starts as a copy of oldPaths and is updated as changes are found in -// the returned results. +// oldPrevPaths is the unchanged data that was loaded from the metadata file. +// This map is not modified during the call. +// currPrevPaths starts as a copy of oldPaths and is updated as changes are found in +// the returned results. Items are added to this collection throughout the call. +// newPrevPaths, ie: the items added during this call, get returned as a map. func (c *Collections) UpdateCollections( ctx context.Context, driveID, driveName string, items []models.DriveItemable, - oldPaths map[string]string, - newPaths map[string]string, + oldPrevPaths map[string]string, + currPrevPaths map[string]string, excluded map[string]struct{}, - itemCollection map[string]map[string]string, invalidPrevDelta bool, errs *fault.Bus, -) error { - el := errs.Local() +) (map[string]string, error) { + var ( + el = errs.Local() + newPrevPaths = map[string]string{} + ) + + if !invalidPrevDelta { + maps.Copy(newPrevPaths, oldPrevPaths) + } for _, item := range items { if el.Failure() != nil { @@ -703,8 +720,12 @@ func (c *Collections) UpdateCollections( var ( itemID = ptr.Val(item.GetId()) itemName = ptr.Val(item.GetName()) - ictx = clues.Add(ctx, "item_id", itemID, "item_name", clues.Hide(itemName)) isFolder = item.GetFolder() != nil || item.GetPackageEscaped() != nil + ictx = clues.Add( + ctx, + "item_id", itemID, + "item_name", clues.Hide(itemName), + "item_is_folder", isFolder) ) if item.GetMalware() != nil { @@ -726,13 +747,13 @@ func (c *Collections) UpdateCollections( if err := c.handleDelete( itemID, driveID, - oldPaths, - newPaths, + oldPrevPaths, + currPrevPaths, + newPrevPaths, isFolder, excluded, - itemCollection, invalidPrevDelta); err != nil { - return clues.Stack(err).WithClues(ictx) + return nil, clues.Stack(err).WithClues(ictx) } continue @@ -758,13 +779,13 @@ func (c *Collections) UpdateCollections( // Deletions are handled above so this is just moves/renames. var prevPath path.Path - prevPathStr, ok := oldPaths[itemID] + prevPathStr, ok := oldPrevPaths[itemID] if ok { prevPath, err = path.FromDataLayerPath(prevPathStr, false) if err != nil { el.AddRecoverable(ctx, clues.Wrap(err, "invalid previous path"). WithClues(ictx). 
- With("path_string", prevPathStr)) + With("prev_path_string", path.LoggableDir(prevPathStr))) } } else if item.GetRoot() != nil { // Root doesn't move or get renamed. @@ -774,11 +795,11 @@ func (c *Collections) UpdateCollections( // Moved folders don't cause delta results for any subfolders nested in // them. We need to go through and update paths to handle that. We only // update newPaths so we don't accidentally clobber previous deletes. - updatePath(newPaths, itemID, collectionPath.String()) + updatePath(newPrevPaths, itemID, collectionPath.String()) found, err := updateCollectionPaths(driveID, itemID, c.CollectionMap, collectionPath) if err != nil { - return clues.Stack(err).WithClues(ictx) + return nil, clues.Stack(err).WithClues(ictx) } if found { @@ -801,7 +822,7 @@ func (c *Collections) UpdateCollections( invalidPrevDelta, nil) if err != nil { - return clues.Stack(err).WithClues(ictx) + return nil, clues.Stack(err).WithClues(ictx) } col.driveName = driveName @@ -823,35 +844,38 @@ func (c *Collections) UpdateCollections( case item.GetFile() != nil: // Deletions are handled above so this is just moves/renames. if len(ptr.Val(item.GetParentReference().GetId())) == 0 { - return clues.New("file without parent ID").WithClues(ictx) + return nil, clues.New("file without parent ID").WithClues(ictx) } // Get the collection for this item. parentID := ptr.Val(item.GetParentReference().GetId()) ictx = clues.Add(ictx, "parent_id", parentID) - collection, found := c.CollectionMap[driveID][parentID] - if !found { - return clues.New("item seen before parent folder").WithClues(ictx) + collection, ok := c.CollectionMap[driveID][parentID] + if !ok { + return nil, clues.New("item seen before parent folder").WithClues(ictx) } - // Delete the file from previous collection. This will - // only kick in if the file was moved multiple times - // within a single delta query - icID, found := itemCollection[driveID][itemID] - if found { - pcollection, found := c.CollectionMap[driveID][icID] + // This will only kick in if the file was moved multiple times + // within a single delta query. We delete the file from the previous + // collection so that it doesn't appear in two places. + prevParentContainerID, ok := currPrevPaths[itemID] + if ok { + prevColl, found := c.CollectionMap[driveID][prevParentContainerID] if !found { - return clues.New("previous collection not found").WithClues(ictx) + return nil, clues.New("previous collection not found"). + With("prev_parent_container_id", prevParentContainerID). + WithClues(ictx) } - removed := pcollection.Remove(itemID) - if !removed { - return clues.New("removing from prev collection").WithClues(ictx) + if ok := prevColl.Remove(itemID); !ok { + return nil, clues.New("removing item from prev collection"). + With("prev_parent_container_id", prevParentContainerID). + WithClues(ictx) } } - itemCollection[driveID][itemID] = parentID + currPrevPaths[itemID] = parentID if collection.Add(item) { c.NumItems++ @@ -872,11 +896,13 @@ func (c *Collections) UpdateCollections( } default: - return clues.New("item type not supported").WithClues(ictx) + el.AddRecoverable(ictx, clues.New("item is neither folder nor file"). + WithClues(ictx). 
+ Label(fault.LabelForceNoBackupCreation)) } } - return el.Failure() + return newPrevPaths, el.Failure() } type dirScopeChecker interface { diff --git a/src/internal/m365/collection/drive/collections_test.go b/src/internal/m365/collection/drive/collections_test.go index 88a8f9a62..1b50d074a 100644 --- a/src/internal/m365/collection/drive/collections_test.go +++ b/src/internal/m365/collection/drive/collections_test.go @@ -8,7 +8,6 @@ import ( "github.com/alcionai/clues" "github.com/google/uuid" "github.com/microsoftgraph/msgraph-sdk-go/models" - "github.com/microsoftgraph/msgraph-sdk-go/models/odataerrors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" @@ -136,7 +135,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedStatePath := getExpectedStatePathGenerator(suite.T(), bh, tenant, testBaseDrivePath) tests := []struct { - testCase string + name string items []models.DriveItemable inputFolderMap map[string]string scope selectors.OneDriveScope @@ -146,11 +145,11 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedContainerCount int expectedFileCount int expectedSkippedCount int - expectedMetadataPaths map[string]string + expectedPrevPaths map[string]string expectedExcludes map[string]struct{} }{ { - testCase: "Invalid item", + name: "Invalid item", items: []models.DriveItemable{ driveRootItem("root"), driveItem("item", "item", testBaseDrivePath, "root", false, false, false), @@ -162,13 +161,13 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { "root": expectedStatePath(data.NotMovedState, ""), }, expectedContainerCount: 1, - expectedMetadataPaths: map[string]string{ + expectedPrevPaths: map[string]string{ "root": expectedPath(""), }, expectedExcludes: map[string]struct{}{}, }, { - testCase: "Single File", + name: "Single File", items: []models.DriveItemable{ driveRootItem("root"), driveItem("file", "file", testBaseDrivePath, "root", true, false, false), @@ -183,13 +182,13 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedFileCount: 1, expectedContainerCount: 1, // Root folder is skipped since it's always present. 
- expectedMetadataPaths: map[string]string{ + expectedPrevPaths: map[string]string{ "root": expectedPath(""), }, expectedExcludes: getDelList("file"), }, { - testCase: "Single Folder", + name: "Single Folder", items: []models.DriveItemable{ driveRootItem("root"), driveItem("folder", "folder", testBaseDrivePath, "root", false, true, false), @@ -201,7 +200,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { "root": expectedStatePath(data.NotMovedState, ""), "folder": expectedStatePath(data.NewState, folder), }, - expectedMetadataPaths: map[string]string{ + expectedPrevPaths: map[string]string{ "root": expectedPath(""), "folder": expectedPath("/folder"), }, @@ -210,7 +209,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedExcludes: map[string]struct{}{}, }, { - testCase: "Single Package", + name: "Single Package", items: []models.DriveItemable{ driveRootItem("root"), driveItem("package", "package", testBaseDrivePath, "root", false, false, true), @@ -222,7 +221,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { "root": expectedStatePath(data.NotMovedState, ""), "package": expectedStatePath(data.NewState, pkg), }, - expectedMetadataPaths: map[string]string{ + expectedPrevPaths: map[string]string{ "root": expectedPath(""), "package": expectedPath("/package"), }, @@ -231,7 +230,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedExcludes: map[string]struct{}{}, }, { - testCase: "1 root file, 1 folder, 1 package, 2 files, 3 collections", + name: "1 root file, 1 folder, 1 package, 2 files, 3 collections", items: []models.DriveItemable{ driveRootItem("root"), driveItem("fileInRoot", "fileInRoot", testBaseDrivePath, "root", true, false, false), @@ -251,7 +250,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedItemCount: 5, expectedFileCount: 3, expectedContainerCount: 3, - expectedMetadataPaths: map[string]string{ + expectedPrevPaths: map[string]string{ "root": expectedPath(""), "folder": expectedPath("/folder"), "package": expectedPath("/package"), @@ -259,7 +258,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedExcludes: getDelList("fileInRoot", "fileInFolder", "fileInPackage"), }, { - testCase: "contains folder selector", + name: "contains folder selector", items: []models.DriveItemable{ driveRootItem("root"), driveItem("fileInRoot", "fileInRoot", testBaseDrivePath, "root", true, false, false), @@ -284,7 +283,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedContainerCount: 3, // just "folder" isn't added here because the include check is done on the // parent path since we only check later if something is a folder or not. 
- expectedMetadataPaths: map[string]string{ + expectedPrevPaths: map[string]string{ "folder": expectedPath(folder), "subfolder": expectedPath(folderSub), "folder2": expectedPath(folderSub + folder), @@ -292,7 +291,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedExcludes: getDelList("fileInFolder", "fileInFolder2"), }, { - testCase: "prefix subfolder selector", + name: "prefix subfolder selector", items: []models.DriveItemable{ driveRootItem("root"), driveItem("fileInRoot", "fileInRoot", testBaseDrivePath, "root", true, false, false), @@ -315,14 +314,14 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedItemCount: 3, expectedFileCount: 1, expectedContainerCount: 2, - expectedMetadataPaths: map[string]string{ + expectedPrevPaths: map[string]string{ "subfolder": expectedPath(folderSub), "folder2": expectedPath(folderSub + folder), }, expectedExcludes: getDelList("fileInFolder2"), }, { - testCase: "match subfolder selector", + name: "match subfolder selector", items: []models.DriveItemable{ driveRootItem("root"), driveItem("fileInRoot", "fileInRoot", testBaseDrivePath, "root", true, false, false), @@ -343,13 +342,13 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedFileCount: 1, expectedContainerCount: 1, // No child folders for subfolder so nothing here. - expectedMetadataPaths: map[string]string{ + expectedPrevPaths: map[string]string{ "subfolder": expectedPath(folderSub), }, expectedExcludes: getDelList("fileInSubfolder"), }, { - testCase: "not moved folder tree", + name: "not moved folder tree", items: []models.DriveItemable{ driveRootItem("root"), driveItem("folder", "folder", testBaseDrivePath, "root", false, true, false), @@ -367,7 +366,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedItemCount: 1, expectedFileCount: 0, expectedContainerCount: 2, - expectedMetadataPaths: map[string]string{ + expectedPrevPaths: map[string]string{ "root": expectedPath(""), "folder": expectedPath(folder), "subfolder": expectedPath(folderSub), @@ -375,7 +374,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedExcludes: map[string]struct{}{}, }, { - testCase: "moved folder tree", + name: "moved folder tree", items: []models.DriveItemable{ driveRootItem("root"), driveItem("folder", "folder", testBaseDrivePath, "root", false, true, false), @@ -393,7 +392,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedItemCount: 1, expectedFileCount: 0, expectedContainerCount: 2, - expectedMetadataPaths: map[string]string{ + expectedPrevPaths: map[string]string{ "root": expectedPath(""), "folder": expectedPath(folder), "subfolder": expectedPath(folderSub), @@ -401,7 +400,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedExcludes: map[string]struct{}{}, }, { - testCase: "moved folder tree with file no previous", + name: "moved folder tree with file no previous", items: []models.DriveItemable{ driveRootItem("root"), driveItem("folder", "folder", testBaseDrivePath, "root", false, true, false), @@ -418,14 +417,14 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedItemCount: 2, expectedFileCount: 1, expectedContainerCount: 2, - expectedMetadataPaths: map[string]string{ + expectedPrevPaths: map[string]string{ "root": expectedPath(""), "folder": expectedPath("/folder2"), }, expectedExcludes: getDelList("file"), }, { - testCase: "moved folder tree with file no previous 1", + name: "moved 
folder tree with file no previous 1", items: []models.DriveItemable{ driveRootItem("root"), driveItem("folder", "folder", testBaseDrivePath, "root", false, true, false), @@ -441,14 +440,14 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedItemCount: 2, expectedFileCount: 1, expectedContainerCount: 2, - expectedMetadataPaths: map[string]string{ + expectedPrevPaths: map[string]string{ "root": expectedPath(""), "folder": expectedPath(folder), }, expectedExcludes: getDelList("file"), }, { - testCase: "moved folder tree and subfolder 1", + name: "moved folder tree and subfolder 1", items: []models.DriveItemable{ driveRootItem("root"), driveItem("folder", "folder", testBaseDrivePath, "root", false, true, false), @@ -468,7 +467,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedItemCount: 2, expectedFileCount: 0, expectedContainerCount: 3, - expectedMetadataPaths: map[string]string{ + expectedPrevPaths: map[string]string{ "root": expectedPath(""), "folder": expectedPath(folder), "subfolder": expectedPath("/subfolder"), @@ -476,7 +475,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedExcludes: map[string]struct{}{}, }, { - testCase: "moved folder tree and subfolder 2", + name: "moved folder tree and subfolder 2", items: []models.DriveItemable{ driveRootItem("root"), driveItem("subfolder", "subfolder", testBaseDrivePath, "root", false, true, false), @@ -496,7 +495,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedItemCount: 2, expectedFileCount: 0, expectedContainerCount: 3, - expectedMetadataPaths: map[string]string{ + expectedPrevPaths: map[string]string{ "root": expectedPath(""), "folder": expectedPath(folder), "subfolder": expectedPath("/subfolder"), @@ -504,7 +503,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedExcludes: map[string]struct{}{}, }, { - testCase: "move subfolder when moving parent", + name: "move subfolder when moving parent", items: []models.DriveItemable{ driveRootItem("root"), driveItem("folder2", "folder2", testBaseDrivePath, "root", false, true, false), @@ -538,7 +537,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedItemCount: 5, expectedFileCount: 2, expectedContainerCount: 4, - expectedMetadataPaths: map[string]string{ + expectedPrevPaths: map[string]string{ "root": expectedPath(""), "folder": expectedPath("/folder"), "folder2": expectedPath("/folder2"), @@ -547,7 +546,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedExcludes: getDelList("itemInSubfolder", "itemInFolder2"), }, { - testCase: "moved folder tree multiple times", + name: "moved folder tree multiple times", items: []models.DriveItemable{ driveRootItem("root"), driveItem("folder", "folder", testBaseDrivePath, "root", false, true, false), @@ -567,7 +566,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedItemCount: 2, expectedFileCount: 1, expectedContainerCount: 2, - expectedMetadataPaths: map[string]string{ + expectedPrevPaths: map[string]string{ "root": expectedPath(""), "folder": expectedPath("/folder2"), "subfolder": expectedPath("/folder2/subfolder"), @@ -575,7 +574,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedExcludes: getDelList("file"), }, { - testCase: "deleted folder and package", + name: "deleted folder and package", items: []models.DriveItemable{ driveRootItem("root"), // root is always present, but not necessary 
here delItem("folder", testBaseDrivePath, "root", false, true, false), @@ -596,13 +595,13 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedItemCount: 0, expectedFileCount: 0, expectedContainerCount: 1, - expectedMetadataPaths: map[string]string{ + expectedPrevPaths: map[string]string{ "root": expectedPath(""), }, expectedExcludes: map[string]struct{}{}, }, { - testCase: "delete folder without previous", + name: "delete folder without previous", items: []models.DriveItemable{ driveRootItem("root"), delItem("folder", testBaseDrivePath, "root", false, true, false), @@ -618,13 +617,13 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedItemCount: 0, expectedFileCount: 0, expectedContainerCount: 1, - expectedMetadataPaths: map[string]string{ + expectedPrevPaths: map[string]string{ "root": expectedPath(""), }, expectedExcludes: map[string]struct{}{}, }, { - testCase: "delete folder tree move subfolder", + name: "delete folder tree move subfolder", items: []models.DriveItemable{ driveRootItem("root"), delItem("folder", testBaseDrivePath, "root", false, true, false), @@ -645,14 +644,14 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedItemCount: 1, expectedFileCount: 0, expectedContainerCount: 2, - expectedMetadataPaths: map[string]string{ + expectedPrevPaths: map[string]string{ "root": expectedPath(""), "subfolder": expectedPath("/subfolder"), }, expectedExcludes: map[string]struct{}{}, }, { - testCase: "delete file", + name: "delete file", items: []models.DriveItemable{ driveRootItem("root"), delItem("item", testBaseDrivePath, "root", true, false, false), @@ -668,13 +667,13 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedItemCount: 1, expectedFileCount: 1, expectedContainerCount: 1, - expectedMetadataPaths: map[string]string{ + expectedPrevPaths: map[string]string{ "root": expectedPath(""), }, expectedExcludes: getDelList("item"), }, { - testCase: "item before parent errors", + name: "item before parent errors", items: []models.DriveItemable{ driveRootItem("root"), driveItem("file", "file", testBaseDrivePath+"/folder", "folder", true, false, false), @@ -689,13 +688,11 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedItemCount: 0, expectedFileCount: 0, expectedContainerCount: 1, - expectedMetadataPaths: map[string]string{ - "root": expectedPath(""), - }, - expectedExcludes: map[string]struct{}{}, + expectedPrevPaths: nil, + expectedExcludes: map[string]struct{}{}, }, { - testCase: "1 root file, 1 folder, 1 package, 1 good file, 1 malware", + name: "1 root file, 1 folder, 1 package, 1 good file, 1 malware", items: []models.DriveItemable{ driveRootItem("root"), driveItem("fileInRoot", "fileInRoot", testBaseDrivePath, "root", true, false, false), @@ -716,7 +713,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedFileCount: 2, expectedContainerCount: 3, expectedSkippedCount: 1, - expectedMetadataPaths: map[string]string{ + expectedPrevPaths: map[string]string{ "root": expectedPath(""), "folder": expectedPath("/folder"), "package": expectedPath("/package"), @@ -725,26 +722,23 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { }, } - for _, tt := range tests { - suite.Run(tt.testCase, func() { + for _, test := range tests { + suite.Run(test.name, func() { t := suite.T() ctx, flush := tester.NewContext(t) defer flush() var ( - excludes = map[string]struct{}{} - outputFolderMap = map[string]string{} - 
itemCollection = map[string]map[string]string{ - driveID: {}, - } - errs = fault.New(true) + excludes = map[string]struct{}{} + currPrevPaths = map[string]string{} + errs = fault.New(true) ) - maps.Copy(outputFolderMap, tt.inputFolderMap) + maps.Copy(currPrevPaths, test.inputFolderMap) c := NewCollections( - &itemBackupHandler{api.Drives{}, user, tt.scope}, + &itemBackupHandler{api.Drives{}, user, test.scope}, tenant, user, nil, @@ -752,25 +746,24 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { c.CollectionMap[driveID] = map[string]*Collection{} - err := c.UpdateCollections( + newPrevPaths, err := c.UpdateCollections( ctx, driveID, "General", - tt.items, - tt.inputFolderMap, - outputFolderMap, + test.items, + test.inputFolderMap, + currPrevPaths, excludes, - itemCollection, false, errs) - tt.expect(t, err, clues.ToCore(err)) - assert.Equal(t, len(tt.expectedCollectionIDs), len(c.CollectionMap[driveID]), "total collections") - assert.Equal(t, tt.expectedItemCount, c.NumItems, "item count") - assert.Equal(t, tt.expectedFileCount, c.NumFiles, "file count") - assert.Equal(t, tt.expectedContainerCount, c.NumContainers, "container count") - assert.Equal(t, tt.expectedSkippedCount, len(errs.Skipped()), "skipped items") + test.expect(t, err, clues.ToCore(err)) + assert.Equal(t, len(test.expectedCollectionIDs), len(c.CollectionMap[driveID]), "total collections") + assert.Equal(t, test.expectedItemCount, c.NumItems, "item count") + assert.Equal(t, test.expectedFileCount, c.NumFiles, "file count") + assert.Equal(t, test.expectedContainerCount, c.NumContainers, "container count") + assert.Equal(t, test.expectedSkippedCount, len(errs.Skipped()), "skipped items") - for id, sp := range tt.expectedCollectionIDs { + for id, sp := range test.expectedCollectionIDs { if !assert.Containsf(t, c.CollectionMap[driveID], id, "missing collection with id %s", id) { // Skip collections we don't find so we don't get an NPE. 
continue @@ -781,8 +774,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { assert.Equalf(t, sp.prevPath, c.CollectionMap[driveID][id].PreviousPath(), "prev path for collection %s", id) } - assert.Equal(t, tt.expectedMetadataPaths, outputFolderMap, "metadata paths") - assert.Equal(t, tt.expectedExcludes, excludes, "exclude list") + assert.Equal(t, test.expectedPrevPaths, newPrevPaths, "metadata paths") + assert.Equal(t, test.expectedExcludes, excludes, "exclude list") }) } } @@ -1300,7 +1293,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { driveItem("folder", "folder", driveBasePath1, "root", false, true, false), driveItem("file", "file", driveBasePath1+"/folder", "folder", true, false, false), }, - DeltaLink: &delta, + DeltaLink: &delta, + ResetDelta: true, }, }, }, @@ -1338,7 +1332,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { driveItem("file", "file", driveBasePath1+"/folder", "folder", true, false, false), driveItem("file", "file2", driveBasePath1+"/folder", "folder", true, false, false), }, - DeltaLink: &delta, + DeltaLink: &delta, + ResetDelta: true, }, }, }, @@ -1415,7 +1410,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { driveItem("folder", "folder", driveBasePath1, "root", false, true, false), driveItem("file", "file", driveBasePath1+"/folder", "folder", true, false, false), }, - DeltaLink: &empty, // probably will never happen with graph + DeltaLink: &empty, // probably will never happen with graph + ResetDelta: true, }, }, }, @@ -1452,7 +1448,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { driveItem("folder", "folder", driveBasePath1, "root", false, true, false), driveItem("file", "file", driveBasePath1+"/folder", "folder", true, false, false), }, - NextLink: &next, + NextLink: &next, + ResetDelta: true, }, { Values: []models.DriveItemable{ @@ -1460,7 +1457,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { driveItem("folder", "folder", driveBasePath1, "root", false, true, false), driveItem("file2", "file2", driveBasePath1+"/folder", "folder", true, false, false), }, - DeltaLink: &delta, + DeltaLink: &delta, + ResetDelta: true, }, }, }, @@ -1502,7 +1500,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { driveItem("folder", "folder", driveBasePath1, "root", false, true, false), driveItem("file", "file", driveBasePath1+"/folder", "folder", true, false, false), }, - DeltaLink: &delta, + DeltaLink: &delta, + ResetDelta: true, }, }, driveID2: { @@ -1512,7 +1511,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { driveItem("folder2", "folder", driveBasePath2, "root2", false, true, false), driveItem("file2", "file", driveBasePath2+"/folder", "folder2", true, false, false), }, - DeltaLink: &delta2, + DeltaLink: &delta2, + ResetDelta: true, }, }, }, @@ -1564,7 +1564,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { driveItem("folder", "folder", driveBasePath1, "root", false, true, false), driveItem("file", "file", driveBasePath1+"/folder", "folder", true, false, false), }, - DeltaLink: &delta, + DeltaLink: &delta, + ResetDelta: true, }, }, driveID2: { @@ -1574,7 +1575,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { driveItem("folder", "folder", driveBasePath2, "root", false, true, false), driveItem("file2", "file", driveBasePath2+"/folder", "folder", true, false, false), }, - DeltaLink: &delta2, + DeltaLink: &delta2, + ResetDelta: true, }, }, }, @@ -1632,87 +1634,6 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { expectedFolderPaths: nil, expectedDelList: nil, 
}, - { - name: "OneDrive_OneItemPage_DeltaError", - drives: []models.Driveable{drive1}, - items: map[string][]apiMock.PagerResult[models.DriveItemable]{ - driveID1: { - { - Err: getDeltaError(), - }, - { - Values: []models.DriveItemable{ - driveRootItem("root"), - driveItem("file", "file", driveBasePath1, "root", true, false, false), - }, - DeltaLink: &delta, - }, - }, - }, - canUsePreviousBackup: true, - errCheck: assert.NoError, - expectedCollections: map[string]map[data.CollectionState][]string{ - rootFolderPath1: {data.NotMovedState: {"file"}}, - }, - expectedDeltaURLs: map[string]string{ - driveID1: delta, - }, - expectedFolderPaths: map[string]map[string]string{ - driveID1: { - "root": rootFolderPath1, - }, - }, - expectedDelList: pmMock.NewPrefixMap(map[string]map[string]struct{}{}), - doNotMergeItems: map[string]bool{ - rootFolderPath1: true, - }, - }, - { - name: "OneDrive_TwoItemPage_DeltaError", - drives: []models.Driveable{drive1}, - items: map[string][]apiMock.PagerResult[models.DriveItemable]{ - driveID1: { - { - Err: getDeltaError(), - }, - { - Values: []models.DriveItemable{ - driveRootItem("root"), - driveItem("file", "file", driveBasePath1, "root", true, false, false), - }, - NextLink: &next, - }, - { - Values: []models.DriveItemable{ - driveRootItem("root"), - driveItem("folder", "folder", driveBasePath1, "root", false, true, false), - driveItem("file2", "file", driveBasePath1+"/folder", "folder", true, false, false), - }, - DeltaLink: &delta, - }, - }, - }, - canUsePreviousBackup: true, - errCheck: assert.NoError, - expectedCollections: map[string]map[data.CollectionState][]string{ - rootFolderPath1: {data.NotMovedState: {"file"}}, - expectedPath1("/folder"): {data.NewState: {"folder", "file2"}}, - }, - expectedDeltaURLs: map[string]string{ - driveID1: delta, - }, - expectedFolderPaths: map[string]map[string]string{ - driveID1: { - "root": rootFolderPath1, - "folder": folderPath1, - }, - }, - expectedDelList: pmMock.NewPrefixMap(map[string]map[string]struct{}{}), - doNotMergeItems: map[string]bool{ - rootFolderPath1: true, - folderPath1: true, - }, - }, { name: "OneDrive_TwoItemPage_NoDeltaError", drives: []models.Driveable{drive1}, @@ -1765,16 +1686,14 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { drives: []models.Driveable{drive1}, items: map[string][]apiMock.PagerResult[models.DriveItemable]{ driveID1: { - { - Err: getDeltaError(), - }, { Values: []models.DriveItemable{ driveRootItem("root"), driveItem("folder2", "folder2", driveBasePath1, "root", false, true, false), driveItem("file", "file", driveBasePath1+"/folder2", "folder2", true, false, false), }, - DeltaLink: &delta, + DeltaLink: &delta, + ResetDelta: true, }, }, }, @@ -1812,16 +1731,14 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { drives: []models.Driveable{drive1}, items: map[string][]apiMock.PagerResult[models.DriveItemable]{ driveID1: { - { - Err: getDeltaError(), - }, { Values: []models.DriveItemable{ driveRootItem("root"), driveItem("folder2", "folder", driveBasePath1, "root", false, true, false), driveItem("file", "file", driveBasePath1+"/folder", "folder2", true, false, false), }, - DeltaLink: &delta, + DeltaLink: &delta, + ResetDelta: true, }, }, }, @@ -1878,7 +1795,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { driveItem("file2", "file2", driveBasePath1+"/folder", "folder", true, false, false), malwareItem("malware2", "malware2", driveBasePath1+"/folder", "folder", true, false, false), }, - DeltaLink: &delta, + DeltaLink: &delta, + ResetDelta: true, }, }, }, @@ 
-1908,13 +1826,10 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { expectedSkippedCount: 2, }, { - name: "One Drive Delta Error Deleted Folder In New Results", + name: "One Drive Deleted Folder In New Results", drives: []models.Driveable{drive1}, items: map[string][]apiMock.PagerResult[models.DriveItemable]{ driveID1: { - { - Err: getDeltaError(), - }, { Values: []models.DriveItemable{ driveRootItem("root"), @@ -1931,7 +1846,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { delItem("folder2", driveBasePath1, "root", false, true, false), delItem("file2", driveBasePath1, "root", true, false, false), }, - DeltaLink: &delta2, + DeltaLink: &delta2, + ResetDelta: true, }, }, }, @@ -1966,19 +1882,17 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { }, }, { - name: "One Drive Delta Error Random Folder Delete", + name: "One Drive Random Folder Delete", drives: []models.Driveable{drive1}, items: map[string][]apiMock.PagerResult[models.DriveItemable]{ driveID1: { - { - Err: getDeltaError(), - }, { Values: []models.DriveItemable{ driveRootItem("root"), delItem("folder", driveBasePath1, "root", false, true, false), }, - DeltaLink: &delta, + DeltaLink: &delta, + ResetDelta: true, }, }, }, @@ -2009,19 +1923,17 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { }, }, { - name: "One Drive Delta Error Random Item Delete", + name: "One Drive Random Item Delete", drives: []models.Driveable{drive1}, items: map[string][]apiMock.PagerResult[models.DriveItemable]{ driveID1: { - { - Err: getDeltaError(), - }, { Values: []models.DriveItemable{ driveRootItem("root"), delItem("file", driveBasePath1, "root", true, false, false), }, - DeltaLink: &delta, + DeltaLink: &delta, + ResetDelta: true, }, }, }, @@ -2067,7 +1979,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { delItem("folder", driveBasePath1, "root", false, true, false), delItem("file", driveBasePath1, "root", true, false, false), }, - DeltaLink: &delta2, + DeltaLink: &delta2, + ResetDelta: true, }, }, }, @@ -2110,7 +2023,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { driveRootItem("root"), delItem("file", driveBasePath1, "root", true, false, false), }, - DeltaLink: &delta, + DeltaLink: &delta, + ResetDelta: true, }, }, }, @@ -2148,7 +2062,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { driveRootItem("root"), delItem("folder", driveBasePath1, "root", false, true, false), }, - DeltaLink: &delta, + DeltaLink: &delta, + ResetDelta: true, }, }, }, @@ -2183,7 +2098,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { driveRootItem("root"), delItem("file", driveBasePath1, "root", true, false, false), }, - DeltaLink: &delta, + DeltaLink: &delta, + ResetDelta: true, }, }, }, @@ -2265,6 +2181,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { mbh := mock.DefaultOneDriveBH("a-user") mbh.DrivePagerV = mockDrivePager mbh.ItemPagerV = itemPagers + mbh.DriveItemEnumeration = mock.PagerResultToEDID(test.items) c := NewCollections( mbh, @@ -2491,121 +2408,6 @@ func delItem( return item } -func getDeltaError() error { - syncStateNotFound := "SyncStateNotFound" - me := odataerrors.NewMainError() - me.SetCode(&syncStateNotFound) - - deltaError := odataerrors.NewODataError() - deltaError.SetErrorEscaped(me) - - return deltaError -} - -func (suite *OneDriveCollectionsUnitSuite) TestCollectItems() { - next := "next" - delta := "delta" - prevDelta := "prev-delta" - - table := []struct { - name string - items []apiMock.PagerResult[models.DriveItemable] - deltaURL string - prevDeltaSuccess bool - 
prevDelta string - err error - }{ - { - name: "delta on first run", - deltaURL: delta, - items: []apiMock.PagerResult[models.DriveItemable]{ - {DeltaLink: &delta}, - }, - prevDeltaSuccess: true, - prevDelta: prevDelta, - }, - { - name: "empty prev delta", - deltaURL: delta, - items: []apiMock.PagerResult[models.DriveItemable]{ - {DeltaLink: &delta}, - }, - prevDeltaSuccess: false, - prevDelta: "", - }, - { - name: "next then delta", - deltaURL: delta, - items: []apiMock.PagerResult[models.DriveItemable]{ - {NextLink: &next}, - {DeltaLink: &delta}, - }, - prevDeltaSuccess: true, - prevDelta: prevDelta, - }, - { - name: "invalid prev delta", - deltaURL: delta, - items: []apiMock.PagerResult[models.DriveItemable]{ - {Err: getDeltaError()}, - {DeltaLink: &delta}, // works on retry - }, - prevDelta: prevDelta, - prevDeltaSuccess: false, - }, - { - name: "fail a normal delta query", - items: []apiMock.PagerResult[models.DriveItemable]{ - {NextLink: &next}, - {Err: assert.AnError}, - }, - prevDelta: prevDelta, - prevDeltaSuccess: true, - err: assert.AnError, - }, - } - for _, test := range table { - suite.Run(test.name, func() { - t := suite.T() - - ctx, flush := tester.NewContext(t) - defer flush() - - itemPager := &apiMock.DeltaPager[models.DriveItemable]{ - ToReturn: test.items, - } - - collectorFunc := func( - ctx context.Context, - driveID, driveName string, - driveItems []models.DriveItemable, - oldPaths map[string]string, - newPaths map[string]string, - excluded map[string]struct{}, - itemCollection map[string]map[string]string, - doNotMergeItems bool, - errs *fault.Bus, - ) error { - return nil - } - - delta, _, _, err := collectItems( - ctx, - itemPager, - "", - "General", - collectorFunc, - map[string]string{}, - test.prevDelta, - fault.New(true)) - - require.ErrorIs(t, err, test.err, "delta fetch err", clues.ToCore(err)) - require.Equal(t, test.deltaURL, delta.URL, "delta url") - require.Equal(t, !test.prevDeltaSuccess, delta.Reset, "delta reset") - }) - } -} - func (suite *OneDriveCollectionsUnitSuite) TestAddURLCacheToDriveCollections() { driveID := "test-drive" collCount := 3 diff --git a/src/internal/m365/collection/drive/handlers.go b/src/internal/m365/collection/drive/handlers.go index 7b0064546..d341cb1ba 100644 --- a/src/internal/m365/collection/drive/handlers.go +++ b/src/internal/m365/collection/drive/handlers.go @@ -36,6 +36,7 @@ type BackupHandler interface { GetItemPermissioner GetItemer NewDrivePagerer + EnumerateDriveItemsDeltaer // PathPrefix constructs the service and category specific path prefix for // the given values. @@ -50,7 +51,7 @@ type BackupHandler interface { // ServiceCat returns the service and category used by this implementation. ServiceCat() (path.ServiceType, path.CategoryType) - NewItemPager(driveID, link string, fields []string) api.DeltaPager[models.DriveItemable] + // FormatDisplayPath creates a human-readable string to represent the // provided path. 
FormatDisplayPath(driveName string, parentPath *path.Builder) string @@ -79,6 +80,17 @@ type GetItemer interface { ) (models.DriveItemable, error) } +type EnumerateDriveItemsDeltaer interface { + EnumerateDriveItemsDelta( + ctx context.Context, + driveID, prevDeltaLink string, + ) ( + []models.DriveItemable, + api.DeltaUpdate, + error, + ) +} + // --------------------------------------------------------------------------- // restore // --------------------------------------------------------------------------- diff --git a/src/internal/m365/collection/drive/item_collector.go b/src/internal/m365/collection/drive/item_collector.go deleted file mode 100644 index b2ff41831..000000000 --- a/src/internal/m365/collection/drive/item_collector.go +++ /dev/null @@ -1,142 +0,0 @@ -package drive - -import ( - "context" - - "github.com/microsoftgraph/msgraph-sdk-go/models" - "golang.org/x/exp/maps" - - "github.com/alcionai/corso/src/internal/m365/graph" - "github.com/alcionai/corso/src/pkg/fault" - "github.com/alcionai/corso/src/pkg/logger" - "github.com/alcionai/corso/src/pkg/services/m365/api" -) - -// DeltaUpdate holds the results of a current delta token. It normally -// gets produced when aggregating the addition and removal of items in -// a delta-queryable folder. -// FIXME: This is same as exchange.api.DeltaUpdate -type DeltaUpdate struct { - // the deltaLink itself - URL string - // true if the old delta was marked as invalid - Reset bool -} - -// itemCollector functions collect the items found in a drive -type itemCollector func( - ctx context.Context, - driveID, driveName string, - driveItems []models.DriveItemable, - oldPaths map[string]string, - newPaths map[string]string, - excluded map[string]struct{}, - itemCollections map[string]map[string]string, - validPrevDelta bool, - errs *fault.Bus, -) error - -// collectItems will enumerate all items in the specified drive and hand them to the -// provided `collector` method -func collectItems( - ctx context.Context, - pager api.DeltaPager[models.DriveItemable], - driveID, driveName string, - collector itemCollector, - oldPaths map[string]string, - prevDelta string, - errs *fault.Bus, -) ( - DeltaUpdate, - map[string]string, // newPaths - map[string]struct{}, // excluded - error, -) { - var ( - newDeltaURL = "" - newPaths = map[string]string{} - excluded = map[string]struct{}{} - invalidPrevDelta = len(prevDelta) == 0 - - // itemCollection is used to identify which collection a - // file belongs to. 
This is useful to delete a file from the - // collection it was previously in, in case it was moved to a - // different collection within the same delta query - // drive ID -> item ID -> item ID - itemCollection = map[string]map[string]string{ - driveID: {}, - } - ) - - if !invalidPrevDelta { - maps.Copy(newPaths, oldPaths) - pager.SetNextLink(prevDelta) - } - - for { - // assume delta urls here, which allows single-token consumption - page, err := pager.GetPage(graph.ConsumeNTokens(ctx, graph.SingleGetOrDeltaLC)) - - if graph.IsErrInvalidDelta(err) { - logger.Ctx(ctx).Infow("Invalid previous delta link", "link", prevDelta) - - invalidPrevDelta = true - newPaths = map[string]string{} - - pager.Reset(ctx) - - continue - } - - if err != nil { - return DeltaUpdate{}, nil, nil, graph.Wrap(ctx, err, "getting page") - } - - vals := page.GetValue() - - err = collector( - ctx, - driveID, - driveName, - vals, - oldPaths, - newPaths, - excluded, - itemCollection, - invalidPrevDelta, - errs) - if err != nil { - return DeltaUpdate{}, nil, nil, err - } - - nextLink, deltaLink := api.NextAndDeltaLink(page) - - if len(deltaLink) > 0 { - newDeltaURL = deltaLink - } - - // Check if there are more items - if len(nextLink) == 0 { - break - } - - logger.Ctx(ctx).Debugw("Found nextLink", "link", nextLink) - pager.SetNextLink(nextLink) - } - - return DeltaUpdate{URL: newDeltaURL, Reset: invalidPrevDelta}, newPaths, excluded, nil -} - -// newItem initializes a `models.DriveItemable` that can be used as input to `createItem` -func newItem(name string, folder bool) *models.DriveItem { - itemToCreate := models.NewDriveItem() - itemToCreate.SetName(&name) - - if folder { - itemToCreate.SetFolder(models.NewFolder()) - } else { - itemToCreate.SetFile(models.NewFile()) - } - - return itemToCreate -} diff --git a/src/internal/m365/collection/drive/item_handler.go b/src/internal/m365/collection/drive/item_handler.go index 4a62f35e3..5f48d313e 100644 --- a/src/internal/m365/collection/drive/item_handler.go +++ b/src/internal/m365/collection/drive/item_handler.go @@ -87,13 +87,6 @@ func (h itemBackupHandler) NewDrivePager( return h.ac.NewUserDrivePager(resourceOwner, fields) } -func (h itemBackupHandler) NewItemPager( - driveID, link string, - fields []string, -) api.DeltaPager[models.DriveItemable] { - return h.ac.NewDriveItemDeltaPager(driveID, link, fields) -} - func (h itemBackupHandler) AugmentItemInfo( dii details.ItemInfo, item models.DriveItemable, @@ -139,6 +132,13 @@ func (h itemBackupHandler) IncludesDir(dir string) bool { return h.scope.Matches(selectors.OneDriveFolder, dir) } +func (h itemBackupHandler) EnumerateDriveItemsDelta( + ctx context.Context, + driveID, prevDeltaLink string, +) ([]models.DriveItemable, api.DeltaUpdate, error) { + return h.ac.EnumerateDriveItemsDelta(ctx, driveID, prevDeltaLink) +} + // --------------------------------------------------------------------------- // Restore // --------------------------------------------------------------------------- diff --git a/src/internal/m365/collection/drive/item_test.go b/src/internal/m365/collection/drive/item_test.go index 05dcf9e5a..aaf6362db 100644 --- a/src/internal/m365/collection/drive/item_test.go +++ b/src/internal/m365/collection/drive/item_test.go @@ -20,8 +20,6 @@ import ( "github.com/alcionai/corso/src/internal/tester/tconfig" "github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/control/testdata" - "github.com/alcionai/corso/src/pkg/fault" - "github.com/alcionai/corso/src/pkg/selectors" 
"github.com/alcionai/corso/src/pkg/services/m365/api" ) @@ -60,83 +58,6 @@ func (suite *ItemIntegrationSuite) SetupSuite() { suite.userDriveID = ptr.Val(odDrives[0].GetId()) } -// TestItemReader is an integration test that makes a few assumptions -// about the test environment -// 1) It assumes the test user has a drive -// 2) It assumes the drive has a file it can use to test `driveItemReader` -// The test checks these in below -func (suite *ItemIntegrationSuite) TestItemReader_oneDrive() { - t := suite.T() - - ctx, flush := tester.NewContext(t) - defer flush() - - var driveItem models.DriveItemable - // This item collector tries to find "a" drive item that is a non-empty - // file to test the reader function - itemCollector := func( - _ context.Context, - _, _ string, - items []models.DriveItemable, - _ map[string]string, - _ map[string]string, - _ map[string]struct{}, - _ map[string]map[string]string, - _ bool, - _ *fault.Bus, - ) error { - if driveItem != nil { - return nil - } - - for _, item := range items { - if item.GetFile() != nil && ptr.Val(item.GetSize()) > 0 { - driveItem = item - break - } - } - - return nil - } - - ip := suite.service.ac. - Drives(). - NewDriveItemDeltaPager(suite.userDriveID, "", api.DriveItemSelectDefault()) - - _, _, _, err := collectItems( - ctx, - ip, - suite.userDriveID, - "General", - itemCollector, - map[string]string{}, - "", - fault.New(true)) - require.NoError(t, err, clues.ToCore(err)) - - // Test Requirement 2: Need a file - require.NotEmpty( - t, - driveItem, - "no file item found for user %s drive %s", - suite.user, - suite.userDriveID) - - bh := itemBackupHandler{ - suite.service.ac.Drives(), - suite.user, - (&selectors.OneDriveBackup{}).Folders(selectors.Any())[0], - } - - // Read data for the file - itemData, err := downloadItem(ctx, bh, driveItem) - require.NoError(t, err, clues.ToCore(err)) - - size, err := io.Copy(io.Discard, itemData) - require.NoError(t, err, clues.ToCore(err)) - require.NotZero(t, size) -} - // TestItemWriter is an integration test for uploading data to OneDrive // It creates a new folder with a new item and writes data to it func (suite *ItemIntegrationSuite) TestItemWriter() { @@ -171,7 +92,7 @@ func (suite *ItemIntegrationSuite) TestItemWriter() { ctx, test.driveID, ptr.Val(root.GetId()), - newItem(newFolderName, true), + api.NewDriveItem(newFolderName, true), control.Copy) require.NoError(t, err, clues.ToCore(err)) require.NotNil(t, newFolder.GetId()) @@ -183,7 +104,7 @@ func (suite *ItemIntegrationSuite) TestItemWriter() { ctx, test.driveID, ptr.Val(newFolder.GetId()), - newItem(newItemName, false), + api.NewDriveItem(newItemName, false), control.Copy) require.NoError(t, err, clues.ToCore(err)) require.NotNil(t, newItem.GetId()) @@ -317,7 +238,7 @@ func (suite *ItemUnitTestSuite) TestDownloadItem() { { name: "success", itemFunc: func() models.DriveItemable { - di := newItem("test", false) + di := api.NewDriveItem("test", false) di.SetAdditionalData(map[string]any{ "@microsoft.graph.downloadUrl": url, }) @@ -336,7 +257,7 @@ func (suite *ItemUnitTestSuite) TestDownloadItem() { { name: "success, content url set instead of download url", itemFunc: func() models.DriveItemable { - di := newItem("test", false) + di := api.NewDriveItem("test", false) di.SetAdditionalData(map[string]any{ "@content.downloadUrl": url, }) @@ -355,7 +276,7 @@ func (suite *ItemUnitTestSuite) TestDownloadItem() { { name: "api getter returns error", itemFunc: func() models.DriveItemable { - di := newItem("test", false) + di := 
api.NewDriveItem("test", false) di.SetAdditionalData(map[string]any{ "@microsoft.graph.downloadUrl": url, }) @@ -371,7 +292,7 @@ func (suite *ItemUnitTestSuite) TestDownloadItem() { { name: "download url is empty", itemFunc: func() models.DriveItemable { - di := newItem("test", false) + di := api.NewDriveItem("test", false) return di }, GetFunc: func(ctx context.Context, url string) (*http.Response, error) { @@ -386,7 +307,7 @@ func (suite *ItemUnitTestSuite) TestDownloadItem() { { name: "malware", itemFunc: func() models.DriveItemable { - di := newItem("test", false) + di := api.NewDriveItem("test", false) di.SetAdditionalData(map[string]any{ "@microsoft.graph.downloadUrl": url, }) @@ -408,7 +329,7 @@ func (suite *ItemUnitTestSuite) TestDownloadItem() { { name: "non-2xx http response", itemFunc: func() models.DriveItemable { - di := newItem("test", false) + di := api.NewDriveItem("test", false) di.SetAdditionalData(map[string]any{ "@microsoft.graph.downloadUrl": url, }) @@ -457,7 +378,7 @@ func (suite *ItemUnitTestSuite) TestDownloadItem_ConnectionResetErrorOnFirstRead url = "https://example.com" itemFunc = func() models.DriveItemable { - di := newItem("test", false) + di := api.NewDriveItem("test", false) di.SetAdditionalData(map[string]any{ "@microsoft.graph.downloadUrl": url, }) diff --git a/src/internal/m365/collection/drive/library_handler.go b/src/internal/m365/collection/drive/library_handler.go index 74ec182d9..e5ee109ec 100644 --- a/src/internal/m365/collection/drive/library_handler.go +++ b/src/internal/m365/collection/drive/library_handler.go @@ -92,13 +92,6 @@ func (h libraryBackupHandler) NewDrivePager( return h.ac.NewSiteDrivePager(resourceOwner, fields) } -func (h libraryBackupHandler) NewItemPager( - driveID, link string, - fields []string, -) api.DeltaPager[models.DriveItemable] { - return h.ac.NewDriveItemDeltaPager(driveID, link, fields) -} - func (h libraryBackupHandler) AugmentItemInfo( dii details.ItemInfo, item models.DriveItemable, @@ -177,6 +170,13 @@ func (h libraryBackupHandler) IncludesDir(dir string) bool { return h.scope.Matches(selectors.SharePointLibraryFolder, dir) } +func (h libraryBackupHandler) EnumerateDriveItemsDelta( + ctx context.Context, + driveID, prevDeltaLink string, +) ([]models.DriveItemable, api.DeltaUpdate, error) { + return h.ac.EnumerateDriveItemsDelta(ctx, driveID, prevDeltaLink) +} + // --------------------------------------------------------------------------- // Restore // --------------------------------------------------------------------------- diff --git a/src/internal/m365/collection/drive/restore.go b/src/internal/m365/collection/drive/restore.go index 7a9017744..4718552d1 100644 --- a/src/internal/m365/collection/drive/restore.go +++ b/src/internal/m365/collection/drive/restore.go @@ -671,7 +671,7 @@ func createFolder( ctx, driveID, parentFolderID, - newItem(folderName, true), + api.NewDriveItem(folderName, true), control.Replace) // ErrItemAlreadyExistsConflict can only occur for folders if the @@ -692,7 +692,7 @@ func createFolder( ctx, driveID, parentFolderID, - newItem(folderName, true), + api.NewDriveItem(folderName, true), control.Copy) if err != nil { return nil, clues.Wrap(err, "creating folder") @@ -733,7 +733,7 @@ func restoreFile( } var ( - item = newItem(name, false) + item = api.NewDriveItem(name, false) collisionKey = api.DriveItemCollisionKey(item) collision api.DriveItemIDType shouldDeleteOriginal bool diff --git a/src/internal/m365/collection/drive/url_cache.go b/src/internal/m365/collection/drive/url_cache.go 
index 1a8cc7899..ef78d48f5 100644 --- a/src/internal/m365/collection/drive/url_cache.go +++ b/src/internal/m365/collection/drive/url_cache.go @@ -12,7 +12,6 @@ import ( "github.com/alcionai/corso/src/internal/common/str" "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/logger" - "github.com/alcionai/corso/src/pkg/services/m365/api" ) const ( @@ -47,7 +46,7 @@ type urlCache struct { refreshMu sync.Mutex deltaQueryCount int - itemPager api.DeltaPager[models.DriveItemable] + edid EnumerateDriveItemsDeltaer errs *fault.Bus } @@ -56,13 +55,10 @@ type urlCache struct { func newURLCache( driveID, prevDelta string, refreshInterval time.Duration, - itemPager api.DeltaPager[models.DriveItemable], + edid EnumerateDriveItemsDeltaer, errs *fault.Bus, ) (*urlCache, error) { - err := validateCacheParams( - driveID, - refreshInterval, - itemPager) + err := validateCacheParams(driveID, refreshInterval, edid) if err != nil { return nil, clues.Wrap(err, "cache params") } @@ -71,9 +67,9 @@ func newURLCache( idToProps: make(map[string]itemProps), lastRefreshTime: time.Time{}, driveID: driveID, + edid: edid, prevDelta: prevDelta, refreshInterval: refreshInterval, - itemPager: itemPager, errs: errs, }, nil @@ -83,7 +79,7 @@ func newURLCache( func validateCacheParams( driveID string, refreshInterval time.Duration, - itemPager api.DeltaPager[models.DriveItemable], + edid EnumerateDriveItemsDeltaer, ) error { if len(driveID) == 0 { return clues.New("drive id is empty") @@ -93,8 +89,8 @@ func validateCacheParams( return clues.New("invalid refresh interval") } - if itemPager == nil { - return clues.New("nil item pager") + if edid == nil { + return clues.New("nil item enumerator") } return nil @@ -160,44 +156,23 @@ func (uc *urlCache) refreshCache( // Issue a delta query to graph logger.Ctx(ctx).Info("refreshing url cache") - err := uc.deltaQuery(ctx) + items, du, err := uc.edid.EnumerateDriveItemsDelta(ctx, uc.driveID, uc.prevDelta) if err != nil { - // clear cache uc.idToProps = make(map[string]itemProps) + return clues.Stack(err) + } - return err + uc.deltaQueryCount++ + + if err := uc.updateCache(ctx, items, uc.errs); err != nil { + return clues.Stack(err) } logger.Ctx(ctx).Info("url cache refreshed") // Update last refresh time uc.lastRefreshTime = time.Now() - - return nil -} - -// deltaQuery performs a delta query on the drive and update the cache -func (uc *urlCache) deltaQuery( - ctx context.Context, -) error { - logger.Ctx(ctx).Debug("starting delta query") - // Reset item pager to remove any previous state - uc.itemPager.Reset(ctx) - - _, _, _, err := collectItems( - ctx, - uc.itemPager, - uc.driveID, - "", - uc.updateCache, - map[string]string{}, - uc.prevDelta, - uc.errs) - if err != nil { - return clues.Wrap(err, "delta query") - } - - uc.deltaQueryCount++ + uc.prevDelta = du.URL return nil } @@ -224,13 +199,7 @@ func (uc *urlCache) readCache( // It assumes that cacheMu is held by caller in write mode func (uc *urlCache) updateCache( ctx context.Context, - _, _ string, items []models.DriveItemable, - _ map[string]string, - _ map[string]string, - _ map[string]struct{}, - _ map[string]map[string]string, - _ bool, errs *fault.Bus, ) error { el := errs.Local() diff --git a/src/internal/m365/collection/drive/url_cache_test.go b/src/internal/m365/collection/drive/url_cache_test.go index 5b35ddff2..c8e23864f 100644 --- a/src/internal/m365/collection/drive/url_cache_test.go +++ b/src/internal/m365/collection/drive/url_cache_test.go @@ -1,7 +1,6 @@ package drive import ( - "context" 
"errors" "io" "math/rand" @@ -18,15 +17,19 @@ import ( "github.com/alcionai/corso/src/internal/common/dttm" "github.com/alcionai/corso/src/internal/common/ptr" "github.com/alcionai/corso/src/internal/m365/graph" + "github.com/alcionai/corso/src/internal/m365/service/onedrive/mock" "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/internal/tester/tconfig" "github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/control/testdata" "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/services/m365/api" - apiMock "github.com/alcionai/corso/src/pkg/services/m365/api/mock" ) +// --------------------------------------------------------------------------- +// integration +// --------------------------------------------------------------------------- + type URLCacheIntegrationSuite struct { tester.Suite ac api.Client @@ -68,11 +71,10 @@ func (suite *URLCacheIntegrationSuite) SetupSuite() { // url cache func (suite *URLCacheIntegrationSuite) TestURLCacheBasic() { var ( - t = suite.T() - ac = suite.ac.Drives() - driveID = suite.driveID - newFolderName = testdata.DefaultRestoreConfig("folder").Location - driveItemPager = suite.ac.Drives().NewDriveItemDeltaPager(driveID, "", api.DriveItemSelectDefault()) + t = suite.T() + ac = suite.ac.Drives() + driveID = suite.driveID + newFolderName = testdata.DefaultRestoreConfig("folder").Location ) ctx, flush := tester.NewContext(t) @@ -82,11 +84,11 @@ func (suite *URLCacheIntegrationSuite) TestURLCacheBasic() { root, err := ac.GetRootFolder(ctx, driveID) require.NoError(t, err, clues.ToCore(err)) - newFolder, err := ac.Drives().PostItemInContainer( + newFolder, err := ac.PostItemInContainer( ctx, driveID, ptr.Val(root.GetId()), - newItem(newFolderName, true), + api.NewDriveItem(newFolderName, true), control.Copy) require.NoError(t, err, clues.ToCore(err)) @@ -94,33 +96,10 @@ func (suite *URLCacheIntegrationSuite) TestURLCacheBasic() { nfid := ptr.Val(newFolder.GetId()) - collectorFunc := func( - context.Context, - string, - string, - []models.DriveItemable, - map[string]string, - map[string]string, - map[string]struct{}, - map[string]map[string]string, - bool, - *fault.Bus, - ) error { - return nil - } - // Get the previous delta to feed into url cache - prevDelta, _, _, err := collectItems( - ctx, - suite.ac.Drives().NewDriveItemDeltaPager(driveID, "", api.DriveItemSelectURLCache()), - suite.driveID, - "drive-name", - collectorFunc, - map[string]string{}, - "", - fault.New(true)) + _, du, err := ac.EnumerateDriveItemsDelta(ctx, suite.driveID, "") require.NoError(t, err, clues.ToCore(err)) - require.NotNil(t, prevDelta.URL) + require.NotEmpty(t, du.URL) // Create a bunch of files in the new folder var items []models.DriveItemable @@ -128,11 +107,11 @@ func (suite *URLCacheIntegrationSuite) TestURLCacheBasic() { for i := 0; i < 5; i++ { newItemName := "test_url_cache_basic_" + dttm.FormatNow(dttm.SafeForTesting) - item, err := ac.Drives().PostItemInContainer( + item, err := ac.PostItemInContainer( ctx, driveID, nfid, - newItem(newItemName, false), + api.NewDriveItem(newItemName, false), control.Copy) require.NoError(t, err, clues.ToCore(err)) @@ -142,9 +121,9 @@ func (suite *URLCacheIntegrationSuite) TestURLCacheBasic() { // Create a new URL cache with a long TTL uc, err := newURLCache( suite.driveID, - prevDelta.URL, + du.URL, 1*time.Hour, - driveItemPager, + suite.ac.Drives(), fault.New(true)) require.NoError(t, err, clues.ToCore(err)) @@ -195,6 +174,10 @@ func (suite 
*URLCacheIntegrationSuite) TestURLCacheBasic() { require.Equal(t, 1, uc.deltaQueryCount) } +// --------------------------------------------------------------------------- +// unit +// --------------------------------------------------------------------------- + type URLCacheUnitSuite struct { tester.Suite } @@ -205,27 +188,20 @@ func TestURLCacheUnitSuite(t *testing.T) { func (suite *URLCacheUnitSuite) TestGetItemProperties() { deltaString := "delta" - next := "next" driveID := "drive1" table := []struct { name string - pagerResult map[string][]apiMock.PagerResult[models.DriveItemable] + pagerItems map[string][]models.DriveItemable + pagerErr map[string]error expectedItemProps map[string]itemProps expectedErr require.ErrorAssertionFunc cacheAssert func(*urlCache, time.Time) }{ { name: "single item in cache", - pagerResult: map[string][]apiMock.PagerResult[models.DriveItemable]{ - driveID: { - { - Values: []models.DriveItemable{ - fileItem("1", "file1", "root", "root", "https://dummy1.com", false), - }, - DeltaLink: &deltaString, - }, - }, + pagerItems: map[string][]models.DriveItemable{ + driveID: {fileItem("1", "file1", "root", "root", "https://dummy1.com", false)}, }, expectedItemProps: map[string]itemProps{ "1": { @@ -242,18 +218,13 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() { }, { name: "multiple items in cache", - pagerResult: map[string][]apiMock.PagerResult[models.DriveItemable]{ + pagerItems: map[string][]models.DriveItemable{ driveID: { - { - Values: []models.DriveItemable{ - fileItem("1", "file1", "root", "root", "https://dummy1.com", false), - fileItem("2", "file2", "root", "root", "https://dummy2.com", false), - fileItem("3", "file3", "root", "root", "https://dummy3.com", false), - fileItem("4", "file4", "root", "root", "https://dummy4.com", false), - fileItem("5", "file5", "root", "root", "https://dummy5.com", false), - }, - DeltaLink: &deltaString, - }, + fileItem("1", "file1", "root", "root", "https://dummy1.com", false), + fileItem("2", "file2", "root", "root", "https://dummy2.com", false), + fileItem("3", "file3", "root", "root", "https://dummy3.com", false), + fileItem("4", "file4", "root", "root", "https://dummy4.com", false), + fileItem("5", "file5", "root", "root", "https://dummy5.com", false), }, }, expectedItemProps: map[string]itemProps{ @@ -287,18 +258,13 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() { }, { name: "duplicate items with potentially new urls", - pagerResult: map[string][]apiMock.PagerResult[models.DriveItemable]{ + pagerItems: map[string][]models.DriveItemable{ driveID: { - { - Values: []models.DriveItemable{ - fileItem("1", "file1", "root", "root", "https://dummy1.com", false), - fileItem("2", "file2", "root", "root", "https://dummy2.com", false), - fileItem("3", "file3", "root", "root", "https://dummy3.com", false), - fileItem("1", "file1", "root", "root", "https://test1.com", false), - fileItem("2", "file2", "root", "root", "https://test2.com", false), - }, - DeltaLink: &deltaString, - }, + fileItem("1", "file1", "root", "root", "https://dummy1.com", false), + fileItem("2", "file2", "root", "root", "https://dummy2.com", false), + fileItem("3", "file3", "root", "root", "https://dummy3.com", false), + fileItem("1", "file1", "root", "root", "https://test1.com", false), + fileItem("2", "file2", "root", "root", "https://test2.com", false), }, }, expectedItemProps: map[string]itemProps{ @@ -324,16 +290,11 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() { }, { name: "deleted items", - pagerResult: 
map[string][]apiMock.PagerResult[models.DriveItemable]{ + pagerItems: map[string][]models.DriveItemable{ driveID: { - { - Values: []models.DriveItemable{ - fileItem("1", "file1", "root", "root", "https://dummy1.com", false), - fileItem("2", "file2", "root", "root", "https://dummy2.com", false), - fileItem("1", "file1", "root", "root", "https://dummy1.com", true), - }, - DeltaLink: &deltaString, - }, + fileItem("1", "file1", "root", "root", "https://dummy1.com", false), + fileItem("2", "file2", "root", "root", "https://dummy2.com", false), + fileItem("1", "file1", "root", "root", "https://dummy1.com", true), }, }, expectedItemProps: map[string]itemProps{ @@ -355,15 +316,8 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() { }, { name: "item not found in cache", - pagerResult: map[string][]apiMock.PagerResult[models.DriveItemable]{ - driveID: { - { - Values: []models.DriveItemable{ - fileItem("1", "file1", "root", "root", "https://dummy1.com", false), - }, - DeltaLink: &deltaString, - }, - }, + pagerItems: map[string][]models.DriveItemable{ + driveID: {fileItem("1", "file1", "root", "root", "https://dummy1.com", false)}, }, expectedItemProps: map[string]itemProps{ "2": {}, @@ -376,23 +330,10 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() { }, }, { - name: "multi-page delta query error", - pagerResult: map[string][]apiMock.PagerResult[models.DriveItemable]{ - driveID: { - { - Values: []models.DriveItemable{ - fileItem("1", "file1", "root", "root", "https://dummy1.com", false), - }, - NextLink: &next, - }, - { - Values: []models.DriveItemable{ - fileItem("2", "file2", "root", "root", "https://dummy2.com", false), - }, - DeltaLink: &deltaString, - Err: errors.New("delta query error"), - }, - }, + name: "delta query error", + pagerItems: map[string][]models.DriveItemable{}, + pagerErr: map[string]error{ + driveID: errors.New("delta query error"), }, expectedItemProps: map[string]itemProps{ "1": {}, @@ -408,15 +349,10 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() { { name: "folder item", - pagerResult: map[string][]apiMock.PagerResult[models.DriveItemable]{ + pagerItems: map[string][]models.DriveItemable{ driveID: { - { - Values: []models.DriveItemable{ - fileItem("1", "file1", "root", "root", "https://dummy1.com", false), - driveItem("2", "folder2", "root", "root", false, true, false), - }, - DeltaLink: &deltaString, - }, + fileItem("1", "file1", "root", "root", "https://dummy1.com", false), + driveItem("2", "folder2", "root", "root", false, true, false), }, }, expectedItemProps: map[string]itemProps{ @@ -437,15 +373,17 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() { ctx, flush := tester.NewContext(t) defer flush() - itemPager := &apiMock.DeltaPager[models.DriveItemable]{ - ToReturn: test.pagerResult[driveID], + medi := mock.EnumeratesDriveItemsDelta{ + Items: test.pagerItems, + Err: test.pagerErr, + DeltaUpdate: map[string]api.DeltaUpdate{driveID: {URL: deltaString}}, } cache, err := newURLCache( driveID, "", 1*time.Hour, - itemPager, + &medi, fault.New(true)) require.NoError(suite.T(), err, clues.ToCore(err)) @@ -480,15 +418,17 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() { // Test needsRefresh func (suite *URLCacheUnitSuite) TestNeedsRefresh() { - driveID := "drive1" - t := suite.T() - refreshInterval := 1 * time.Second + var ( + t = suite.T() + driveID = "drive1" + refreshInterval = 1 * time.Second + ) cache, err := newURLCache( driveID, "", refreshInterval, - &apiMock.DeltaPager[models.DriveItemable]{}, + 
&mock.EnumeratesDriveItemsDelta{}, fault.New(true)) require.NoError(t, err, clues.ToCore(err)) @@ -510,14 +450,12 @@ func (suite *URLCacheUnitSuite) TestNeedsRefresh() { require.False(t, cache.needsRefresh()) } -// Test newURLCache func (suite *URLCacheUnitSuite) TestNewURLCache() { - // table driven tests table := []struct { name string driveID string refreshInt time.Duration - itemPager api.DeltaPager[models.DriveItemable] + itemPager EnumerateDriveItemsDeltaer errors *fault.Bus expectedErr require.ErrorAssertionFunc }{ @@ -525,7 +463,7 @@ func (suite *URLCacheUnitSuite) TestNewURLCache() { name: "invalid driveID", driveID: "", refreshInt: 1 * time.Hour, - itemPager: &apiMock.DeltaPager[models.DriveItemable]{}, + itemPager: &mock.EnumeratesDriveItemsDelta{}, errors: fault.New(true), expectedErr: require.Error, }, @@ -533,12 +471,12 @@ func (suite *URLCacheUnitSuite) TestNewURLCache() { name: "invalid refresh interval", driveID: "drive1", refreshInt: 100 * time.Millisecond, - itemPager: &apiMock.DeltaPager[models.DriveItemable]{}, + itemPager: &mock.EnumeratesDriveItemsDelta{}, errors: fault.New(true), expectedErr: require.Error, }, { - name: "invalid itemPager", + name: "invalid item enumerator", driveID: "drive1", refreshInt: 1 * time.Hour, itemPager: nil, @@ -549,7 +487,7 @@ func (suite *URLCacheUnitSuite) TestNewURLCache() { name: "valid", driveID: "drive1", refreshInt: 1 * time.Hour, - itemPager: &apiMock.DeltaPager[models.DriveItemable]{}, + itemPager: &mock.EnumeratesDriveItemsDelta{}, errors: fault.New(true), expectedErr: require.NoError, }, diff --git a/src/internal/m365/collection/groups/backup_test.go b/src/internal/m365/collection/groups/backup_test.go index 899b6ceea..a372922ba 100644 --- a/src/internal/m365/collection/groups/backup_test.go +++ b/src/internal/m365/collection/groups/backup_test.go @@ -2,7 +2,6 @@ package groups import ( "context" - "fmt" "testing" "time" @@ -527,8 +526,6 @@ func (suite *BackupIntgSuite) TestCreateCollections() { require.NotEmpty(t, c.FullPath().Folder(false)) - fmt.Printf("\n-----\nfolder %+v\n-----\n", c.FullPath().Folder(false)) - // TODO(ashmrtn): Remove when LocationPath is made part of BackupCollection // interface. 
if !assert.Implements(t, (*data.LocationPather)(nil), c) { @@ -537,8 +534,6 @@ func (suite *BackupIntgSuite) TestCreateCollections() { loc := c.(data.LocationPather).LocationPath().String() - fmt.Printf("\n-----\nloc %+v\n-----\n", c.(data.LocationPather).LocationPath().String()) - require.NotEmpty(t, loc) delete(test.channelNames, loc) diff --git a/src/internal/m365/service/onedrive/mock/handlers.go b/src/internal/m365/service/onedrive/mock/handlers.go index f0e0286d5..f7d9ce293 100644 --- a/src/internal/m365/service/onedrive/mock/handlers.go +++ b/src/internal/m365/service/onedrive/mock/handlers.go @@ -8,11 +8,13 @@ import ( "github.com/microsoftgraph/msgraph-sdk-go/drives" "github.com/microsoftgraph/msgraph-sdk-go/models" + "github.com/alcionai/corso/src/internal/common/ptr" odConsts "github.com/alcionai/corso/src/internal/m365/service/onedrive/consts" "github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/services/m365/api" + apiMock "github.com/alcionai/corso/src/pkg/services/m365/api/mock" ) // --------------------------------------------------------------------------- @@ -22,6 +24,8 @@ import ( type BackupHandler struct { ItemInfo details.ItemInfo + DriveItemEnumeration EnumeratesDriveItemsDelta + GI GetsItem GIP GetsItemPermission @@ -55,6 +59,7 @@ func DefaultOneDriveBH(resourceOwner string) *BackupHandler { OneDrive: &details.OneDriveInfo{}, Extension: &details.ExtensionData{}, }, + DriveItemEnumeration: EnumeratesDriveItemsDelta{}, GI: GetsItem{Err: clues.New("not defined")}, GIP: GetsItemPermission{Err: clues.New("not defined")}, PathPrefixFn: defaultOneDrivePathPrefixer, @@ -124,10 +129,6 @@ func (h BackupHandler) NewDrivePager(string, []string) api.Pager[models.Driveabl return h.DrivePagerV } -func (h BackupHandler) NewItemPager(driveID string, _ string, _ []string) api.DeltaPager[models.DriveItemable] { - return h.ItemPagerV[driveID] -} - func (h BackupHandler) FormatDisplayPath(_ string, pb *path.Builder) string { return "/" + pb.String() } @@ -152,6 +153,13 @@ func (h *BackupHandler) Get(context.Context, string, map[string]string) (*http.R return h.GetResps[c], h.GetErrs[c] } +func (h BackupHandler) EnumerateDriveItemsDelta( + ctx context.Context, + driveID, prevDeltaLink string, +) ([]models.DriveItemable, api.DeltaUpdate, error) { + return h.DriveItemEnumeration.EnumerateDriveItemsDelta(ctx, driveID, prevDeltaLink) +} + func (h BackupHandler) GetItem(ctx context.Context, _, _ string) (models.DriveItemable, error) { return h.GI.GetItem(ctx, "", "") } @@ -254,6 +262,65 @@ func (m GetsItem) GetItem( return m.Item, m.Err } +// --------------------------------------------------------------------------- +// Enumerates Drive Items +// --------------------------------------------------------------------------- + +type EnumeratesDriveItemsDelta struct { + Items map[string][]models.DriveItemable + DeltaUpdate map[string]api.DeltaUpdate + Err map[string]error +} + +func (edi EnumeratesDriveItemsDelta) EnumerateDriveItemsDelta( + _ context.Context, + driveID, _ string, +) ( + []models.DriveItemable, + api.DeltaUpdate, + error, +) { + return edi.Items[driveID], edi.DeltaUpdate[driveID], edi.Err[driveID] +} + +func PagerResultToEDID( + m map[string][]apiMock.PagerResult[models.DriveItemable], +) EnumeratesDriveItemsDelta { + edi := EnumeratesDriveItemsDelta{ + Items: map[string][]models.DriveItemable{}, + DeltaUpdate: map[string]api.DeltaUpdate{}, + Err: 
map[string]error{}, + } + + for driveID, results := range m { + var ( + err error + items = []models.DriveItemable{} + deltaUpdate api.DeltaUpdate + ) + + for _, pr := range results { + items = append(items, pr.Values...) + + if pr.DeltaLink != nil { + deltaUpdate = api.DeltaUpdate{URL: ptr.Val(pr.DeltaLink)} + } + + if pr.Err != nil { + err = pr.Err + } + + deltaUpdate.Reset = deltaUpdate.Reset || pr.ResetDelta + } + + edi.Items[driveID] = items + edi.Err[driveID] = err + edi.DeltaUpdate[driveID] = deltaUpdate + } + + return edi +} + // --------------------------------------------------------------------------- // Get Item Permissioner // --------------------------------------------------------------------------- diff --git a/src/internal/m365/service/sharepoint/backup_test.go b/src/internal/m365/service/sharepoint/backup_test.go index bcd37dd6b..12acf2dcd 100644 --- a/src/internal/m365/service/sharepoint/backup_test.go +++ b/src/internal/m365/service/sharepoint/backup_test.go @@ -90,12 +90,9 @@ func (suite *LibrariesBackupUnitSuite) TestUpdateCollections() { var ( paths = map[string]string{} - newPaths = map[string]string{} + currPaths = map[string]string{} excluded = map[string]struct{}{} - itemColls = map[string]map[string]string{ - driveID: {}, - } - collMap = map[string]map[string]*drive.Collection{ + collMap = map[string]map[string]*drive.Collection{ driveID: {}, } ) @@ -109,15 +106,14 @@ func (suite *LibrariesBackupUnitSuite) TestUpdateCollections() { c.CollectionMap = collMap - err := c.UpdateCollections( + _, err := c.UpdateCollections( ctx, driveID, "General", test.items, paths, - newPaths, + currPaths, excluded, - itemColls, true, fault.New(true)) diff --git a/src/pkg/fault/fault.go b/src/pkg/fault/fault.go index 488656fa4..1ce6162ce 100644 --- a/src/pkg/fault/fault.go +++ b/src/pkg/fault/fault.go @@ -384,20 +384,20 @@ func (pec printableErrCore) Values() []string { // funcs, and the function that spawned the local bus should always // return `local.Failure()` to ensure that hard failures are propagated // back upstream. -func (e *Bus) Local() *localBus { - return &localBus{ +func (e *Bus) Local() *LocalBus { + return &LocalBus{ mu: &sync.Mutex{}, bus: e, } } -type localBus struct { +type LocalBus struct { mu *sync.Mutex bus *Bus current error } -func (e *localBus) AddRecoverable(ctx context.Context, err error) { +func (e *LocalBus) AddRecoverable(ctx context.Context, err error) { if err == nil { return } @@ -422,7 +422,7 @@ func (e *localBus) AddRecoverable(ctx context.Context, err error) { // 2. Skipping avoids a permanent and consistent failure. If // the underlying reason is transient or otherwise recoverable, // the item should not be skipped. -func (e *localBus) AddSkip(ctx context.Context, s *Skipped) { +func (e *LocalBus) AddSkip(ctx context.Context, s *Skipped) { if s == nil { return } @@ -437,7 +437,7 @@ func (e *localBus) AddSkip(ctx context.Context, s *Skipped) { // It does not return the underlying bus.Failure(), only the failure // that was recorded within the local bus instance. This error should // get returned by any func which created a local bus. 
-func (e *localBus) Failure() error { +func (e *LocalBus) Failure() error { return e.current } diff --git a/src/pkg/selectors/exchange.go b/src/pkg/selectors/exchange.go index 68f45263c..987165199 100644 --- a/src/pkg/selectors/exchange.go +++ b/src/pkg/selectors/exchange.go @@ -697,7 +697,7 @@ func (s ExchangeScope) IncludesCategory(cat exchangeCategory) bool { // returns true if the category is included in the scope's data type, // and the value is set to Any(). func (s ExchangeScope) IsAny(cat exchangeCategory) bool { - return isAnyTarget(s, cat) + return IsAnyTarget(s, cat) } // Get returns the data category in the scope. If the scope diff --git a/src/pkg/selectors/groups.go b/src/pkg/selectors/groups.go index 584887bfb..e6399fbf1 100644 --- a/src/pkg/selectors/groups.go +++ b/src/pkg/selectors/groups.go @@ -699,7 +699,7 @@ func (s GroupsScope) IncludesCategory(cat groupsCategory) bool { // returns true if the category is included in the scope's data type, // and the value is set to Any(). func (s GroupsScope) IsAny(cat groupsCategory) bool { - return isAnyTarget(s, cat) + return IsAnyTarget(s, cat) } // Get returns the data category in the scope. If the scope diff --git a/src/pkg/selectors/onedrive.go b/src/pkg/selectors/onedrive.go index 5d1538a89..f97ceccaf 100644 --- a/src/pkg/selectors/onedrive.go +++ b/src/pkg/selectors/onedrive.go @@ -484,7 +484,7 @@ func (s OneDriveScope) Matches(cat oneDriveCategory, target string) bool { // returns true if the category is included in the scope's data type, // and the value is set to Any(). func (s OneDriveScope) IsAny(cat oneDriveCategory) bool { - return isAnyTarget(s, cat) + return IsAnyTarget(s, cat) } // Get returns the data category in the scope. If the scope diff --git a/src/pkg/selectors/scopes.go b/src/pkg/selectors/scopes.go index aec624486..6e2eb86e9 100644 --- a/src/pkg/selectors/scopes.go +++ b/src/pkg/selectors/scopes.go @@ -694,7 +694,7 @@ func matchesPathValues[T scopeT, C categoryT]( return false } - if isAnyTarget(sc, cc) { + if IsAnyTarget(sc, cc) { // continue, not return: all path keys must match the entry to succeed continue } @@ -795,7 +795,7 @@ func isNoneTarget[T scopeT, C categoryT](s T, cat C) bool { // returns true if the category is included in the scope's category type, // and the value is set to Any(). 
-func isAnyTarget[T scopeT, C categoryT](s T, cat C) bool { +func IsAnyTarget[T scopeT, C categoryT](s T, cat C) bool { if !typeAndCategoryMatches(cat, s.categorizer()) { return false } diff --git a/src/pkg/selectors/scopes_test.go b/src/pkg/selectors/scopes_test.go index 6bf1e3ad9..0a44df160 100644 --- a/src/pkg/selectors/scopes_test.go +++ b/src/pkg/selectors/scopes_test.go @@ -125,14 +125,14 @@ func (suite *SelectorScopesSuite) TestGetCatValue() { func (suite *SelectorScopesSuite) TestIsAnyTarget() { t := suite.T() stub := stubScope("") - assert.True(t, isAnyTarget(stub, rootCatStub)) - assert.True(t, isAnyTarget(stub, leafCatStub)) - assert.False(t, isAnyTarget(stub, mockCategorizer("smarf"))) + assert.True(t, IsAnyTarget(stub, rootCatStub)) + assert.True(t, IsAnyTarget(stub, leafCatStub)) + assert.False(t, IsAnyTarget(stub, mockCategorizer("smarf"))) stub = stubScope("none") - assert.False(t, isAnyTarget(stub, rootCatStub)) - assert.False(t, isAnyTarget(stub, leafCatStub)) - assert.False(t, isAnyTarget(stub, mockCategorizer("smarf"))) + assert.False(t, IsAnyTarget(stub, rootCatStub)) + assert.False(t, IsAnyTarget(stub, leafCatStub)) + assert.False(t, IsAnyTarget(stub, mockCategorizer("smarf"))) } var reduceTestTable = []struct { diff --git a/src/pkg/selectors/sharepoint.go b/src/pkg/selectors/sharepoint.go index f35aa10b5..68f6655e5 100644 --- a/src/pkg/selectors/sharepoint.go +++ b/src/pkg/selectors/sharepoint.go @@ -625,7 +625,7 @@ func (s SharePointScope) IncludesCategory(cat sharePointCategory) bool { // returns true if the category is included in the scope's data type, // and the value is set to Any(). func (s SharePointScope) IsAny(cat sharePointCategory) bool { - return isAnyTarget(s, cat) + return IsAnyTarget(s, cat) } // Get returns the data category in the scope. If the scope diff --git a/src/pkg/services/m365/api/config.go b/src/pkg/services/m365/api/config.go index 0a0bb913d..8a5be9d23 100644 --- a/src/pkg/services/m365/api/config.go +++ b/src/pkg/services/m365/api/config.go @@ -101,7 +101,7 @@ func idAnd(ss ...string) []string { // exported // --------------------------------------------------------------------------- -func DriveItemSelectDefault() []string { +func DefaultDriveItemProps() []string { return idAnd( "content.downloadUrl", "createdBy", diff --git a/src/pkg/services/m365/api/delta.go b/src/pkg/services/m365/api/delta.go deleted file mode 100644 index dc24961f0..000000000 --- a/src/pkg/services/m365/api/delta.go +++ /dev/null @@ -1,11 +0,0 @@ -package api - -// DeltaUpdate holds the results of a current delta token. It normally -// gets produced when aggregating the addition and removal of items in -// a delta-queryable folder. -type DeltaUpdate struct { - // the deltaLink itself - URL string - // true if the old delta was marked as invalid - Reset bool -} diff --git a/src/pkg/services/m365/api/drive.go b/src/pkg/services/m365/api/drive.go index e40d7497a..6a795e5cc 100644 --- a/src/pkg/services/m365/api/drive.go +++ b/src/pkg/services/m365/api/drive.go @@ -331,6 +331,10 @@ func (c Drives) PostItemLinkShareUpdate( return itm, nil } +// --------------------------------------------------------------------------- +// helper funcs +// --------------------------------------------------------------------------- + // DriveItemCollisionKeyy constructs a key from the item name. // collision keys are used to identify duplicate item conflicts for handling advanced restoration config. 
func DriveItemCollisionKey(item models.DriveItemable) string { @@ -340,3 +344,17 @@ func DriveItemCollisionKey(item models.DriveItemable) string { return ptr.Val(item.GetName()) } + +// NewDriveItem initializes a `models.DriveItemable` with either a folder or file entry. +func NewDriveItem(name string, folder bool) *models.DriveItem { + itemToCreate := models.NewDriveItem() + itemToCreate.SetName(&name) + + if folder { + itemToCreate.SetFolder(models.NewFolder()) + } else { + itemToCreate.SetFile(models.NewFile()) + } + + return itemToCreate +} diff --git a/src/pkg/services/m365/api/drive_pager.go b/src/pkg/services/m365/api/drive_pager.go index c592fa656..e5523d35f 100644 --- a/src/pkg/services/m365/api/drive_pager.go +++ b/src/pkg/services/m365/api/drive_pager.go @@ -15,6 +15,11 @@ import ( "github.com/alcionai/corso/src/pkg/logger" ) +type DriveItemIDType struct { + ItemID string + IsFolder bool +} + // --------------------------------------------------------------------------- // non-delta item pager // --------------------------------------------------------------------------- @@ -65,11 +70,6 @@ func (p *driveItemPageCtrl) ValidModTimes() bool { return true } -type DriveItemIDType struct { - ItemID string - IsFolder bool -} - func (c Drives) GetItemsInContainerByCollisionKey( ctx context.Context, driveID, containerID string, @@ -131,9 +131,9 @@ type DriveItemDeltaPageCtrl struct { options *drives.ItemItemsItemDeltaRequestBuilderGetRequestConfiguration } -func (c Drives) NewDriveItemDeltaPager( - driveID, link string, - selectFields []string, +func (c Drives) newDriveItemDeltaPager( + driveID, prevDeltaLink string, + selectProps ...string, ) *DriveItemDeltaPageCtrl { preferHeaderItems := []string{ "deltashowremovedasdeleted", @@ -142,28 +142,32 @@ func (c Drives) NewDriveItemDeltaPager( "hierarchicalsharing", } - requestConfig := &drives.ItemItemsItemDeltaRequestBuilderGetRequestConfiguration{ - Headers: newPreferHeaders(preferHeaderItems...), - QueryParameters: &drives.ItemItemsItemDeltaRequestBuilderGetQueryParameters{ - Select: selectFields, - }, + options := &drives.ItemItemsItemDeltaRequestBuilderGetRequestConfiguration{ + Headers: newPreferHeaders(preferHeaderItems...), + QueryParameters: &drives.ItemItemsItemDeltaRequestBuilderGetQueryParameters{}, + } + + if len(selectProps) > 0 { + options.QueryParameters.Select = selectProps + } + + builder := c.Stable. + Client(). + Drives(). + ByDriveId(driveID). + Items(). + ByDriveItemId(onedrive.RootID). + Delta() + + if len(prevDeltaLink) > 0 { + builder = drives.NewItemItemsItemDeltaRequestBuilder(prevDeltaLink, c.Stable.Adapter()) } res := &DriveItemDeltaPageCtrl{ gs: c.Stable, driveID: driveID, - options: requestConfig, - builder: c.Stable. - Client(). - Drives(). - ByDriveId(driveID). - Items(). - ByDriveItemId(onedrive.RootID). - Delta(), - } - - if len(link) > 0 { - res.builder = drives.NewItemItemsItemDeltaRequestBuilder(link, c.Stable.Adapter()) + options: options, + builder: builder, } return res @@ -193,6 +197,27 @@ func (p *DriveItemDeltaPageCtrl) ValidModTimes() bool { return true } +// EnumerateDriveItems will enumerate all items in the specified drive and hand them to the +// provided `collector` method +func (c Drives) EnumerateDriveItemsDelta( + ctx context.Context, + driveID string, + prevDeltaLink string, +) ( + []models.DriveItemable, + DeltaUpdate, + error, +) { + pager := c.newDriveItemDeltaPager(driveID, prevDeltaLink, DefaultDriveItemProps()...) 
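+
+	// the enumeration result is returned as a single in-memory batch of items,
+	// along with a DeltaUpdate carrying the delta link to use on the next query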
+ + items, du, err := deltaEnumerateItems[models.DriveItemable](ctx, pager, prevDeltaLink) + if err != nil { + return nil, du, clues.Stack(err) + } + + return items, du, nil +} + // --------------------------------------------------------------------------- // user's drives pager // --------------------------------------------------------------------------- diff --git a/src/pkg/services/m365/api/drive_pager_test.go b/src/pkg/services/m365/api/drive_pager_test.go index f28277eee..b75c3d320 100644 --- a/src/pkg/services/m365/api/drive_pager_test.go +++ b/src/pkg/services/m365/api/drive_pager_test.go @@ -178,3 +178,18 @@ func (suite *DrivePagerIntgSuite) TestDrives_GetItemIDsInContainer() { }) } } + +func (suite *DrivePagerIntgSuite) TestEnumerateDriveItems() { + t := suite.T() + + ctx, flush := tester.NewContext(t) + defer flush() + + items, du, err := suite.its. + ac. + Drives(). + EnumerateDriveItemsDelta(ctx, suite.its.user.driveID, "") + require.NoError(t, err, clues.ToCore(err)) + require.NotEmpty(t, items, "no items found in user's drive") + assert.NotEmpty(t, du.URL, "should have a delta link") +} diff --git a/src/pkg/services/m365/api/drive_test.go b/src/pkg/services/m365/api/drive_test.go index 28173c27a..1f9ccadca 100644 --- a/src/pkg/services/m365/api/drive_test.go +++ b/src/pkg/services/m365/api/drive_test.go @@ -17,6 +17,7 @@ import ( "github.com/alcionai/corso/src/internal/tester/tconfig" "github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/control/testdata" + "github.com/alcionai/corso/src/pkg/services/m365/api" ) type DriveAPIIntgSuite struct { @@ -50,20 +51,6 @@ func (suite *DriveAPIIntgSuite) TestDrives_CreatePagerAndGetPage() { assert.NotNil(t, a) } -// newItem initializes a `models.DriveItemable` that can be used as input to `createItem` -func newItem(name string, folder bool) *models.DriveItem { - itemToCreate := models.NewDriveItem() - itemToCreate.SetName(&name) - - if folder { - itemToCreate.SetFolder(models.NewFolder()) - } else { - itemToCreate.SetFile(models.NewFile()) - } - - return itemToCreate -} - func (suite *DriveAPIIntgSuite) TestDrives_PostItemInContainer() { t := suite.T() @@ -78,12 +65,12 @@ func (suite *DriveAPIIntgSuite) TestDrives_PostItemInContainer() { ctx, suite.its.user.driveID, suite.its.user.driveRootFolderID, - newItem(rc.Location, true), + api.NewDriveItem(rc.Location, true), control.Replace) require.NoError(t, err, clues.ToCore(err)) // generate a folder to use for collision testing - folder := newItem("collision", true) + folder := api.NewDriveItem("collision", true) origFolder, err := acd.PostItemInContainer( ctx, suite.its.user.driveID, @@ -93,7 +80,7 @@ func (suite *DriveAPIIntgSuite) TestDrives_PostItemInContainer() { require.NoError(t, err, clues.ToCore(err)) // generate an item to use for collision testing - file := newItem("collision.txt", false) + file := api.NewDriveItem("collision.txt", false) origFile, err := acd.PostItemInContainer( ctx, suite.its.user.driveID, @@ -241,7 +228,7 @@ func (suite *DriveAPIIntgSuite) TestDrives_PostItemInContainer_replaceFolderRegr ctx, suite.its.user.driveID, suite.its.user.driveRootFolderID, - newItem(rc.Location, true), + api.NewDriveItem(rc.Location, true), // skip instead of replace here to get // an ErrItemAlreadyExistsConflict, just in case. 
control.Skip) @@ -249,7 +236,7 @@ func (suite *DriveAPIIntgSuite) TestDrives_PostItemInContainer_replaceFolderRegr // generate items within that folder for i := 0; i < 5; i++ { - file := newItem(fmt.Sprintf("collision_%d.txt", i), false) + file := api.NewDriveItem(fmt.Sprintf("collision_%d.txt", i), false) f, err := acd.PostItemInContainer( ctx, suite.its.user.driveID, @@ -265,7 +252,7 @@ func (suite *DriveAPIIntgSuite) TestDrives_PostItemInContainer_replaceFolderRegr ctx, suite.its.user.driveID, ptr.Val(folder.GetParentReference().GetId()), - newItem(rc.Location, true), + api.NewDriveItem(rc.Location, true), control.Replace) require.NoError(t, err, clues.ToCore(err)) require.NotEmpty(t, ptr.Val(resultFolder.GetId())) diff --git a/src/pkg/services/m365/api/item_pager.go b/src/pkg/services/m365/api/item_pager.go index 5effcb7a6..f991f2345 100644 --- a/src/pkg/services/m365/api/item_pager.go +++ b/src/pkg/services/m365/api/item_pager.go @@ -13,6 +13,20 @@ import ( "github.com/alcionai/corso/src/pkg/logger" ) +// --------------------------------------------------------------------------- +// common structs +// --------------------------------------------------------------------------- + +// DeltaUpdate holds the results of a current delta token. It normally +// gets produced when aggregating the addition and removal of items in +// a delta-queryable folder. +type DeltaUpdate struct { + // the deltaLink itself + URL string + // true if the old delta was marked as invalid + Reset bool +} + // --------------------------------------------------------------------------- // common interfaces // --------------------------------------------------------------------------- diff --git a/src/pkg/services/m365/api/mock/pager.go b/src/pkg/services/m365/api/mock/pager.go index b1818ac17..bccf5b428 100644 --- a/src/pkg/services/m365/api/mock/pager.go +++ b/src/pkg/services/m365/api/mock/pager.go @@ -32,10 +32,11 @@ func (dnl *DeltaNextLinkValues[T]) GetOdataDeltaLink() *string { } type PagerResult[T any] struct { - Values []T - NextLink *string - DeltaLink *string - Err error + Values []T + NextLink *string + DeltaLink *string + ResetDelta bool + Err error } // --------------------------------------------------------------------------- From e5647a809daca2a509edd85331643f9e1ad88318 Mon Sep 17 00:00:00 2001 From: Keepers Date: Thu, 28 Sep 2023 16:36:38 -0600 Subject: [PATCH 03/26] add GroupByID to services (#4396) Adds a groupByID call to services, and adds CallConfig to the group by id api fn. --- #### Does this PR need a docs update or release note? 
- [x] :no_entry: No #### Type of change - [x] :sunflower: Feature #### Issue(s) * #3988 #### Test Plan - [x] :green_heart: E2E --- src/internal/m365/service/groups/backup.go | 5 ++++- src/internal/m365/service/groups/enabled.go | 9 +++----- .../m365/service/groups/enabled_test.go | 19 ++++++++++------- src/pkg/services/m365/api/client.go | 12 +++++++++++ src/pkg/services/m365/api/groups.go | 5 +++-- src/pkg/services/m365/api/groups_test.go | 4 ++-- src/pkg/services/m365/groups.go | 21 +++++++++++++++++++ src/pkg/services/m365/groups_test.go | 18 ++++++++++++++++ 8 files changed, 75 insertions(+), 18 deletions(-) diff --git a/src/internal/m365/service/groups/backup.go b/src/internal/m365/service/groups/backup.go index 27f34f7b3..7dbbf8e13 100644 --- a/src/internal/m365/service/groups/backup.go +++ b/src/internal/m365/service/groups/backup.go @@ -55,7 +55,10 @@ func ProduceBackupCollections( "group_id", clues.Hide(bpc.ProtectedResource.ID()), "group_name", clues.Hide(bpc.ProtectedResource.Name())) - group, err := ac.Groups().GetByID(ctx, bpc.ProtectedResource.ID()) + group, err := ac.Groups().GetByID( + ctx, + bpc.ProtectedResource.ID(), + api.CallConfig{}) if err != nil { return nil, nil, false, clues.Wrap(err, "getting group").WithClues(ctx) } diff --git a/src/internal/m365/service/groups/enabled.go b/src/internal/m365/service/groups/enabled.go index 87acc8c48..4580746e5 100644 --- a/src/internal/m365/service/groups/enabled.go +++ b/src/internal/m365/service/groups/enabled.go @@ -7,18 +7,15 @@ import ( "github.com/microsoftgraph/msgraph-sdk-go/models" "github.com/alcionai/corso/src/pkg/filters" + "github.com/alcionai/corso/src/pkg/services/m365/api" ) -type getByIDer interface { - GetByID(ctx context.Context, identifier string) (models.Groupable, error) -} - func IsServiceEnabled( ctx context.Context, - gbi getByIDer, + gbi api.GetByIDer[models.Groupable], resource string, ) (bool, error) { - resp, err := gbi.GetByID(ctx, resource) + resp, err := gbi.GetByID(ctx, resource, api.CallConfig{}) if err != nil { return false, clues.Wrap(err, "getting group").WithClues(ctx) } diff --git a/src/internal/m365/service/groups/enabled_test.go b/src/internal/m365/service/groups/enabled_test.go index c2447982e..d032be415 100644 --- a/src/internal/m365/service/groups/enabled_test.go +++ b/src/internal/m365/service/groups/enabled_test.go @@ -12,6 +12,7 @@ import ( "github.com/alcionai/corso/src/internal/m365/graph" "github.com/alcionai/corso/src/internal/tester" + "github.com/alcionai/corso/src/pkg/services/m365/api" ) type EnabledUnitSuite struct { @@ -22,14 +23,18 @@ func TestEnabledUnitSuite(t *testing.T) { suite.Run(t, &EnabledUnitSuite{Suite: tester.NewUnitSuite(t)}) } -var _ getByIDer = mockGBI{} +var _ api.GetByIDer[models.Groupable] = mockGBI{} type mockGBI struct { group models.Groupable err error } -func (m mockGBI) GetByID(ctx context.Context, identifier string) (models.Groupable, error) { +func (m mockGBI) GetByID( + ctx context.Context, + identifier string, + _ api.CallConfig, +) (models.Groupable, error) { return m.group, m.err } @@ -56,13 +61,13 @@ func (suite *EnabledUnitSuite) TestIsServiceEnabled() { table := []struct { name string - mock func(context.Context) getByIDer + mock func(context.Context) api.GetByIDer[models.Groupable] expect assert.BoolAssertionFunc expectErr assert.ErrorAssertionFunc }{ { name: "ok", - mock: func(ctx context.Context) getByIDer { + mock: func(ctx context.Context) api.GetByIDer[models.Groupable] { return mockGBI{ group: unified, } @@ -72,7 +77,7 @@ func (suite 
*EnabledUnitSuite) TestIsServiceEnabled() { }, { name: "non-unified group", - mock: func(ctx context.Context) getByIDer { + mock: func(ctx context.Context) api.GetByIDer[models.Groupable] { return mockGBI{ group: nonUnified, } @@ -82,7 +87,7 @@ func (suite *EnabledUnitSuite) TestIsServiceEnabled() { }, { name: "group not found", - mock: func(ctx context.Context) getByIDer { + mock: func(ctx context.Context) api.GetByIDer[models.Groupable] { return mockGBI{ err: graph.Stack(ctx, odErrMsg(string(graph.RequestResourceNotFound), "message")), } @@ -92,7 +97,7 @@ func (suite *EnabledUnitSuite) TestIsServiceEnabled() { }, { name: "arbitrary error", - mock: func(ctx context.Context) getByIDer { + mock: func(ctx context.Context) api.GetByIDer[models.Groupable] { return mockGBI{ err: assert.AnError, } diff --git a/src/pkg/services/m365/api/client.go b/src/pkg/services/m365/api/client.go index 64b00f3dd..04f490f12 100644 --- a/src/pkg/services/m365/api/client.go +++ b/src/pkg/services/m365/api/client.go @@ -126,3 +126,15 @@ func (c Client) Get( type CallConfig struct { Expand []string } + +// --------------------------------------------------------------------------- +// common interfaces +// --------------------------------------------------------------------------- + +type GetByIDer[T any] interface { + GetByID( + ctx context.Context, + identifier string, + cc CallConfig, + ) (T, error) +} diff --git a/src/pkg/services/m365/api/groups.go b/src/pkg/services/m365/api/groups.go index 73beb3d2b..2aacdedf0 100644 --- a/src/pkg/services/m365/api/groups.go +++ b/src/pkg/services/m365/api/groups.go @@ -102,6 +102,7 @@ const filterGroupByDisplayNameQueryTmpl = "displayName eq '%s'" func (c Groups) GetByID( ctx context.Context, identifier string, + _ CallConfig, // matching standards ) (models.Groupable, error) { service, err := c.Service() if err != nil { @@ -234,9 +235,9 @@ func IsTeam(ctx context.Context, mg models.Groupable) bool { func (c Groups) GetIDAndName( ctx context.Context, groupID string, - _ CallConfig, // not currently supported + cc CallConfig, ) (string, string, error) { - s, err := c.GetByID(ctx, groupID) + s, err := c.GetByID(ctx, groupID, cc) if err != nil { return "", "", err } diff --git a/src/pkg/services/m365/api/groups_test.go b/src/pkg/services/m365/api/groups_test.go index c00b64a13..b60240cff 100644 --- a/src/pkg/services/m365/api/groups_test.go +++ b/src/pkg/services/m365/api/groups_test.go @@ -121,7 +121,7 @@ func (suite *GroupsIntgSuite) TestGroups_GetByID() { groupsAPI = suite.its.ac.Groups() ) - grp, err := groupsAPI.GetByID(ctx, groupID) + grp, err := groupsAPI.GetByID(ctx, groupID, api.CallConfig{}) require.NoError(t, err, clues.ToCore(err)) table := []struct { @@ -157,7 +157,7 @@ func (suite *GroupsIntgSuite) TestGroups_GetByID() { ctx, flush := tester.NewContext(t) defer flush() - _, err := groupsAPI.GetByID(ctx, test.id) + _, err := groupsAPI.GetByID(ctx, test.id, api.CallConfig{}) test.expectErr(t, err, clues.ToCore(err)) }) } diff --git a/src/pkg/services/m365/groups.go b/src/pkg/services/m365/groups.go index a32195c1c..5255620a7 100644 --- a/src/pkg/services/m365/groups.go +++ b/src/pkg/services/m365/groups.go @@ -28,6 +28,27 @@ type Group struct { IsTeam bool } +// GroupByID retrieves a specific group. 
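+//
+// A minimal usage sketch, where ctx, acct, and the group id are placeholder
+// values supplied by the caller:
+//
+//	group, err := m365.GroupByID(ctx, acct, "group-id")
+//	if err != nil {
+//		return err
+//	}
+//	// group.ID and group.DisplayName identify the resolved group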
+func GroupByID( + ctx context.Context, + acct account.Account, + id string, +) (*Group, error) { + ac, err := makeAC(ctx, acct, path.GroupsService) + if err != nil { + return nil, clues.Stack(err).WithClues(ctx) + } + + cc := api.CallConfig{} + + g, err := ac.Groups().GetByID(ctx, id, cc) + if err != nil { + return nil, clues.Stack(err) + } + + return parseGroup(ctx, g) +} + // GroupsCompat returns a list of groups in the specified M365 tenant. func GroupsCompat(ctx context.Context, acct account.Account) ([]*Group, error) { errs := fault.New(true) diff --git a/src/pkg/services/m365/groups_test.go b/src/pkg/services/m365/groups_test.go index 7c2cd4183..02091d42b 100644 --- a/src/pkg/services/m365/groups_test.go +++ b/src/pkg/services/m365/groups_test.go @@ -41,6 +41,24 @@ func (suite *GroupsIntgSuite) SetupSuite() { suite.acct = tconfig.NewM365Account(t) } +func (suite *GroupsIntgSuite) TestGroupByID() { + t := suite.T() + + ctx, flush := tester.NewContext(t) + defer flush() + + graph.InitializeConcurrencyLimiter(ctx, true, 4) + + gid := tconfig.M365TeamID(t) + + group, err := m365.GroupByID(ctx, suite.acct, gid) + require.NoError(t, err, clues.ToCore(err)) + require.NotNil(t, group) + + assert.Equal(t, gid, group.ID, "must match expected id") + assert.NotEmpty(t, group.DisplayName) +} + func (suite *GroupsIntgSuite) TestGroups() { t := suite.T() From a5f93f7a10e510682e1958ed4fad6fce2ea52792 Mon Sep 17 00:00:00 2001 From: Keepers Date: Thu, 28 Sep 2023 17:20:44 -0600 Subject: [PATCH 04/26] correctly attach store/provider flags (#4391) corrects the storage and provider flag positioning to attach to the child command instead of the parent. Also corrects unit tests for flags to ensure flags are preset as expected. --- #### Does this PR need a docs update or release note? 
- [x] :no_entry: No #### Type of change - [x] :bug: Bugfix #### Test Plan - [x] :zap: Unit test - [x] :green_heart: E2E --- src/cli/backup/backup.go | 6 +- src/cli/backup/exchange_test.go | 212 ++++++++++------------------ src/cli/backup/groups_test.go | 219 +++++++++-------------------- src/cli/backup/onedrive_test.go | 186 +++++++++--------------- src/cli/backup/sharepoint_test.go | 188 +++++++++---------------- src/cli/export/export.go | 4 +- src/cli/export/groups_test.go | 72 ++++------ src/cli/export/onedrive_test.go | 86 +++++------ src/cli/export/sharepoint_test.go | 97 +++++-------- src/cli/flags/testdata/flags.go | 17 ++- src/cli/restore/exchange_test.go | 108 ++++++-------- src/cli/restore/groups_test.go | 100 ++++++------- src/cli/restore/onedrive_test.go | 89 +++++------- src/cli/restore/restore.go | 6 +- src/cli/restore/sharepoint_test.go | 101 ++++++------- src/cli/testdata/cli.go | 88 ++++++++++++ 16 files changed, 651 insertions(+), 928 deletions(-) diff --git a/src/cli/backup/backup.go b/src/cli/backup/backup.go index 71cb4595c..8b6808a01 100644 --- a/src/cli/backup/backup.go +++ b/src/cli/backup/backup.go @@ -48,12 +48,12 @@ func AddCommands(cmd *cobra.Command) { for _, sc := range subCommandFuncs { subCommand := sc() - flags.AddAllProviderFlags(subCommand) - flags.AddAllStorageFlags(subCommand) backupC.AddCommand(subCommand) for _, addBackupTo := range serviceCommands { - addBackupTo(subCommand) + sc := addBackupTo(subCommand) + flags.AddAllProviderFlags(sc) + flags.AddAllStorageFlags(sc) } } } diff --git a/src/cli/backup/exchange_test.go b/src/cli/backup/exchange_test.go index b04f27f07..87b6f49c8 100644 --- a/src/cli/backup/exchange_test.go +++ b/src/cli/backup/exchange_test.go @@ -1,7 +1,6 @@ package backup import ( - "bytes" "fmt" "strconv" "testing" @@ -14,6 +13,7 @@ import ( "github.com/alcionai/corso/src/cli/flags" flagsTD "github.com/alcionai/corso/src/cli/flags/testdata" + cliTD "github.com/alcionai/corso/src/cli/testdata" "github.com/alcionai/corso/src/cli/utils" utilsTD "github.com/alcionai/corso/src/cli/utils/testdata" "github.com/alcionai/corso/src/internal/tester" @@ -92,76 +92,46 @@ func (suite *ExchangeUnitSuite) TestAddExchangeCommands() { func (suite *ExchangeUnitSuite) TestBackupCreateFlags() { t := suite.T() - cmd := &cobra.Command{Use: createCommand} - - // persistent flags not added by addCommands - flags.AddRunModeFlag(cmd, true) - - c := addExchangeCommands(cmd) - require.NotNil(t, c) - - // non-persistent flags not added by addCommands - flags.AddAllProviderFlags(c) - flags.AddAllStorageFlags(c) - - flagsTD.WithFlags( - cmd, - exchangeServiceCommand, - []string{ - "--" + flags.RunModeFN, flags.RunModeFlagTest, - "--" + flags.BackupFN, flagsTD.BackupInput, + cmd := cliTD.SetUpCmdHasFlags( + t, + &cobra.Command{Use: createCommand}, + addExchangeCommands, + []cliTD.UseCobraCommandFn{ + flags.AddAllProviderFlags, + flags.AddAllStorageFlags, }, - flagsTD.PreparedProviderFlags(), - flagsTD.PreparedStorageFlags()) + flagsTD.WithFlags( + exchangeServiceCommand, + []string{ + "--" + flags.RunModeFN, flags.RunModeFlagTest, + "--" + flags.MailBoxFN, flagsTD.FlgInputs(flagsTD.MailboxInput), + "--" + flags.CategoryDataFN, flagsTD.FlgInputs(flagsTD.ExchangeCategoryDataInput), + "--" + flags.FetchParallelismFN, flagsTD.FetchParallelism, + "--" + flags.DeltaPageSizeFN, flagsTD.DeltaPageSize, - // Test arg parsing for few args - args := []string{ - exchangeServiceCommand, - "--" + flags.RunModeFN, flags.RunModeFlagTest, - - "--" + flags.MailBoxFN, 
flagsTD.FlgInputs(flagsTD.MailboxInput), - "--" + flags.CategoryDataFN, flagsTD.FlgInputs(flagsTD.ExchangeCategoryDataInput), - - "--" + flags.FetchParallelismFN, flagsTD.FetchParallelism, - "--" + flags.DeltaPageSizeFN, flagsTD.DeltaPageSize, - - // bool flags - "--" + flags.FailFastFN, - "--" + flags.DisableIncrementalsFN, - "--" + flags.ForceItemDataDownloadFN, - "--" + flags.DisableDeltaFN, - "--" + flags.EnableImmutableIDFN, - "--" + flags.DisableConcurrencyLimiterFN, - } - - args = append(args, flagsTD.PreparedProviderFlags()...) - args = append(args, flagsTD.PreparedStorageFlags()...) - - cmd.SetArgs(args) - - cmd.SetOut(new(bytes.Buffer)) // drop output - cmd.SetErr(new(bytes.Buffer)) // drop output - - err := cmd.Execute() - assert.NoError(t, err, clues.ToCore(err)) + // bool flags + "--" + flags.FailFastFN, + "--" + flags.DisableIncrementalsFN, + "--" + flags.ForceItemDataDownloadFN, + "--" + flags.DisableDeltaFN, + "--" + flags.EnableImmutableIDFN, + "--" + flags.DisableConcurrencyLimiterFN, + }, + flagsTD.PreparedProviderFlags(), + flagsTD.PreparedStorageFlags())) opts := utils.MakeExchangeOpts(cmd) co := utils.Control() assert.ElementsMatch(t, flagsTD.MailboxInput, opts.Users) - // no assertion for category data input - assert.Equal(t, flagsTD.FetchParallelism, strconv.Itoa(co.Parallelism.ItemFetch)) assert.Equal(t, flagsTD.DeltaPageSize, strconv.Itoa(int(co.DeltaPageSize))) - - // bool flags assert.Equal(t, control.FailFast, co.FailureHandling) assert.True(t, co.ToggleFeatures.DisableIncrementals) assert.True(t, co.ToggleFeatures.ForceItemDataDownload) assert.True(t, co.ToggleFeatures.DisableDelta) assert.True(t, co.ToggleFeatures.ExchangeImmutableIDs) assert.True(t, co.ToggleFeatures.DisableConcurrencyLimiter) - flagsTD.AssertProviderFlags(t, cmd) flagsTD.AssertStorageFlags(t, cmd) } @@ -169,36 +139,25 @@ func (suite *ExchangeUnitSuite) TestBackupCreateFlags() { func (suite *ExchangeUnitSuite) TestBackupListFlags() { t := suite.T() - cmd := &cobra.Command{Use: listCommand} - - // persistent flags not added by addCommands - flags.AddRunModeFlag(cmd, true) - - c := addExchangeCommands(cmd) - require.NotNil(t, c) - - // non-persistent flags not added by addCommands - flags.AddAllProviderFlags(c) - flags.AddAllStorageFlags(c) - - flagsTD.WithFlags( - cmd, - exchangeServiceCommand, []string{ - "--" + flags.RunModeFN, flags.RunModeFlagTest, - "--" + flags.BackupFN, flagsTD.BackupInput, + cmd := cliTD.SetUpCmdHasFlags( + t, + &cobra.Command{Use: listCommand}, + addExchangeCommands, + []cliTD.UseCobraCommandFn{ + flags.AddAllProviderFlags, + flags.AddAllStorageFlags, }, - flagsTD.PreparedBackupListFlags(), - flagsTD.PreparedProviderFlags(), - flagsTD.PreparedStorageFlags()) - - cmd.SetOut(new(bytes.Buffer)) // drop output - cmd.SetErr(new(bytes.Buffer)) // drop output - - err := cmd.Execute() - assert.NoError(t, err, clues.ToCore(err)) + flagsTD.WithFlags( + exchangeServiceCommand, + []string{ + "--" + flags.RunModeFN, flags.RunModeFlagTest, + "--" + flags.BackupFN, flagsTD.BackupInput, + }, + flagsTD.PreparedBackupListFlags(), + flagsTD.PreparedProviderFlags(), + flagsTD.PreparedStorageFlags())) assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV) - flagsTD.AssertBackupListFlags(t, cmd) flagsTD.AssertProviderFlags(t, cmd) flagsTD.AssertStorageFlags(t, cmd) @@ -207,41 +166,28 @@ func (suite *ExchangeUnitSuite) TestBackupListFlags() { func (suite *ExchangeUnitSuite) TestBackupDetailsFlags() { t := suite.T() - cmd := &cobra.Command{Use: detailsCommand} - - // persistent flags not 
added by addCommands - flags.AddRunModeFlag(cmd, true) - - c := addExchangeCommands(cmd) - require.NotNil(t, c) - - // non-persistent flags not added by addCommands - flags.AddAllProviderFlags(c) - flags.AddAllStorageFlags(c) - - flagsTD.WithFlags( - cmd, - exchangeServiceCommand, - []string{ - "--" + flags.RunModeFN, flags.RunModeFlagTest, - "--" + flags.BackupFN, flagsTD.BackupInput, - "--" + flags.SkipReduceFN, + cmd := cliTD.SetUpCmdHasFlags( + t, + &cobra.Command{Use: detailsCommand}, + addExchangeCommands, + []cliTD.UseCobraCommandFn{ + flags.AddAllProviderFlags, + flags.AddAllStorageFlags, }, - flagsTD.PreparedProviderFlags(), - flagsTD.PreparedStorageFlags()) - - cmd.SetOut(new(bytes.Buffer)) // drop output - cmd.SetErr(new(bytes.Buffer)) // drop output - - err := cmd.Execute() - assert.NoError(t, err, clues.ToCore(err)) + flagsTD.WithFlags( + exchangeServiceCommand, + []string{ + "--" + flags.RunModeFN, flags.RunModeFlagTest, + "--" + flags.BackupFN, flagsTD.BackupInput, + "--" + flags.SkipReduceFN, + }, + flagsTD.PreparedProviderFlags(), + flagsTD.PreparedStorageFlags())) co := utils.Control() assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV) - assert.True(t, co.SkipReduce) - flagsTD.AssertProviderFlags(t, cmd) flagsTD.AssertStorageFlags(t, cmd) } @@ -249,36 +195,24 @@ func (suite *ExchangeUnitSuite) TestBackupDetailsFlags() { func (suite *ExchangeUnitSuite) TestBackupDeleteFlags() { t := suite.T() - cmd := &cobra.Command{Use: deleteCommand} - - // persistent flags not added by addCommands - flags.AddRunModeFlag(cmd, true) - - c := addExchangeCommands(cmd) - require.NotNil(t, c) - - // non-persistent flags not added by addCommands - flags.AddAllProviderFlags(c) - flags.AddAllStorageFlags(c) - - flagsTD.WithFlags( - cmd, - exchangeServiceCommand, - []string{ - "--" + flags.RunModeFN, flags.RunModeFlagTest, - "--" + flags.BackupFN, flagsTD.BackupInput, + cmd := cliTD.SetUpCmdHasFlags( + t, + &cobra.Command{Use: deleteCommand}, + addExchangeCommands, + []cliTD.UseCobraCommandFn{ + flags.AddAllProviderFlags, + flags.AddAllStorageFlags, }, - flagsTD.PreparedProviderFlags(), - flagsTD.PreparedStorageFlags()) - - cmd.SetOut(new(bytes.Buffer)) // drop output - cmd.SetErr(new(bytes.Buffer)) // drop output - - err := cmd.Execute() - assert.NoError(t, err, clues.ToCore(err)) + flagsTD.WithFlags( + exchangeServiceCommand, + []string{ + "--" + flags.RunModeFN, flags.RunModeFlagTest, + "--" + flags.BackupFN, flagsTD.BackupInput, + }, + flagsTD.PreparedProviderFlags(), + flagsTD.PreparedStorageFlags())) assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV) - flagsTD.AssertProviderFlags(t, cmd) flagsTD.AssertStorageFlags(t, cmd) } diff --git a/src/cli/backup/groups_test.go b/src/cli/backup/groups_test.go index 8829915c4..996a9126f 100644 --- a/src/cli/backup/groups_test.go +++ b/src/cli/backup/groups_test.go @@ -1,7 +1,6 @@ package backup import ( - "bytes" "strconv" "testing" @@ -13,6 +12,7 @@ import ( "github.com/alcionai/corso/src/cli/flags" flagsTD "github.com/alcionai/corso/src/cli/flags/testdata" + cliTD "github.com/alcionai/corso/src/cli/testdata" "github.com/alcionai/corso/src/cli/utils" "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/pkg/control" @@ -128,70 +128,38 @@ func (suite *GroupsUnitSuite) TestValidateGroupsBackupCreateFlags() { func (suite *GroupsUnitSuite) TestBackupCreateFlags() { t := suite.T() - cmd := &cobra.Command{Use: createCommand} - - // persistent flags not added by addCommands - flags.AddRunModeFlag(cmd, true) - - c := 
addGroupsCommands(cmd) - require.NotNil(t, c) - - // non-persistent flags not added by addCommands - flags.AddAllProviderFlags(c) - flags.AddAllStorageFlags(c) - - flagsTD.WithFlags( - cmd, - groupsServiceCommand, - []string{ - "--" + flags.RunModeFN, flags.RunModeFlagTest, - "--" + flags.BackupFN, flagsTD.BackupInput, + cmd := cliTD.SetUpCmdHasFlags( + t, + &cobra.Command{Use: createCommand}, + addGroupsCommands, + []cliTD.UseCobraCommandFn{ + flags.AddAllProviderFlags, + flags.AddAllStorageFlags, }, - flagsTD.PreparedProviderFlags(), - flagsTD.PreparedStorageFlags()) - - // Test arg parsing for few args - args := []string{ - groupsServiceCommand, - "--" + flags.RunModeFN, flags.RunModeFlagTest, - - "--" + flags.GroupFN, flagsTD.FlgInputs(flagsTD.GroupsInput), - "--" + flags.CategoryDataFN, flagsTD.FlgInputs(flagsTD.GroupsCategoryDataInput), - - "--" + flags.FetchParallelismFN, flagsTD.FetchParallelism, - - // bool flags - "--" + flags.FailFastFN, - "--" + flags.DisableIncrementalsFN, - "--" + flags.ForceItemDataDownloadFN, - "--" + flags.DisableDeltaFN, - } - - args = append(args, flagsTD.PreparedProviderFlags()...) - args = append(args, flagsTD.PreparedStorageFlags()...) - - cmd.SetArgs(args) - - cmd.SetOut(new(bytes.Buffer)) // drop output - cmd.SetErr(new(bytes.Buffer)) // drop output - - err := cmd.Execute() - assert.NoError(t, err, clues.ToCore(err)) + flagsTD.WithFlags( + groupsServiceCommand, + []string{ + "--" + flags.RunModeFN, flags.RunModeFlagTest, + "--" + flags.GroupFN, flagsTD.FlgInputs(flagsTD.GroupsInput), + "--" + flags.CategoryDataFN, flagsTD.FlgInputs(flagsTD.GroupsCategoryDataInput), + "--" + flags.FetchParallelismFN, flagsTD.FetchParallelism, + "--" + flags.FailFastFN, + "--" + flags.DisableIncrementalsFN, + "--" + flags.ForceItemDataDownloadFN, + "--" + flags.DisableDeltaFN, + }, + flagsTD.PreparedProviderFlags(), + flagsTD.PreparedStorageFlags())) opts := utils.MakeGroupsOpts(cmd) co := utils.Control() assert.ElementsMatch(t, flagsTD.GroupsInput, opts.Groups) - // no assertion for category data input - assert.Equal(t, flagsTD.FetchParallelism, strconv.Itoa(co.Parallelism.ItemFetch)) - - // bool flags assert.Equal(t, control.FailFast, co.FailureHandling) assert.True(t, co.ToggleFeatures.DisableIncrementals) assert.True(t, co.ToggleFeatures.ForceItemDataDownload) assert.True(t, co.ToggleFeatures.DisableDelta) - flagsTD.AssertProviderFlags(t, cmd) flagsTD.AssertStorageFlags(t, cmd) } @@ -199,37 +167,25 @@ func (suite *GroupsUnitSuite) TestBackupCreateFlags() { func (suite *GroupsUnitSuite) TestBackupListFlags() { t := suite.T() - cmd := &cobra.Command{Use: listCommand} - - // persistent flags not added by addCommands - flags.AddRunModeFlag(cmd, true) - - c := addGroupsCommands(cmd) - require.NotNil(t, c) - - // non-persistent flags not added by addCommands - flags.AddAllProviderFlags(c) - flags.AddAllStorageFlags(c) - - flagsTD.WithFlags( - cmd, - groupsServiceCommand, - []string{ - "--" + flags.RunModeFN, flags.RunModeFlagTest, - "--" + flags.BackupFN, flagsTD.BackupInput, + cmd := cliTD.SetUpCmdHasFlags( + t, + &cobra.Command{Use: listCommand}, + addGroupsCommands, + []cliTD.UseCobraCommandFn{ + flags.AddAllProviderFlags, + flags.AddAllStorageFlags, }, - flagsTD.PreparedBackupListFlags(), - flagsTD.PreparedProviderFlags(), - flagsTD.PreparedStorageFlags()) - - cmd.SetOut(new(bytes.Buffer)) // drop output - cmd.SetErr(new(bytes.Buffer)) // drop output - - err := cmd.Execute() - assert.NoError(t, err, clues.ToCore(err)) + flagsTD.WithFlags( + groupsServiceCommand, + 
[]string{ + "--" + flags.RunModeFN, flags.RunModeFlagTest, + "--" + flags.BackupFN, flagsTD.BackupInput, + }, + flagsTD.PreparedBackupListFlags(), + flagsTD.PreparedProviderFlags(), + flagsTD.PreparedStorageFlags())) assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV) - flagsTD.AssertBackupListFlags(t, cmd) flagsTD.AssertProviderFlags(t, cmd) flagsTD.AssertStorageFlags(t, cmd) @@ -238,41 +194,28 @@ func (suite *GroupsUnitSuite) TestBackupListFlags() { func (suite *GroupsUnitSuite) TestBackupDetailsFlags() { t := suite.T() - cmd := &cobra.Command{Use: detailsCommand} - - // persistent flags not added by addCommands - flags.AddRunModeFlag(cmd, true) - - c := addGroupsCommands(cmd) - require.NotNil(t, c) - - // non-persistent flags not added by addCommands - flags.AddAllProviderFlags(c) - flags.AddAllStorageFlags(c) - - flagsTD.WithFlags( - cmd, - groupsServiceCommand, - []string{ - "--" + flags.RunModeFN, flags.RunModeFlagTest, - "--" + flags.BackupFN, flagsTD.BackupInput, - "--" + flags.SkipReduceFN, + cmd := cliTD.SetUpCmdHasFlags( + t, + &cobra.Command{Use: detailsCommand}, + addGroupsCommands, + []cliTD.UseCobraCommandFn{ + flags.AddAllProviderFlags, + flags.AddAllStorageFlags, }, - flagsTD.PreparedProviderFlags(), - flagsTD.PreparedStorageFlags()) - - cmd.SetOut(new(bytes.Buffer)) // drop output - cmd.SetErr(new(bytes.Buffer)) // drop output - - err := cmd.Execute() - assert.NoError(t, err, clues.ToCore(err)) + flagsTD.WithFlags( + groupsServiceCommand, + []string{ + "--" + flags.RunModeFN, flags.RunModeFlagTest, + "--" + flags.BackupFN, flagsTD.BackupInput, + "--" + flags.SkipReduceFN, + }, + flagsTD.PreparedProviderFlags(), + flagsTD.PreparedStorageFlags())) co := utils.Control() assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV) - assert.True(t, co.SkipReduce) - flagsTD.AssertProviderFlags(t, cmd) flagsTD.AssertStorageFlags(t, cmd) } @@ -280,48 +223,24 @@ func (suite *GroupsUnitSuite) TestBackupDetailsFlags() { func (suite *GroupsUnitSuite) TestBackupDeleteFlags() { t := suite.T() - cmd := &cobra.Command{Use: deleteCommand} - - // persistent flags not added by addCommands - flags.AddRunModeFlag(cmd, true) - - c := addGroupsCommands(cmd) - require.NotNil(t, c) - - // non-persistent flags not added by addCommands - flags.AddAllProviderFlags(c) - flags.AddAllStorageFlags(c) - - flagsTD.WithFlags( - cmd, - groupsServiceCommand, - []string{ - "--" + flags.RunModeFN, flags.RunModeFlagTest, - "--" + flags.BackupFN, flagsTD.BackupInput, + cmd := cliTD.SetUpCmdHasFlags( + t, + &cobra.Command{Use: deleteCommand}, + addGroupsCommands, + []cliTD.UseCobraCommandFn{ + flags.AddAllProviderFlags, + flags.AddAllStorageFlags, }, - flagsTD.PreparedProviderFlags(), - flagsTD.PreparedStorageFlags()) - - // Test arg parsing for few args - args := []string{ - groupsServiceCommand, - "--" + flags.RunModeFN, flags.RunModeFlagTest, - "--" + flags.BackupFN, flagsTD.BackupInput, - } - - args = append(args, flagsTD.PreparedProviderFlags()...) - args = append(args, flagsTD.PreparedStorageFlags()...) 
- - cmd.SetArgs(args) - - cmd.SetOut(new(bytes.Buffer)) // drop output - cmd.SetErr(new(bytes.Buffer)) // drop output - - err := cmd.Execute() - assert.NoError(t, err, clues.ToCore(err)) + flagsTD.WithFlags( + groupsServiceCommand, + []string{ + "--" + flags.RunModeFN, flags.RunModeFlagTest, + "--" + flags.BackupFN, flagsTD.BackupInput, + }, + flagsTD.PreparedProviderFlags(), + flagsTD.PreparedStorageFlags())) assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV) - flagsTD.AssertProviderFlags(t, cmd) flagsTD.AssertStorageFlags(t, cmd) } diff --git a/src/cli/backup/onedrive_test.go b/src/cli/backup/onedrive_test.go index 340f598dc..6d0e0b202 100644 --- a/src/cli/backup/onedrive_test.go +++ b/src/cli/backup/onedrive_test.go @@ -1,7 +1,6 @@ package backup import ( - "bytes" "fmt" "testing" @@ -13,6 +12,7 @@ import ( "github.com/alcionai/corso/src/cli/flags" flagsTD "github.com/alcionai/corso/src/cli/flags/testdata" + cliTD "github.com/alcionai/corso/src/cli/testdata" "github.com/alcionai/corso/src/cli/utils" utilsTD "github.com/alcionai/corso/src/cli/utils/testdata" "github.com/alcionai/corso/src/internal/tester" @@ -92,48 +92,33 @@ func (suite *OneDriveUnitSuite) TestAddOneDriveCommands() { func (suite *OneDriveUnitSuite) TestBackupCreateFlags() { t := suite.T() - cmd := &cobra.Command{Use: createCommand} - - // persistent flags not added by addCommands - flags.AddRunModeFlag(cmd, true) - - c := addOneDriveCommands(cmd) - require.NotNil(t, c) - - // non-persistent flags not added by addCommands - flags.AddAllProviderFlags(c) - flags.AddAllStorageFlags(c) - - flagsTD.WithFlags( - cmd, - oneDriveServiceCommand, - []string{ - "--" + flags.RunModeFN, flags.RunModeFlagTest, - "--" + flags.UserFN, flagsTD.FlgInputs(flagsTD.UsersInput), - "--" + flags.FailFastFN, - "--" + flags.DisableIncrementalsFN, - "--" + flags.ForceItemDataDownloadFN, + cmd := cliTD.SetUpCmdHasFlags( + t, + &cobra.Command{Use: createCommand}, + addOneDriveCommands, + []cliTD.UseCobraCommandFn{ + flags.AddAllProviderFlags, + flags.AddAllStorageFlags, }, - flagsTD.PreparedProviderFlags(), - flagsTD.PreparedStorageFlags()) - - cmd.SetOut(new(bytes.Buffer)) // drop output - cmd.SetErr(new(bytes.Buffer)) // drop output - - err := cmd.Execute() - assert.NoError(t, err, clues.ToCore(err)) + flagsTD.WithFlags( + oneDriveServiceCommand, + []string{ + "--" + flags.RunModeFN, flags.RunModeFlagTest, + "--" + flags.UserFN, flagsTD.FlgInputs(flagsTD.UsersInput), + "--" + flags.FailFastFN, + "--" + flags.DisableIncrementalsFN, + "--" + flags.ForceItemDataDownloadFN, + }, + flagsTD.PreparedProviderFlags(), + flagsTD.PreparedStorageFlags())) opts := utils.MakeOneDriveOpts(cmd) co := utils.Control() assert.ElementsMatch(t, flagsTD.UsersInput, opts.Users) - // no assertion for category data input - - // bool flags assert.Equal(t, control.FailFast, co.FailureHandling) assert.True(t, co.ToggleFeatures.DisableIncrementals) assert.True(t, co.ToggleFeatures.ForceItemDataDownload) - flagsTD.AssertProviderFlags(t, cmd) flagsTD.AssertStorageFlags(t, cmd) } @@ -141,37 +126,25 @@ func (suite *OneDriveUnitSuite) TestBackupCreateFlags() { func (suite *OneDriveUnitSuite) TestBackupListFlags() { t := suite.T() - cmd := &cobra.Command{Use: listCommand} - - // persistent flags not added by addCommands - flags.AddRunModeFlag(cmd, true) - - c := addOneDriveCommands(cmd) - require.NotNil(t, c) - - // non-persistent flags not added by addCommands - flags.AddAllProviderFlags(c) - flags.AddAllStorageFlags(c) - - flagsTD.WithFlags( - cmd, - oneDriveServiceCommand, - 
[]string{ - "--" + flags.RunModeFN, flags.RunModeFlagTest, - "--" + flags.BackupFN, flagsTD.BackupInput, + cmd := cliTD.SetUpCmdHasFlags( + t, + &cobra.Command{Use: listCommand}, + addOneDriveCommands, + []cliTD.UseCobraCommandFn{ + flags.AddAllProviderFlags, + flags.AddAllStorageFlags, }, - flagsTD.PreparedBackupListFlags(), - flagsTD.PreparedProviderFlags(), - flagsTD.PreparedStorageFlags()) - - cmd.SetOut(new(bytes.Buffer)) // drop output - cmd.SetErr(new(bytes.Buffer)) // drop output - - err := cmd.Execute() - assert.NoError(t, err, clues.ToCore(err)) + flagsTD.WithFlags( + oneDriveServiceCommand, + []string{ + "--" + flags.RunModeFN, flags.RunModeFlagTest, + "--" + flags.BackupFN, flagsTD.BackupInput, + }, + flagsTD.PreparedBackupListFlags(), + flagsTD.PreparedProviderFlags(), + flagsTD.PreparedStorageFlags())) assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV) - flagsTD.AssertBackupListFlags(t, cmd) flagsTD.AssertProviderFlags(t, cmd) flagsTD.AssertStorageFlags(t, cmd) @@ -180,41 +153,28 @@ func (suite *OneDriveUnitSuite) TestBackupListFlags() { func (suite *OneDriveUnitSuite) TestBackupDetailsFlags() { t := suite.T() - cmd := &cobra.Command{Use: detailsCommand} - - // persistent flags not added by addCommands - flags.AddRunModeFlag(cmd, true) - - c := addOneDriveCommands(cmd) - require.NotNil(t, c) - - // non-persistent flags not added by addCommands - flags.AddAllProviderFlags(c) - flags.AddAllStorageFlags(c) - - flagsTD.WithFlags( - cmd, - oneDriveServiceCommand, - []string{ - "--" + flags.RunModeFN, flags.RunModeFlagTest, - "--" + flags.BackupFN, flagsTD.BackupInput, - "--" + flags.SkipReduceFN, + cmd := cliTD.SetUpCmdHasFlags( + t, + &cobra.Command{Use: detailsCommand}, + addOneDriveCommands, + []cliTD.UseCobraCommandFn{ + flags.AddAllProviderFlags, + flags.AddAllStorageFlags, }, - flagsTD.PreparedProviderFlags(), - flagsTD.PreparedStorageFlags()) - - cmd.SetOut(new(bytes.Buffer)) // drop output - cmd.SetErr(new(bytes.Buffer)) // drop output - - err := cmd.Execute() - assert.NoError(t, err, clues.ToCore(err)) + flagsTD.WithFlags( + oneDriveServiceCommand, + []string{ + "--" + flags.RunModeFN, flags.RunModeFlagTest, + "--" + flags.BackupFN, flagsTD.BackupInput, + "--" + flags.SkipReduceFN, + }, + flagsTD.PreparedProviderFlags(), + flagsTD.PreparedStorageFlags())) co := utils.Control() - assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV) - assert.True(t, co.SkipReduce) - + assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV) flagsTD.AssertProviderFlags(t, cmd) flagsTD.AssertStorageFlags(t, cmd) } @@ -222,36 +182,24 @@ func (suite *OneDriveUnitSuite) TestBackupDetailsFlags() { func (suite *OneDriveUnitSuite) TestBackupDeleteFlags() { t := suite.T() - cmd := &cobra.Command{Use: deleteCommand} - - // persistent flags not added by addCommands - flags.AddRunModeFlag(cmd, true) - - c := addOneDriveCommands(cmd) - require.NotNil(t, c) - - // non-persistent flags not added by addCommands - flags.AddAllProviderFlags(c) - flags.AddAllStorageFlags(c) - - flagsTD.WithFlags( - cmd, - oneDriveServiceCommand, - []string{ - "--" + flags.RunModeFN, flags.RunModeFlagTest, - "--" + flags.BackupFN, flagsTD.BackupInput, + cmd := cliTD.SetUpCmdHasFlags( + t, + &cobra.Command{Use: deleteCommand}, + addOneDriveCommands, + []cliTD.UseCobraCommandFn{ + flags.AddAllProviderFlags, + flags.AddAllStorageFlags, }, - flagsTD.PreparedProviderFlags(), - flagsTD.PreparedStorageFlags()) - - cmd.SetOut(new(bytes.Buffer)) // drop output - cmd.SetErr(new(bytes.Buffer)) // drop output - - err := cmd.Execute() 
- assert.NoError(t, err, clues.ToCore(err)) + flagsTD.WithFlags( + oneDriveServiceCommand, + []string{ + "--" + flags.RunModeFN, flags.RunModeFlagTest, + "--" + flags.BackupFN, flagsTD.BackupInput, + }, + flagsTD.PreparedProviderFlags(), + flagsTD.PreparedStorageFlags())) assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV) - flagsTD.AssertProviderFlags(t, cmd) flagsTD.AssertStorageFlags(t, cmd) } diff --git a/src/cli/backup/sharepoint_test.go b/src/cli/backup/sharepoint_test.go index fd724d83b..f09bbe878 100644 --- a/src/cli/backup/sharepoint_test.go +++ b/src/cli/backup/sharepoint_test.go @@ -1,7 +1,6 @@ package backup import ( - "bytes" "fmt" "strings" "testing" @@ -14,6 +13,7 @@ import ( "github.com/alcionai/corso/src/cli/flags" flagsTD "github.com/alcionai/corso/src/cli/flags/testdata" + cliTD "github.com/alcionai/corso/src/cli/testdata" "github.com/alcionai/corso/src/cli/utils" utilsTD "github.com/alcionai/corso/src/cli/utils/testdata" "github.com/alcionai/corso/src/internal/common/idname" @@ -94,51 +94,36 @@ func (suite *SharePointUnitSuite) TestAddSharePointCommands() { func (suite *SharePointUnitSuite) TestBackupCreateFlags() { t := suite.T() - cmd := &cobra.Command{Use: createCommand} - - // persistent flags not added by addCommands - flags.AddRunModeFlag(cmd, true) - - c := addSharePointCommands(cmd) - require.NotNil(t, c) - - // non-persistent flags not added by addCommands - flags.AddAllProviderFlags(c) - flags.AddAllStorageFlags(c) - - flagsTD.WithFlags( - cmd, - sharePointServiceCommand, - []string{ - "--" + flags.RunModeFN, flags.RunModeFlagTest, - "--" + flags.SiteIDFN, flagsTD.FlgInputs(flagsTD.SiteIDInput), - "--" + flags.SiteFN, flagsTD.FlgInputs(flagsTD.WebURLInput), - "--" + flags.CategoryDataFN, flagsTD.FlgInputs(flagsTD.SharepointCategoryDataInput), - "--" + flags.FailFastFN, - "--" + flags.DisableIncrementalsFN, - "--" + flags.ForceItemDataDownloadFN, + cmd := cliTD.SetUpCmdHasFlags( + t, + &cobra.Command{Use: createCommand}, + addSharePointCommands, + []cliTD.UseCobraCommandFn{ + flags.AddAllProviderFlags, + flags.AddAllStorageFlags, }, - flagsTD.PreparedProviderFlags(), - flagsTD.PreparedStorageFlags()) - - cmd.SetOut(new(bytes.Buffer)) // drop output - cmd.SetErr(new(bytes.Buffer)) // drop output - - err := cmd.Execute() - assert.NoError(t, err, clues.ToCore(err)) + flagsTD.WithFlags( + sharePointServiceCommand, + []string{ + "--" + flags.RunModeFN, flags.RunModeFlagTest, + "--" + flags.SiteIDFN, flagsTD.FlgInputs(flagsTD.SiteIDInput), + "--" + flags.SiteFN, flagsTD.FlgInputs(flagsTD.WebURLInput), + "--" + flags.CategoryDataFN, flagsTD.FlgInputs(flagsTD.SharepointCategoryDataInput), + "--" + flags.FailFastFN, + "--" + flags.DisableIncrementalsFN, + "--" + flags.ForceItemDataDownloadFN, + }, + flagsTD.PreparedProviderFlags(), + flagsTD.PreparedStorageFlags())) opts := utils.MakeSharePointOpts(cmd) co := utils.Control() assert.ElementsMatch(t, []string{strings.Join(flagsTD.SiteIDInput, ",")}, opts.SiteID) assert.ElementsMatch(t, flagsTD.WebURLInput, opts.WebURL) - // no assertion for category data input - - // bool flags assert.Equal(t, control.FailFast, co.FailureHandling) assert.True(t, co.ToggleFeatures.DisableIncrementals) assert.True(t, co.ToggleFeatures.ForceItemDataDownload) - flagsTD.AssertProviderFlags(t, cmd) flagsTD.AssertStorageFlags(t, cmd) } @@ -146,37 +131,25 @@ func (suite *SharePointUnitSuite) TestBackupCreateFlags() { func (suite *SharePointUnitSuite) TestBackupListFlags() { t := suite.T() - cmd := &cobra.Command{Use: listCommand} - - // 
persistent flags not added by addCommands - flags.AddRunModeFlag(cmd, true) - - c := addSharePointCommands(cmd) - require.NotNil(t, c) - - // non-persistent flags not added by addCommands - flags.AddAllProviderFlags(c) - flags.AddAllStorageFlags(c) - - flagsTD.WithFlags( - cmd, - sharePointServiceCommand, - []string{ - "--" + flags.RunModeFN, flags.RunModeFlagTest, - "--" + flags.BackupFN, flagsTD.BackupInput, + cmd := cliTD.SetUpCmdHasFlags( + t, + &cobra.Command{Use: listCommand}, + addSharePointCommands, + []cliTD.UseCobraCommandFn{ + flags.AddAllProviderFlags, + flags.AddAllStorageFlags, }, - flagsTD.PreparedBackupListFlags(), - flagsTD.PreparedProviderFlags(), - flagsTD.PreparedStorageFlags()) - - cmd.SetOut(new(bytes.Buffer)) // drop output - cmd.SetErr(new(bytes.Buffer)) // drop output - - err := cmd.Execute() - assert.NoError(t, err, clues.ToCore(err)) + flagsTD.WithFlags( + sharePointServiceCommand, + []string{ + "--" + flags.RunModeFN, flags.RunModeFlagTest, + "--" + flags.BackupFN, flagsTD.BackupInput, + }, + flagsTD.PreparedBackupListFlags(), + flagsTD.PreparedProviderFlags(), + flagsTD.PreparedStorageFlags())) assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV) - flagsTD.AssertBackupListFlags(t, cmd) flagsTD.AssertProviderFlags(t, cmd) flagsTD.AssertStorageFlags(t, cmd) @@ -185,41 +158,28 @@ func (suite *SharePointUnitSuite) TestBackupListFlags() { func (suite *SharePointUnitSuite) TestBackupDetailsFlags() { t := suite.T() - cmd := &cobra.Command{Use: detailsCommand} - - // persistent flags not added by addCommands - flags.AddRunModeFlag(cmd, true) - - c := addSharePointCommands(cmd) - require.NotNil(t, c) - - // non-persistent flags not added by addCommands - flags.AddAllProviderFlags(c) - flags.AddAllStorageFlags(c) - - flagsTD.WithFlags( - cmd, - sharePointServiceCommand, - []string{ - "--" + flags.RunModeFN, flags.RunModeFlagTest, - "--" + flags.BackupFN, flagsTD.BackupInput, - "--" + flags.SkipReduceFN, + cmd := cliTD.SetUpCmdHasFlags( + t, + &cobra.Command{Use: detailsCommand}, + addSharePointCommands, + []cliTD.UseCobraCommandFn{ + flags.AddAllProviderFlags, + flags.AddAllStorageFlags, }, - flagsTD.PreparedProviderFlags(), - flagsTD.PreparedStorageFlags()) - - cmd.SetOut(new(bytes.Buffer)) // drop output - cmd.SetErr(new(bytes.Buffer)) // drop output - - err := cmd.Execute() - assert.NoError(t, err, clues.ToCore(err)) + flagsTD.WithFlags( + sharePointServiceCommand, + []string{ + "--" + flags.RunModeFN, flags.RunModeFlagTest, + "--" + flags.BackupFN, flagsTD.BackupInput, + "--" + flags.SkipReduceFN, + }, + flagsTD.PreparedProviderFlags(), + flagsTD.PreparedStorageFlags())) co := utils.Control() assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV) - assert.True(t, co.SkipReduce) - flagsTD.AssertProviderFlags(t, cmd) flagsTD.AssertStorageFlags(t, cmd) } @@ -227,36 +187,24 @@ func (suite *SharePointUnitSuite) TestBackupDetailsFlags() { func (suite *SharePointUnitSuite) TestBackupDeleteFlags() { t := suite.T() - cmd := &cobra.Command{Use: deleteCommand} - - // persistent flags not added by addCommands - flags.AddRunModeFlag(cmd, true) - - c := addSharePointCommands(cmd) - require.NotNil(t, c) - - // non-persistent flags not added by addCommands - flags.AddAllProviderFlags(c) - flags.AddAllStorageFlags(c) - - flagsTD.WithFlags( - cmd, - sharePointServiceCommand, - []string{ - "--" + flags.RunModeFN, flags.RunModeFlagTest, - "--" + flags.BackupFN, flagsTD.BackupInput, + cmd := cliTD.SetUpCmdHasFlags( + t, + &cobra.Command{Use: deleteCommand}, + addSharePointCommands, + 
[]cliTD.UseCobraCommandFn{ + flags.AddAllProviderFlags, + flags.AddAllStorageFlags, }, - flagsTD.PreparedProviderFlags(), - flagsTD.PreparedStorageFlags()) - - cmd.SetOut(new(bytes.Buffer)) // drop output - cmd.SetErr(new(bytes.Buffer)) // drop output - - err := cmd.Execute() - assert.NoError(t, err, clues.ToCore(err)) + flagsTD.WithFlags( + sharePointServiceCommand, + []string{ + "--" + flags.RunModeFN, flags.RunModeFlagTest, + "--" + flags.BackupFN, flagsTD.BackupInput, + }, + flagsTD.PreparedProviderFlags(), + flagsTD.PreparedStorageFlags())) assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV) - flagsTD.AssertProviderFlags(t, cmd) flagsTD.AssertStorageFlags(t, cmd) } diff --git a/src/cli/export/export.go b/src/cli/export/export.go index db48f466a..8415caea3 100644 --- a/src/cli/export/export.go +++ b/src/cli/export/export.go @@ -27,11 +27,11 @@ var exportCommands = []func(cmd *cobra.Command) *cobra.Command{ // AddCommands attaches all `corso export * *` commands to the parent. func AddCommands(cmd *cobra.Command) { subCommand := exportCmd() - flags.AddAllStorageFlags(subCommand) cmd.AddCommand(subCommand) for _, addExportTo := range exportCommands { - addExportTo(subCommand) + sc := addExportTo(subCommand) + flags.AddAllStorageFlags(sc) } } diff --git a/src/cli/export/groups_test.go b/src/cli/export/groups_test.go index 0f53bb6f8..3b75f0252 100644 --- a/src/cli/export/groups_test.go +++ b/src/cli/export/groups_test.go @@ -1,17 +1,15 @@ package export import ( - "bytes" "testing" - "github.com/alcionai/clues" "github.com/spf13/cobra" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "github.com/alcionai/corso/src/cli/flags" flagsTD "github.com/alcionai/corso/src/cli/flags/testdata" + cliTD "github.com/alcionai/corso/src/cli/testdata" "github.com/alcionai/corso/src/cli/utils" "github.com/alcionai/corso/src/internal/tester" ) @@ -39,55 +37,41 @@ func (suite *GroupsUnitSuite) TestAddGroupsCommands() { for _, test := range table { suite.Run(test.name, func() { t := suite.T() + parent := &cobra.Command{Use: exportCommand} - cmd := &cobra.Command{Use: test.use} - - // persistent flags not added by addCommands - flags.AddRunModeFlag(cmd, true) - - c := addGroupsCommands(cmd) - require.NotNil(t, c) - - // non-persistent flags not added by addCommands - flags.AddAllProviderFlags(c) - flags.AddAllStorageFlags(c) - - cmds := cmd.Commands() - require.Len(t, cmds, 1) - - child := cmds[0] - assert.Equal(t, test.expectUse, child.Use) - assert.Equal(t, test.expectShort, child.Short) - tester.AreSameFunc(t, test.expectRunE, child.RunE) - - flagsTD.WithFlags( - cmd, - groupsServiceCommand, - []string{ - flagsTD.RestoreDestination, - "--" + flags.RunModeFN, flags.RunModeFlagTest, - "--" + flags.BackupFN, flagsTD.BackupInput, - - "--" + flags.FormatFN, flagsTD.FormatType, - - // bool flags - "--" + flags.ArchiveFN, + cmd := cliTD.SetUpCmdHasFlags( + t, + parent, + addGroupsCommands, + []cliTD.UseCobraCommandFn{ + flags.AddAllProviderFlags, + flags.AddAllStorageFlags, }, - flagsTD.PreparedProviderFlags(), - flagsTD.PreparedStorageFlags()) + flagsTD.WithFlags( + groupsServiceCommand, + []string{ + flagsTD.RestoreDestination, + "--" + flags.RunModeFN, flags.RunModeFlagTest, + "--" + flags.BackupFN, flagsTD.BackupInput, + "--" + flags.FormatFN, flagsTD.FormatType, + "--" + flags.ArchiveFN, + }, + flagsTD.PreparedProviderFlags(), + flagsTD.PreparedStorageFlags())) - cmd.SetOut(new(bytes.Buffer)) // drop output - cmd.SetErr(new(bytes.Buffer)) // 
drop output - - err := cmd.Execute() - assert.NoError(t, err, clues.ToCore(err)) + cliTD.CheckCmdChild( + t, + parent, + 3, + test.expectUse, + test.expectShort, + test.expectRunE) opts := utils.MakeGroupsOpts(cmd) - assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV) + assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV) assert.Equal(t, flagsTD.Archive, opts.ExportCfg.Archive) assert.Equal(t, flagsTD.FormatType, opts.ExportCfg.Format) - flagsTD.AssertStorageFlags(t, cmd) }) } diff --git a/src/cli/export/onedrive_test.go b/src/cli/export/onedrive_test.go index 2049234ae..0afe6c437 100644 --- a/src/cli/export/onedrive_test.go +++ b/src/cli/export/onedrive_test.go @@ -1,17 +1,15 @@ package export import ( - "bytes" "testing" - "github.com/alcionai/clues" "github.com/spf13/cobra" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "github.com/alcionai/corso/src/cli/flags" flagsTD "github.com/alcionai/corso/src/cli/flags/testdata" + cliTD "github.com/alcionai/corso/src/cli/testdata" "github.com/alcionai/corso/src/cli/utils" "github.com/alcionai/corso/src/internal/tester" ) @@ -39,67 +37,55 @@ func (suite *OneDriveUnitSuite) TestAddOneDriveCommands() { for _, test := range table { suite.Run(test.name, func() { t := suite.T() + parent := &cobra.Command{Use: exportCommand} - cmd := &cobra.Command{Use: test.use} - - // persistent flags not added by addCommands - flags.AddRunModeFlag(cmd, true) - - c := addOneDriveCommands(cmd) - require.NotNil(t, c) - - // non-persistent flags not added by addCommands - flags.AddAllProviderFlags(c) - flags.AddAllStorageFlags(c) - - cmds := cmd.Commands() - require.Len(t, cmds, 1) - - child := cmds[0] - assert.Equal(t, test.expectUse, child.Use) - assert.Equal(t, test.expectShort, child.Short) - tester.AreSameFunc(t, test.expectRunE, child.RunE) - - flagsTD.WithFlags( - cmd, - oneDriveServiceCommand, - []string{ - flagsTD.RestoreDestination, - "--" + flags.RunModeFN, flags.RunModeFlagTest, - "--" + flags.BackupFN, flagsTD.BackupInput, - "--" + flags.FileFN, flagsTD.FlgInputs(flagsTD.FileNameInput), - "--" + flags.FolderFN, flagsTD.FlgInputs(flagsTD.FolderPathInput), - "--" + flags.FileCreatedAfterFN, flagsTD.FileCreatedAfterInput, - "--" + flags.FileCreatedBeforeFN, flagsTD.FileCreatedBeforeInput, - "--" + flags.FileModifiedAfterFN, flagsTD.FileModifiedAfterInput, - "--" + flags.FileModifiedBeforeFN, flagsTD.FileModifiedBeforeInput, - - "--" + flags.FormatFN, flagsTD.FormatType, - - // bool flags - "--" + flags.ArchiveFN, + cmd := cliTD.SetUpCmdHasFlags( + t, + parent, + addOneDriveCommands, + []cliTD.UseCobraCommandFn{ + flags.AddAllProviderFlags, + flags.AddAllStorageFlags, }, - flagsTD.PreparedProviderFlags(), - flagsTD.PreparedStorageFlags()) + flagsTD.WithFlags( + oneDriveServiceCommand, + []string{ + flagsTD.RestoreDestination, + "--" + flags.RunModeFN, flags.RunModeFlagTest, + "--" + flags.BackupFN, flagsTD.BackupInput, + "--" + flags.FileFN, flagsTD.FlgInputs(flagsTD.FileNameInput), + "--" + flags.FolderFN, flagsTD.FlgInputs(flagsTD.FolderPathInput), + "--" + flags.FileCreatedAfterFN, flagsTD.FileCreatedAfterInput, + "--" + flags.FileCreatedBeforeFN, flagsTD.FileCreatedBeforeInput, + "--" + flags.FileModifiedAfterFN, flagsTD.FileModifiedAfterInput, + "--" + flags.FileModifiedBeforeFN, flagsTD.FileModifiedBeforeInput, - cmd.SetOut(new(bytes.Buffer)) // drop output - cmd.SetErr(new(bytes.Buffer)) // drop output + "--" + flags.FormatFN, flagsTD.FormatType, - err := cmd.Execute() - 
assert.NoError(t, err, clues.ToCore(err)) + // bool flags + "--" + flags.ArchiveFN, + }, + flagsTD.PreparedProviderFlags(), + flagsTD.PreparedStorageFlags())) + + cliTD.CheckCmdChild( + t, + parent, + 3, + test.expectUse, + test.expectShort, + test.expectRunE) opts := utils.MakeOneDriveOpts(cmd) - assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV) + assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV) assert.ElementsMatch(t, flagsTD.FileNameInput, opts.FileName) assert.ElementsMatch(t, flagsTD.FolderPathInput, opts.FolderPath) assert.Equal(t, flagsTD.FileCreatedAfterInput, opts.FileCreatedAfter) assert.Equal(t, flagsTD.FileCreatedBeforeInput, opts.FileCreatedBefore) assert.Equal(t, flagsTD.FileModifiedAfterInput, opts.FileModifiedAfter) assert.Equal(t, flagsTD.FileModifiedBeforeInput, opts.FileModifiedBefore) - assert.Equal(t, flagsTD.CorsoPassphrase, flags.CorsoPassphraseFV) - flagsTD.AssertStorageFlags(t, cmd) }) } diff --git a/src/cli/export/sharepoint_test.go b/src/cli/export/sharepoint_test.go index affb060e1..4850173ca 100644 --- a/src/cli/export/sharepoint_test.go +++ b/src/cli/export/sharepoint_test.go @@ -1,17 +1,15 @@ package export import ( - "bytes" "testing" - "github.com/alcionai/clues" "github.com/spf13/cobra" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "github.com/alcionai/corso/src/cli/flags" flagsTD "github.com/alcionai/corso/src/cli/flags/testdata" + cliTD "github.com/alcionai/corso/src/cli/testdata" "github.com/alcionai/corso/src/cli/utils" "github.com/alcionai/corso/src/internal/tester" ) @@ -39,63 +37,50 @@ func (suite *SharePointUnitSuite) TestAddSharePointCommands() { for _, test := range table { suite.Run(test.name, func() { t := suite.T() + parent := &cobra.Command{Use: exportCommand} - cmd := &cobra.Command{Use: test.use} - - // persistent flags not added by addCommands - flags.AddRunModeFlag(cmd, true) - - c := addSharePointCommands(cmd) - require.NotNil(t, c) - - // non-persistent flags not added by addCommands - flags.AddAllProviderFlags(c) - flags.AddAllStorageFlags(c) - - cmds := cmd.Commands() - require.Len(t, cmds, 1) - - child := cmds[0] - assert.Equal(t, test.expectUse, child.Use) - assert.Equal(t, test.expectShort, child.Short) - tester.AreSameFunc(t, test.expectRunE, child.RunE) - - flagsTD.WithFlags( - cmd, - sharePointServiceCommand, - []string{ - flagsTD.RestoreDestination, - "--" + flags.RunModeFN, flags.RunModeFlagTest, - "--" + flags.BackupFN, flagsTD.BackupInput, - "--" + flags.LibraryFN, flagsTD.LibraryInput, - "--" + flags.FileFN, flagsTD.FlgInputs(flagsTD.FileNameInput), - "--" + flags.FolderFN, flagsTD.FlgInputs(flagsTD.FolderPathInput), - "--" + flags.FileCreatedAfterFN, flagsTD.FileCreatedAfterInput, - "--" + flags.FileCreatedBeforeFN, flagsTD.FileCreatedBeforeInput, - "--" + flags.FileModifiedAfterFN, flagsTD.FileModifiedAfterInput, - "--" + flags.FileModifiedBeforeFN, flagsTD.FileModifiedBeforeInput, - "--" + flags.ListItemFN, flagsTD.FlgInputs(flagsTD.ListItemInput), - "--" + flags.ListFolderFN, flagsTD.FlgInputs(flagsTD.ListFolderInput), - "--" + flags.PageFN, flagsTD.FlgInputs(flagsTD.PageInput), - "--" + flags.PageFolderFN, flagsTD.FlgInputs(flagsTD.PageFolderInput), - - "--" + flags.FormatFN, flagsTD.FormatType, - - // bool flags - "--" + flags.ArchiveFN, + cmd := cliTD.SetUpCmdHasFlags( + t, + parent, + addSharePointCommands, + []cliTD.UseCobraCommandFn{ + flags.AddAllProviderFlags, + flags.AddAllStorageFlags, }, - flagsTD.PreparedProviderFlags(), - 
flagsTD.PreparedStorageFlags()) + flagsTD.WithFlags( + sharePointServiceCommand, + []string{ + flagsTD.RestoreDestination, + "--" + flags.RunModeFN, flags.RunModeFlagTest, + "--" + flags.BackupFN, flagsTD.BackupInput, + "--" + flags.LibraryFN, flagsTD.LibraryInput, + "--" + flags.FileFN, flagsTD.FlgInputs(flagsTD.FileNameInput), + "--" + flags.FolderFN, flagsTD.FlgInputs(flagsTD.FolderPathInput), + "--" + flags.FileCreatedAfterFN, flagsTD.FileCreatedAfterInput, + "--" + flags.FileCreatedBeforeFN, flagsTD.FileCreatedBeforeInput, + "--" + flags.FileModifiedAfterFN, flagsTD.FileModifiedAfterInput, + "--" + flags.FileModifiedBeforeFN, flagsTD.FileModifiedBeforeInput, + "--" + flags.ListItemFN, flagsTD.FlgInputs(flagsTD.ListItemInput), + "--" + flags.ListFolderFN, flagsTD.FlgInputs(flagsTD.ListFolderInput), + "--" + flags.PageFN, flagsTD.FlgInputs(flagsTD.PageInput), + "--" + flags.PageFolderFN, flagsTD.FlgInputs(flagsTD.PageFolderInput), + "--" + flags.FormatFN, flagsTD.FormatType, + "--" + flags.ArchiveFN, + }, + flagsTD.PreparedProviderFlags(), + flagsTD.PreparedStorageFlags())) - cmd.SetOut(new(bytes.Buffer)) // drop output - cmd.SetErr(new(bytes.Buffer)) // drop output - - err := cmd.Execute() - assert.NoError(t, err, clues.ToCore(err)) + cliTD.CheckCmdChild( + t, + parent, + 3, + test.expectUse, + test.expectShort, + test.expectRunE) opts := utils.MakeSharePointOpts(cmd) - assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV) + assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV) assert.Equal(t, flagsTD.LibraryInput, opts.Library) assert.ElementsMatch(t, flagsTD.FileNameInput, opts.FileName) assert.ElementsMatch(t, flagsTD.FolderPathInput, opts.FolderPath) @@ -103,16 +88,12 @@ func (suite *SharePointUnitSuite) TestAddSharePointCommands() { assert.Equal(t, flagsTD.FileCreatedBeforeInput, opts.FileCreatedBefore) assert.Equal(t, flagsTD.FileModifiedAfterInput, opts.FileModifiedAfter) assert.Equal(t, flagsTD.FileModifiedBeforeInput, opts.FileModifiedBefore) - assert.ElementsMatch(t, flagsTD.ListItemInput, opts.ListItem) assert.ElementsMatch(t, flagsTD.ListFolderInput, opts.ListFolder) - assert.ElementsMatch(t, flagsTD.PageInput, opts.Page) assert.ElementsMatch(t, flagsTD.PageFolderInput, opts.PageFolder) - assert.Equal(t, flagsTD.Archive, opts.ExportCfg.Archive) assert.Equal(t, flagsTD.FormatType, opts.ExportCfg.Format) - flagsTD.AssertStorageFlags(t, cmd) }) } diff --git a/src/cli/flags/testdata/flags.go b/src/cli/flags/testdata/flags.go index c8339cf73..7dec134f4 100644 --- a/src/cli/flags/testdata/flags.go +++ b/src/cli/flags/testdata/flags.go @@ -86,7 +86,7 @@ var ( DisableConcurrencyLimiter = true ) -func WithFlags( +func WithFlags2( cc *cobra.Command, command string, flagSets ...[]string, @@ -99,3 +99,18 @@ func WithFlags( cc.SetArgs(args) } + +func WithFlags( + command string, + flagSets ...[]string, +) func(*cobra.Command) { + return func(cc *cobra.Command) { + args := []string{command} + + for _, sl := range flagSets { + args = append(args, sl...) 
+ } + + cc.SetArgs(args) + } +} diff --git a/src/cli/restore/exchange_test.go b/src/cli/restore/exchange_test.go index d7ffb1b98..c16eac331 100644 --- a/src/cli/restore/exchange_test.go +++ b/src/cli/restore/exchange_test.go @@ -1,17 +1,15 @@ package restore import ( - "bytes" "testing" - "github.com/alcionai/clues" "github.com/spf13/cobra" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "github.com/alcionai/corso/src/cli/flags" flagsTD "github.com/alcionai/corso/src/cli/flags/testdata" + cliTD "github.com/alcionai/corso/src/cli/testdata" "github.com/alcionai/corso/src/cli/utils" "github.com/alcionai/corso/src/internal/tester" ) @@ -39,80 +37,64 @@ func (suite *ExchangeUnitSuite) TestAddExchangeCommands() { for _, test := range table { suite.Run(test.name, func() { t := suite.T() + parent := &cobra.Command{Use: restoreCommand} - cmd := &cobra.Command{Use: test.use} - - // persistent flags not added by addCommands - flags.AddRunModeFlag(cmd, true) - - c := addExchangeCommands(cmd) - require.NotNil(t, c) - - // non-persistent flags not added by addCommands - flags.AddAllProviderFlags(c) - flags.AddAllStorageFlags(c) - - cmds := cmd.Commands() - require.Len(t, cmds, 1) - - child := cmds[0] - assert.Equal(t, test.expectUse, child.Use) - assert.Equal(t, test.expectShort, child.Short) - tester.AreSameFunc(t, test.expectRunE, child.RunE) - - flagsTD.WithFlags( - cmd, - exchangeServiceCommand, - []string{ - "--" + flags.RunModeFN, flags.RunModeFlagTest, - "--" + flags.BackupFN, flagsTD.BackupInput, - - "--" + flags.ContactFN, flagsTD.FlgInputs(flagsTD.ContactInput), - "--" + flags.ContactFolderFN, flagsTD.FlgInputs(flagsTD.ContactFldInput), - "--" + flags.ContactNameFN, flagsTD.ContactNameInput, - - "--" + flags.EmailFN, flagsTD.FlgInputs(flagsTD.EmailInput), - "--" + flags.EmailFolderFN, flagsTD.FlgInputs(flagsTD.EmailFldInput), - "--" + flags.EmailReceivedAfterFN, flagsTD.EmailReceivedAfterInput, - "--" + flags.EmailReceivedBeforeFN, flagsTD.EmailReceivedBeforeInput, - "--" + flags.EmailSenderFN, flagsTD.EmailSenderInput, - "--" + flags.EmailSubjectFN, flagsTD.EmailSubjectInput, - - "--" + flags.EventFN, flagsTD.FlgInputs(flagsTD.EventInput), - "--" + flags.EventCalendarFN, flagsTD.FlgInputs(flagsTD.EventCalInput), - "--" + flags.EventOrganizerFN, flagsTD.EventOrganizerInput, - "--" + flags.EventRecursFN, flagsTD.EventRecursInput, - "--" + flags.EventStartsAfterFN, flagsTD.EventStartsAfterInput, - "--" + flags.EventStartsBeforeFN, flagsTD.EventStartsBeforeInput, - "--" + flags.EventSubjectFN, flagsTD.EventSubjectInput, - - "--" + flags.CollisionsFN, flagsTD.Collisions, - "--" + flags.DestinationFN, flagsTD.Destination, - "--" + flags.ToResourceFN, flagsTD.ToResource, + cmd := cliTD.SetUpCmdHasFlags( + t, + parent, + addExchangeCommands, + []cliTD.UseCobraCommandFn{ + flags.AddAllProviderFlags, + flags.AddAllStorageFlags, }, - flagsTD.PreparedProviderFlags(), - flagsTD.PreparedStorageFlags()) + flagsTD.WithFlags( + exchangeServiceCommand, + []string{ + "--" + flags.RunModeFN, flags.RunModeFlagTest, + "--" + flags.BackupFN, flagsTD.BackupInput, + "--" + flags.ContactFN, flagsTD.FlgInputs(flagsTD.ContactInput), + "--" + flags.ContactFolderFN, flagsTD.FlgInputs(flagsTD.ContactFldInput), + "--" + flags.ContactNameFN, flagsTD.ContactNameInput, + "--" + flags.EmailFN, flagsTD.FlgInputs(flagsTD.EmailInput), + "--" + flags.EmailFolderFN, flagsTD.FlgInputs(flagsTD.EmailFldInput), + "--" + flags.EmailReceivedAfterFN, 
flagsTD.EmailReceivedAfterInput, + "--" + flags.EmailReceivedBeforeFN, flagsTD.EmailReceivedBeforeInput, + "--" + flags.EmailSenderFN, flagsTD.EmailSenderInput, + "--" + flags.EmailSubjectFN, flagsTD.EmailSubjectInput, + "--" + flags.EventFN, flagsTD.FlgInputs(flagsTD.EventInput), + "--" + flags.EventCalendarFN, flagsTD.FlgInputs(flagsTD.EventCalInput), + "--" + flags.EventOrganizerFN, flagsTD.EventOrganizerInput, + "--" + flags.EventRecursFN, flagsTD.EventRecursInput, + "--" + flags.EventStartsAfterFN, flagsTD.EventStartsAfterInput, + "--" + flags.EventStartsBeforeFN, flagsTD.EventStartsBeforeInput, + "--" + flags.EventSubjectFN, flagsTD.EventSubjectInput, + "--" + flags.CollisionsFN, flagsTD.Collisions, + "--" + flags.DestinationFN, flagsTD.Destination, + "--" + flags.ToResourceFN, flagsTD.ToResource, + }, + flagsTD.PreparedProviderFlags(), + flagsTD.PreparedStorageFlags())) - cmd.SetOut(new(bytes.Buffer)) // drop output - cmd.SetErr(new(bytes.Buffer)) // drop output - - err := cmd.Execute() - assert.NoError(t, err, clues.ToCore(err)) + cliTD.CheckCmdChild( + t, + parent, + 3, + test.expectUse, + test.expectShort, + test.expectRunE) opts := utils.MakeExchangeOpts(cmd) - assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV) + assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV) assert.ElementsMatch(t, flagsTD.ContactInput, opts.Contact) assert.ElementsMatch(t, flagsTD.ContactFldInput, opts.ContactFolder) assert.Equal(t, flagsTD.ContactNameInput, opts.ContactName) - assert.ElementsMatch(t, flagsTD.EmailInput, opts.Email) assert.ElementsMatch(t, flagsTD.EmailFldInput, opts.EmailFolder) assert.Equal(t, flagsTD.EmailReceivedAfterInput, opts.EmailReceivedAfter) assert.Equal(t, flagsTD.EmailReceivedBeforeInput, opts.EmailReceivedBefore) assert.Equal(t, flagsTD.EmailSenderInput, opts.EmailSender) assert.Equal(t, flagsTD.EmailSubjectInput, opts.EmailSubject) - assert.ElementsMatch(t, flagsTD.EventInput, opts.Event) assert.ElementsMatch(t, flagsTD.EventCalInput, opts.EventCalendar) assert.Equal(t, flagsTD.EventOrganizerInput, opts.EventOrganizer) @@ -120,11 +102,9 @@ func (suite *ExchangeUnitSuite) TestAddExchangeCommands() { assert.Equal(t, flagsTD.EventStartsAfterInput, opts.EventStartsAfter) assert.Equal(t, flagsTD.EventStartsBeforeInput, opts.EventStartsBefore) assert.Equal(t, flagsTD.EventSubjectInput, opts.EventSubject) - assert.Equal(t, flagsTD.Collisions, opts.RestoreCfg.Collisions) assert.Equal(t, flagsTD.Destination, opts.RestoreCfg.Destination) assert.Equal(t, flagsTD.ToResource, opts.RestoreCfg.ProtectedResource) - flagsTD.AssertProviderFlags(t, cmd) flagsTD.AssertStorageFlags(t, cmd) }) diff --git a/src/cli/restore/groups_test.go b/src/cli/restore/groups_test.go index f2045e53c..c6753170b 100644 --- a/src/cli/restore/groups_test.go +++ b/src/cli/restore/groups_test.go @@ -1,17 +1,15 @@ package restore import ( - "bytes" "testing" - "github.com/alcionai/clues" "github.com/spf13/cobra" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "github.com/alcionai/corso/src/cli/flags" flagsTD "github.com/alcionai/corso/src/cli/flags/testdata" + cliTD "github.com/alcionai/corso/src/cli/testdata" "github.com/alcionai/corso/src/cli/utils" "github.com/alcionai/corso/src/internal/tester" ) @@ -39,65 +37,51 @@ func (suite *GroupsUnitSuite) TestAddGroupsCommands() { for _, test := range table { suite.Run(test.name, func() { t := suite.T() + parent := &cobra.Command{Use: restoreCommand} - cmd := &cobra.Command{Use: test.use} - - // 
persistent flags not added by addCommands - flags.AddRunModeFlag(cmd, true) - - c := addGroupsCommands(cmd) - require.NotNil(t, c) - - // non-persistent flags not added by addCommands - flags.AddAllProviderFlags(c) - flags.AddAllStorageFlags(c) - - cmds := cmd.Commands() - require.Len(t, cmds, 1) - - child := cmds[0] - assert.Equal(t, test.expectUse, child.Use) - assert.Equal(t, test.expectShort, child.Short) - tester.AreSameFunc(t, test.expectRunE, child.RunE) - - flagsTD.WithFlags( - cmd, - groupsServiceCommand, - []string{ - "--" + flags.RunModeFN, flags.RunModeFlagTest, - "--" + flags.BackupFN, flagsTD.BackupInput, - - "--" + flags.LibraryFN, flagsTD.LibraryInput, - "--" + flags.FileFN, flagsTD.FlgInputs(flagsTD.FileNameInput), - "--" + flags.FolderFN, flagsTD.FlgInputs(flagsTD.FolderPathInput), - "--" + flags.FileCreatedAfterFN, flagsTD.FileCreatedAfterInput, - "--" + flags.FileCreatedBeforeFN, flagsTD.FileCreatedBeforeInput, - "--" + flags.FileModifiedAfterFN, flagsTD.FileModifiedAfterInput, - "--" + flags.FileModifiedBeforeFN, flagsTD.FileModifiedBeforeInput, - "--" + flags.ListItemFN, flagsTD.FlgInputs(flagsTD.ListItemInput), - "--" + flags.ListFolderFN, flagsTD.FlgInputs(flagsTD.ListFolderInput), - "--" + flags.PageFN, flagsTD.FlgInputs(flagsTD.PageInput), - "--" + flags.PageFolderFN, flagsTD.FlgInputs(flagsTD.PageFolderInput), - - "--" + flags.CollisionsFN, flagsTD.Collisions, - "--" + flags.DestinationFN, flagsTD.Destination, - "--" + flags.ToResourceFN, flagsTD.ToResource, - - // bool flags - "--" + flags.NoPermissionsFN, + cmd := cliTD.SetUpCmdHasFlags( + t, + parent, + addGroupsCommands, + []cliTD.UseCobraCommandFn{ + flags.AddAllProviderFlags, + flags.AddAllStorageFlags, }, - flagsTD.PreparedProviderFlags(), - flagsTD.PreparedStorageFlags()) + flagsTD.WithFlags( + groupsServiceCommand, + []string{ + "--" + flags.RunModeFN, flags.RunModeFlagTest, + "--" + flags.BackupFN, flagsTD.BackupInput, + "--" + flags.LibraryFN, flagsTD.LibraryInput, + "--" + flags.FileFN, flagsTD.FlgInputs(flagsTD.FileNameInput), + "--" + flags.FolderFN, flagsTD.FlgInputs(flagsTD.FolderPathInput), + "--" + flags.FileCreatedAfterFN, flagsTD.FileCreatedAfterInput, + "--" + flags.FileCreatedBeforeFN, flagsTD.FileCreatedBeforeInput, + "--" + flags.FileModifiedAfterFN, flagsTD.FileModifiedAfterInput, + "--" + flags.FileModifiedBeforeFN, flagsTD.FileModifiedBeforeInput, + "--" + flags.ListItemFN, flagsTD.FlgInputs(flagsTD.ListItemInput), + "--" + flags.ListFolderFN, flagsTD.FlgInputs(flagsTD.ListFolderInput), + "--" + flags.PageFN, flagsTD.FlgInputs(flagsTD.PageInput), + "--" + flags.PageFolderFN, flagsTD.FlgInputs(flagsTD.PageFolderInput), + "--" + flags.CollisionsFN, flagsTD.Collisions, + "--" + flags.DestinationFN, flagsTD.Destination, + "--" + flags.ToResourceFN, flagsTD.ToResource, + "--" + flags.NoPermissionsFN, + }, + flagsTD.PreparedProviderFlags(), + flagsTD.PreparedStorageFlags())) - cmd.SetOut(new(bytes.Buffer)) // drop output - cmd.SetErr(new(bytes.Buffer)) // drop output - - err := cmd.Execute() - assert.NoError(t, err, clues.ToCore(err)) + cliTD.CheckCmdChild( + t, + parent, + 3, + test.expectUse, + test.expectShort, + test.expectRunE) opts := utils.MakeGroupsOpts(cmd) - assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV) + assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV) assert.Equal(t, flagsTD.LibraryInput, opts.Library) assert.ElementsMatch(t, flagsTD.FileNameInput, opts.FileName) assert.ElementsMatch(t, flagsTD.FolderPathInput, opts.FolderPath) @@ -105,14 +89,10 @@ func (suite 
*GroupsUnitSuite) TestAddGroupsCommands() { assert.Equal(t, flagsTD.FileCreatedBeforeInput, opts.FileCreatedBefore) assert.Equal(t, flagsTD.FileModifiedAfterInput, opts.FileModifiedAfter) assert.Equal(t, flagsTD.FileModifiedBeforeInput, opts.FileModifiedBefore) - assert.Equal(t, flagsTD.Collisions, opts.RestoreCfg.Collisions) assert.Equal(t, flagsTD.Destination, opts.RestoreCfg.Destination) assert.Equal(t, flagsTD.ToResource, opts.RestoreCfg.ProtectedResource) - - // bool flags assert.True(t, flags.NoPermissionsFV) - flagsTD.AssertProviderFlags(t, cmd) flagsTD.AssertStorageFlags(t, cmd) }) diff --git a/src/cli/restore/onedrive_test.go b/src/cli/restore/onedrive_test.go index 5a94705d8..77fb49c65 100644 --- a/src/cli/restore/onedrive_test.go +++ b/src/cli/restore/onedrive_test.go @@ -1,17 +1,15 @@ package restore import ( - "bytes" "testing" - "github.com/alcionai/clues" "github.com/spf13/cobra" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "github.com/alcionai/corso/src/cli/flags" flagsTD "github.com/alcionai/corso/src/cli/flags/testdata" + cliTD "github.com/alcionai/corso/src/cli/testdata" "github.com/alcionai/corso/src/cli/utils" "github.com/alcionai/corso/src/internal/tester" ) @@ -39,73 +37,56 @@ func (suite *OneDriveUnitSuite) TestAddOneDriveCommands() { for _, test := range table { suite.Run(test.name, func() { t := suite.T() + parent := &cobra.Command{Use: restoreCommand} - cmd := &cobra.Command{Use: test.use} - - // persistent flags not added by addCommands - flags.AddRunModeFlag(cmd, true) - - c := addOneDriveCommands(cmd) - require.NotNil(t, c) - - // non-persistent flags not added by addCommands - flags.AddAllProviderFlags(c) - flags.AddAllStorageFlags(c) - - cmds := cmd.Commands() - require.Len(t, cmds, 1) - - child := cmds[0] - assert.Equal(t, test.expectUse, child.Use) - assert.Equal(t, test.expectShort, child.Short) - tester.AreSameFunc(t, test.expectRunE, child.RunE) - - flagsTD.WithFlags( - cmd, - oneDriveServiceCommand, - []string{ - "--" + flags.RunModeFN, flags.RunModeFlagTest, - "--" + flags.BackupFN, flagsTD.BackupInput, - "--" + flags.FileFN, flagsTD.FlgInputs(flagsTD.FileNameInput), - "--" + flags.FolderFN, flagsTD.FlgInputs(flagsTD.FolderPathInput), - "--" + flags.FileCreatedAfterFN, flagsTD.FileCreatedAfterInput, - "--" + flags.FileCreatedBeforeFN, flagsTD.FileCreatedBeforeInput, - "--" + flags.FileModifiedAfterFN, flagsTD.FileModifiedAfterInput, - "--" + flags.FileModifiedBeforeFN, flagsTD.FileModifiedBeforeInput, - - "--" + flags.CollisionsFN, flagsTD.Collisions, - "--" + flags.DestinationFN, flagsTD.Destination, - "--" + flags.ToResourceFN, flagsTD.ToResource, - - // bool flags - "--" + flags.NoPermissionsFN, + cmd := cliTD.SetUpCmdHasFlags( + t, + parent, + addOneDriveCommands, + []cliTD.UseCobraCommandFn{ + flags.AddAllProviderFlags, + flags.AddAllStorageFlags, }, - flagsTD.PreparedProviderFlags(), - flagsTD.PreparedStorageFlags()) + flagsTD.WithFlags( + oneDriveServiceCommand, + []string{ + "--" + flags.RunModeFN, flags.RunModeFlagTest, + "--" + flags.BackupFN, flagsTD.BackupInput, + "--" + flags.FileFN, flagsTD.FlgInputs(flagsTD.FileNameInput), + "--" + flags.FolderFN, flagsTD.FlgInputs(flagsTD.FolderPathInput), + "--" + flags.FileCreatedAfterFN, flagsTD.FileCreatedAfterInput, + "--" + flags.FileCreatedBeforeFN, flagsTD.FileCreatedBeforeInput, + "--" + flags.FileModifiedAfterFN, flagsTD.FileModifiedAfterInput, + "--" + flags.FileModifiedBeforeFN, flagsTD.FileModifiedBeforeInput, + "--" + 
flags.CollisionsFN, flagsTD.Collisions, + "--" + flags.DestinationFN, flagsTD.Destination, + "--" + flags.ToResourceFN, flagsTD.ToResource, + "--" + flags.NoPermissionsFN, + }, + flagsTD.PreparedProviderFlags(), + flagsTD.PreparedStorageFlags())) - cmd.SetOut(new(bytes.Buffer)) // drop output - cmd.SetErr(new(bytes.Buffer)) // drop output - - err := cmd.Execute() - assert.NoError(t, err, clues.ToCore(err)) + cliTD.CheckCmdChild( + t, + parent, + 3, + test.expectUse, + test.expectShort, + test.expectRunE) opts := utils.MakeOneDriveOpts(cmd) - assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV) + assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV) assert.ElementsMatch(t, flagsTD.FileNameInput, opts.FileName) assert.ElementsMatch(t, flagsTD.FolderPathInput, opts.FolderPath) assert.Equal(t, flagsTD.FileCreatedAfterInput, opts.FileCreatedAfter) assert.Equal(t, flagsTD.FileCreatedBeforeInput, opts.FileCreatedBefore) assert.Equal(t, flagsTD.FileModifiedAfterInput, opts.FileModifiedAfter) assert.Equal(t, flagsTD.FileModifiedBeforeInput, opts.FileModifiedBefore) - assert.Equal(t, flagsTD.Collisions, opts.RestoreCfg.Collisions) assert.Equal(t, flagsTD.Destination, opts.RestoreCfg.Destination) assert.Equal(t, flagsTD.ToResource, opts.RestoreCfg.ProtectedResource) - - // bool flags assert.True(t, flags.NoPermissionsFV) - flagsTD.AssertProviderFlags(t, cmd) flagsTD.AssertStorageFlags(t, cmd) }) diff --git a/src/cli/restore/restore.go b/src/cli/restore/restore.go index 9dad4ca1c..7db7dc5a7 100644 --- a/src/cli/restore/restore.go +++ b/src/cli/restore/restore.go @@ -25,12 +25,12 @@ var restoreCommands = []func(cmd *cobra.Command) *cobra.Command{ // AddCommands attaches all `corso restore * *` commands to the parent. func AddCommands(cmd *cobra.Command) { subCommand := restoreCmd() - flags.AddAllProviderFlags(subCommand) - flags.AddAllStorageFlags(subCommand) cmd.AddCommand(subCommand) for _, addRestoreTo := range restoreCommands { - addRestoreTo(subCommand) + sc := addRestoreTo(subCommand) + flags.AddAllProviderFlags(sc) + flags.AddAllStorageFlags(sc) } } diff --git a/src/cli/restore/sharepoint_test.go b/src/cli/restore/sharepoint_test.go index 638b03bee..ef28f399a 100644 --- a/src/cli/restore/sharepoint_test.go +++ b/src/cli/restore/sharepoint_test.go @@ -1,17 +1,15 @@ package restore import ( - "bytes" "testing" - "github.com/alcionai/clues" "github.com/spf13/cobra" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "github.com/alcionai/corso/src/cli/flags" flagsTD "github.com/alcionai/corso/src/cli/flags/testdata" + cliTD "github.com/alcionai/corso/src/cli/testdata" "github.com/alcionai/corso/src/cli/utils" "github.com/alcionai/corso/src/internal/tester" ) @@ -39,64 +37,51 @@ func (suite *SharePointUnitSuite) TestAddSharePointCommands() { for _, test := range table { suite.Run(test.name, func() { t := suite.T() + parent := &cobra.Command{Use: restoreCommand} - cmd := &cobra.Command{Use: test.use} - - // persistent flags not added by addCommands - flags.AddRunModeFlag(cmd, true) - - c := addSharePointCommands(cmd) - require.NotNil(t, c) - - // non-persistent flags not added by addCommands - flags.AddAllProviderFlags(c) - flags.AddAllStorageFlags(c) - - cmds := cmd.Commands() - require.Len(t, cmds, 1) - - child := cmds[0] - assert.Equal(t, test.expectUse, child.Use) - assert.Equal(t, test.expectShort, child.Short) - tester.AreSameFunc(t, test.expectRunE, child.RunE) - - flagsTD.WithFlags( - cmd, - sharePointServiceCommand, - []string{ - 
"--" + flags.RunModeFN, flags.RunModeFlagTest, - "--" + flags.BackupFN, flagsTD.BackupInput, - "--" + flags.LibraryFN, flagsTD.LibraryInput, - "--" + flags.FileFN, flagsTD.FlgInputs(flagsTD.FileNameInput), - "--" + flags.FolderFN, flagsTD.FlgInputs(flagsTD.FolderPathInput), - "--" + flags.FileCreatedAfterFN, flagsTD.FileCreatedAfterInput, - "--" + flags.FileCreatedBeforeFN, flagsTD.FileCreatedBeforeInput, - "--" + flags.FileModifiedAfterFN, flagsTD.FileModifiedAfterInput, - "--" + flags.FileModifiedBeforeFN, flagsTD.FileModifiedBeforeInput, - "--" + flags.ListItemFN, flagsTD.FlgInputs(flagsTD.ListItemInput), - "--" + flags.ListFolderFN, flagsTD.FlgInputs(flagsTD.ListFolderInput), - "--" + flags.PageFN, flagsTD.FlgInputs(flagsTD.PageInput), - "--" + flags.PageFolderFN, flagsTD.FlgInputs(flagsTD.PageFolderInput), - - "--" + flags.CollisionsFN, flagsTD.Collisions, - "--" + flags.DestinationFN, flagsTD.Destination, - "--" + flags.ToResourceFN, flagsTD.ToResource, - - // bool flags - "--" + flags.NoPermissionsFN, + cmd := cliTD.SetUpCmdHasFlags( + t, + parent, + addSharePointCommands, + []cliTD.UseCobraCommandFn{ + flags.AddAllProviderFlags, + flags.AddAllStorageFlags, }, - flagsTD.PreparedProviderFlags(), - flagsTD.PreparedStorageFlags()) + flagsTD.WithFlags( + sharePointServiceCommand, + []string{ + "--" + flags.RunModeFN, flags.RunModeFlagTest, + "--" + flags.BackupFN, flagsTD.BackupInput, + "--" + flags.LibraryFN, flagsTD.LibraryInput, + "--" + flags.FileFN, flagsTD.FlgInputs(flagsTD.FileNameInput), + "--" + flags.FolderFN, flagsTD.FlgInputs(flagsTD.FolderPathInput), + "--" + flags.FileCreatedAfterFN, flagsTD.FileCreatedAfterInput, + "--" + flags.FileCreatedBeforeFN, flagsTD.FileCreatedBeforeInput, + "--" + flags.FileModifiedAfterFN, flagsTD.FileModifiedAfterInput, + "--" + flags.FileModifiedBeforeFN, flagsTD.FileModifiedBeforeInput, + "--" + flags.ListItemFN, flagsTD.FlgInputs(flagsTD.ListItemInput), + "--" + flags.ListFolderFN, flagsTD.FlgInputs(flagsTD.ListFolderInput), + "--" + flags.PageFN, flagsTD.FlgInputs(flagsTD.PageInput), + "--" + flags.PageFolderFN, flagsTD.FlgInputs(flagsTD.PageFolderInput), + "--" + flags.CollisionsFN, flagsTD.Collisions, + "--" + flags.DestinationFN, flagsTD.Destination, + "--" + flags.ToResourceFN, flagsTD.ToResource, + "--" + flags.NoPermissionsFN, + }, + flagsTD.PreparedProviderFlags(), + flagsTD.PreparedStorageFlags())) - cmd.SetOut(new(bytes.Buffer)) // drop output - cmd.SetErr(new(bytes.Buffer)) // drop output - - err := cmd.Execute() - assert.NoError(t, err, clues.ToCore(err)) + cliTD.CheckCmdChild( + t, + parent, + 3, + test.expectUse, + test.expectShort, + test.expectRunE) opts := utils.MakeSharePointOpts(cmd) - assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV) + assert.Equal(t, flagsTD.BackupInput, flags.BackupIDFV) assert.Equal(t, flagsTD.LibraryInput, opts.Library) assert.ElementsMatch(t, flagsTD.FileNameInput, opts.FileName) assert.ElementsMatch(t, flagsTD.FolderPathInput, opts.FolderPath) @@ -104,20 +89,14 @@ func (suite *SharePointUnitSuite) TestAddSharePointCommands() { assert.Equal(t, flagsTD.FileCreatedBeforeInput, opts.FileCreatedBefore) assert.Equal(t, flagsTD.FileModifiedAfterInput, opts.FileModifiedAfter) assert.Equal(t, flagsTD.FileModifiedBeforeInput, opts.FileModifiedBefore) - assert.ElementsMatch(t, flagsTD.ListItemInput, opts.ListItem) assert.ElementsMatch(t, flagsTD.ListFolderInput, opts.ListFolder) - assert.ElementsMatch(t, flagsTD.PageInput, opts.Page) assert.ElementsMatch(t, flagsTD.PageFolderInput, opts.PageFolder) - 
assert.Equal(t, flagsTD.Collisions, opts.RestoreCfg.Collisions) assert.Equal(t, flagsTD.Destination, opts.RestoreCfg.Destination) assert.Equal(t, flagsTD.ToResource, opts.RestoreCfg.ProtectedResource) - - // bool flags assert.True(t, flags.NoPermissionsFV) - flagsTD.AssertProviderFlags(t, cmd) flagsTD.AssertStorageFlags(t, cmd) }) diff --git a/src/cli/testdata/cli.go b/src/cli/testdata/cli.go index 1c955165f..16a983360 100644 --- a/src/cli/testdata/cli.go +++ b/src/cli/testdata/cli.go @@ -1,11 +1,20 @@ package testdata import ( + "bytes" "fmt" + "strings" + "testing" "time" + "github.com/alcionai/clues" "github.com/google/uuid" "github.com/spf13/cobra" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/alcionai/corso/src/cli/flags" + "github.com/alcionai/corso/src/internal/tester" ) // StubRootCmd builds a stub cobra command to be used as @@ -27,3 +36,82 @@ func StubRootCmd(args ...string) *cobra.Command { return c } + +type UseCobraCommandFn func(*cobra.Command) + +func SetUpCmdHasFlags( + t *testing.T, + parentCmd *cobra.Command, + addChildCommand func(*cobra.Command) *cobra.Command, + addFlags []UseCobraCommandFn, + setArgs UseCobraCommandFn, +) *cobra.Command { + parentCmd.PersistentPreRun = func(c *cobra.Command, args []string) { + t.Log("testing args:") + + for _, arg := range args { + t.Log(arg) + } + } + + // persistent flags not added by addCommands + flags.AddRunModeFlag(parentCmd, true) + + cmd := addChildCommand(parentCmd) + require.NotNil(t, cmd) + + cul := cmd.UseLine() + require.Truef( + t, + strings.HasPrefix(cul, parentCmd.Use+" "+cmd.Use), + "child command has expected usage format 'parent child', got %q", + cul) + + for _, af := range addFlags { + af(cmd) + } + + setArgs(parentCmd) + + parentCmd.SetOut(new(bytes.Buffer)) // drop output + parentCmd.SetErr(new(bytes.Buffer)) // drop output + + err := parentCmd.Execute() + assert.NoError(t, err, clues.ToCore(err)) + + return cmd +} + +type CobraRunEFn func(cmd *cobra.Command, args []string) error + +func CheckCmdChild( + t *testing.T, + cmd *cobra.Command, + expectChildCount int, + expectUse string, + expectShort string, + expectRunE CobraRunEFn, +) { + var ( + cmds = cmd.Commands() + child *cobra.Command + ) + + for _, cc := range cmds { + if cc.Use == expectUse { + child = cc + break + } + } + + require.Len( + t, + cmds, + expectChildCount, + "parent command should have the correct child command count") + + require.NotNil(t, child, "should have found expected child command") + + assert.Equal(t, expectShort, child.Short) + tester.AreSameFunc(t, expectRunE, child.RunE) +} From 9e0d464854cb00aad82f28cae30ce1f2ad970968 Mon Sep 17 00:00:00 2001 From: Keepers Date: Thu, 28 Sep 2023 17:53:15 -0600 Subject: [PATCH 05/26] sanity test refactor (#4370) refactoring the sanity tests with three goals: 1. move from env vars to cli commands so that unsupported commands fail loudly. 2. set up support for groups restore and export testing. 3. introduce some code re-use throughout. --- #### Does this PR need a docs update or release note? 
- [x] :no_entry: No #### Type of change - [x] :robot: Supportability/Tests #### Issue(s) * #3988 #### Test Plan - [x] :green_heart: E2E --- .../actions/backup-restore-test/action.yml | 53 +++- .github/actions/slack-message/action.yml | 4 +- .github/workflows/sanity-test.yaml | 32 +- src/cmd/sanity_test/common/common.go | 62 ++++ src/cmd/sanity_test/common/filepath.go | 38 +++ src/cmd/sanity_test/common/sanitree.go | 69 +++++ src/cmd/sanity_test/common/utils.go | 4 +- src/cmd/sanity_test/export/groups.go | 16 + src/cmd/sanity_test/export/onedrive.go | 45 +-- src/cmd/sanity_test/export/sharepoint.go | 45 +-- src/cmd/sanity_test/restore/exchange.go | 244 +++++---------- src/cmd/sanity_test/restore/groups.go | 16 + src/cmd/sanity_test/restore/onedrive.go | 111 +++---- src/cmd/sanity_test/restore/sharepoint.go | 23 +- src/cmd/sanity_test/sanity_tests.go | 289 ++++++++++++++---- src/pkg/services/m365/api/client.go | 2 +- src/pkg/services/m365/api/drive.go | 20 ++ src/pkg/services/m365/api/mail.go | 20 ++ 18 files changed, 716 insertions(+), 377 deletions(-) create mode 100644 src/cmd/sanity_test/common/filepath.go create mode 100644 src/cmd/sanity_test/common/sanitree.go create mode 100644 src/cmd/sanity_test/export/groups.go create mode 100644 src/cmd/sanity_test/restore/groups.go diff --git a/.github/actions/backup-restore-test/action.yml b/.github/actions/backup-restore-test/action.yml index 299243e6a..2603cab27 100644 --- a/.github/actions/backup-restore-test/action.yml +++ b/.github/actions/backup-restore-test/action.yml @@ -45,6 +45,9 @@ runs: shell: bash working-directory: src run: | + echo "---------------------------" + echo Backup ${{ inputs.service }} ${{ inputs.kind }} + echo "---------------------------" set -euo pipefail CORSO_LOG_FILE=${{ inputs.log-dir }}/gotest-backup-${{ inputs.service }}-${{inputs.kind }}.log ./corso backup create '${{ inputs.service }}' \ @@ -61,6 +64,9 @@ runs: shell: bash working-directory: src run: | + echo "---------------------------" + echo Restore ${{ inputs.service }} ${{ inputs.kind }} + echo "---------------------------" set -euo pipefail CORSO_LOG_FILE=${{ inputs.log-dir }}/gotest-restore-${{ inputs.service }}-${{inputs.kind }}.log ./corso restore '${{ inputs.service }}' \ @@ -85,11 +91,14 @@ runs: SANITY_TEST_KIND: restore SANITY_TEST_FOLDER: ${{ steps.restore.outputs.result }} SANITY_TEST_SERVICE: ${{ inputs.service }} - TEST_DATA: ${{ inputs.test-folder }} - BASE_BACKUP: ${{ inputs.base-backup }} + SANITY_TEST_DATA: ${{ inputs.test-folder }} + SANITY_BASE_BACKUP: ${{ inputs.base-backup }} run: | + echo "---------------------------" + echo Sanity Test Restore ${{ inputs.service }} ${{ inputs.kind }} + echo "---------------------------" CORSO_LOG_FILE=${{ inputs.log-dir }}/gotest-validate-${{ inputs.service }}-${{inputs.kind }}.log - ./sanity-test + ./sanity-test restore ${{ inputs.service }} - name: Export ${{ inputs.service }} ${{ inputs.kind }} if: inputs.with-export == true @@ -97,6 +106,9 @@ runs: shell: bash working-directory: src run: | + echo "---------------------------" + echo Export ${{ inputs.service }} ${{ inputs.kind }} + echo "---------------------------" set -euo pipefail CORSO_LOG_FILE=${{ inputs.log-dir }}/gotest-restore-${{ inputs.service }}-${{inputs.kind }}.log ./corso export '${{ inputs.service }}' \ @@ -116,11 +128,14 @@ runs: SANITY_TEST_KIND: export SANITY_TEST_FOLDER: /tmp/export-${{ inputs.service }}-${{inputs.kind }} SANITY_TEST_SERVICE: ${{ inputs.service }} - TEST_DATA: ${{ inputs.test-folder }} - BASE_BACKUP: ${{ 
inputs.base-backup }} + SANITY_TEST_DATA: ${{ inputs.test-folder }} + SANITY_BASE_BACKUP: ${{ inputs.base-backup }} run: | + echo "---------------------------" + echo Sanity-Test Export ${{ inputs.service }} ${{ inputs.kind }} + echo "---------------------------" CORSO_LOG_FILE=${{ inputs.log-dir }}/gotest-validate-${{ inputs.service }}-${{inputs.kind }}.log - ./sanity-test + ./sanity-test export ${{ inputs.service }} - name: Export archive ${{ inputs.service }} ${{ inputs.kind }} if: inputs.with-export == true @@ -128,6 +143,9 @@ runs: shell: bash working-directory: src run: | + echo "---------------------------" + echo Export Archive ${{ inputs.service }} ${{ inputs.kind }} + echo "---------------------------" set -euo pipefail CORSO_LOG_FILE=${{ inputs.log-dir }}/gotest-restore-${{ inputs.service }}-${{inputs.kind }}.log ./corso export '${{ inputs.service }}' \ @@ -150,16 +168,22 @@ runs: SANITY_TEST_KIND: export SANITY_TEST_FOLDER: /tmp/export-${{ inputs.service }}-${{inputs.kind }}-unzipped SANITY_TEST_SERVICE: ${{ inputs.service }} - TEST_DATA: ${{ inputs.test-folder }} - BASE_BACKUP: ${{ inputs.base-backup }} + SANITY_TEST_DATA: ${{ inputs.test-folder }} + SANITY_BASE_BACKUP: ${{ inputs.base-backup }} run: | + echo "---------------------------" + echo Sanity-Test Export Archive ${{ inputs.service }} ${{ inputs.kind }} + echo "---------------------------" CORSO_LOG_FILE=${{ inputs.log-dir }}/gotest-validate-${{ inputs.service }}-${{inputs.kind }}.log - ./sanity-test + ./sanity-test export ${{ inputs.service }} - name: List ${{ inputs.service }} ${{ inputs.kind }} shell: bash working-directory: src run: | + echo "---------------------------" + echo Backup list ${{ inputs.service }} ${{ inputs.kind }} + echo "---------------------------" set -euo pipefail CORSO_LOG_FILE=${{ inputs.log-dir }}/gotest-backup-list-${{ inputs.service }}-${{inputs.kind }}.log ./corso backup list ${{ inputs.service }} \ @@ -178,6 +202,9 @@ runs: shell: bash working-directory: src run: | + echo "---------------------------" + echo Backup List w/ Backup ${{ inputs.service }} ${{ inputs.kind }} + echo "---------------------------" set -euo pipefail CORSO_LOG_FILE=${{ inputs.log-dir }}/gotest-backup-list-single-${{ inputs.service }}-${{inputs.kind }}.log ./corso backup list ${{ inputs.service }} \ @@ -193,7 +220,13 @@ runs: exit 1 fi - # Upload the original go test output as an artifact for later review. 
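+    # print a closing banner so the end of the backup/restore steps is easy
+    # to spot in the job log before the test artifacts are uploaded below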
+ - if: always() + shell: bash + run: | + echo "---------------------------" + echo Logging Results + echo "---------------------------" + - name: Upload test log if: always() uses: actions/upload-artifact@v3 diff --git a/.github/actions/slack-message/action.yml b/.github/actions/slack-message/action.yml index 57091d430..d79ab6180 100644 --- a/.github/actions/slack-message/action.yml +++ b/.github/actions/slack-message/action.yml @@ -31,7 +31,7 @@ runs: - name: use url or blank val shell: bash run: | - echo "STEP=${{ github.action || '' }}" >> $GITHUB_ENV + echo "STEP=${{ env.trimmed_ref || '' }}" >> $GITHUB_ENV echo "JOB=${{ github.job || '' }}" >> $GITHUB_ENV echo "LOGS=${{ github.run_id && env.logurl || '-' }}" >> $GITHUB_ENV echo "COMMIT=${{ github.sha && env.commiturl || '-' }}" >> $GITHUB_ENV @@ -51,7 +51,7 @@ runs: "type": "section", "text": { "type": "mrkdwn", - "text": "${{ inputs.msg }} :: ${{ env.JOB }} - ${{ env.STEP }}\n${{ env.LOGS }} ${{ env.COMMIT }} ${{ env.REF }}" + "text": "${{ inputs.msg }}\n${{ env.JOB }} :: ${{ env.STEP }}\n${{ env.LOGS }} ${{ env.COMMIT }} ${{ env.REF }}" } } ] diff --git a/.github/workflows/sanity-test.yaml b/.github/workflows/sanity-test.yaml index 096924dba..53a0546d5 100644 --- a/.github/workflows/sanity-test.yaml +++ b/.github/workflows/sanity-test.yaml @@ -181,7 +181,7 @@ jobs: uses: ./.github/actions/backup-restore-test with: service: exchange - kind: initial + kind: first-backup backup-args: '--mailbox "${{ env.TEST_USER }}" --data "email"' restore-args: '--email-folder ${{ env.RESTORE_DEST_PFX }}${{ steps.repo-init.outputs.result }}' test-folder: '${{ env.RESTORE_DEST_PFX }}${{ steps.repo-init.outputs.result }}' @@ -249,7 +249,7 @@ jobs: uses: ./.github/actions/backup-restore-test with: service: onedrive - kind: initial + kind: first-backup backup-args: '--user "${{ env.TEST_USER }}"' restore-args: '--folder ${{ env.RESTORE_DEST_PFX }}${{ steps.new-data-creation-onedrive.outputs.result }}' test-folder: '${{ env.RESTORE_DEST_PFX }}${{ steps.new-data-creation-onedrive.outputs.result }}' @@ -305,7 +305,7 @@ jobs: uses: ./.github/actions/backup-restore-test with: service: sharepoint - kind: initial + kind: first-backup backup-args: '--site "${{ secrets.CORSO_M365_TEST_SITE_URL }}"' restore-args: '--folder ${{ env.RESTORE_DEST_PFX }}${{ steps.new-data-creation-sharepoint.outputs.result }}' test-folder: '${{ env.RESTORE_DEST_PFX }}${{ steps.new-data-creation-sharepoint.outputs.result }}' @@ -362,12 +362,34 @@ jobs: uses: ./.github/actions/backup-restore-test with: service: groups - kind: initial + kind: first-backup backup-args: '--group "${{ vars.CORSO_M365_TEST_TEAM_ID }}"' test-folder: '${{ env.RESTORE_DEST_PFX }}${{ steps.new-data-creation-groups.outputs.result }}' log-dir: ${{ env.CORSO_LOG_DIR }} - # TODO: incrementals + # generate some more enteries for incremental check + # - name: Groups - Create new data (for incremental) + # working-directory: ./src/cmd/factory + # run: | + # go run . 
sharepoint files \ + # --site ${{ secrets.CORSO_M365_TEST_GROUPS_SITE_URL }} \ + # --user ${{ env.TEST_USER }} \ + # --secondaryuser ${{ env.CORSO_SECONDARY_M365_TEST_USER_ID }} \ + # --tenant ${{ secrets.TENANT_ID }} \ + # --destination ${{ env.RESTORE_DEST_PFX }}${{ steps.new-data-creation-groups.outputs.result }} \ + # --count 4 + + # - name: Groups - Incremental backup + # id: groups-incremental + # uses: ./.github/actions/backup-restore-test + # with: + # service: groups + # kind: incremental + # backup-args: '--site "${{ secrets.CORSO_M365_TEST_GROUPS_SITE_URL }}"' + # restore-args: '--folder ${{ env.RESTORE_DEST_PFX }}${{ steps.new-data-creation-groups.outputs.result }}' + # test-folder: '${{ env.RESTORE_DEST_PFX }}${{ steps.new-data-creation-groups.outputs.result }}' + # log-dir: ${{ env.CORSO_LOG_DIR }} + # with-export: true ########################################################################################################################################## diff --git a/src/cmd/sanity_test/common/common.go b/src/cmd/sanity_test/common/common.go index 344d6dc19..c3a24a489 100644 --- a/src/cmd/sanity_test/common/common.go +++ b/src/cmd/sanity_test/common/common.go @@ -1,6 +1,68 @@ package common +import ( + "context" + "fmt" + "os" + "strings" + "time" + + "github.com/alcionai/corso/src/internal/tester/tconfig" + "github.com/alcionai/corso/src/pkg/account" + "github.com/alcionai/corso/src/pkg/control" + "github.com/alcionai/corso/src/pkg/credentials" + "github.com/alcionai/corso/src/pkg/logger" + "github.com/alcionai/corso/src/pkg/services/m365/api" +) + type PermissionInfo struct { EntityID string Roles []string } + +const ( + sanityBaseBackup = "SANITY_BASE_BACKUP" + sanityTestData = "SANITY_TEST_DATA" + sanityTestFolder = "SANITY_TEST_FOLDER" + sanityTestService = "SANITY_TEST_SERVICE" +) + +type Envs struct { + BaseBackupFolder string + DataFolder string + FolderName string + Service string + SiteID string + StartTime time.Time + UserID string +} + +func EnvVars(ctx context.Context) Envs { + folder := strings.TrimSpace(os.Getenv(sanityTestFolder)) + startTime, _ := MustGetTimeFromName(ctx, folder) + + e := Envs{ + BaseBackupFolder: os.Getenv(sanityBaseBackup), + DataFolder: os.Getenv(sanityTestData), + FolderName: folder, + SiteID: tconfig.GetM365SiteID(ctx), + Service: os.Getenv(sanityTestService), + StartTime: startTime, + UserID: tconfig.GetM365UserID(ctx), + } + + fmt.Printf("\n-----\nenvs %+v\n-----\n", e) + + logger.Ctx(ctx).Info("envs", e) + + return e +} + +func GetAC() (api.Client, error) { + creds := account.M365Config{ + M365: credentials.GetM365(), + AzureTenantID: os.Getenv(account.AzureTenantID), + } + + return api.NewClient(creds, control.DefaultOptions()) +} diff --git a/src/cmd/sanity_test/common/filepath.go b/src/cmd/sanity_test/common/filepath.go new file mode 100644 index 000000000..fd47c5b2d --- /dev/null +++ b/src/cmd/sanity_test/common/filepath.go @@ -0,0 +1,38 @@ +package common + +import ( + "os" + "path/filepath" + "time" + + "github.com/alcionai/clues" +) + +func FilepathWalker( + folderName string, + exportFileSizes map[string]int64, + startTime time.Time, +) filepath.WalkFunc { + return func(path string, info os.FileInfo, err error) error { + if err != nil { + return clues.Stack(err) + } + + if info.IsDir() { + return nil + } + + relPath, err := filepath.Rel(folderName, path) + if err != nil { + return clues.Stack(err) + } + + exportFileSizes[relPath] = info.Size() + + if startTime.After(info.ModTime()) { + startTime = info.ModTime() + } + + 
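+		// at this point every regular file seen so far has been recorded in
+		// exportFileSizes, keyed by its path relative to the walked root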
return nil + } +} diff --git a/src/cmd/sanity_test/common/sanitree.go b/src/cmd/sanity_test/common/sanitree.go new file mode 100644 index 000000000..b0dc8ac29 --- /dev/null +++ b/src/cmd/sanity_test/common/sanitree.go @@ -0,0 +1,69 @@ +package common + +import ( + "context" + + "golang.org/x/exp/maps" +) + +// Sanitree is used to build out a hierarchical tree of items +// for comparison against each other. Primarily so that a restore +// can compare two subtrees easily. +type Sanitree[T any] struct { + Container T + ContainerID string + ContainerName string + // non-containers only + ContainsItems int + // name -> node + Children map[string]*Sanitree[T] +} + +func AssertEqualTrees[T any]( + ctx context.Context, + expect, other *Sanitree[T], +) { + if expect == nil && other == nil { + return + } + + Assert( + ctx, + func() bool { return expect != nil && other != nil }, + "non nil nodes", + expect, + other) + + Assert( + ctx, + func() bool { return expect.ContainerName == other.ContainerName }, + "container names match", + expect.ContainerName, + other.ContainerName) + + Assert( + ctx, + func() bool { return expect.ContainsItems == other.ContainsItems }, + "count of items in container matches", + expect.ContainsItems, + other.ContainsItems) + + Assert( + ctx, + func() bool { return len(expect.Children) == len(other.Children) }, + "count of child containers matches", + len(expect.Children), + len(other.Children)) + + for name, s := range expect.Children { + ch, ok := other.Children[name] + Assert( + ctx, + func() bool { return ok }, + "found matching child container", + name, + maps.Keys(other.Children)) + + AssertEqualTrees(ctx, s, ch) + } +} diff --git a/src/cmd/sanity_test/common/utils.go b/src/cmd/sanity_test/common/utils.go index e14fa86c6..89ddc6711 100644 --- a/src/cmd/sanity_test/common/utils.go +++ b/src/cmd/sanity_test/common/utils.go @@ -22,7 +22,7 @@ func Assert( return } - header = "Error: " + header + header = "TEST FAILURE: " + header expected := fmt.Sprintf("* Expected: %+v", expect) got := fmt.Sprintf("* Current: %+v", current) @@ -37,7 +37,7 @@ func Assert( func Fatal(ctx context.Context, msg string, err error) { logger.CtxErr(ctx, err).Error("test failure: " + msg) - fmt.Println(msg+": ", err) + fmt.Println("TEST FAILURE: "+msg+": ", err) os.Exit(1) } diff --git a/src/cmd/sanity_test/export/groups.go b/src/cmd/sanity_test/export/groups.go new file mode 100644 index 000000000..6da5796e2 --- /dev/null +++ b/src/cmd/sanity_test/export/groups.go @@ -0,0 +1,16 @@ +package export + +import ( + "context" + + "github.com/alcionai/corso/src/cmd/sanity_test/common" + "github.com/alcionai/corso/src/pkg/services/m365/api" +) + +func CheckGroupsExport( + ctx context.Context, + ac api.Client, + envs common.Envs, +) { + // TODO +} diff --git a/src/cmd/sanity_test/export/onedrive.go b/src/cmd/sanity_test/export/onedrive.go index 3d5564bcc..5e78ece04 100644 --- a/src/cmd/sanity_test/export/onedrive.go +++ b/src/cmd/sanity_test/export/onedrive.go @@ -3,28 +3,21 @@ package export import ( "context" "fmt" - "os" "path/filepath" "time" - "github.com/alcionai/clues" - msgraphsdk "github.com/microsoftgraph/msgraph-sdk-go" - "github.com/alcionai/corso/src/cmd/sanity_test/common" "github.com/alcionai/corso/src/cmd/sanity_test/restore" "github.com/alcionai/corso/src/internal/common/ptr" + "github.com/alcionai/corso/src/pkg/services/m365/api" ) func CheckOneDriveExport( ctx context.Context, - client *msgraphsdk.GraphServiceClient, - userID, folderName, dataFolder string, + ac api.Client, + envs 
common.Envs, ) { - drive, err := client. - Users(). - ByUserId(userID). - Drive(). - Get(ctx, nil) + drive, err := ac.Users().GetDefaultDrive(ctx, envs.UserID) if err != nil { common.Fatal(ctx, "getting the drive:", err) } @@ -36,37 +29,19 @@ func CheckOneDriveExport( startTime = time.Now() ) - err = filepath.Walk(folderName, func(path string, info os.FileInfo, err error) error { - if err != nil { - return clues.Stack(err) - } - - if info.IsDir() { - return nil - } - - relPath, err := filepath.Rel(folderName, path) - if err != nil { - return clues.Stack(err) - } - - exportFileSizes[relPath] = info.Size() - if startTime.After(info.ModTime()) { - startTime = info.ModTime() - } - - return nil - }) + err = filepath.Walk( + envs.FolderName, + common.FilepathWalker(envs.FolderName, exportFileSizes, startTime)) if err != nil { fmt.Println("Error walking the path:", err) } _ = restore.PopulateDriveDetails( ctx, - client, + ac, ptr.Val(drive.GetId()), - folderName, - dataFolder, + envs.FolderName, + envs.DataFolder, fileSizes, map[string][]common.PermissionInfo{}, startTime) diff --git a/src/cmd/sanity_test/export/sharepoint.go b/src/cmd/sanity_test/export/sharepoint.go index 55ab8ed5c..d53236f34 100644 --- a/src/cmd/sanity_test/export/sharepoint.go +++ b/src/cmd/sanity_test/export/sharepoint.go @@ -3,28 +3,21 @@ package export import ( "context" "fmt" - "os" "path/filepath" "time" - "github.com/alcionai/clues" - msgraphsdk "github.com/microsoftgraph/msgraph-sdk-go" - "github.com/alcionai/corso/src/cmd/sanity_test/common" "github.com/alcionai/corso/src/cmd/sanity_test/restore" "github.com/alcionai/corso/src/internal/common/ptr" + "github.com/alcionai/corso/src/pkg/services/m365/api" ) func CheckSharePointExport( ctx context.Context, - client *msgraphsdk.GraphServiceClient, - siteID, folderName, dataFolder string, + ac api.Client, + envs common.Envs, ) { - drive, err := client. - Sites(). - BySiteId(siteID). - Drive(). 
- Get(ctx, nil) + drive, err := ac.Sites().GetDefaultDrive(ctx, envs.SiteID) if err != nil { common.Fatal(ctx, "getting the drive:", err) } @@ -36,37 +29,19 @@ func CheckSharePointExport( startTime = time.Now() ) - err = filepath.Walk(folderName, func(path string, info os.FileInfo, err error) error { - if err != nil { - return clues.Stack(err) - } - - if info.IsDir() { - return nil - } - - relPath, err := filepath.Rel(folderName, path) - if err != nil { - return clues.Stack(err) - } - - exportFileSizes[relPath] = info.Size() - if startTime.After(info.ModTime()) { - startTime = info.ModTime() - } - - return nil - }) + err = filepath.Walk( + envs.FolderName, + common.FilepathWalker(envs.FolderName, exportFileSizes, startTime)) if err != nil { fmt.Println("Error walking the path:", err) } _ = restore.PopulateDriveDetails( ctx, - client, + ac, ptr.Val(drive.GetId()), - folderName, - dataFolder, + envs.FolderName, + envs.DataFolder, fileSizes, map[string][]common.PermissionInfo{}, startTime) diff --git a/src/cmd/sanity_test/restore/exchange.go b/src/cmd/sanity_test/restore/exchange.go index 2dc65e6e1..dd51e5b40 100644 --- a/src/cmd/sanity_test/restore/exchange.go +++ b/src/cmd/sanity_test/restore/exchange.go @@ -3,99 +3,43 @@ package restore import ( "context" "fmt" - stdpath "path" - "strings" - "time" "github.com/alcionai/clues" - msgraphsdk "github.com/microsoftgraph/msgraph-sdk-go" "github.com/microsoftgraph/msgraph-sdk-go/models" - "github.com/microsoftgraph/msgraph-sdk-go/users" "github.com/alcionai/corso/src/cmd/sanity_test/common" "github.com/alcionai/corso/src/internal/common/ptr" - "github.com/alcionai/corso/src/pkg/filters" + "github.com/alcionai/corso/src/pkg/services/m365/api" ) // CheckEmailRestoration verifies that the emails count in restored folder is equivalent to // emails in actual m365 account func CheckEmailRestoration( ctx context.Context, - client *msgraphsdk.GraphServiceClient, - testUser, folderName, dataFolder, baseBackupFolder string, - startTime time.Time, + ac api.Client, + envs common.Envs, ) { var ( - restoreFolder models.MailFolderable - itemCount = make(map[string]int32) - restoreItemCount = make(map[string]int32) - builder = client.Users().ByUserId(testUser).MailFolders() + folderNameToItemCount = make(map[string]int32) + folderNameToRestoreItemCount = make(map[string]int32) ) - for { - result, err := builder.Get(ctx, nil) - if err != nil { - common.Fatal(ctx, "getting mail folders", err) - } + restoredTree := buildSanitree(ctx, ac, envs.UserID, envs.FolderName) + dataTree := buildSanitree(ctx, ac, envs.UserID, envs.DataFolder) - values := result.GetValue() - - for _, v := range values { - itemName := ptr.Val(v.GetDisplayName()) - - if itemName == folderName { - restoreFolder = v - continue - } - - if itemName == dataFolder || itemName == baseBackupFolder { - // otherwise, recursively aggregate all child folders. 
- getAllMailSubFolders(ctx, client, testUser, v, itemName, dataFolder, itemCount) - - itemCount[itemName] = ptr.Val(v.GetTotalItemCount()) - } - } - - link, ok := ptr.ValOK(result.GetOdataNextLink()) - if !ok { - break - } - - builder = users.NewItemMailFoldersRequestBuilder(link, client.GetAdapter()) - } - - folderID := ptr.Val(restoreFolder.GetId()) - folderName = ptr.Val(restoreFolder.GetDisplayName()) ctx = clues.Add( ctx, - "restore_folder_id", folderID, - "restore_folder_name", folderName) + "restore_folder_id", restoredTree.ContainerID, + "restore_folder_name", restoredTree.ContainerName, + "original_folder_id", dataTree.ContainerID, + "original_folder_name", dataTree.ContainerName) - childFolder, err := client. - Users(). - ByUserId(testUser). - MailFolders(). - ByMailFolderId(folderID). - ChildFolders(). - Get(ctx, nil) - if err != nil { - common.Fatal(ctx, "getting restore folder child folders", err) - } + verifyEmailData(ctx, folderNameToRestoreItemCount, folderNameToItemCount) - for _, fld := range childFolder.GetValue() { - restoreDisplayName := ptr.Val(fld.GetDisplayName()) - - // check if folder is the data folder we loaded or the base backup to verify - // the incremental backup worked fine - if strings.EqualFold(restoreDisplayName, dataFolder) || strings.EqualFold(restoreDisplayName, baseBackupFolder) { - count, _ := ptr.ValOK(fld.GetTotalItemCount()) - - restoreItemCount[restoreDisplayName] = count - checkAllSubFolder(ctx, client, fld, testUser, restoreDisplayName, dataFolder, restoreItemCount) - } - } - - verifyEmailData(ctx, restoreItemCount, itemCount) + common.AssertEqualTrees[models.MailFolderable]( + ctx, + dataTree, + restoredTree.Children[envs.DataFolder]) } func verifyEmailData(ctx context.Context, restoreMessageCount, messageCount map[string]int32) { @@ -111,109 +55,71 @@ func verifyEmailData(ctx context.Context, restoreMessageCount, messageCount map[ } } -// getAllSubFolder will recursively check for all subfolders and get the corresponding -// email count. -func getAllMailSubFolders( +func buildSanitree( ctx context.Context, - client *msgraphsdk.GraphServiceClient, - testUser string, - r models.MailFolderable, - parentFolder, - dataFolder string, - messageCount map[string]int32, -) { - var ( - folderID = ptr.Val(r.GetId()) - count int32 = 99 - options = &users.ItemMailFoldersItemChildFoldersRequestBuilderGetRequestConfiguration{ - QueryParameters: &users.ItemMailFoldersItemChildFoldersRequestBuilderGetQueryParameters{ - Top: &count, - }, - } - ) - - ctx = clues.Add(ctx, "parent_folder_id", folderID) - - childFolder, err := client. - Users(). - ByUserId(testUser). - MailFolders(). - ByMailFolderId(folderID). - ChildFolders(). 
- Get(ctx, options) + ac api.Client, + userID, folderName string, +) *common.Sanitree[models.MailFolderable] { + gcc, err := ac.Mail().GetContainerByName( + ctx, + userID, + api.MsgFolderRoot, + folderName) if err != nil { - common.Fatal(ctx, "getting mail subfolders", err) + common.Fatal( + ctx, + fmt.Sprintf("finding folder by name %q", folderName), + err) } - for _, child := range childFolder.GetValue() { - var ( - childDisplayName = ptr.Val(child.GetDisplayName()) - childFolderCount = ptr.Val(child.GetChildFolderCount()) - //nolint:forbidigo - fullFolderName = stdpath.Join(parentFolder, childDisplayName) - ) + mmf, ok := gcc.(models.MailFolderable) + if !ok { + common.Fatal( + ctx, + "mail folderable required", + clues.New("casting "+*gcc.GetDisplayName()+" to models.MailFolderable")) + } - if filters.PathContains([]string{dataFolder}).Compare(fullFolderName) { - messageCount[fullFolderName] = ptr.Val(child.GetTotalItemCount()) - // recursively check for subfolders - if childFolderCount > 0 { - parentFolder := fullFolderName + root := &common.Sanitree[models.MailFolderable]{ + Container: mmf, + ContainerID: ptr.Val(mmf.GetId()), + ContainerName: ptr.Val(mmf.GetDisplayName()), + ContainsItems: int(ptr.Val(mmf.GetTotalItemCount())), + Children: map[string]*common.Sanitree[models.MailFolderable]{}, + } - getAllMailSubFolders(ctx, client, testUser, child, parentFolder, dataFolder, messageCount) - } - } - } -} - -// checkAllSubFolder will recursively traverse inside the restore folder and -// verify that data matched in all subfolders -func checkAllSubFolder( - ctx context.Context, - client *msgraphsdk.GraphServiceClient, - r models.MailFolderable, - testUser, - parentFolder, - dataFolder string, - restoreMessageCount map[string]int32, -) { - var ( - folderID = ptr.Val(r.GetId()) - count int32 = 99 - options = &users.ItemMailFoldersItemChildFoldersRequestBuilderGetRequestConfiguration{ - QueryParameters: &users.ItemMailFoldersItemChildFoldersRequestBuilderGetQueryParameters{ - Top: &count, - }, - } - ) - - childFolder, err := client. - Users(). - ByUserId(testUser). - MailFolders(). - ByMailFolderId(folderID). - ChildFolders(). 
- Get(ctx, options) - if err != nil { - common.Fatal(ctx, "getting mail subfolders", err) - } - - for _, child := range childFolder.GetValue() { - var ( - childDisplayName = ptr.Val(child.GetDisplayName()) - //nolint:forbidigo - fullFolderName = stdpath.Join(parentFolder, childDisplayName) - ) - - if filters.PathContains([]string{dataFolder}).Compare(fullFolderName) { - childTotalCount, _ := ptr.ValOK(child.GetTotalItemCount()) - restoreMessageCount[fullFolderName] = childTotalCount - } - - childFolderCount := ptr.Val(child.GetChildFolderCount()) - - if childFolderCount > 0 { - parentFolder := fullFolderName - checkAllSubFolder(ctx, client, child, testUser, parentFolder, dataFolder, restoreMessageCount) + recurseSubfolders(ctx, ac, root, userID) + + return root +} + +func recurseSubfolders( + ctx context.Context, + ac api.Client, + parent *common.Sanitree[models.MailFolderable], + userID string, +) { + childFolders, err := ac.Mail().GetContainerChildren( + ctx, + userID, + parent.ContainerID) + if err != nil { + common.Fatal(ctx, "getting subfolders", err) + } + + for _, child := range childFolders { + c := &common.Sanitree[models.MailFolderable]{ + Container: child, + ContainerID: ptr.Val(child.GetId()), + ContainerName: ptr.Val(child.GetDisplayName()), + ContainsItems: int(ptr.Val(child.GetTotalItemCount())), + Children: map[string]*common.Sanitree[models.MailFolderable]{}, + } + + parent.Children[c.ContainerName] = c + + if ptr.Val(child.GetChildFolderCount()) > 0 { + recurseSubfolders(ctx, ac, c, userID) } } } diff --git a/src/cmd/sanity_test/restore/groups.go b/src/cmd/sanity_test/restore/groups.go new file mode 100644 index 000000000..190b4481d --- /dev/null +++ b/src/cmd/sanity_test/restore/groups.go @@ -0,0 +1,16 @@ +package restore + +import ( + "context" + + "github.com/alcionai/corso/src/cmd/sanity_test/common" + "github.com/alcionai/corso/src/pkg/services/m365/api" +) + +func CheckGroupsRestoration( + ctx context.Context, + ac api.Client, + envs common.Envs, +) { + // TODO +} diff --git a/src/cmd/sanity_test/restore/onedrive.go b/src/cmd/sanity_test/restore/onedrive.go index 14fa3b8cd..1efddc87d 100644 --- a/src/cmd/sanity_test/restore/onedrive.go +++ b/src/cmd/sanity_test/restore/onedrive.go @@ -7,12 +7,12 @@ import ( "time" "github.com/alcionai/clues" - msgraphsdk "github.com/microsoftgraph/msgraph-sdk-go" "golang.org/x/exp/slices" "github.com/alcionai/corso/src/cmd/sanity_test/common" "github.com/alcionai/corso/src/internal/common/ptr" "github.com/alcionai/corso/src/pkg/path" + "github.com/alcionai/corso/src/pkg/services/m365/api" ) const ( @@ -21,34 +21,29 @@ const ( func CheckOneDriveRestoration( ctx context.Context, - client *msgraphsdk.GraphServiceClient, - userID, folderName, dataFolder string, - startTime time.Time, + ac api.Client, + envs common.Envs, ) { - drive, err := client. - Users(). - ByUserId(userID). - Drive(). 
- Get(ctx, nil) + drive, err := ac.Users().GetDefaultDrive(ctx, envs.UserID) if err != nil { common.Fatal(ctx, "getting the drive:", err) } checkDriveRestoration( ctx, - client, + ac, path.OneDriveService, - folderName, + envs.FolderName, ptr.Val(drive.GetId()), ptr.Val(drive.GetName()), - dataFolder, - startTime, + envs.DataFolder, + envs.StartTime, false) } func checkDriveRestoration( ctx context.Context, - client *msgraphsdk.GraphServiceClient, + ac api.Client, service path.ServiceType, folderName, driveID, @@ -70,7 +65,7 @@ func checkDriveRestoration( restoreFolderID := PopulateDriveDetails( ctx, - client, + ac, driveID, folderName, dataFolder, @@ -78,7 +73,14 @@ func checkDriveRestoration( folderPermissions, startTime) - getRestoredDrive(ctx, client, driveID, restoreFolderID, restoreFile, restoredFolderPermissions, startTime) + getRestoredDrive( + ctx, + ac, + driveID, + restoreFolderID, + restoreFile, + restoredFolderPermissions, + startTime) checkRestoredDriveItemPermissions( ctx, @@ -105,7 +107,7 @@ func checkDriveRestoration( func PopulateDriveDetails( ctx context.Context, - client *msgraphsdk.GraphServiceClient, + ac api.Client, driveID, folderName, dataFolder string, fileSizes map[string]int64, folderPermissions map[string][]common.PermissionInfo, @@ -113,18 +115,12 @@ func PopulateDriveDetails( ) string { var restoreFolderID string - response, err := client. - Drives(). - ByDriveId(driveID). - Items(). - ByDriveItemId("root"). - Children(). - Get(ctx, nil) + children, err := ac.Drives().GetFolderChildren(ctx, driveID, "root") if err != nil { common.Fatal(ctx, "getting drive by id", err) } - for _, driveItem := range response.GetValue() { + for _, driveItem := range children { var ( itemID = ptr.Val(driveItem.GetId()) itemName = ptr.Val(driveItem.GetName()) @@ -156,8 +152,17 @@ func PopulateDriveDetails( continue } - folderPermissions[itemName] = permissionIn(ctx, client, driveID, itemID) - getOneDriveChildFolder(ctx, client, driveID, itemID, itemName, fileSizes, folderPermissions, startTime) + folderPermissions[itemName] = permissionIn(ctx, ac, driveID, itemID) + + getOneDriveChildFolder( + ctx, + ac, + driveID, + itemID, + itemName, + fileSizes, + folderPermissions, + startTime) } return restoreFolderID @@ -228,18 +233,18 @@ func checkRestoredDriveItemPermissions( func getOneDriveChildFolder( ctx context.Context, - client *msgraphsdk.GraphServiceClient, + ac api.Client, driveID, itemID, parentName string, fileSizes map[string]int64, folderPermission map[string][]common.PermissionInfo, startTime time.Time, ) { - response, err := client.Drives().ByDriveId(driveID).Items().ByDriveItemId(itemID).Children().Get(ctx, nil) + children, err := ac.Drives().GetFolderChildren(ctx, driveID, itemID) if err != nil { common.Fatal(ctx, "getting child folder", err) } - for _, driveItem := range response.GetValue() { + for _, driveItem := range children { var ( itemID = ptr.Val(driveItem.GetId()) itemName = ptr.Val(driveItem.GetName()) @@ -268,31 +273,33 @@ func getOneDriveChildFolder( continue } - folderPermission[fullName] = permissionIn(ctx, client, driveID, itemID) - getOneDriveChildFolder(ctx, client, driveID, itemID, fullName, fileSizes, folderPermission, startTime) + folderPermission[fullName] = permissionIn(ctx, ac, driveID, itemID) + getOneDriveChildFolder( + ctx, + ac, + driveID, + itemID, + fullName, + fileSizes, + folderPermission, + startTime) } } func getRestoredDrive( ctx context.Context, - client *msgraphsdk.GraphServiceClient, + ac api.Client, driveID, restoreFolderID string, 
restoreFile map[string]int64, restoreFolder map[string][]common.PermissionInfo, startTime time.Time, ) { - restored, err := client. - Drives(). - ByDriveId(driveID). - Items(). - ByDriveItemId(restoreFolderID). - Children(). - Get(ctx, nil) + children, err := ac.Drives().GetFolderChildren(ctx, driveID, restoreFolderID) if err != nil { common.Fatal(ctx, "getting child folder", err) } - for _, item := range restored.GetValue() { + for _, item := range children { var ( itemID = ptr.Val(item.GetId()) itemName = ptr.Val(item.GetName()) @@ -308,8 +315,16 @@ func getRestoredDrive( continue } - restoreFolder[itemName] = permissionIn(ctx, client, driveID, itemID) - getOneDriveChildFolder(ctx, client, driveID, itemID, itemName, restoreFile, restoreFolder, startTime) + restoreFolder[itemName] = permissionIn(ctx, ac, driveID, itemID) + getOneDriveChildFolder( + ctx, + ac, + driveID, + itemID, + itemName, + restoreFile, + restoreFolder, + startTime) } } @@ -319,18 +334,12 @@ func getRestoredDrive( func permissionIn( ctx context.Context, - client *msgraphsdk.GraphServiceClient, + ac api.Client, driveID, itemID string, ) []common.PermissionInfo { pi := []common.PermissionInfo{} - pcr, err := client. - Drives(). - ByDriveId(driveID). - Items(). - ByDriveItemId(itemID). - Permissions(). - Get(ctx, nil) + pcr, err := ac.Drives().GetItemPermission(ctx, driveID, itemID) if err != nil { common.Fatal(ctx, "getting permission", err) } diff --git a/src/cmd/sanity_test/restore/sharepoint.go b/src/cmd/sanity_test/restore/sharepoint.go index a5146d7a4..62c761dff 100644 --- a/src/cmd/sanity_test/restore/sharepoint.go +++ b/src/cmd/sanity_test/restore/sharepoint.go @@ -2,38 +2,31 @@ package restore import ( "context" - "time" - - msgraphsdk "github.com/microsoftgraph/msgraph-sdk-go" "github.com/alcionai/corso/src/cmd/sanity_test/common" "github.com/alcionai/corso/src/internal/common/ptr" "github.com/alcionai/corso/src/pkg/path" + "github.com/alcionai/corso/src/pkg/services/m365/api" ) func CheckSharePointRestoration( ctx context.Context, - client *msgraphsdk.GraphServiceClient, - siteID, userID, folderName, dataFolder string, - startTime time.Time, + ac api.Client, + envs common.Envs, ) { - drive, err := client. - Sites(). - BySiteId(siteID). - Drive(). 
- Get(ctx, nil) + drive, err := ac.Sites().GetDefaultDrive(ctx, envs.SiteID) if err != nil { common.Fatal(ctx, "getting the drive:", err) } checkDriveRestoration( ctx, - client, + ac, path.SharePointService, - folderName, + envs.FolderName, ptr.Val(drive.GetId()), ptr.Val(drive.GetName()), - dataFolder, - startTime, + envs.DataFolder, + envs.StartTime, true) } diff --git a/src/cmd/sanity_test/sanity_tests.go b/src/cmd/sanity_test/sanity_tests.go index 84bce47a0..cf47744a4 100644 --- a/src/cmd/sanity_test/sanity_tests.go +++ b/src/cmd/sanity_test/sanity_tests.go @@ -2,21 +2,40 @@ package main import ( "context" + "fmt" "os" - "strings" - "time" "github.com/alcionai/clues" - msgraphsdk "github.com/microsoftgraph/msgraph-sdk-go" + "github.com/spf13/cobra" + "github.com/alcionai/corso/src/cli/print" "github.com/alcionai/corso/src/cmd/sanity_test/common" "github.com/alcionai/corso/src/cmd/sanity_test/export" "github.com/alcionai/corso/src/cmd/sanity_test/restore" "github.com/alcionai/corso/src/internal/m365/graph" - "github.com/alcionai/corso/src/internal/tester/tconfig" "github.com/alcionai/corso/src/pkg/logger" ) +// --------------------------------------------------------------------------- +// root command +// --------------------------------------------------------------------------- + +func rootCMD() *cobra.Command { + return &cobra.Command{ + Use: "sanity-test", + Short: "run the sanity tests", + DisableAutoGenTag: true, + RunE: sanityTestRoot, + PersistentPreRun: func(cmd *cobra.Command, args []string) { + fmt.Println("running", cmd.UseLine()) + }, + } +} + +func sanityTestRoot(cmd *cobra.Command, args []string) error { + return print.Only(cmd.Context(), clues.New("must specify a kind of test")) +} + func main() { ls := logger.Settings{ File: logger.GetLogFile(""), @@ -29,60 +48,226 @@ func main() { _ = log.Sync() // flush all logs in the buffer }() + // TODO: only needed for exchange graph.InitializeConcurrencyLimiter(ctx, true, 4) - adapter, err := graph.CreateAdapter( - tconfig.GetM365TenantID(ctx), - os.Getenv("AZURE_CLIENT_ID"), - os.Getenv("AZURE_CLIENT_SECRET")) - if err != nil { - common.Fatal(ctx, "creating adapter", err) - } + root := rootCMD() - var ( - client = msgraphsdk.NewGraphServiceClient(adapter) - testUser = tconfig.GetM365UserID(ctx) - testSite = tconfig.GetM365SiteID(ctx) - testKind = os.Getenv("SANITY_TEST_KIND") // restore or export (cli arg?) 
- testService = os.Getenv("SANITY_TEST_SERVICE") - folder = strings.TrimSpace(os.Getenv("SANITY_TEST_FOLDER")) - dataFolder = os.Getenv("TEST_DATA") - baseBackupFolder = os.Getenv("BASE_BACKUP") - ) + restCMD := restoreCMD() - ctx = clues.Add( - ctx, - "resource_owner", testUser, - "service", testService, - "sanity_restore_folder", folder) + restCMD.AddCommand(restoreExchangeCMD()) + restCMD.AddCommand(restoreOneDriveCMD()) + restCMD.AddCommand(restoreSharePointCMD()) + restCMD.AddCommand(restoreGroupsCMD()) + root.AddCommand(restCMD) - logger.Ctx(ctx).Info("starting sanity test check") + expCMD := exportCMD() - switch testKind { - case "restore": - startTime, _ := common.MustGetTimeFromName(ctx, folder) - clues.Add(ctx, "sanity_restore_start_time", startTime.Format(time.RFC3339)) + expCMD.AddCommand(exportOneDriveCMD()) + expCMD.AddCommand(exportSharePointCMD()) + expCMD.AddCommand(exportGroupsCMD()) + root.AddCommand(expCMD) - switch testService { - case "exchange": - restore.CheckEmailRestoration(ctx, client, testUser, folder, dataFolder, baseBackupFolder, startTime) - case "onedrive": - restore.CheckOneDriveRestoration(ctx, client, testUser, folder, dataFolder, startTime) - case "sharepoint": - restore.CheckSharePointRestoration(ctx, client, testSite, testUser, folder, dataFolder, startTime) - default: - common.Fatal(ctx, "unknown service for restore sanity tests", nil) - } - case "export": - switch testService { - case "onedrive": - export.CheckOneDriveExport(ctx, client, testUser, folder, dataFolder) - case "sharepoint": - export.CheckSharePointExport(ctx, client, testSite, folder, dataFolder) - default: - common.Fatal(ctx, "unknown service for export sanity tests", nil) - } - default: - common.Fatal(ctx, "unknown test kind (expected restore or export)", nil) + if err := root.Execute(); err != nil { + os.Exit(1) } } + +// --------------------------------------------------------------------------- +// restore/export command +// --------------------------------------------------------------------------- + +func exportCMD() *cobra.Command { + return &cobra.Command{ + Use: "restore", + Short: "run the post-export sanity tests", + DisableAutoGenTag: true, + RunE: sanityTestExport, + } +} + +func sanityTestExport(cmd *cobra.Command, args []string) error { + return print.Only(cmd.Context(), clues.New("must specify a service")) +} + +func restoreCMD() *cobra.Command { + return &cobra.Command{ + Use: "restore", + Short: "run the post-restore sanity tests", + DisableAutoGenTag: true, + RunE: sanityTestRestore, + } +} + +func sanityTestRestore(cmd *cobra.Command, args []string) error { + return print.Only(cmd.Context(), clues.New("must specify a service")) +} + +// --------------------------------------------------------------------------- +// service commands - export +// --------------------------------------------------------------------------- + +func exportGroupsCMD() *cobra.Command { + return &cobra.Command{ + Use: "groups", + Short: "run the groups export sanity tests", + DisableAutoGenTag: true, + RunE: sanityTestExportGroups, + } +} + +func sanityTestExportGroups(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + envs := common.EnvVars(ctx) + + ac, err := common.GetAC() + if err != nil { + return print.Only(ctx, err) + } + + export.CheckGroupsExport(ctx, ac, envs) + + return nil +} + +func exportOneDriveCMD() *cobra.Command { + return &cobra.Command{ + Use: "onedrive", + Short: "run the onedrive export sanity tests", + DisableAutoGenTag: true, + RunE: 
sanityTestExportOneDrive, + } +} + +func sanityTestExportOneDrive(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + envs := common.EnvVars(ctx) + + ac, err := common.GetAC() + if err != nil { + return print.Only(ctx, err) + } + + export.CheckOneDriveExport(ctx, ac, envs) + + return nil +} + +func exportSharePointCMD() *cobra.Command { + return &cobra.Command{ + Use: "sharepoint", + Short: "run the sharepoint export sanity tests", + DisableAutoGenTag: true, + RunE: sanityTestExportSharePoint, + } +} + +func sanityTestExportSharePoint(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + envs := common.EnvVars(ctx) + + ac, err := common.GetAC() + if err != nil { + return print.Only(ctx, err) + } + + export.CheckSharePointExport(ctx, ac, envs) + + return nil +} + +// --------------------------------------------------------------------------- +// service commands - restore +// --------------------------------------------------------------------------- + +func restoreExchangeCMD() *cobra.Command { + return &cobra.Command{ + Use: "exchange", + Short: "run the exchange restore sanity tests", + DisableAutoGenTag: true, + RunE: sanityTestRestoreExchange, + } +} + +func sanityTestRestoreExchange(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + envs := common.EnvVars(ctx) + + ac, err := common.GetAC() + if err != nil { + return print.Only(ctx, err) + } + + restore.CheckEmailRestoration(ctx, ac, envs) + + return nil +} + +func restoreOneDriveCMD() *cobra.Command { + return &cobra.Command{ + Use: "onedrive", + Short: "run the onedrive restore sanity tests", + DisableAutoGenTag: true, + RunE: sanityTestRestoreOneDrive, + } +} + +func sanityTestRestoreOneDrive(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + envs := common.EnvVars(ctx) + + ac, err := common.GetAC() + if err != nil { + return print.Only(ctx, err) + } + + restore.CheckOneDriveRestoration(ctx, ac, envs) + + return nil +} + +func restoreSharePointCMD() *cobra.Command { + return &cobra.Command{ + Use: "sharepoint", + Short: "run the sharepoint restore sanity tests", + DisableAutoGenTag: true, + RunE: sanityTestRestoreSharePoint, + } +} + +func sanityTestRestoreSharePoint(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + envs := common.EnvVars(ctx) + + ac, err := common.GetAC() + if err != nil { + return print.Only(ctx, err) + } + + restore.CheckSharePointRestoration(ctx, ac, envs) + + return nil +} + +func restoreGroupsCMD() *cobra.Command { + return &cobra.Command{ + Use: "groups", + Short: "run the groups restore sanity tests", + DisableAutoGenTag: true, + RunE: sanityTestRestoreGroups, + } +} + +func sanityTestRestoreGroups(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + envs := common.EnvVars(ctx) + + ac, err := common.GetAC() + if err != nil { + return print.Only(ctx, err) + } + + restore.CheckGroupsRestoration(ctx, ac, envs) + + return nil +} diff --git a/src/pkg/services/m365/api/client.go b/src/pkg/services/m365/api/client.go index 04f490f12..a3f1fcee7 100644 --- a/src/pkg/services/m365/api/client.go +++ b/src/pkg/services/m365/api/client.go @@ -24,7 +24,7 @@ import ( type Client struct { Credentials account.M365Config - // The Stable service is re-usable for any non-paged request. + // The Stable service is re-usable for any request. // This allows us to maintain performance across async requests. 
Stable graph.Servicer diff --git a/src/pkg/services/m365/api/drive.go b/src/pkg/services/m365/api/drive.go index 6a795e5cc..374fa545c 100644 --- a/src/pkg/services/m365/api/drive.go +++ b/src/pkg/services/m365/api/drive.go @@ -84,6 +84,26 @@ func (c Drives) GetRootFolder( return root, nil } +// TODO: pagination controller needed for completion. +func (c Drives) GetFolderChildren( + ctx context.Context, + driveID, folderID string, +) ([]models.DriveItemable, error) { + response, err := c.Stable. + Client(). + Drives(). + ByDriveId(driveID). + Items(). + ByDriveItemId(folderID). + Children(). + Get(ctx, nil) + if err != nil { + return nil, graph.Wrap(ctx, err, "getting folder children") + } + + return response.GetValue(), nil +} + // --------------------------------------------------------------------------- // Items // --------------------------------------------------------------------------- diff --git a/src/pkg/services/m365/api/mail.go b/src/pkg/services/m365/api/mail.go index 63c3684dd..59ad150ac 100644 --- a/src/pkg/services/m365/api/mail.go +++ b/src/pkg/services/m365/api/mail.go @@ -223,6 +223,26 @@ func (c Mail) PatchFolder( return nil } +// TODO: needs pager implementation for completion +func (c Mail) GetContainerChildren( + ctx context.Context, + userID, containerID string, +) ([]models.MailFolderable, error) { + resp, err := c.Stable. + Client(). + Users(). + ByUserId(userID). + MailFolders(). + ByMailFolderId(containerID). + ChildFolders(). + Get(ctx, nil) + if err != nil { + return nil, graph.Wrap(ctx, err, "getting container child folders") + } + + return resp.GetValue(), nil +} + // --------------------------------------------------------------------------- // items // --------------------------------------------------------------------------- From 5258ef0f3603f149027fa37b0251cd9f45c831c7 Mon Sep 17 00:00:00 2001 From: Keepers Date: Thu, 28 Sep 2023 18:45:16 -0600 Subject: [PATCH 06/26] assert correct error on s3 conn bad configs e2e (#4387) #### Does this PR need a docs update or release note? 
- [x] :no_entry: No #### Type of change - [x] :bug: Bugfix - [x] :robot: Supportability/Tests #### Test Plan - [x] :green_heart: E2E --- src/cli/backup/helpers_test.go | 4 +- src/cli/config/account.go | 2 +- src/cli/config/config.go | 3 +- src/cli/config/config_test.go | 17 ++-- src/cli/repo/filesystem.go | 28 ++++--- src/cli/repo/filesystem_e2e_test.go | 6 +- src/cli/repo/repo.go | 5 ++ src/cli/repo/s3.go | 12 +-- src/cli/repo/s3_e2e_test.go | 117 ++++++++++++++------------- src/cli/restore/exchange_e2e_test.go | 4 +- src/cmd/s3checker/s3checker.go | 4 +- src/internal/kopia/conn.go | 2 +- src/internal/kopia/filesystem.go | 3 +- src/internal/kopia/s3.go | 4 +- src/pkg/repository/repository.go | 5 +- src/pkg/storage/common_test.go | 15 ++-- src/pkg/storage/filesystem.go | 6 +- src/pkg/storage/s3.go | 44 +++++----- src/pkg/storage/s3_test.go | 30 +++---- src/pkg/storage/storage.go | 9 ++- src/pkg/storage/storage_test.go | 12 +-- 21 files changed, 175 insertions(+), 157 deletions(-) diff --git a/src/cli/backup/helpers_test.go b/src/cli/backup/helpers_test.go index e7a59f361..8589d70d0 100644 --- a/src/cli/backup/helpers_test.go +++ b/src/cli/backup/helpers_test.go @@ -140,11 +140,9 @@ func prepM365Test( recorder = strings.Builder{} ) - sc, err := st.StorageConfig() + cfg, err := st.ToS3Config() require.NoError(t, err, clues.ToCore(err)) - cfg := sc.(*storage.S3Config) - force := map[string]string{ tconfig.TestCfgAccountProvider: account.ProviderM365.String(), tconfig.TestCfgStorageProvider: storage.ProviderS3.String(), diff --git a/src/cli/config/account.go b/src/cli/config/account.go index 8d87880d9..22a481b57 100644 --- a/src/cli/config/account.go +++ b/src/cli/config/account.go @@ -54,7 +54,7 @@ func configureAccount( if matchFromConfig { providerType := vpr.GetString(account.AccountProviderTypeKey) if providerType != account.ProviderM365.String() { - return acct, clues.New("unsupported account provider: " + providerType) + return acct, clues.New("unsupported account provider: [" + providerType + "]") } if err := mustMatchConfig(vpr, m365Overrides(overrides)); err != nil { diff --git a/src/cli/config/config.go b/src/cli/config/config.go index df8342ed1..6eab83fea 100644 --- a/src/cli/config/config.go +++ b/src/cli/config/config.go @@ -279,8 +279,7 @@ func getStorageAndAccountWithViper( // possibly read the prior config from a .corso file if readFromFile { - err = vpr.ReadInConfig() - if err != nil { + if err := vpr.ReadInConfig(); err != nil { if _, ok := err.(viper.ConfigFileNotFoundError); !ok { return config, clues.Wrap(err, "reading corso config file: "+vpr.ConfigFileUsed()) } diff --git a/src/cli/config/config_test.go b/src/cli/config/config_test.go index bccc79601..5d9fc42ce 100644 --- a/src/cli/config/config_test.go +++ b/src/cli/config/config_test.go @@ -356,10 +356,9 @@ func (suite *ConfigSuite) TestReadFromFlags() { m365Config, _ := repoDetails.Account.M365Config() - sc, err := repoDetails.Storage.StorageConfig() + s3Cfg, err := repoDetails.Storage.ToS3Config() require.NoError(t, err, "reading s3 config from storage", clues.ToCore(err)) - s3Cfg := sc.(*storage.S3Config) commonConfig, _ := repoDetails.Storage.CommonConfig() pass := commonConfig.Corso.CorsoPassphrase @@ -425,17 +424,21 @@ func (suite *ConfigIntegrationSuite) TestGetStorageAndAccount() { err = writeRepoConfigWithViper(vpr, s3Cfg, m365, repository.Options{}, "repoid") require.NoError(t, err, "writing repo config", clues.ToCore(err)) + require.Equal( + t, + account.ProviderM365.String(), + 
vpr.GetString(account.AccountProviderTypeKey), + "viper should have m365 as the account provider") + err = vpr.ReadInConfig() require.NoError(t, err, "reading repo config", clues.ToCore(err)) cfg, err := getStorageAndAccountWithViper(vpr, storage.ProviderS3, true, true, nil) require.NoError(t, err, "getting storage and account from config", clues.ToCore(err)) - sc, err := cfg.Storage.StorageConfig() + readS3Cfg, err := cfg.Storage.ToS3Config() require.NoError(t, err, "reading s3 config from storage", clues.ToCore(err)) - readS3Cfg := sc.(*storage.S3Config) - assert.Equal(t, readS3Cfg.Bucket, s3Cfg.Bucket) assert.Equal(t, readS3Cfg.Endpoint, s3Cfg.Endpoint) assert.Equal(t, readS3Cfg.Prefix, s3Cfg.Prefix) @@ -482,11 +485,9 @@ func (suite *ConfigIntegrationSuite) TestGetStorageAndAccount_noFileOnlyOverride cfg, err := getStorageAndAccountWithViper(vpr, storage.ProviderS3, false, true, overrides) require.NoError(t, err, "getting storage and account from config", clues.ToCore(err)) - sc, err := cfg.Storage.StorageConfig() + readS3Cfg, err := cfg.Storage.ToS3Config() require.NoError(t, err, "reading s3 config from storage", clues.ToCore(err)) - readS3Cfg := sc.(*storage.S3Config) - assert.Equal(t, readS3Cfg.Bucket, bkt) assert.Equal(t, cfg.RepoID, "") assert.Equal(t, readS3Cfg.Endpoint, end) diff --git a/src/cli/repo/filesystem.go b/src/cli/repo/filesystem.go index ef03d3657..40e8b05a5 100644 --- a/src/cli/repo/filesystem.go +++ b/src/cli/repo/filesystem.go @@ -96,13 +96,11 @@ func initFilesystemCmd(cmd *cobra.Command, args []string) error { cfg.Account.ID(), opt) - sc, err := cfg.Storage.StorageConfig() + storageCfg, err := cfg.Storage.ToFilesystemConfig() if err != nil { return Only(ctx, clues.Wrap(err, "Retrieving filesystem configuration")) } - storageCfg := sc.(*storage.FilesystemConfig) - m365, err := cfg.Account.M365Config() if err != nil { return Only(ctx, clues.Wrap(err, "Failed to parse m365 account config")) @@ -123,14 +121,20 @@ func initFilesystemCmd(cmd *cobra.Command, args []string) error { return nil } - return Only(ctx, clues.Wrap(err, "Failed to initialize a new filesystem repository")) + return Only(ctx, clues.Stack(ErrInitializingRepo, err)) } defer utils.CloseRepo(ctx, r) Infof(ctx, "Initialized a repository at path %s", storageCfg.Path) - if err = config.WriteRepoConfig(ctx, sc, m365, opt.Repo, r.GetID()); err != nil { + err = config.WriteRepoConfig( + ctx, + storageCfg, + m365, + opt.Repo, + r.GetID()) + if err != nil { return Only(ctx, clues.Wrap(err, "Failed to write repository configuration")) } @@ -181,13 +185,11 @@ func connectFilesystemCmd(cmd *cobra.Command, args []string) error { repoID = events.RepoIDNotFound } - sc, err := cfg.Storage.StorageConfig() + storageCfg, err := cfg.Storage.ToFilesystemConfig() if err != nil { return Only(ctx, clues.Wrap(err, "Retrieving filesystem configuration")) } - storageCfg := sc.(*storage.FilesystemConfig) - m365, err := cfg.Account.M365Config() if err != nil { return Only(ctx, clues.Wrap(err, "Failed to parse m365 account config")) @@ -206,14 +208,20 @@ func connectFilesystemCmd(cmd *cobra.Command, args []string) error { } if err := r.Connect(ctx); err != nil { - return Only(ctx, clues.Wrap(err, "Failed to connect to the filesystem repository")) + return Only(ctx, clues.Stack(ErrConnectingRepo, err)) } defer utils.CloseRepo(ctx, r) Infof(ctx, "Connected to repository at path %s", storageCfg.Path) - if err = config.WriteRepoConfig(ctx, sc, m365, opts.Repo, r.GetID()); err != nil { + err = config.WriteRepoConfig( + ctx, + 
storageCfg, + m365, + opts.Repo, + r.GetID()) + if err != nil { return Only(ctx, clues.Wrap(err, "Failed to write repository configuration")) } diff --git a/src/cli/repo/filesystem_e2e_test.go b/src/cli/repo/filesystem_e2e_test.go index 514d0120b..d7a28047c 100644 --- a/src/cli/repo/filesystem_e2e_test.go +++ b/src/cli/repo/filesystem_e2e_test.go @@ -56,9 +56,8 @@ func (suite *FilesystemE2ESuite) TestInitFilesystemCmd() { st := storeTD.NewFilesystemStorage(t) - sc, err := st.StorageConfig() + cfg, err := st.ToFilesystemConfig() require.NoError(t, err, clues.ToCore(err)) - cfg := sc.(*storage.FilesystemConfig) force := map[string]string{ tconfig.TestCfgStorageProvider: storage.ProviderFilesystem.String(), @@ -113,9 +112,8 @@ func (suite *FilesystemE2ESuite) TestConnectFilesystemCmd() { defer flush() st := storeTD.NewFilesystemStorage(t) - sc, err := st.StorageConfig() + cfg, err := st.ToFilesystemConfig() require.NoError(t, err, clues.ToCore(err)) - cfg := sc.(*storage.FilesystemConfig) force := map[string]string{ tconfig.TestCfgAccountProvider: account.ProviderM365.String(), diff --git a/src/cli/repo/repo.go b/src/cli/repo/repo.go index f5430613b..04b3a8413 100644 --- a/src/cli/repo/repo.go +++ b/src/cli/repo/repo.go @@ -20,6 +20,11 @@ const ( maintenanceCommand = "maintenance" ) +var ( + ErrConnectingRepo = clues.New("connecting repository") + ErrInitializingRepo = clues.New("initializing repository") +) + var repoCommands = []func(cmd *cobra.Command) *cobra.Command{ addS3Commands, addFilesystemCommands, diff --git a/src/cli/repo/s3.go b/src/cli/repo/s3.go index a450def04..253be0dfe 100644 --- a/src/cli/repo/s3.go +++ b/src/cli/repo/s3.go @@ -111,13 +111,11 @@ func initS3Cmd(cmd *cobra.Command, args []string) error { cfg.Account.ID(), opt) - sc, err := cfg.Storage.StorageConfig() + s3Cfg, err := cfg.Storage.ToS3Config() if err != nil { return Only(ctx, clues.Wrap(err, "Retrieving s3 configuration")) } - s3Cfg := sc.(*storage.S3Config) - if strings.HasPrefix(s3Cfg.Endpoint, "http://") || strings.HasPrefix(s3Cfg.Endpoint, "https://") { invalidEndpointErr := "endpoint doesn't support specifying protocol. 
" + "pass --disable-tls flag to use http:// instead of default https://" @@ -145,7 +143,7 @@ func initS3Cmd(cmd *cobra.Command, args []string) error { return nil } - return Only(ctx, clues.Wrap(err, "Failed to initialize a new S3 repository")) + return Only(ctx, clues.Stack(ErrInitializingRepo, err)) } defer utils.CloseRepo(ctx, r) @@ -194,13 +192,11 @@ func connectS3Cmd(cmd *cobra.Command, args []string) error { repoID = events.RepoIDNotFound } - sc, err := cfg.Storage.StorageConfig() + s3Cfg, err := cfg.Storage.ToS3Config() if err != nil { return Only(ctx, clues.Wrap(err, "Retrieving s3 configuration")) } - s3Cfg := sc.(*storage.S3Config) - m365, err := cfg.Account.M365Config() if err != nil { return Only(ctx, clues.Wrap(err, "Failed to parse m365 account config")) @@ -226,7 +222,7 @@ func connectS3Cmd(cmd *cobra.Command, args []string) error { } if err := r.Connect(ctx); err != nil { - return Only(ctx, clues.Wrap(err, "Failed to connect to the S3 repository")) + return Only(ctx, clues.Stack(ErrConnectingRepo, err)) } defer utils.CloseRepo(ctx, r) diff --git a/src/cli/repo/s3_e2e_test.go b/src/cli/repo/s3_e2e_test.go index 0bca84ca6..a9f50e277 100644 --- a/src/cli/repo/s3_e2e_test.go +++ b/src/cli/repo/s3_e2e_test.go @@ -8,10 +8,12 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + "golang.org/x/exp/maps" "github.com/alcionai/corso/src/cli" "github.com/alcionai/corso/src/cli/config" cliTD "github.com/alcionai/corso/src/cli/testdata" + "github.com/alcionai/corso/src/internal/common/str" "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/internal/tester/tconfig" "github.com/alcionai/corso/src/pkg/account" @@ -64,9 +66,8 @@ func (suite *S3E2ESuite) TestInitS3Cmd() { st := storeTD.NewPrefixedS3Storage(t) - sc, err := st.StorageConfig() + cfg, err := st.ToS3Config() require.NoError(t, err, clues.ToCore(err)) - cfg := sc.(*storage.S3Config) vpr, configFP := tconfig.MakeTempTestConfigClone(t, nil) if !test.hasConfigFile { @@ -102,10 +103,9 @@ func (suite *S3E2ESuite) TestInitMultipleTimes() { defer flush() st := storeTD.NewPrefixedS3Storage(t) - sc, err := st.StorageConfig() - require.NoError(t, err, clues.ToCore(err)) - cfg := sc.(*storage.S3Config) + cfg, err := st.ToS3Config() + require.NoError(t, err, clues.ToCore(err)) vpr, configFP := tconfig.MakeTempTestConfigClone(t, nil) @@ -134,11 +134,9 @@ func (suite *S3E2ESuite) TestInitS3Cmd_missingBucket() { st := storeTD.NewPrefixedS3Storage(t) - sc, err := st.StorageConfig() + cfg, err := st.ToS3Config() require.NoError(t, err, clues.ToCore(err)) - cfg := sc.(*storage.S3Config) - force := map[string]string{ tconfig.TestCfgBucket: "", } @@ -189,9 +187,9 @@ func (suite *S3E2ESuite) TestConnectS3Cmd() { defer flush() st := storeTD.NewPrefixedS3Storage(t) - sc, err := st.StorageConfig() + + cfg, err := st.ToS3Config() require.NoError(t, err, clues.ToCore(err)) - cfg := sc.(*storage.S3Config) force := map[string]string{ tconfig.TestCfgAccountProvider: account.ProviderM365.String(), @@ -234,58 +232,63 @@ func (suite *S3E2ESuite) TestConnectS3Cmd() { } } -func (suite *S3E2ESuite) TestConnectS3Cmd_BadBucket() { - t := suite.T() - ctx, flush := tester.NewContext(t) +func (suite *S3E2ESuite) TestConnectS3Cmd_badInputs() { + table := []struct { + name string + bucket string + prefix string + expectErr func(t *testing.T, err error) + }{ + { + name: "bucket", + bucket: "wrong", + expectErr: func(t *testing.T, err error) { + assert.ErrorIs(t, err, 
storage.ErrVerifyingConfigStorage, clues.ToCore(err)) + }, + }, + { + name: "prefix", + prefix: "wrong", + expectErr: func(t *testing.T, err error) { + assert.ErrorIs(t, err, storage.ErrVerifyingConfigStorage, clues.ToCore(err)) + }, + }, + } + for _, test := range table { + suite.Run(test.name, func() { + t := suite.T() - defer flush() + ctx, flush := tester.NewContext(t) + defer flush() - st := storeTD.NewPrefixedS3Storage(t) - sc, err := st.StorageConfig() - require.NoError(t, err, clues.ToCore(err)) + st := storeTD.NewPrefixedS3Storage(t) + cfg, err := st.ToS3Config() + require.NoError(t, err, clues.ToCore(err)) - cfg := sc.(*storage.S3Config) + bucket := str.First(test.bucket, cfg.Bucket) + prefix := str.First(test.prefix, cfg.Prefix) - vpr, configFP := tconfig.MakeTempTestConfigClone(t, nil) + over := map[string]string{} + acct := tconfig.NewM365Account(t) - ctx = config.SetViper(ctx, vpr) + maps.Copy(over, acct.Config) + over[account.AccountProviderTypeKey] = account.ProviderM365.String() + over[storage.StorageProviderTypeKey] = storage.ProviderS3.String() - cmd := cliTD.StubRootCmd( - "repo", "connect", "s3", - "--config-file", configFP, - "--bucket", "wrong", - "--prefix", cfg.Prefix) - cli.BuildCommandTree(cmd) + vpr, configFP := tconfig.MakeTempTestConfigClone(t, over) + ctx = config.SetViper(ctx, vpr) - // run the command - err = cmd.ExecuteContext(ctx) - require.Error(t, err, clues.ToCore(err)) -} - -func (suite *S3E2ESuite) TestConnectS3Cmd_BadPrefix() { - t := suite.T() - ctx, flush := tester.NewContext(t) - - defer flush() - - st := storeTD.NewPrefixedS3Storage(t) - sc, err := st.StorageConfig() - require.NoError(t, err, clues.ToCore(err)) - - cfg := sc.(*storage.S3Config) - - vpr, configFP := tconfig.MakeTempTestConfigClone(t, nil) - - ctx = config.SetViper(ctx, vpr) - - cmd := cliTD.StubRootCmd( - "repo", "connect", "s3", - "--config-file", configFP, - "--bucket", cfg.Bucket, - "--prefix", "wrong") - cli.BuildCommandTree(cmd) - - // run the command - err = cmd.ExecuteContext(ctx) - require.Error(t, err, clues.ToCore(err)) + cmd := cliTD.StubRootCmd( + "repo", "connect", "s3", + "--config-file", configFP, + "--bucket", bucket, + "--prefix", prefix) + cli.BuildCommandTree(cmd) + + // run the command + err = cmd.ExecuteContext(ctx) + require.Error(t, err, clues.ToCore(err)) + test.expectErr(t, err) + }) + } } diff --git a/src/cli/restore/exchange_e2e_test.go b/src/cli/restore/exchange_e2e_test.go index effbf14d8..36c6b8973 100644 --- a/src/cli/restore/exchange_e2e_test.go +++ b/src/cli/restore/exchange_e2e_test.go @@ -66,11 +66,9 @@ func (suite *RestoreExchangeE2ESuite) SetupSuite() { suite.acct = tconfig.NewM365Account(t) suite.st = storeTD.NewPrefixedS3Storage(t) - sc, err := suite.st.StorageConfig() + cfg, err := suite.st.ToS3Config() require.NoError(t, err, clues.ToCore(err)) - cfg := sc.(*storage.S3Config) - force := map[string]string{ tconfig.TestCfgAccountProvider: account.ProviderM365.String(), tconfig.TestCfgStorageProvider: storage.ProviderS3.String(), diff --git a/src/cmd/s3checker/s3checker.go b/src/cmd/s3checker/s3checker.go index 0c42b8aa5..7aa11e79a 100644 --- a/src/cmd/s3checker/s3checker.go +++ b/src/cmd/s3checker/s3checker.go @@ -197,13 +197,11 @@ func handleCheckerCommand(cmd *cobra.Command, args []string, f flags) error { return clues.Wrap(err, "getting storage config") } - sc, err := repoDetails.Storage.StorageConfig() + cfg, err := repoDetails.Storage.ToS3Config() if err != nil { return clues.Wrap(err, "getting S3 config") } - cfg := 
sc.(*storage.S3Config) - endpoint := defaultS3Endpoint if len(cfg.Endpoint) > 0 { endpoint = cfg.Endpoint diff --git a/src/internal/kopia/conn.go b/src/internal/kopia/conn.go index 001fd3f0f..ee8a9132e 100644 --- a/src/internal/kopia/conn.go +++ b/src/internal/kopia/conn.go @@ -205,7 +205,7 @@ func (w *conn) commonConnect( bst, password, kopiaOpts); err != nil { - return clues.Wrap(err, "connecting to repo").WithClues(ctx) + return clues.Wrap(err, "connecting to kopia repo").WithClues(ctx) } if err := w.open(ctx, cfgFile, password); err != nil { diff --git a/src/internal/kopia/filesystem.go b/src/internal/kopia/filesystem.go index 3081ac286..e67afa85e 100644 --- a/src/internal/kopia/filesystem.go +++ b/src/internal/kopia/filesystem.go @@ -16,12 +16,11 @@ func filesystemStorage( repoOpts repository.Options, s storage.Storage, ) (blob.Storage, error) { - cfg, err := s.StorageConfig() + fsCfg, err := s.ToFilesystemConfig() if err != nil { return nil, clues.Stack(err).WithClues(ctx) } - fsCfg := cfg.(*storage.FilesystemConfig) opts := filesystem.Options{ Path: fsCfg.Path, } diff --git a/src/internal/kopia/s3.go b/src/internal/kopia/s3.go index f4a379ada..b7dbbd5cf 100644 --- a/src/internal/kopia/s3.go +++ b/src/internal/kopia/s3.go @@ -20,13 +20,11 @@ func s3BlobStorage( repoOpts repository.Options, s storage.Storage, ) (blob.Storage, error) { - sc, err := s.StorageConfig() + cfg, err := s.ToS3Config() if err != nil { return nil, clues.Stack(err).WithClues(ctx) } - cfg := sc.(*storage.S3Config) - endpoint := defaultS3Endpoint if len(cfg.Endpoint) > 0 { endpoint = cfg.Endpoint diff --git a/src/pkg/repository/repository.go b/src/pkg/repository/repository.go index 2f836570c..277eb1bba 100644 --- a/src/pkg/repository/repository.go +++ b/src/pkg/repository/repository.go @@ -187,6 +187,8 @@ func (r *repository) Initialize( } }() + observe.Message(ctx, "Initializing repository") + kopiaRef := kopia.NewConn(r.Storage) if err := kopiaRef.Initialize(ctx, r.Opts.Repo, retentionOpts); err != nil { // replace common internal errors so that sdk users can check results with errors.Is() @@ -237,8 +239,7 @@ func (r *repository) Connect(ctx context.Context) (err error) { } }() - progressBar := observe.MessageWithCompletion(ctx, "Connecting to repository") - defer close(progressBar) + observe.Message(ctx, "Connecting to repository") kopiaRef := kopia.NewConn(r.Storage) if err := kopiaRef.Connect(ctx, r.Opts.Repo); err != nil { diff --git a/src/pkg/storage/common_test.go b/src/pkg/storage/common_test.go index 02668e611..e8b4a89ba 100644 --- a/src/pkg/storage/common_test.go +++ b/src/pkg/storage/common_test.go @@ -7,16 +7,17 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/suite" + "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/pkg/credentials" "github.com/alcionai/corso/src/pkg/storage" ) -type CommonCfgSuite struct { - suite.Suite +type CommonCfgUnitSuite struct { + tester.Suite } -func TestCommonCfgSuite(t *testing.T) { - suite.Run(t, new(CommonCfgSuite)) +func TestCommonCfgUnitSuite(t *testing.T) { + suite.Run(t, &CommonCfgUnitSuite{Suite: tester.NewUnitSuite(t)}) } var goodCommonConfig = storage.CommonConfig{ @@ -25,7 +26,7 @@ var goodCommonConfig = storage.CommonConfig{ }, } -func (suite *CommonCfgSuite) TestCommonConfig_Config() { +func (suite *CommonCfgUnitSuite) TestCommonConfig_Config() { cfg := goodCommonConfig c, err := cfg.StringConfig() assert.NoError(suite.T(), err, clues.ToCore(err)) @@ -43,7 +44,7 @@ func (suite *CommonCfgSuite) 
TestCommonConfig_Config() { } } -func (suite *CommonCfgSuite) TestStorage_CommonConfig() { +func (suite *CommonCfgUnitSuite) TestStorage_CommonConfig() { t := suite.T() in := goodCommonConfig @@ -55,7 +56,7 @@ func (suite *CommonCfgSuite) TestStorage_CommonConfig() { assert.Equal(t, in.CorsoPassphrase, out.CorsoPassphrase) } -func (suite *CommonCfgSuite) TestStorage_CommonConfig_InvalidCases() { +func (suite *CommonCfgUnitSuite) TestStorage_CommonConfig_InvalidCases() { // missing required properties table := []struct { name string diff --git a/src/pkg/storage/filesystem.go b/src/pkg/storage/filesystem.go index ca4cfe098..08dacc62c 100644 --- a/src/pkg/storage/filesystem.go +++ b/src/pkg/storage/filesystem.go @@ -20,6 +20,10 @@ type FilesystemConfig struct { Path string } +func (s Storage) ToFilesystemConfig() (*FilesystemConfig, error) { + return buildFilesystemConfigFromMap(s.Config) +} + func buildFilesystemConfigFromMap(config map[string]string) (*FilesystemConfig, error) { c := &FilesystemConfig{} @@ -69,7 +73,7 @@ func (c *FilesystemConfig) ApplyConfigOverrides( if matchFromConfig { providerType := cast.ToString(g.Get(StorageProviderTypeKey)) if providerType != ProviderFilesystem.String() { - return clues.New("unsupported storage provider in config file: " + providerType) + return clues.New("unsupported storage provider in config file: [" + providerType + "]") } // This is matching override values from config file. diff --git a/src/pkg/storage/s3.go b/src/pkg/storage/s3.go index c689e77cd..7f2e8688f 100644 --- a/src/pkg/storage/s3.go +++ b/src/pkg/storage/s3.go @@ -62,6 +62,28 @@ var s3constToTomlKeyMap = map[string]string{ StorageProviderTypeKey: StorageProviderTypeKey, } +func (s Storage) ToS3Config() (*S3Config, error) { + return buildS3ConfigFromMap(s.Config) +} + +func buildS3ConfigFromMap(config map[string]string) (*S3Config, error) { + c := &S3Config{} + + if len(config) > 0 { + c.AccessKey = orEmptyString(config[keyS3AccessKey]) + c.SecretKey = orEmptyString(config[keyS3SecretKey]) + c.SessionToken = orEmptyString(config[keyS3SessionToken]) + + c.Bucket = orEmptyString(config[keyS3Bucket]) + c.Endpoint = orEmptyString(config[keyS3Endpoint]) + c.Prefix = orEmptyString(config[keyS3Prefix]) + c.DoNotUseTLS = str.ParseBool(config[keyS3DoNotUseTLS]) + c.DoNotVerifyTLS = str.ParseBool(config[keyS3DoNotVerifyTLS]) + } + + return c, c.validate() +} + func (c *S3Config) normalize() S3Config { return S3Config{ Bucket: common.NormalizeBucket(c.Bucket), @@ -91,24 +113,6 @@ func (c *S3Config) StringConfig() (map[string]string, error) { return cfg, cn.validate() } -func buildS3ConfigFromMap(config map[string]string) (*S3Config, error) { - c := &S3Config{} - - if len(config) > 0 { - c.AccessKey = orEmptyString(config[keyS3AccessKey]) - c.SecretKey = orEmptyString(config[keyS3SecretKey]) - c.SessionToken = orEmptyString(config[keyS3SessionToken]) - - c.Bucket = orEmptyString(config[keyS3Bucket]) - c.Endpoint = orEmptyString(config[keyS3Endpoint]) - c.Prefix = orEmptyString(config[keyS3Prefix]) - c.DoNotUseTLS = str.ParseBool(config[keyS3DoNotUseTLS]) - c.DoNotVerifyTLS = str.ParseBool(config[keyS3DoNotVerifyTLS]) - } - - return c, c.validate() -} - func (c S3Config) validate() error { check := map[string]string{ Bucket: c.Bucket, @@ -169,11 +173,11 @@ func (c *S3Config) ApplyConfigOverrides( if matchFromConfig { providerType := cast.ToString(kvg.Get(StorageProviderTypeKey)) if providerType != ProviderS3.String() { - return clues.New("unsupported storage provider: " + providerType) + return 
clues.New("unsupported storage provider: [" + providerType + "]") } if err := mustMatchConfig(kvg, s3constToTomlKeyMap, s3Overrides(overrides)); err != nil { - return clues.Wrap(err, "verifying s3 configs in corso config file") + return clues.Stack(err) } } } diff --git a/src/pkg/storage/s3_test.go b/src/pkg/storage/s3_test.go index 2a4b239f9..1e3a2e0ba 100644 --- a/src/pkg/storage/s3_test.go +++ b/src/pkg/storage/s3_test.go @@ -8,15 +8,16 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/pkg/credentials" ) -type S3CfgSuite struct { - suite.Suite +type S3CfgUnitSuite struct { + tester.Suite } -func TestS3CfgSuite(t *testing.T) { - suite.Run(t, new(S3CfgSuite)) +func TestS3CfgUnitSuite(t *testing.T) { + suite.Run(t, &S3CfgUnitSuite{Suite: tester.NewUnitSuite(t)}) } var ( @@ -41,7 +42,7 @@ var ( } ) -func (suite *S3CfgSuite) TestS3Config_Config() { +func (suite *S3CfgUnitSuite) TestS3Config_Config() { s3 := goodS3Config c, err := s3.StringConfig() @@ -60,16 +61,16 @@ func (suite *S3CfgSuite) TestS3Config_Config() { } } -func (suite *S3CfgSuite) TestStorage_S3Config() { +func (suite *S3CfgUnitSuite) TestStorage_S3Config() { t := suite.T() - in := goodS3Config + s, err := NewStorage(ProviderS3, &in) assert.NoError(t, err, clues.ToCore(err)) - sc, err := s.StorageConfig() + + out, err := s.ToS3Config() assert.NoError(t, err, clues.ToCore(err)) - out := sc.(*S3Config) assert.Equal(t, in.Bucket, out.Bucket) assert.Equal(t, in.Endpoint, out.Endpoint) assert.Equal(t, in.Prefix, out.Prefix) @@ -84,7 +85,7 @@ func makeTestS3Cfg(bkt, end, pre, access, secret, session string) S3Config { } } -func (suite *S3CfgSuite) TestStorage_S3Config_invalidCases() { +func (suite *S3CfgUnitSuite) TestStorage_S3Config_invalidCases() { // missing required properties table := []struct { name string @@ -118,13 +119,14 @@ func (suite *S3CfgSuite) TestStorage_S3Config_invalidCases() { st, err := NewStorage(ProviderUnknown, &goodS3Config) assert.NoError(t, err, clues.ToCore(err)) test.amend(st) - _, err = st.StorageConfig() - assert.Error(t, err) + + _, err = st.ToS3Config() + assert.Error(t, err, clues.ToCore(err)) }) } } -func (suite *S3CfgSuite) TestStorage_S3Config_StringConfig() { +func (suite *S3CfgUnitSuite) TestStorage_S3Config_StringConfig() { table := []struct { name string input S3Config @@ -178,7 +180,7 @@ func (suite *S3CfgSuite) TestStorage_S3Config_StringConfig() { } } -func (suite *S3CfgSuite) TestStorage_S3Config_Normalize() { +func (suite *S3CfgUnitSuite) TestStorage_S3Config_Normalize() { const ( prefixedBkt = "s3://bkt" normalBkt = "bkt" diff --git a/src/pkg/storage/storage.go b/src/pkg/storage/storage.go index 11b8863a1..c695ea992 100644 --- a/src/pkg/storage/storage.go +++ b/src/pkg/storage/storage.go @@ -9,6 +9,8 @@ import ( "github.com/alcionai/corso/src/internal/common" ) +var ErrVerifyingConfigStorage = clues.New("verifying configs in corso config file") + type ProviderType int //go:generate stringer -type=ProviderType -linecomment @@ -102,7 +104,7 @@ func (s Storage) StorageConfig() (Configurer, error) { return buildFilesystemConfigFromMap(s.Config) } - return nil, clues.New("unsupported storage provider: " + s.Provider.String()) + return nil, clues.New("unsupported storage provider: [" + s.Provider.String() + "]") } func NewStorageConfig(provider ProviderType) (Configurer, error) { @@ -113,7 +115,7 @@ func NewStorageConfig(provider ProviderType) (Configurer, error) { return 
&FilesystemConfig{}, nil } - return nil, clues.New("unsupported storage provider: " + provider.String()) + return nil, clues.New("unsupported storage provider: [" + provider.String() + "]") } type Getter interface { @@ -167,7 +169,8 @@ func mustMatchConfig( vv := cast.ToString(g.Get(tomlK)) if v != vv { - return clues.New("value of " + k + " (" + v + ") does not match corso configuration value (" + vv + ")") + err := clues.New("value of " + k + " (" + v + ") does not match corso configuration value (" + vv + ")") + return clues.Stack(ErrVerifyingConfigStorage, err) } } diff --git a/src/pkg/storage/storage_test.go b/src/pkg/storage/storage_test.go index 0d2cfbec6..095ea363c 100644 --- a/src/pkg/storage/storage_test.go +++ b/src/pkg/storage/storage_test.go @@ -6,6 +6,8 @@ import ( "github.com/alcionai/clues" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/suite" + + "github.com/alcionai/corso/src/internal/tester" ) type testConfig struct { @@ -17,15 +19,15 @@ func (c testConfig) StringConfig() (map[string]string, error) { return map[string]string{"expect": c.expect}, c.err } -type StorageSuite struct { - suite.Suite +type StorageUnitSuite struct { + tester.Suite } -func TestStorageSuite(t *testing.T) { - suite.Run(t, new(StorageSuite)) +func TestStorageUnitSuite(t *testing.T) { + suite.Run(t, &StorageUnitSuite{Suite: tester.NewUnitSuite(t)}) } -func (suite *StorageSuite) TestNewStorage() { +func (suite *StorageUnitSuite) TestNewStorage() { table := []struct { name string p ProviderType From d5cdf3736971a94722592ce497f752d16f313d4f Mon Sep 17 00:00:00 2001 From: ashmrtn <3891298+ashmrtn@users.noreply.github.com> Date: Thu, 28 Sep 2023 18:50:15 -0700 Subject: [PATCH 07/26] Lock updating info in lazyItem (#4401) Don't allow concurrent reads/writes to info in lazyItem so we can make stronger assumptions about state --- #### Does this PR need a docs update or release note? - [ ] :white_check_mark: Yes, it's included - [ ] :clock1: Yes, but in a later PR - [x] :no_entry: No #### Type of change - [ ] :sunflower: Feature - [ ] :bug: Bugfix - [ ] :world_map: Documentation - [ ] :robot: Supportability/Tests - [ ] :computer: CI/Deployment - [x] :broom: Tech Debt/Cleanup #### Test Plan - [ ] :muscle: Manual - [x] :zap: Unit test - [x] :green_heart: E2E --- src/internal/data/item.go | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/src/internal/data/item.go b/src/internal/data/item.go index 2403e63aa..862699a5d 100644 --- a/src/internal/data/item.go +++ b/src/internal/data/item.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "io" + "sync" "time" "github.com/alcionai/clues" @@ -110,6 +111,7 @@ func NewLazyItem( // made. type lazyItem struct { ctx context.Context + mu sync.Mutex id string errs *fault.Bus itemGetter ItemDataGetter @@ -127,12 +129,18 @@ type lazyItem struct { delInFlight bool } -func (i lazyItem) ID() string { +func (i *lazyItem) ID() string { return i.id } func (i *lazyItem) ToReader() io.ReadCloser { return lazy.NewLazyReadCloser(func() (io.ReadCloser, error) { + // Don't allow getting Item info while trying to initialize said info. + // GetData could be a long running call, but in theory nothing should happen + // with the item until a reader is returned anyway. 
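+		// Because Info() below takes the same mutex, a caller racing Info()
+		// against the first Read() either blocks until info and delInFlight
+		// are written, or observes their final values, never a partial state.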
+ i.mu.Lock() + defer i.mu.Unlock() + reader, info, delInFlight, err := i.itemGetter.GetData(i.ctx, i.errs) if err != nil { return nil, clues.Stack(err) @@ -159,11 +167,14 @@ func (i *lazyItem) ToReader() io.ReadCloser { }) } -func (i lazyItem) Deleted() bool { +func (i *lazyItem) Deleted() bool { return false } -func (i lazyItem) Info() (details.ItemInfo, error) { +func (i *lazyItem) Info() (details.ItemInfo, error) { + i.mu.Lock() + defer i.mu.Unlock() + if i.delInFlight { return details.ItemInfo{}, clues.Stack(ErrNotFound).WithClues(i.ctx) } else if i.info == nil { @@ -174,6 +185,6 @@ func (i lazyItem) Info() (details.ItemInfo, error) { return *i.info, nil } -func (i lazyItem) ModTime() time.Time { +func (i *lazyItem) ModTime() time.Time { return i.modTime } From 5521177aee2a43e8caad55acc8feb2963113523b Mon Sep 17 00:00:00 2001 From: ashmrtn <3891298+ashmrtn@users.noreply.github.com> Date: Thu, 28 Sep 2023 20:29:15 -0700 Subject: [PATCH 08/26] Create generic items without Info() (#4365) Create generic Item implementations that don't implement the ItemInfo interface. These implementations can be used for things like metadata files. --- #### Does this PR need a docs update or release note? - [ ] :white_check_mark: Yes, it's included - [ ] :clock1: Yes, but in a later PR - [x] :no_entry: No #### Type of change - [ ] :sunflower: Feature - [ ] :bug: Bugfix - [ ] :world_map: Documentation - [ ] :robot: Supportability/Tests - [ ] :computer: CI/Deployment - [x] :broom: Tech Debt/Cleanup #### Issue(s) * #4191 #### Test Plan - [ ] :muscle: Manual - [x] :zap: Unit test - [ ] :green_heart: E2E --- src/internal/data/item.go | 120 +++++++++++++++++++++++++-------- src/internal/data/item_test.go | 25 +++++++ 2 files changed, 117 insertions(+), 28 deletions(-) diff --git a/src/internal/data/item.go b/src/internal/data/item.go index 862699a5d..6d316ad6b 100644 --- a/src/internal/data/item.go +++ b/src/internal/data/item.go @@ -16,16 +16,23 @@ import ( ) var ( + _ Item = &unindexedPrefetchedItem{} + _ ItemModTime = &unindexedPrefetchedItem{} + _ Item = &prefetchedItem{} _ ItemInfo = &prefetchedItem{} _ ItemModTime = &prefetchedItem{} + + _ Item = &unindexedLazyItem{} + _ ItemModTime = &unindexedLazyItem{} + _ Item = &lazyItem{} _ ItemInfo = &lazyItem{} _ ItemModTime = &lazyItem{} ) func NewDeletedItem(itemID string) Item { - return &prefetchedItem{ + return &unindexedPrefetchedItem{ id: itemID, deleted: true, // TODO(ashmrtn): This really doesn't need to be set since deleted items are @@ -35,24 +42,26 @@ func NewDeletedItem(itemID string) Item { } } -func NewPrefetchedItem( +func NewUnindexedPrefetchedItem( reader io.ReadCloser, itemID string, - info details.ItemInfo, + modTime time.Time, ) Item { - return &prefetchedItem{ + return &unindexedPrefetchedItem{ id: itemID, reader: reader, - info: info, - modTime: info.Modified(), + modTime: modTime, } } -// prefetchedItem represents a single item retrieved from the remote service. -type prefetchedItem struct { +// unindexedPrefetchedItem represents a single item retrieved from the remote +// service. +// +// This item doesn't implement ItemInfo so it's safe to use for items like +// metadata that shouldn't appear in backup details. +type unindexedPrefetchedItem struct { id string reader io.ReadCloser - info details.ItemInfo // modTime is the modified time of the item. It should match the modTime in // info if info is present. Here as a separate field so that deleted items // don't error out by trying to source it from info. 
@@ -63,26 +72,50 @@ type prefetchedItem struct { deleted bool } -func (i prefetchedItem) ID() string { +func (i unindexedPrefetchedItem) ID() string { return i.id } -func (i *prefetchedItem) ToReader() io.ReadCloser { +func (i *unindexedPrefetchedItem) ToReader() io.ReadCloser { return i.reader } -func (i prefetchedItem) Deleted() bool { +func (i unindexedPrefetchedItem) Deleted() bool { return i.deleted } +func (i unindexedPrefetchedItem) ModTime() time.Time { + return i.modTime +} + +func NewPrefetchedItem( + reader io.ReadCloser, + itemID string, + info details.ItemInfo, +) Item { + return &prefetchedItem{ + unindexedPrefetchedItem: unindexedPrefetchedItem{ + id: itemID, + reader: reader, + modTime: info.Modified(), + }, + info: info, + } +} + +// prefetchedItem represents a single item retrieved from the remote service. +// +// This item implements ItemInfo so it should be used for things that need to +// appear in backup details. +type prefetchedItem struct { + unindexedPrefetchedItem + info details.ItemInfo +} + func (i prefetchedItem) Info() (details.ItemInfo, error) { return i.info, nil } -func (i prefetchedItem) ModTime() time.Time { - return i.modTime -} - type ItemDataGetter interface { GetData( context.Context, @@ -90,14 +123,14 @@ type ItemDataGetter interface { ) (io.ReadCloser, *details.ItemInfo, bool, error) } -func NewLazyItem( +func NewUnindexedLazyItem( ctx context.Context, itemGetter ItemDataGetter, itemID string, modTime time.Time, errs *fault.Bus, ) Item { - return &lazyItem{ + return &unindexedLazyItem{ ctx: ctx, id: itemID, itemGetter: itemGetter, @@ -106,10 +139,13 @@ func NewLazyItem( } } -// lazyItem represents a single item retrieved from the remote service. It -// lazily fetches the item's data when the first call to ToReader().Read() is +// unindexedLazyItem represents a single item retrieved from the remote service. +// It lazily fetches the item's data when the first call to ToReader().Read() is // made. -type lazyItem struct { +// +// This item doesn't implement ItemInfo so it's safe to use for items like +// metadata that shouldn't appear in backup details. +type unindexedLazyItem struct { ctx context.Context mu sync.Mutex id string @@ -129,11 +165,11 @@ type lazyItem struct { delInFlight bool } -func (i *lazyItem) ID() string { +func (i *unindexedLazyItem) ID() string { return i.id } -func (i *lazyItem) ToReader() io.ReadCloser { +func (i *unindexedLazyItem) ToReader() io.ReadCloser { return lazy.NewLazyReadCloser(func() (io.ReadCloser, error) { // Don't allow getting Item info while trying to initialize said info. // GetData could be a long running call, but in theory nothing should happen @@ -167,10 +203,42 @@ func (i *lazyItem) ToReader() io.ReadCloser { }) } -func (i *lazyItem) Deleted() bool { +func (i *unindexedLazyItem) Deleted() bool { return false } +func (i *unindexedLazyItem) ModTime() time.Time { + return i.modTime +} + +func NewLazyItem( + ctx context.Context, + itemGetter ItemDataGetter, + itemID string, + modTime time.Time, + errs *fault.Bus, +) Item { + return &lazyItem{ + unindexedLazyItem: unindexedLazyItem{ + ctx: ctx, + id: itemID, + itemGetter: itemGetter, + modTime: modTime, + errs: errs, + }, + } +} + +// lazyItem represents a single item retrieved from the remote service. It +// lazily fetches the item's data when the first call to ToReader().Read() is +// made. +// +// This item implements ItemInfo so it should be used for things that need to +// appear in backup details. 
+type lazyItem struct { + unindexedLazyItem +} + func (i *lazyItem) Info() (details.ItemInfo, error) { i.mu.Lock() defer i.mu.Unlock() @@ -184,7 +252,3 @@ func (i *lazyItem) Info() (details.ItemInfo, error) { return *i.info, nil } - -func (i *lazyItem) ModTime() time.Time { - return i.modTime -} diff --git a/src/internal/data/item_test.go b/src/internal/data/item_test.go index 864e70890..9484613e4 100644 --- a/src/internal/data/item_test.go +++ b/src/internal/data/item_test.go @@ -49,6 +49,31 @@ func TestItemUnitSuite(t *testing.T) { suite.Run(t, &ItemUnitSuite{Suite: tester.NewUnitSuite(t)}) } +func (suite *ItemUnitSuite) TestUnindexedPrefetchedItem() { + prefetch := data.NewUnindexedPrefetchedItem( + io.NopCloser(bytes.NewReader([]byte{})), + "foo", + time.Time{}) + _, ok := prefetch.(data.ItemInfo) + assert.False(suite.T(), ok, "unindexedPrefetchedItem implements Info()") +} + +func (suite *ItemUnitSuite) TestUnindexedLazyItem() { + t := suite.T() + + ctx, flush := tester.NewContext(t) + defer flush() + + lazy := data.NewUnindexedLazyItem( + ctx, + nil, + "foo", + time.Time{}, + fault.New(true)) + _, ok := lazy.(data.ItemInfo) + assert.False(t, ok, "unindexedLazyItem implements Info()") +} + func (suite *ItemUnitSuite) TestDeletedItem() { var ( t = suite.T() From a806ab59bfb53e6d760f1013a9e5fd00897a2d8c Mon Sep 17 00:00:00 2001 From: ashmrtn <3891298+ashmrtn@users.noreply.github.com> Date: Thu, 28 Sep 2023 21:15:45 -0700 Subject: [PATCH 09/26] Use generic unindex item struct (#4366) Switch all metadata files (of all types) to use the generic unindexed item type. Transitioned items include: * previous paths and deltas for all services * site list for groups * drive .meta files --- #### Does this PR need a docs update or release note? - [ ] :white_check_mark: Yes, it's included - [ ] :clock1: Yes, but in a later PR - [x] :no_entry: No #### Type of change - [ ] :sunflower: Feature - [ ] :bug: Bugfix - [ ] :world_map: Documentation - [ ] :robot: Supportability/Tests - [ ] :computer: CI/Deployment - [x] :broom: Tech Debt/Cleanup #### Issue(s) * #4191 #### Test Plan - [ ] :muscle: Manual - [x] :zap: Unit test - [x] :green_heart: E2E --- .../m365/collection/drive/collection.go | 18 +++--- .../collection/drive/metadata/metadata.go | 15 ----- .../m365/graph/metadata_collection.go | 62 ++++++------------- .../m365/graph/metadata_collection_test.go | 15 ++++- src/internal/m365/helper_test.go | 4 -- src/internal/streamstore/streamstore.go | 32 ++-------- 6 files changed, 47 insertions(+), 99 deletions(-) diff --git a/src/internal/m365/collection/drive/collection.go b/src/internal/m365/collection/drive/collection.go index 0cdf79c0e..19de8e0dc 100644 --- a/src/internal/m365/collection/drive/collection.go +++ b/src/internal/m365/collection/drive/collection.go @@ -33,11 +33,7 @@ const ( MaxOneNoteFileSize = 2 * 1024 * 1024 * 1024 ) -var ( - _ data.BackupCollection = &Collection{} - _ data.Item = &metadata.Item{} - _ data.ItemModTime = &metadata.Item{} -) +var _ data.BackupCollection = &Collection{} // Collection represents a set of OneDrive objects retrieved from M365 type Collection struct { @@ -588,13 +584,15 @@ func (oc *Collection) streamDriveItem( return progReader, nil }) - oc.data <- &metadata.Item{ - ItemID: metaFileName + metaSuffix, - Data: metaReader, + // We wrap the reader with a lazy reader so that the progress bar is only + // initialized if the file is read. Since we're not actually lazily reading + // data just use the eager item implementation. 
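+	// The lazy/eager naming refers to the Item wrapper, not the reader:
+	// data.NewUnindexedLazyItem defers the fetch itself until first read,
+	// while the prefetched variant just wraps an existing reader. The
+	// metadata reader above is already lazily wrapped, so eager is fine here.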
+ oc.data <- data.NewUnindexedPrefetchedItem( + metaReader, + metaFileName+metaSuffix, // Metadata file should always use the latest time as // permissions change does not update mod time. - Mod: time.Now(), - } + time.Now()) // Item read successfully, add to collection if isFile { diff --git a/src/internal/m365/collection/drive/metadata/metadata.go b/src/internal/m365/collection/drive/metadata/metadata.go index 06a31d432..7e91a2e5b 100644 --- a/src/internal/m365/collection/drive/metadata/metadata.go +++ b/src/internal/m365/collection/drive/metadata/metadata.go @@ -1,7 +1,6 @@ package metadata import ( - "io" "time" ) @@ -41,17 +40,3 @@ type Metadata struct { Permissions []Permission `json:"permissions,omitempty"` LinkShares []LinkShare `json:"linkShares,omitempty"` } - -type Item struct { - ItemID string - Data io.ReadCloser - Mod time.Time -} - -// Deleted implements an interface function. However, OneDrive items are marked -// as deleted by adding them to the exclude list so this can always return -// false. -func (i *Item) Deleted() bool { return false } -func (i *Item) ID() string { return i.ItemID } -func (i *Item) ToReader() io.ReadCloser { return i.Data } -func (i *Item) ModTime() time.Time { return i.Mod } diff --git a/src/internal/m365/graph/metadata_collection.go b/src/internal/m365/graph/metadata_collection.go index 7b382fe16..7e06faaba 100644 --- a/src/internal/m365/graph/metadata_collection.go +++ b/src/internal/m365/graph/metadata_collection.go @@ -5,6 +5,7 @@ import ( "context" "encoding/json" "io" + "time" "github.com/alcionai/clues" @@ -16,7 +17,7 @@ import ( var ( _ data.BackupCollection = &MetadataCollection{} - _ data.Item = &MetadataItem{} + _ data.Item = &metadataItem{} ) // MetadataCollection in a simple collection that assumes all items to be @@ -24,7 +25,7 @@ var ( // created. This collection has no logic for lazily fetching item data. 
type MetadataCollection struct { fullPath path.Path - items []MetadataItem + items []metadataItem statusUpdater support.StatusUpdater } @@ -40,23 +41,29 @@ func NewMetadataEntry(fileName string, mData any) MetadataCollectionEntry { return MetadataCollectionEntry{fileName, mData} } -func (mce MetadataCollectionEntry) toMetadataItem() (MetadataItem, error) { +func (mce MetadataCollectionEntry) toMetadataItem() (metadataItem, error) { if len(mce.fileName) == 0 { - return MetadataItem{}, clues.New("missing metadata filename") + return metadataItem{}, clues.New("missing metadata filename") } if mce.data == nil { - return MetadataItem{}, clues.New("missing metadata") + return metadataItem{}, clues.New("missing metadata") } buf := &bytes.Buffer{} encoder := json.NewEncoder(buf) if err := encoder.Encode(mce.data); err != nil { - return MetadataItem{}, clues.Wrap(err, "serializing metadata") + return metadataItem{}, clues.Wrap(err, "serializing metadata") } - return NewMetadataItem(mce.fileName, buf.Bytes()), nil + return metadataItem{ + Item: data.NewUnindexedPrefetchedItem( + io.NopCloser(buf), + mce.fileName, + time.Now()), + size: int64(buf.Len()), + }, nil } // MakeMetadataCollection creates a metadata collection that has a file @@ -71,7 +78,7 @@ func MakeMetadataCollection( return nil, nil } - items := make([]MetadataItem, 0, len(metadata)) + items := make([]metadataItem, 0, len(metadata)) for _, md := range metadata { item, err := md.toMetadataItem() @@ -89,7 +96,7 @@ func MakeMetadataCollection( func NewMetadataCollection( p path.Path, - items []MetadataItem, + items []metadataItem, statusUpdater support.StatusUpdater, ) *MetadataCollection { return &MetadataCollection{ @@ -148,7 +155,7 @@ func (md MetadataCollection) Items( defer close(res) for _, item := range md.items { - totalBytes += int64(len(item.data)) + totalBytes += item.size res <- item } }() @@ -156,36 +163,7 @@ func (md MetadataCollection) Items( return res } -// MetadataItem is an in-memory data.Item implementation. MetadataItem does -// not implement additional interfaces like data.ItemInfo, so it should only -// be used for items with a small amount of content that don't need to be added -// to backup details. -// -// Currently the expected use-case for this struct are storing metadata for a -// backup like delta tokens or a mapping of container IDs to container paths. -type MetadataItem struct { - // uuid is an ID that can be used to refer to the item. - uuid string - // data is a buffer of data that the item refers to. - data []byte -} - -func NewMetadataItem(uuid string, itemData []byte) MetadataItem { - return MetadataItem{ - uuid: uuid, - data: itemData, - } -} - -func (mi MetadataItem) ID() string { - return mi.uuid -} - -// TODO(ashmrtn): Fill in once we know how to handle this. 
-func (mi MetadataItem) Deleted() bool { - return false -} - -func (mi MetadataItem) ToReader() io.ReadCloser { - return io.NopCloser(bytes.NewReader(mi.data)) +type metadataItem struct { + data.Item + size int64 } diff --git a/src/internal/m365/graph/metadata_collection_test.go b/src/internal/m365/graph/metadata_collection_test.go index 41e15f0bf..0423cdf40 100644 --- a/src/internal/m365/graph/metadata_collection_test.go +++ b/src/internal/m365/graph/metadata_collection_test.go @@ -1,9 +1,11 @@ package graph import ( + "bytes" "encoding/json" "io" "testing" + "time" "github.com/alcionai/clues" "github.com/google/uuid" @@ -11,6 +13,7 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/m365/support" "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/pkg/fault" @@ -63,10 +66,18 @@ func (suite *MetadataCollectionUnitSuite) TestItems() { len(itemData), "Requires same number of items and data") - items := []MetadataItem{} + items := []metadataItem{} for i := 0; i < len(itemNames); i++ { - items = append(items, NewMetadataItem(itemNames[i], itemData[i])) + items = append( + items, + metadataItem{ + Item: data.NewUnindexedPrefetchedItem( + io.NopCloser(bytes.NewReader(itemData[i])), + itemNames[i], + time.Time{}), + size: int64(len(itemData[i])), + }) } p, err := path.Build( diff --git a/src/internal/m365/helper_test.go b/src/internal/m365/helper_test.go index d6b7c256c..b875f7c91 100644 --- a/src/internal/m365/helper_test.go +++ b/src/internal/m365/helper_test.go @@ -751,10 +751,6 @@ func compareDriveItem( } if isMeta { - var itemType *metadata.Item - - assert.IsType(t, itemType, item) - var ( itemMeta metadata.Metadata expectedMeta metadata.Metadata diff --git a/src/internal/streamstore/streamstore.go b/src/internal/streamstore/streamstore.go index 35a3b9706..eb5673196 100644 --- a/src/internal/streamstore/streamstore.go +++ b/src/internal/streamstore/streamstore.go @@ -6,6 +6,7 @@ import ( "bytes" "context" "io" + "time" "github.com/alcionai/clues" @@ -128,7 +129,7 @@ type streamCollection struct { // folderPath indicates what level in the hierarchy this collection // represents folderPath path.Path - item *streamItem + item data.Item } func (dc *streamCollection) FullPath() path.Path { @@ -157,27 +158,6 @@ func (dc *streamCollection) Items(context.Context, *fault.Bus) <-chan data.Item return items } -// --------------------------------------------------------------------------- -// item -// --------------------------------------------------------------------------- - -type streamItem struct { - name string - data []byte -} - -func (di *streamItem) ID() string { - return di.name -} - -func (di *streamItem) ToReader() io.ReadCloser { - return io.NopCloser(bytes.NewReader(di.data)) -} - -func (di *streamItem) Deleted() bool { - return false -} - // --------------------------------------------------------------------------- // common reader/writer/deleter // --------------------------------------------------------------------------- @@ -204,10 +184,10 @@ func collect( dc := streamCollection{ folderPath: p, - item: &streamItem{ - name: col.itemName, - data: bs, - }, + item: data.NewUnindexedPrefetchedItem( + io.NopCloser(bytes.NewReader(bs)), + col.itemName, + time.Now()), } return &dc, nil From 1c520db3bca7ae53b80fb1c397f66da9535e42c1 Mon Sep 17 00:00:00 2001 From: ashmrtn <3891298+ashmrtn@users.noreply.github.com> Date: Thu, 28 Sep 2023 22:17:11 
-0700 Subject: [PATCH 10/26] Use generic item in SharePoint lists/pages (#4367) #### Does this PR need a docs update or release note? - [ ] :white_check_mark: Yes, it's included - [ ] :clock1: Yes, but in a later PR - [x] :no_entry: No #### Type of change - [ ] :sunflower: Feature - [ ] :bug: Bugfix - [ ] :world_map: Documentation - [ ] :robot: Supportability/Tests - [ ] :computer: CI/Deployment - [x] :broom: Tech Debt/Cleanup #### Issue(s) * #4191 #### Test Plan - [ ] :muscle: Manual - [x] :zap: Unit test - [x] :green_heart: E2E --- .../m365/collection/site/collection.go | 70 +++---------------- .../m365/collection/site/collection_test.go | 49 +++++-------- .../m365/service/sharepoint/api/pages_test.go | 8 ++- 3 files changed, 30 insertions(+), 97 deletions(-) diff --git a/src/internal/m365/collection/site/collection.go b/src/internal/m365/collection/site/collection.go index 95d77acb2..422ed4b2a 100644 --- a/src/internal/m365/collection/site/collection.go +++ b/src/internal/m365/collection/site/collection.go @@ -4,7 +4,6 @@ import ( "bytes" "context" "io" - "time" "github.com/alcionai/clues" "github.com/microsoft/kiota-abstractions-go/serialization" @@ -40,12 +39,7 @@ const ( Pages DataCategory = 2 ) -var ( - _ data.BackupCollection = &Collection{} - _ data.Item = &Item{} - _ data.ItemInfo = &Item{} - _ data.ItemModTime = &Item{} -) +var _ data.BackupCollection = &Collection{} // Collection is the SharePoint.List implementation of data.Collection. SharePoint.Libraries collections are supported // by the oneDrive.Collection as the calls are identical for populating the Collection @@ -120,43 +114,6 @@ func (sc *Collection) Items( return sc.data } -type Item struct { - id string - data io.ReadCloser - info *details.SharePointInfo - modTime time.Time - - // true if the item was marked by graph as deleted. 
- deleted bool -} - -func NewItem(name string, d io.ReadCloser) *Item { - return &Item{ - id: name, - data: d, - } -} - -func (sd *Item) ID() string { - return sd.id -} - -func (sd *Item) ToReader() io.ReadCloser { - return sd.data -} - -func (sd Item) Deleted() bool { - return sd.deleted -} - -func (sd *Item) Info() (details.ItemInfo, error) { - return details.ItemInfo{SharePoint: sd.info}, nil -} - -func (sd *Item) ModTime() time.Time { - return sd.modTime -} - func (sc *Collection) finishPopulation( ctx context.Context, metrics support.CollectionMetrics, @@ -251,20 +208,13 @@ func (sc *Collection) retrieveLists( size := int64(len(byteArray)) if size > 0 { - t := time.Now() - if t1 := lst.GetLastModifiedDateTime(); t1 != nil { - t = *t1 - } - metrics.Bytes += size metrics.Successes++ - sc.data <- &Item{ - id: ptr.Val(lst.GetId()), - data: io.NopCloser(bytes.NewReader(byteArray)), - info: ListToSPInfo(lst, size), - modTime: t, - } + sc.data <- data.NewPrefetchedItem( + io.NopCloser(bytes.NewReader(byteArray)), + ptr.Val(lst.GetId()), + details.ItemInfo{SharePoint: ListToSPInfo(lst, size)}) progress <- struct{}{} } @@ -322,12 +272,10 @@ func (sc *Collection) retrievePages( if size > 0 { metrics.Bytes += size metrics.Successes++ - sc.data <- &Item{ - id: ptr.Val(pg.GetId()), - data: io.NopCloser(bytes.NewReader(byteArray)), - info: pageToSPInfo(pg, root, size), - modTime: ptr.OrNow(pg.GetLastModifiedDateTime()), - } + sc.data <- data.NewPrefetchedItem( + io.NopCloser(bytes.NewReader(byteArray)), + ptr.Val(pg.GetId()), + details.ItemInfo{SharePoint: pageToSPInfo(pg, root, size)}) progress <- struct{}{} } diff --git a/src/internal/m365/collection/site/collection_test.go b/src/internal/m365/collection/site/collection_test.go index 0be5c2dc8..3d0336217 100644 --- a/src/internal/m365/collection/site/collection_test.go +++ b/src/internal/m365/collection/site/collection_test.go @@ -19,6 +19,7 @@ import ( "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/internal/tester/tconfig" "github.com/alcionai/corso/src/pkg/account" + "github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/control/testdata" "github.com/alcionai/corso/src/pkg/fault" @@ -58,21 +59,6 @@ func TestSharePointCollectionSuite(t *testing.T) { }) } -func (suite *SharePointCollectionSuite) TestCollection_Item_Read() { - t := suite.T() - m := []byte("test message") - name := "aFile" - sc := &Item{ - id: name, - data: io.NopCloser(bytes.NewReader(m)), - } - readData, err := io.ReadAll(sc.ToReader()) - require.NoError(t, err, clues.ToCore(err)) - - assert.Equal(t, name, sc.id) - assert.Equal(t, readData, m) -} - // TestListCollection tests basic functionality to create // SharePoint collection and to use the data stream channel. 
func (suite *SharePointCollectionSuite) TestCollection_Items() { @@ -88,7 +74,7 @@ func (suite *SharePointCollectionSuite) TestCollection_Items() { name, itemName string scope selectors.SharePointScope getDir func(t *testing.T) path.Path - getItem func(t *testing.T, itemName string) *Item + getItem func(t *testing.T, itemName string) data.Item }{ { name: "List", @@ -106,7 +92,7 @@ func (suite *SharePointCollectionSuite) TestCollection_Items() { return dir }, - getItem: func(t *testing.T, name string) *Item { + getItem: func(t *testing.T, name string) data.Item { ow := kioser.NewJsonSerializationWriter() listing := spMock.ListDefault(name) listing.SetDisplayName(&name) @@ -117,11 +103,10 @@ func (suite *SharePointCollectionSuite) TestCollection_Items() { byteArray, err := ow.GetSerializedContent() require.NoError(t, err, clues.ToCore(err)) - data := &Item{ - id: name, - data: io.NopCloser(bytes.NewReader(byteArray)), - info: ListToSPInfo(listing, int64(len(byteArray))), - } + data := data.NewPrefetchedItem( + io.NopCloser(bytes.NewReader(byteArray)), + name, + details.ItemInfo{SharePoint: ListToSPInfo(listing, int64(len(byteArray)))}) return data }, @@ -142,16 +127,15 @@ func (suite *SharePointCollectionSuite) TestCollection_Items() { return dir }, - getItem: func(t *testing.T, itemName string) *Item { + getItem: func(t *testing.T, itemName string) data.Item { byteArray := spMock.Page(itemName) page, err := betaAPI.CreatePageFromBytes(byteArray) require.NoError(t, err, clues.ToCore(err)) - data := &Item{ - id: itemName, - data: io.NopCloser(bytes.NewReader(byteArray)), - info: betaAPI.PageInfo(page, int64(len(byteArray))), - } + data := data.NewPrefetchedItem( + io.NopCloser(bytes.NewReader(byteArray)), + itemName, + details.ItemInfo{SharePoint: betaAPI.PageInfo(page, int64(len(byteArray)))}) return data }, @@ -210,11 +194,10 @@ func (suite *SharePointCollectionSuite) TestListCollection_Restore() { byteArray, err := service.Serialize(listing) require.NoError(t, err, clues.ToCore(err)) - listData := &Item{ - id: testName, - data: io.NopCloser(bytes.NewReader(byteArray)), - info: ListToSPInfo(listing, int64(len(byteArray))), - } + listData := data.NewPrefetchedItem( + io.NopCloser(bytes.NewReader(byteArray)), + testName, + details.ItemInfo{SharePoint: ListToSPInfo(listing, int64(len(byteArray)))}) destName := testdata.DefaultRestoreConfig("").Location diff --git a/src/internal/m365/service/sharepoint/api/pages_test.go b/src/internal/m365/service/sharepoint/api/pages_test.go index a834f10ea..f462805d2 100644 --- a/src/internal/m365/service/sharepoint/api/pages_test.go +++ b/src/internal/m365/service/sharepoint/api/pages_test.go @@ -4,13 +4,14 @@ import ( "bytes" "io" "testing" + "time" "github.com/alcionai/clues" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "github.com/alcionai/corso/src/internal/m365/collection/site" + "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/m365/graph" "github.com/alcionai/corso/src/internal/m365/service/sharepoint/api" spMock "github.com/alcionai/corso/src/internal/m365/service/sharepoint/mock" @@ -108,9 +109,10 @@ func (suite *SharePointPageSuite) TestRestoreSinglePage() { //nolint:lll byteArray := spMock.Page("Byte Test") - pageData := site.NewItem( + pageData := data.NewUnindexedPrefetchedItem( + io.NopCloser(bytes.NewReader(byteArray)), testName, - io.NopCloser(bytes.NewReader(byteArray))) + time.Now()) info, err := api.RestoreSitePage( ctx, From 
375019a988682a781bdf6196d4533067c29a2e83 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 29 Sep 2023 05:37:14 +0000 Subject: [PATCH 11/26] =?UTF-8?q?=E2=AC=86=EF=B8=8F=20Bump=20postcss=20fro?= =?UTF-8?q?m=208.4.30=20to=208.4.31=20in=20/website=20(#4404)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [postcss](https://github.com/postcss/postcss) from 8.4.30 to 8.4.31.
Release notes (sourced from postcss's releases):

- 8.4.31: Fixed `\r` parsing to fix CVE-2023-44270.

Changelog (sourced from postcss's changelog):

- 8.4.31: Fixed `\r` parsing to fix CVE-2023-44270.
--- website/package-lock.json | 14 +++++++------- website/package.json | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/website/package-lock.json b/website/package-lock.json index 267aa055c..f4ff67600 100644 --- a/website/package-lock.json +++ b/website/package-lock.json @@ -33,7 +33,7 @@ "@docusaurus/module-type-aliases": "2.4.3", "@iconify/react": "^4.1.1", "autoprefixer": "^10.4.16", - "postcss": "^8.4.30", + "postcss": "^8.4.31", "tailwindcss": "^3.3.3" } }, @@ -10743,9 +10743,9 @@ } }, "node_modules/postcss": { - "version": "8.4.30", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.30.tgz", - "integrity": "sha512-7ZEao1g4kd68l97aWG/etQKPKq07us0ieSZ2TnFDk11i0ZfDW2AwKHYU8qv4MZKqN2fdBfg+7q0ES06UA73C1g==", + "version": "8.4.31", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.31.tgz", + "integrity": "sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ==", "funding": [ { "type": "opencollective", @@ -22738,9 +22738,9 @@ "integrity": "sha512-Wb4p1J4zyFTbM+u6WuO4XstYx4Ky9Cewe4DWrel7B0w6VVICvPwdOpotjzcf6eD8TsckVnIMNONQyPIUFOUbCQ==" }, "postcss": { - "version": "8.4.30", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.30.tgz", - "integrity": "sha512-7ZEao1g4kd68l97aWG/etQKPKq07us0ieSZ2TnFDk11i0ZfDW2AwKHYU8qv4MZKqN2fdBfg+7q0ES06UA73C1g==", + "version": "8.4.31", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.31.tgz", + "integrity": "sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ==", "requires": { "nanoid": "^3.3.6", "picocolors": "^1.0.0", diff --git a/website/package.json b/website/package.json index 08ddd9305..ab903d36d 100644 --- a/website/package.json +++ b/website/package.json @@ -39,7 +39,7 @@ "@docusaurus/module-type-aliases": "2.4.3", "@iconify/react": "^4.1.1", "autoprefixer": "^10.4.16", - "postcss": "^8.4.30", + "postcss": "^8.4.31", "tailwindcss": "^3.3.3" }, "browserslist": { From 05060e7d1f91360f67f83595b421b34324957300 Mon Sep 17 00:00:00 2001 From: ashmrtn <3891298+ashmrtn@users.noreply.github.com> Date: Fri, 29 Sep 2023 08:52:05 -0700 Subject: [PATCH 12/26] Create exported readers for injecting and retrieving serialization version info (#4378) Previously kopia wrapper has been transparently injecting and stripping out a "serialization version" for all items persisted in kopia. The version thus far has been hard-coded to `1` and has been stored in big endian format in the first 4 bytes of every kopia file This PR is a step towards getting serialization versions on a per-service/per-item basis. It exposes serialization readers that inject and strip out info to other packages This PR also slightly changes the serialization version format. The changes are two-fold: * the MSB is now used to represent if the item was deleted between the time it was discovered and when its data was requested * the serialization version number is set to uint16. This has no impact on existing persisted data since all versions are currently `1` (don't use more than 16-bits). This size can be expanded in the future if needed, but is mostly to enforce the idea that higher order bits shouldn't be used for version numbers right now --- #### Does this PR need a docs update or release note? 
- [ ] :white_check_mark: Yes, it's included - [ ] :clock1: Yes, but in a later PR - [x] :no_entry: No #### Type of change - [x] :sunflower: Feature - [ ] :bug: Bugfix - [ ] :world_map: Documentation - [ ] :robot: Supportability/Tests - [ ] :computer: CI/Deployment - [ ] :broom: Tech Debt/Cleanup #### Issue(s) * #4328 #### Test Plan - [ ] :muscle: Manual - [x] :zap: Unit test - [ ] :green_heart: E2E --- .../common/readers/serialization_version.go | 187 +++++++++ .../readers/serialization_version_test.go | 362 ++++++++++++++++++ 2 files changed, 549 insertions(+) create mode 100644 src/internal/common/readers/serialization_version.go create mode 100644 src/internal/common/readers/serialization_version_test.go diff --git a/src/internal/common/readers/serialization_version.go b/src/internal/common/readers/serialization_version.go new file mode 100644 index 000000000..a6713f959 --- /dev/null +++ b/src/internal/common/readers/serialization_version.go @@ -0,0 +1,187 @@ +package readers + +import ( + "bytes" + "encoding/binary" + "io" + "os" + "unsafe" + + "github.com/alcionai/clues" +) + +// persistedSerializationVersion is the size of the serialization version in +// storage. +// +// The current on-disk format of this field is written in big endian. The +// highest bit denotes if the item is empty because it was deleted between the +// time we told the storage about it and when we needed to get data for it. The +// lowest two bytes are the version number. All other bits are reserved for +// future use. +// +// MSB 31 30 16 8 0 LSB +// +----------+----+---------+--------+-------+ +// | del flag | reserved | version number | +// +----------+----+---------+--------+-------+ +type persistedSerializationVersion = uint32 + +// SerializationVersion is the in-memory size of the version number that gets +// added to the persisted serialization version. +// +// Right now it's only a uint16 but we can expand it to be larger so long as the +// expanded size doesn't clash with the flags in the high-order bits. +type SerializationVersion uint16 + +// DefaultSerializationVersion is the current (default) version number for all +// services. As services evolve their storage format they should begin tracking +// their own version numbers separate from other services. +const DefaultSerializationVersion SerializationVersion = 1 + +const ( + versionFormatSize = int(unsafe.Sizeof(persistedSerializationVersion(0))) + delInFlightMask persistedSerializationVersion = 1 << ((versionFormatSize * 8) - 1) +) + +// SerializationFormat is a struct describing serialization format versions and +// flags to add for this item. +type SerializationFormat struct { + Version SerializationVersion + DelInFlight bool +} + +// NewVersionedBackupReader creates a reader that injects format into the first +// bytes of the returned data. After format has been returned, data is returned +// from baseReaders in the order they're passed in. +func NewVersionedBackupReader( + format SerializationFormat, + baseReaders ...io.ReadCloser, +) (io.ReadCloser, error) { + if format.DelInFlight && len(baseReaders) > 0 { + // This is a conservative check, but we can always loosen it later on if + // needed. At the moment we really don't expect any data if the item was + // deleted. 
+ return nil, clues.New("item marked deleted but has reader(s)") + } + + formattedVersion := persistedSerializationVersion(format.Version) + if format.DelInFlight { + formattedVersion |= delInFlightMask + } + + formattedBuf := make([]byte, versionFormatSize) + binary.BigEndian.PutUint32(formattedBuf, formattedVersion) + + versionReader := io.NopCloser(bytes.NewReader(formattedBuf)) + + // Need to add readers individually because types differ. + allReaders := make([]io.Reader, 0, len(baseReaders)+1) + allReaders = append(allReaders, versionReader) + + for _, r := range baseReaders { + allReaders = append(allReaders, r) + } + + res := &versionedBackupReader{ + baseReaders: append([]io.ReadCloser{versionReader}, baseReaders...), + combined: io.MultiReader(allReaders...), + } + + return res, nil +} + +type versionedBackupReader struct { + // baseReaders is a reference to the original readers so we can close them. + baseReaders []io.ReadCloser + // combined is the reader that will return all data. + combined io.Reader +} + +func (vbr *versionedBackupReader) Read(p []byte) (int, error) { + if vbr.combined == nil { + return 0, os.ErrClosed + } + + n, err := vbr.combined.Read(p) + if err == io.EOF { + // Golang doesn't allow wrapping of EOF. If we wrap it other things start + // thinking it's an actual error. + return n, err + } + + return n, clues.Stack(err).OrNil() +} + +func (vbr *versionedBackupReader) Close() error { + if vbr.combined == nil { + return nil + } + + vbr.combined = nil + + var errs *clues.Err + + for i, r := range vbr.baseReaders { + if err := r.Close(); err != nil { + errs = clues.Stack( + errs, + clues.Wrap(err, "closing reader").With("reader_index", i)) + } + } + + vbr.baseReaders = nil + + return errs.OrNil() +} + +// NewVersionedRestoreReader wraps baseReader and provides easy access to the +// SerializationFormat info in the first bytes of the data contained in +// baseReader. +func NewVersionedRestoreReader( + baseReader io.ReadCloser, +) (*VersionedRestoreReader, error) { + versionBuf := make([]byte, versionFormatSize) + + // Loop to account for the unlikely case where we get a short read. + for read := 0; read < versionFormatSize; { + n, err := baseReader.Read(versionBuf[read:]) + if err != nil { + return nil, clues.Wrap(err, "reading serialization version") + } + + read += n + } + + formattedVersion := binary.BigEndian.Uint32(versionBuf) + + return &VersionedRestoreReader{ + baseReader: baseReader, + format: SerializationFormat{ + Version: SerializationVersion(formattedVersion), + DelInFlight: (formattedVersion & delInFlightMask) != 0, + }, + }, nil +} + +type VersionedRestoreReader struct { + baseReader io.ReadCloser + format SerializationFormat +} + +func (vrr *VersionedRestoreReader) Read(p []byte) (int, error) { + n, err := vrr.baseReader.Read(p) + if err == io.EOF { + // Golang doesn't allow wrapping of EOF. If we wrap it other things start + // thinking it's an actual error. 
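+		// (Callers such as io.ReadAll and io.Copy rely on receiving the bare
+		// io.EOF sentinel to know the stream ended cleanly.)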
+ return n, err + } + + return n, clues.Stack(err).OrNil() +} + +func (vrr *VersionedRestoreReader) Close() error { + return clues.Stack(vrr.baseReader.Close()).OrNil() +} + +func (vrr VersionedRestoreReader) Format() SerializationFormat { + return vrr.format +} diff --git a/src/internal/common/readers/serialization_version_test.go b/src/internal/common/readers/serialization_version_test.go new file mode 100644 index 000000000..7d99c7721 --- /dev/null +++ b/src/internal/common/readers/serialization_version_test.go @@ -0,0 +1,362 @@ +package readers_test + +import ( + "bytes" + "io" + "testing" + + "github.com/alcionai/clues" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "golang.org/x/exp/slices" + + "github.com/alcionai/corso/src/internal/common/readers" + "github.com/alcionai/corso/src/internal/tester" +) + +type shortReader struct { + maxReadLen int + io.ReadCloser +} + +func (s *shortReader) Read(p []byte) (int, error) { + toRead := s.maxReadLen + if len(p) < toRead { + toRead = len(p) + } + + return s.ReadCloser.Read(p[:toRead]) +} + +type SerializationReaderUnitSuite struct { + tester.Suite +} + +func TestSerializationReaderUnitSuite(t *testing.T) { + suite.Run(t, &SerializationReaderUnitSuite{Suite: tester.NewUnitSuite(t)}) +} + +func (suite *SerializationReaderUnitSuite) TestBackupSerializationReader() { + baseData := []byte("hello world") + + table := []struct { + name string + format readers.SerializationFormat + inputReaders []io.ReadCloser + + expectErr require.ErrorAssertionFunc + expectData []byte + }{ + { + name: "DeletedInFlight NoVersion NoReaders", + format: readers.SerializationFormat{ + DelInFlight: true, + }, + expectErr: require.NoError, + expectData: []byte{0x80, 0x0, 0x0, 0x0}, + }, + { + name: "DeletedInFlight NoReaders", + format: readers.SerializationFormat{ + Version: 42, + DelInFlight: true, + }, + expectErr: require.NoError, + expectData: []byte{0x80, 0x0, 0x0, 42}, + }, + { + name: "NoVersion NoReaders", + expectErr: require.NoError, + expectData: []byte{0x00, 0x0, 0x0, 0x0}, + }, + { + name: "NoReaders", + format: readers.SerializationFormat{ + Version: 42, + }, + expectErr: require.NoError, + expectData: []byte{0x00, 0x0, 0x0, 42}, + }, + { + name: "SingleReader", + format: readers.SerializationFormat{ + Version: 42, + }, + inputReaders: []io.ReadCloser{io.NopCloser(bytes.NewReader(baseData))}, + expectErr: require.NoError, + expectData: append([]byte{0x00, 0x0, 0x0, 42}, baseData...), + }, + { + name: "MultipleReaders", + format: readers.SerializationFormat{ + Version: 42, + }, + inputReaders: []io.ReadCloser{ + io.NopCloser(bytes.NewReader(baseData)), + io.NopCloser(bytes.NewReader(baseData)), + }, + expectErr: require.NoError, + expectData: append( + append([]byte{0x00, 0x0, 0x0, 42}, baseData...), + baseData...), + }, + // Uncomment if we expand the version to 32 bits. + //{ + // name: "VersionWithHighBitSet NoReaders Errors", + // format: readers.SerializationFormat{ + // Version: 0x80000000, + // }, + // expectErr: require.Error, + //}, + { + name: "DeletedInFlight SingleReader Errors", + format: readers.SerializationFormat{ + DelInFlight: true, + }, + inputReaders: []io.ReadCloser{io.NopCloser(bytes.NewReader(baseData))}, + expectErr: require.Error, + }, + } + + for _, test := range table { + suite.Run(test.name, func() { + t := suite.T() + + r, err := readers.NewVersionedBackupReader( + test.format, + test.inputReaders...) 
+ test.expectErr(t, err, "getting backup reader: %v", clues.ToCore(err)) + + if err != nil { + return + } + + defer func() { + err := r.Close() + assert.NoError(t, err, "closing reader: %v", clues.ToCore(err)) + }() + + buf, err := io.ReadAll(r) + require.NoError( + t, + err, + "reading serialized data: %v", + clues.ToCore(err)) + + // Need to use equal because output is order-sensitive. + assert.Equal(t, test.expectData, buf, "serialized data") + }) + } +} + +func (suite *SerializationReaderUnitSuite) TestBackupSerializationReader_ShortReads() { + t := suite.T() + + baseData := []byte("hello world") + expectData := append( + append([]byte{0x00, 0x0, 0x0, 42}, baseData...), + baseData...) + + r, err := readers.NewVersionedBackupReader( + readers.SerializationFormat{Version: 42}, + io.NopCloser(bytes.NewReader(baseData)), + io.NopCloser(bytes.NewReader(baseData))) + require.NoError(t, err, "getting backup reader: %v", clues.ToCore(err)) + + defer func() { + err := r.Close() + assert.NoError(t, err, "closing reader: %v", clues.ToCore(err)) + }() + + buf := make([]byte, len(expectData)) + r = &shortReader{ + maxReadLen: 3, + ReadCloser: r, + } + + for read := 0; ; { + n, err := r.Read(buf[read:]) + + read += n + if read >= len(buf) { + break + } + + require.NoError(t, err, "reading data: %v", clues.ToCore(err)) + } + + // Need to use equal because output is order-sensitive. + assert.Equal(t, expectData, buf, "serialized data") +} + +// TestRestoreSerializationReader checks that we can read previously serialized +// data. For simplicity, it uses the versionedBackupReader to generate the +// input. This should be relatively safe because the tests for +// versionedBackupReader do compare directly against serialized data. +func (suite *SerializationReaderUnitSuite) TestRestoreSerializationReader() { + baseData := []byte("hello world") + + table := []struct { + name string + inputReader func(*testing.T) io.ReadCloser + + expectErr require.ErrorAssertionFunc + expectVersion readers.SerializationVersion + expectDelInFlight bool + expectData []byte + }{ + { + name: "NoVersion NoReaders", + inputReader: func(t *testing.T) io.ReadCloser { + r, err := readers.NewVersionedBackupReader(readers.SerializationFormat{}) + require.NoError(t, err, "making reader: %v", clues.ToCore(err)) + + return r + }, + expectErr: require.NoError, + expectData: []byte{}, + }, + { + name: "DeletedInFlight NoReaders", + inputReader: func(t *testing.T) io.ReadCloser { + r, err := readers.NewVersionedBackupReader( + readers.SerializationFormat{ + Version: 42, + DelInFlight: true, + }) + require.NoError(t, err, "making reader: %v", clues.ToCore(err)) + + return r + }, + expectErr: require.NoError, + expectVersion: 42, + expectDelInFlight: true, + expectData: []byte{}, + }, + { + name: "DeletedInFlight SingleReader", + inputReader: func(t *testing.T) io.ReadCloser { + // Need to specify the bytes manually because the backup reader won't + // allow creating something with the deleted flag and data. 
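+				// In the literal below, 0x80 in the leading byte sets the
+				// deleted-in-flight flag and 42 in the trailing byte is the
+				// version number.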
+ return io.NopCloser(bytes.NewReader(append( + []byte{0x80, 0x0, 0x0, 42}, + baseData...))) + }, + expectErr: require.NoError, + expectVersion: 42, + expectDelInFlight: true, + expectData: baseData, + }, + { + name: "NoVersion SingleReader", + inputReader: func(t *testing.T) io.ReadCloser { + r, err := readers.NewVersionedBackupReader( + readers.SerializationFormat{}, + io.NopCloser(bytes.NewReader(baseData))) + require.NoError(t, err, "making reader: %v", clues.ToCore(err)) + + return r + }, + expectErr: require.NoError, + expectData: baseData, + }, + { + name: "SingleReader", + inputReader: func(t *testing.T) io.ReadCloser { + r, err := readers.NewVersionedBackupReader( + readers.SerializationFormat{Version: 42}, + io.NopCloser(bytes.NewReader(baseData))) + require.NoError(t, err, "making reader: %v", clues.ToCore(err)) + + return r + }, + expectErr: require.NoError, + expectVersion: 42, + expectData: baseData, + }, + { + name: "ShortReads SingleReader", + inputReader: func(t *testing.T) io.ReadCloser { + r, err := readers.NewVersionedBackupReader( + readers.SerializationFormat{Version: 42}, + io.NopCloser(bytes.NewReader(baseData))) + require.NoError(t, err, "making reader: %v", clues.ToCore(err)) + + r = &shortReader{ + maxReadLen: 3, + ReadCloser: r, + } + + return r + }, + expectErr: require.NoError, + expectVersion: 42, + expectData: baseData, + }, + { + name: "MultipleReaders", + inputReader: func(t *testing.T) io.ReadCloser { + r, err := readers.NewVersionedBackupReader( + readers.SerializationFormat{Version: 42}, + io.NopCloser(bytes.NewReader(baseData)), + io.NopCloser(bytes.NewReader(baseData))) + require.NoError(t, err, "making reader: %v", clues.ToCore(err)) + + return r + }, + expectErr: require.NoError, + expectVersion: 42, + expectData: append(slices.Clone(baseData), baseData...), + }, + { + name: "EmptyReader Errors", + inputReader: func(t *testing.T) io.ReadCloser { + return io.NopCloser(bytes.NewReader([]byte{})) + }, + expectErr: require.Error, + }, + { + name: "TruncatedVersion Errors", + inputReader: func(t *testing.T) io.ReadCloser { + return io.NopCloser(bytes.NewReader([]byte{0x80, 0x0})) + }, + expectErr: require.Error, + }, + } + + for _, test := range table { + suite.Run(test.name, func() { + t := suite.T() + + r, err := readers.NewVersionedRestoreReader(test.inputReader(t)) + test.expectErr(t, err, "getting restore reader: %v", clues.ToCore(err)) + + if err != nil { + return + } + + defer func() { + err := r.Close() + assert.NoError(t, err, "closing reader: %v", clues.ToCore(err)) + }() + + assert.Equal( + t, + test.expectVersion, + r.Format().Version, + "version") + assert.Equal( + t, + test.expectDelInFlight, + r.Format().DelInFlight, + "deleted in flight") + + buf, err := io.ReadAll(r) + require.NoError(t, err, "reading serialized data: %v", clues.ToCore(err)) + + // Need to use equal because output is order-sensitive. + assert.Equal(t, test.expectData, buf, "serialized data") + }) + } +} From 2fe0e8643e39c59a880d68ad4c0045b52dd23f39 Mon Sep 17 00:00:00 2001 From: ashmrtn <3891298+ashmrtn@users.noreply.github.com> Date: Fri, 29 Sep 2023 10:41:46 -0700 Subject: [PATCH 13/26] Add collection wrappers for serialization format (#4408) Add a few collection wrappers that help inject/remove serialization format since the handling of that is still split across layers. --- #### Does this PR need a docs update or release note? 
- [ ] :white_check_mark: Yes, it's included - [ ] :clock1: Yes, but in a later PR - [x] :no_entry: No #### Type of change - [ ] :sunflower: Feature - [ ] :bug: Bugfix - [ ] :world_map: Documentation - [x] :robot: Supportability/Tests - [ ] :computer: CI/Deployment - [ ] :broom: Tech Debt/Cleanup #### Issue(s) * #4328 #### Test Plan - [x] :muscle: Manual - [ ] :zap: Unit test - [ ] :green_heart: E2E --- src/internal/data/mock/collection.go | 108 +++++++++++++++++++++++++++ 1 file changed, 108 insertions(+) diff --git a/src/internal/data/mock/collection.go b/src/internal/data/mock/collection.go index 6fd461db6..39a974e36 100644 --- a/src/internal/data/mock/collection.go +++ b/src/internal/data/mock/collection.go @@ -3,8 +3,13 @@ package mock import ( "context" "io" + "testing" "time" + "github.com/alcionai/clues" + "github.com/stretchr/testify/require" + + "github.com/alcionai/corso/src/internal/common/readers" "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/fault" @@ -163,3 +168,106 @@ func (rc RestoreCollection) FetchItemByName( return res, nil } + +var ( + _ data.BackupCollection = &versionedBackupCollection{} + _ data.RestoreCollection = &unversionedRestoreCollection{} + _ data.Item = &itemWrapper{} +) + +type itemWrapper struct { + data.Item + reader io.ReadCloser +} + +func (i *itemWrapper) ToReader() io.ReadCloser { + return i.reader +} + +func NewUnversionedRestoreCollection( + t *testing.T, + col data.RestoreCollection, +) *unversionedRestoreCollection { + return &unversionedRestoreCollection{ + RestoreCollection: col, + t: t, + } +} + +// unversionedRestoreCollection strips out version format headers on all items. +// +// Wrap data.RestoreCollections in this type if you don't need access to the +// version format header during tests and you know the item readers can't return +// an error. +type unversionedRestoreCollection struct { + data.RestoreCollection + t *testing.T +} + +func (c *unversionedRestoreCollection) Items( + ctx context.Context, + errs *fault.Bus, +) <-chan data.Item { + res := make(chan data.Item) + go func() { + defer close(res) + + for item := range c.RestoreCollection.Items(ctx, errs) { + r, err := readers.NewVersionedRestoreReader(item.ToReader()) + require.NoError(c.t, err, clues.ToCore(err)) + + res <- &itemWrapper{ + Item: item, + reader: r, + } + } + }() + + return res +} + +func NewVersionedBackupCollection( + t *testing.T, + col data.BackupCollection, +) *versionedBackupCollection { + return &versionedBackupCollection{ + BackupCollection: col, + t: t, + } +} + +// versionedBackupCollection injects basic version information on all items. +// +// Wrap data.BackupCollections in this type if you don't need to explicitly set +// the version format header during tests, aren't trying to check reader errors +// cases, and aren't populating backup details. 
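+//
+// A minimal usage sketch (col stands for any data.BackupCollection used in
+// a test):
+//
+//	versioned := NewVersionedBackupCollection(t, col)
+//	// versioned.Items yields the same items, but each reader now starts
+//	// with the default 4-byte serialization format header.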
+type versionedBackupCollection struct { + data.BackupCollection + t *testing.T +} + +func (c *versionedBackupCollection) Items( + ctx context.Context, + errs *fault.Bus, +) <-chan data.Item { + res := make(chan data.Item) + go func() { + defer close(res) + + for item := range c.BackupCollection.Items(ctx, errs) { + r, err := readers.NewVersionedBackupReader( + readers.SerializationFormat{ + Version: readers.DefaultSerializationVersion, + }, + item.ToReader()) + require.NoError(c.t, err, clues.ToCore(err)) + + res <- &itemWrapper{ + Item: item, + reader: r, + } + } + }() + + return res +} From 8e080f83b7df21fbb3ad55de05909539cac7532d Mon Sep 17 00:00:00 2001 From: ashmrtn <3891298+ashmrtn@users.noreply.github.com> Date: Fri, 29 Sep 2023 11:08:08 -0700 Subject: [PATCH 14/26] Fix restore backup tests (#4407) Exchange tests inadvertently got disabled since it wasn't finding path matches for returned BackupCollections. This switches to using LocationPath which does allow for matching Most contacts tests are disabled since restore doesn't support nested folders --- #### Does this PR need a docs update or release note? - [ ] :white_check_mark: Yes, it's included - [ ] :clock1: Yes, but in a later PR - [ ] :no_entry: No #### Type of change - [ ] :sunflower: Feature - [x] :bug: Bugfix - [ ] :world_map: Documentation - [x] :robot: Supportability/Tests - [ ] :computer: CI/Deployment - [ ] :broom: Tech Debt/Cleanup #### Test Plan - [x] :muscle: Manual - [ ] :zap: Unit test - [ ] :green_heart: E2E --- src/internal/m365/backup_test.go | 3 +- src/internal/m365/controller_test.go | 173 ++++++++++++++++----------- src/internal/m365/helper_test.go | 43 ++----- src/internal/m365/stub/stub.go | 36 +++--- 4 files changed, 134 insertions(+), 121 deletions(-) diff --git a/src/internal/m365/backup_test.go b/src/internal/m365/backup_test.go index acaa6036b..88708aa14 100644 --- a/src/internal/m365/backup_test.go +++ b/src/internal/m365/backup_test.go @@ -458,9 +458,8 @@ func (suite *SPCollectionIntgSuite) TestCreateSharePointCollection_Lists() { for item := range collection.Items(ctx, fault.New(true)) { t.Log("File: " + item.ID()) - bs, err := io.ReadAll(item.ToReader()) + _, err := io.ReadAll(item.ToReader()) require.NoError(t, err, clues.ToCore(err)) - t.Log(string(bs)) } } } diff --git a/src/internal/m365/controller_test.go b/src/internal/m365/controller_test.go index 3f535880a..d95a56c9f 100644 --- a/src/internal/m365/controller_test.go +++ b/src/internal/m365/controller_test.go @@ -861,7 +861,7 @@ func (suite *ControllerIntegrationSuite) TestRestoreAndBackup_core() { }, }, { - name: "MultipleContactsSingleFolder", + name: "MultipleContactsInRestoreFolder", service: path.ExchangeService, collections: []stub.ColInfo{ { @@ -887,49 +887,77 @@ func (suite *ControllerIntegrationSuite) TestRestoreAndBackup_core() { }, }, }, - { - name: "MultipleContactsMultipleFolders", - service: path.ExchangeService, - collections: []stub.ColInfo{ - { - PathElements: []string{"Work"}, - Category: path.ContactsCategory, - Items: []stub.ItemInfo{ - { - Name: "someencodeditemID", - Data: exchMock.ContactBytes("Ghimley"), - LookupKey: "Ghimley", - }, - { - Name: "someencodeditemID2", - Data: exchMock.ContactBytes("Irgot"), - LookupKey: "Irgot", - }, - { - Name: "someencodeditemID3", - Data: exchMock.ContactBytes("Jannes"), - LookupKey: "Jannes", - }, - }, - }, - { - PathElements: []string{"Personal"}, - Category: path.ContactsCategory, - Items: []stub.ItemInfo{ - { - Name: "someencodeditemID4", - Data: exchMock.ContactBytes("Argon"), 
- LookupKey: "Argon", - }, - { - Name: "someencodeditemID5", - Data: exchMock.ContactBytes("Bernard"), - LookupKey: "Bernard", - }, - }, - }, - }, - }, + // TODO(ashmrtn): Re-enable when we can restore contacts to nested folders. + //{ + // name: "MultipleContactsSingleFolder", + // service: path.ExchangeService, + // collections: []stub.ColInfo{ + // { + // PathElements: []string{"Contacts"}, + // Category: path.ContactsCategory, + // Items: []stub.ItemInfo{ + // { + // Name: "someencodeditemID", + // Data: exchMock.ContactBytes("Ghimley"), + // LookupKey: "Ghimley", + // }, + // { + // Name: "someencodeditemID2", + // Data: exchMock.ContactBytes("Irgot"), + // LookupKey: "Irgot", + // }, + // { + // Name: "someencodeditemID3", + // Data: exchMock.ContactBytes("Jannes"), + // LookupKey: "Jannes", + // }, + // }, + // }, + // }, + //}, + //{ + // name: "MultipleContactsMultipleFolders", + // service: path.ExchangeService, + // collections: []stub.ColInfo{ + // { + // PathElements: []string{"Work"}, + // Category: path.ContactsCategory, + // Items: []stub.ItemInfo{ + // { + // Name: "someencodeditemID", + // Data: exchMock.ContactBytes("Ghimley"), + // LookupKey: "Ghimley", + // }, + // { + // Name: "someencodeditemID2", + // Data: exchMock.ContactBytes("Irgot"), + // LookupKey: "Irgot", + // }, + // { + // Name: "someencodeditemID3", + // Data: exchMock.ContactBytes("Jannes"), + // LookupKey: "Jannes", + // }, + // }, + // }, + // { + // PathElements: []string{"Personal"}, + // Category: path.ContactsCategory, + // Items: []stub.ItemInfo{ + // { + // Name: "someencodeditemID4", + // Data: exchMock.ContactBytes("Argon"), + // LookupKey: "Argon", + // }, + // { + // Name: "someencodeditemID5", + // Data: exchMock.ContactBytes("Bernard"), + // LookupKey: "Bernard", + // }, + // }, + // }, + // }, + //}, // { // name: "MultipleEventsSingleCalendar", // service: path.ExchangeService, @@ -1017,34 +1045,35 @@ func (suite *ControllerIntegrationSuite) TestRestoreAndBackup_core() { func (suite *ControllerIntegrationSuite) TestMultiFolderBackupDifferentNames() { table := []restoreBackupInfo{ - { - name: "Contacts", - service: path.ExchangeService, - collections: []stub.ColInfo{ - { - PathElements: []string{"Work"}, - Category: path.ContactsCategory, - Items: []stub.ItemInfo{ - { - Name: "someencodeditemID", - Data: exchMock.ContactBytes("Ghimley"), - LookupKey: "Ghimley", - }, - }, - }, - { - PathElements: []string{"Personal"}, - Category: path.ContactsCategory, - Items: []stub.ItemInfo{ - { - Name: "someencodeditemID2", - Data: exchMock.ContactBytes("Irgot"), - LookupKey: "Irgot", - }, - }, - }, - }, - }, + // TODO(ashmrtn): Re-enable when we can restore contacts to nested folders. 
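+		// (Contact restore currently drops folder nesting and places every
+		// contact directly in the restore destination folder, so the
+		// multi-folder expectations below can't be verified yet.)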
+ //{ + // name: "Contacts", + // service: path.ExchangeService, + // collections: []stub.ColInfo{ + // { + // PathElements: []string{"Work"}, + // Category: path.ContactsCategory, + // Items: []stub.ItemInfo{ + // { + // Name: "someencodeditemID", + // Data: exchMock.ContactBytes("Ghimley"), + // LookupKey: "Ghimley", + // }, + // }, + // }, + // { + // PathElements: []string{"Personal"}, + // Category: path.ContactsCategory, + // Items: []stub.ItemInfo{ + // { + // Name: "someencodeditemID2", + // Data: exchMock.ContactBytes("Irgot"), + // LookupKey: "Irgot", + // }, + // }, + // }, + // }, + //}, // { // name: "Events", // service: path.ExchangeService, diff --git a/src/internal/m365/helper_test.go b/src/internal/m365/helper_test.go index b875f7c91..aee2c11bb 100644 --- a/src/internal/m365/helper_test.go +++ b/src/internal/m365/helper_test.go @@ -919,30 +919,9 @@ func checkHasCollections( continue } - fp := g.FullPath() loc := g.(data.LocationPather).LocationPath() - if fp.Service() == path.OneDriveService || - (fp.Service() == path.SharePointService && fp.Category() == path.LibrariesCategory) { - dp, err := path.ToDrivePath(fp) - if !assert.NoError(t, err, clues.ToCore(err)) { - continue - } - - loc = path.BuildDriveLocation(dp.DriveID, loc.Elements()...) - } - - p, err := loc.ToDataLayerPath( - fp.Tenant(), - fp.ProtectedResource(), - fp.Service(), - fp.Category(), - false) - if !assert.NoError(t, err, clues.ToCore(err)) { - continue - } - - gotNames = append(gotNames, p.String()) + gotNames = append(gotNames, loc.String()) } assert.ElementsMatch(t, expectedNames, gotNames, "returned collections") @@ -963,14 +942,18 @@ func checkCollections( for _, returned := range got { var ( - hasItems bool - service = returned.FullPath().Service() - category = returned.FullPath().Category() - expectedColData = expected[returned.FullPath().String()] - folders = returned.FullPath().Elements() - rootDir = folders[len(folders)-1] == mci.RestoreCfg.Location + expectedColDataByLoc map[string][]byte + hasItems bool + service = returned.FullPath().Service() + category = returned.FullPath().Category() + folders = returned.FullPath().Elements() + rootDir = folders[len(folders)-1] == mci.RestoreCfg.Location ) + if p, ok := returned.(data.LocationPather); ok { + expectedColDataByLoc = expected[p.LocationPath().String()] + } + // Need to iterate through all items even if we don't expect to find a match // because otherwise we'll deadlock waiting for the status. Unexpected or // missing collection paths will be reported by checkHasCollections. @@ -990,14 +973,14 @@ func checkCollections( hasItems = true gotItems++ - if expectedColData == nil { + if expectedColDataByLoc == nil { continue } if !compareItem( t, returned.FullPath(), - expectedColData, + expectedColDataByLoc, service, category, item, diff --git a/src/internal/m365/stub/stub.go b/src/internal/m365/stub/stub.go index 49f27716c..b0c0104a1 100644 --- a/src/internal/m365/stub/stub.go +++ b/src/internal/m365/stub/stub.go @@ -4,6 +4,7 @@ import ( "bytes" "io" + "github.com/alcionai/clues" "golang.org/x/exp/maps" "github.com/alcionai/corso/src/internal/data" @@ -163,28 +164,29 @@ func CollectionsForInfo( func backupOutputPathFromRestore( restoreCfg control.RestoreConfig, inputPath path.Path, -) (path.Path, error) { +) (*path.Builder, error) { base := []string{restoreCfg.Location} + folders := inputPath.Folders() + switch inputPath.Service() { // OneDrive has leading information like the drive ID. 
- if inputPath.Service() == path.OneDriveService || inputPath.Service() == path.SharePointService { - folders := inputPath.Folders() - base = append(append([]string{}, folders[:3]...), restoreCfg.Location) + case path.OneDriveService, path.SharePointService: + p, err := path.ToDrivePath(inputPath) + if err != nil { + return nil, clues.Stack(err) + } - if len(folders) > 3 { - base = append(base, folders[3:]...) + // Remove driveID, root, etc. + folders = p.Folders + // Re-add root, but it needs to be in front of the restore folder. + base = append([]string{p.Root}, base...) + + // Currently contacts restore doesn't have nested folders. + case path.ExchangeService: + if inputPath.Category() == path.ContactsCategory { + folders = nil } } - if inputPath.Service() == path.ExchangeService && inputPath.Category() == path.EmailCategory { - base = append(base, inputPath.Folders()...) - } - - return path.Build( - inputPath.Tenant(), - inputPath.ProtectedResource(), - inputPath.Service(), - inputPath.Category(), - false, - base...) + return path.Builder{}.Append(append(base, folders...)...), nil } From f0ccf35b5c8b1ec4f7085ae2a16816ead6d0da47 Mon Sep 17 00:00:00 2001 From: ashmrtn <3891298+ashmrtn@users.noreply.github.com> Date: Fri, 29 Sep 2023 13:16:01 -0700 Subject: [PATCH 15/26] Inject versions in generic item (#4409) Leverage the generic item struct to inject serialization format information for all items Unwires the old code that injected versions in kopia wrapper but leaves some code in the wrapper to strip out the serialization format during restore Future PRs should move the process of pulling out serialization format to individual services Viewing by commit may make review easier --- #### Does this PR need a docs update or release note? - [ ] :white_check_mark: Yes, it's included - [ ] :clock1: Yes, but in a later PR - [x] :no_entry: No #### Type of change - [x] :sunflower: Feature - [ ] :bug: Bugfix - [ ] :world_map: Documentation - [ ] :robot: Supportability/Tests - [ ] :computer: CI/Deployment - [ ] :broom: Tech Debt/Cleanup #### Issue(s) * #4328 #### Test Plan - [x] :muscle: Manual - [x] :zap: Unit test - [x] :green_heart: E2E --- .../common/readers/serialization_version.go | 10 +- src/internal/data/item.go | 65 ++++--- src/internal/data/item_test.go | 71 +++++-- src/internal/kopia/data_collection.go | 37 +++- src/internal/kopia/data_collection_test.go | 75 +++++--- src/internal/kopia/merge_collection_test.go | 63 ++++-- src/internal/kopia/upload.go | 6 +- src/internal/kopia/upload_test.go | 50 ++--- src/internal/kopia/wrapper.go | 5 +- src/internal/kopia/wrapper_test.go | 30 +-- .../m365/collection/drive/collection.go | 17 +- .../m365/collection/drive/collection_test.go | 27 ++- .../m365/collection/drive/collections_test.go | 13 +- .../m365/collection/exchange/backup_test.go | 32 ++- .../m365/collection/exchange/collection.go | 13 +- .../collection/exchange/collection_test.go | 182 ++++++++++++------ .../m365/collection/groups/collection.go | 28 ++- .../m365/collection/groups/collection_test.go | 12 +- .../m365/collection/site/collection.go | 16 +- .../m365/collection/site/collection_test.go | 9 +- .../m365/graph/metadata_collection.go | 13 +- .../m365/graph/metadata_collection_test.go | 30 ++- src/internal/m365/helper_test.go | 52 ++++- .../m365/service/sharepoint/api/pages_test.go | 3 +- src/internal/streamstore/streamstore.go | 13 +- 25 files changed, 606 insertions(+), 266 deletions(-) diff --git a/src/internal/common/readers/serialization_version.go 
b/src/internal/common/readers/serialization_version.go index a6713f959..f203c3233 100644 --- a/src/internal/common/readers/serialization_version.go +++ b/src/internal/common/readers/serialization_version.go @@ -38,8 +38,8 @@ type SerializationVersion uint16 const DefaultSerializationVersion SerializationVersion = 1 const ( - versionFormatSize = int(unsafe.Sizeof(persistedSerializationVersion(0))) - delInFlightMask persistedSerializationVersion = 1 << ((versionFormatSize * 8) - 1) + VersionFormatSize = int(unsafe.Sizeof(persistedSerializationVersion(0))) + delInFlightMask persistedSerializationVersion = 1 << ((VersionFormatSize * 8) - 1) ) // SerializationFormat is a struct describing serialization format versions and @@ -68,7 +68,7 @@ func NewVersionedBackupReader( formattedVersion |= delInFlightMask } - formattedBuf := make([]byte, versionFormatSize) + formattedBuf := make([]byte, VersionFormatSize) binary.BigEndian.PutUint32(formattedBuf, formattedVersion) versionReader := io.NopCloser(bytes.NewReader(formattedBuf)) @@ -139,10 +139,10 @@ func (vbr *versionedBackupReader) Close() error { func NewVersionedRestoreReader( baseReader io.ReadCloser, ) (*VersionedRestoreReader, error) { - versionBuf := make([]byte, versionFormatSize) + versionBuf := make([]byte, VersionFormatSize) // Loop to account for the unlikely case where we get a short read. - for read := 0; read < versionFormatSize; { + for read := 0; read < VersionFormatSize; { n, err := baseReader.Read(versionBuf[read:]) if err != nil { return nil, clues.Wrap(err, "reading serialization version") diff --git a/src/internal/data/item.go b/src/internal/data/item.go index 6d316ad6b..c6cb064e7 100644 --- a/src/internal/data/item.go +++ b/src/internal/data/item.go @@ -1,7 +1,6 @@ package data import ( - "bytes" "context" "io" "sync" @@ -10,6 +9,7 @@ import ( "github.com/alcionai/clues" "github.com/spatialcurrent/go-lazy/pkg/lazy" + "github.com/alcionai/corso/src/internal/common/readers" "github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/logger" @@ -46,12 +46,19 @@ func NewUnindexedPrefetchedItem( reader io.ReadCloser, itemID string, modTime time.Time, -) Item { +) (*unindexedPrefetchedItem, error) { + r, err := readers.NewVersionedBackupReader( + readers.SerializationFormat{Version: readers.DefaultSerializationVersion}, + reader) + if err != nil { + return nil, clues.Stack(err) + } + return &unindexedPrefetchedItem{ id: itemID, - reader: reader, + reader: r, modTime: modTime, - } + }, nil } // unindexedPrefetchedItem represents a single item retrieved from the remote @@ -92,15 +99,16 @@ func NewPrefetchedItem( reader io.ReadCloser, itemID string, info details.ItemInfo, -) Item { - return &prefetchedItem{ - unindexedPrefetchedItem: unindexedPrefetchedItem{ - id: itemID, - reader: reader, - modTime: info.Modified(), - }, - info: info, +) (*prefetchedItem, error) { + inner, err := NewUnindexedPrefetchedItem(reader, itemID, info.Modified()) + if err != nil { + return nil, clues.Stack(err) } + + return &prefetchedItem{ + unindexedPrefetchedItem: inner, + info: info, + }, nil } // prefetchedItem represents a single item retrieved from the remote service. @@ -108,7 +116,7 @@ func NewPrefetchedItem( // This item implements ItemInfo so it should be used for things that need to // appear in backup details. 
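+//
+// A minimal usage sketch (body and info are placeholder values):
+//
+//	item, err := NewPrefetchedItem(io.NopCloser(bytes.NewReader(body)), "item-id", info)
+//	// On success, item.ToReader() yields the 4-byte serialization format
+//	// header followed by the bytes of body.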
type prefetchedItem struct { - unindexedPrefetchedItem + *unindexedPrefetchedItem info details.ItemInfo } @@ -129,7 +137,7 @@ func NewUnindexedLazyItem( itemID string, modTime time.Time, errs *fault.Bus, -) Item { +) *unindexedLazyItem { return &unindexedLazyItem{ ctx: ctx, id: itemID, @@ -182,6 +190,10 @@ func (i *unindexedLazyItem) ToReader() io.ReadCloser { return nil, clues.Stack(err) } + format := readers.SerializationFormat{ + Version: readers.DefaultSerializationVersion, + } + // If an item was deleted then return an empty file so we don't fail the // backup and return a sentinel error when asked for ItemInfo so we don't // display the item in the backup. @@ -193,13 +205,17 @@ func (i *unindexedLazyItem) ToReader() io.ReadCloser { logger.Ctx(i.ctx).Info("item not found") i.delInFlight = true + format.DelInFlight = true + r, err := readers.NewVersionedBackupReader(format) - return io.NopCloser(bytes.NewReader([]byte{})), nil + return r, clues.Stack(err).OrNil() } i.info = info - return reader, nil + r, err := readers.NewVersionedBackupReader(format, reader) + + return r, clues.Stack(err).OrNil() }) } @@ -217,15 +233,14 @@ func NewLazyItem( itemID string, modTime time.Time, errs *fault.Bus, -) Item { +) *lazyItem { return &lazyItem{ - unindexedLazyItem: unindexedLazyItem{ - ctx: ctx, - id: itemID, - itemGetter: itemGetter, - modTime: modTime, - errs: errs, - }, + unindexedLazyItem: NewUnindexedLazyItem( + ctx, + itemGetter, + itemID, + modTime, + errs), } } @@ -236,7 +251,7 @@ func NewLazyItem( // This item implements ItemInfo so it should be used for things that need to // appear in backup details. type lazyItem struct { - unindexedLazyItem + *unindexedLazyItem } func (i *lazyItem) Info() (details.ItemInfo, error) { diff --git a/src/internal/data/item_test.go b/src/internal/data/item_test.go index 9484613e4..f0c7e9009 100644 --- a/src/internal/data/item_test.go +++ b/src/internal/data/item_test.go @@ -12,6 +12,7 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + "github.com/alcionai/corso/src/internal/common/readers" "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/pkg/backup/details" @@ -50,11 +51,15 @@ func TestItemUnitSuite(t *testing.T) { } func (suite *ItemUnitSuite) TestUnindexedPrefetchedItem() { - prefetch := data.NewUnindexedPrefetchedItem( + prefetch, err := data.NewUnindexedPrefetchedItem( io.NopCloser(bytes.NewReader([]byte{})), "foo", time.Time{}) - _, ok := prefetch.(data.ItemInfo) + require.NoError(suite.T(), err, clues.ToCore(err)) + + var item data.Item = prefetch + + _, ok := item.(data.ItemInfo) assert.False(suite.T(), ok, "unindexedPrefetchedItem implements Info()") } @@ -70,7 +75,10 @@ func (suite *ItemUnitSuite) TestUnindexedLazyItem() { "foo", time.Time{}, fault.New(true)) - _, ok := lazy.(data.ItemInfo) + + var item data.Item = lazy + + _, ok := item.(data.ItemInfo) assert.False(t, ok, "unindexedLazyItem implements Info()") } @@ -140,18 +148,29 @@ func (suite *ItemUnitSuite) TestPrefetchedItem() { suite.Run(test.name, func() { t := suite.T() - item := data.NewPrefetchedItem(test.reader, id, test.info) + item, err := data.NewPrefetchedItem(test.reader, id, test.info) + require.NoError(t, err, clues.ToCore(err)) assert.Equal(t, id, item.ID(), "ID") assert.False(t, item.Deleted(), "deleted") assert.Equal( t, test.info.Modified(), - item.(data.ItemModTime).ModTime(), + item.ModTime(), "mod time") - readData, err := io.ReadAll(item.ToReader()) - 
test.readErr(t, err, clues.ToCore(err), "read error") + r, err := readers.NewVersionedRestoreReader(item.ToReader()) + require.NoError(t, err, "version error: %v", clues.ToCore(err)) + + if err != nil { + return + } + + assert.Equal(t, readers.DefaultSerializationVersion, r.Format().Version) + assert.False(t, r.Format().DelInFlight) + + readData, err := io.ReadAll(r) + test.readErr(t, err, "read error: %v", clues.ToCore(err)) assert.Equal(t, test.expectData, readData, "read data") }) } @@ -194,6 +213,7 @@ func (suite *ItemUnitSuite) TestLazyItem() { table := []struct { name string mid *mockItemDataGetter + versionErr assert.ErrorAssertionFunc readErr assert.ErrorAssertionFunc infoErr assert.ErrorAssertionFunc expectData []byte @@ -205,6 +225,7 @@ func (suite *ItemUnitSuite) TestLazyItem() { reader: io.NopCloser(bytes.NewReader([]byte{})), info: &details.ItemInfo{Exchange: &details.ExchangeInfo{Modified: now}}, }, + versionErr: assert.NoError, readErr: assert.NoError, infoErr: assert.NoError, expectData: []byte{}, @@ -215,6 +236,7 @@ func (suite *ItemUnitSuite) TestLazyItem() { reader: io.NopCloser(bytes.NewReader(baseData)), info: &details.ItemInfo{Exchange: &details.ExchangeInfo{Modified: now}}, }, + versionErr: assert.NoError, readErr: assert.NoError, infoErr: assert.NoError, expectData: baseData, @@ -225,6 +247,7 @@ func (suite *ItemUnitSuite) TestLazyItem() { reader: io.NopCloser(bytes.NewReader(baseData)), info: &details.ItemInfo{OneDrive: &details.OneDriveInfo{Modified: now}}, }, + versionErr: assert.NoError, readErr: assert.NoError, infoErr: assert.NoError, expectData: baseData, @@ -234,6 +257,7 @@ func (suite *ItemUnitSuite) TestLazyItem() { mid: &mockItemDataGetter{ err: assert.AnError, }, + versionErr: assert.Error, readErr: assert.Error, infoErr: assert.Error, expectData: []byte{}, @@ -249,6 +273,7 @@ func (suite *ItemUnitSuite) TestLazyItem() { }, info: &details.ItemInfo{OneDrive: &details.OneDriveInfo{Modified: now}}, }, + versionErr: assert.NoError, readErr: assert.Error, infoErr: assert.NoError, expectData: baseData[:5], @@ -278,15 +303,25 @@ func (suite *ItemUnitSuite) TestLazyItem() { assert.Equal( t, now, - item.(data.ItemModTime).ModTime(), + item.ModTime(), "mod time") // Read data to execute lazy reader. - readData, err := io.ReadAll(item.ToReader()) + r, err := readers.NewVersionedRestoreReader(item.ToReader()) + test.versionErr(t, err, "version error: %v", clues.ToCore(err)) + + if err != nil { + return + } + + assert.Equal(t, readers.DefaultSerializationVersion, r.Format().Version) + assert.False(t, r.Format().DelInFlight) + + readData, err := io.ReadAll(r) test.readErr(t, err, clues.ToCore(err), "read error") assert.Equal(t, test.expectData, readData, "read data") - _, err = item.(data.ItemInfo).Info() + _, err = item.Info() test.infoErr(t, err, "Info(): %v", clues.ToCore(err)) e := errs.Errors() @@ -326,15 +361,21 @@ func (suite *ItemUnitSuite) TestLazyItem_DeletedInFlight() { assert.Equal( t, now, - item.(data.ItemModTime).ModTime(), + item.ModTime(), "mod time") // Read data to execute lazy reader. 
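+			// (The lazy reader only invokes itemGetter when the data is first
+			// read, so Info is populated only after this point; calling Info
+			// earlier is covered by TestLazyItem_InfoBeforeReadErrors.)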
- readData, err := io.ReadAll(item.ToReader()) + r, err := readers.NewVersionedRestoreReader(item.ToReader()) + require.NoError(t, err, "version error: %v", clues.ToCore(err)) + + assert.Equal(t, readers.DefaultSerializationVersion, r.Format().Version) + assert.True(t, r.Format().DelInFlight) + + readData, err := io.ReadAll(r) require.NoError(t, err, clues.ToCore(err), "read error") assert.Empty(t, readData, "read data") - _, err = item.(data.ItemInfo).Info() + _, err = item.Info() assert.ErrorIs(t, err, data.ErrNotFound, "Info() error") e := errs.Errors() @@ -366,9 +407,9 @@ func (suite *ItemUnitSuite) TestLazyItem_InfoBeforeReadErrors() { assert.Equal( t, now, - item.(data.ItemModTime).ModTime(), + item.ModTime(), "mod time") - _, err := item.(data.ItemInfo).Info() + _, err := item.Info() assert.Error(t, err, "Info() error") } diff --git a/src/internal/kopia/data_collection.go b/src/internal/kopia/data_collection.go index 319914f1a..c5899afdf 100644 --- a/src/internal/kopia/data_collection.go +++ b/src/internal/kopia/data_collection.go @@ -7,6 +7,7 @@ import ( "github.com/alcionai/clues" "github.com/kopia/kopia/fs" + "github.com/alcionai/corso/src/internal/common/readers" "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/logger" @@ -16,6 +17,7 @@ import ( var ( _ data.RestoreCollection = &kopiaDataCollection{} _ data.Item = &kopiaDataStream{} + _ data.ItemSize = &kopiaDataStream{} ) type kopiaDataCollection struct { @@ -23,7 +25,7 @@ type kopiaDataCollection struct { dir fs.Directory items []string counter ByteCounter - expectedVersion uint32 + expectedVersion readers.SerializationVersion } func (kdc *kopiaDataCollection) Items( @@ -102,7 +104,7 @@ func (kdc kopiaDataCollection) FetchItemByName( return nil, clues.New("object is not a file").WithClues(ctx) } - size := f.Size() - int64(versionSize) + size := f.Size() - int64(readers.VersionFormatSize) if size < 0 { logger.Ctx(ctx).Infow("negative file size; resetting to 0", "file_size", size) @@ -118,13 +120,32 @@ func (kdc kopiaDataCollection) FetchItemByName( return nil, clues.Wrap(err, "opening file").WithClues(ctx) } + // TODO(ashmrtn): Remove this when individual services implement checks for + // version and deleted items. + rr, err := readers.NewVersionedRestoreReader(r) + if err != nil { + return nil, clues.Stack(err).WithClues(ctx) + } + + if rr.Format().Version != kdc.expectedVersion { + return nil, clues.New("unexpected data format"). + WithClues(ctx). + With( + "read_version", rr.Format().Version, + "expected_version", kdc.expectedVersion) + } + + // This is a conservative check, but we shouldn't be seeing items that were + // deleted in flight during restores because there's no way to select them. + if rr.Format().DelInFlight { + return nil, clues.New("selected item marked as deleted in flight"). 
+ WithClues(ctx) + } + return &kopiaDataStream{ - id: name, - reader: &restoreStreamReader{ - ReadCloser: r, - expectedVersion: kdc.expectedVersion, - }, - size: size, + id: name, + reader: rr, + size: size, }, nil } diff --git a/src/internal/kopia/data_collection_test.go b/src/internal/kopia/data_collection_test.go index d587730ca..4b1b4a4b2 100644 --- a/src/internal/kopia/data_collection_test.go +++ b/src/internal/kopia/data_collection_test.go @@ -13,6 +13,7 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + "github.com/alcionai/corso/src/internal/common/readers" "github.com/alcionai/corso/src/internal/data" dataMock "github.com/alcionai/corso/src/internal/data/mock" "github.com/alcionai/corso/src/internal/tester" @@ -121,25 +122,35 @@ func (suite *KopiaDataCollectionUnitSuite) TestReturnsStreams() { ) // Needs to be a function so the readers get refreshed each time. - getLayout := func() fs.Directory { + getLayout := func(t *testing.T) fs.Directory { + format := readers.SerializationFormat{ + Version: readers.DefaultSerializationVersion, + } + + r1, err := readers.NewVersionedBackupReader( + format, + io.NopCloser(bytes.NewReader(files[0].data))) + require.NoError(t, err, clues.ToCore(err)) + + r2, err := readers.NewVersionedBackupReader( + format, + io.NopCloser(bytes.NewReader(files[1].data))) + require.NoError(t, err, clues.ToCore(err)) + return virtualfs.NewStaticDirectory(encodeAsPath("foo"), []fs.Entry{ &mockFile{ StreamingFile: virtualfs.StreamingFileFromReader( encodeAsPath(files[0].uuid), nil), - r: newBackupStreamReader( - serializationVersion, - io.NopCloser(bytes.NewReader(files[0].data))), - size: int64(len(files[0].data) + versionSize), + r: r1, + size: int64(len(files[0].data) + readers.VersionFormatSize), }, &mockFile{ StreamingFile: virtualfs.StreamingFileFromReader( encodeAsPath(files[1].uuid), nil), - r: newBackupStreamReader( - serializationVersion, - io.NopCloser(bytes.NewReader(files[1].data))), - size: int64(len(files[1].data) + versionSize), + r: r2, + size: int64(len(files[1].data) + readers.VersionFormatSize), }, &mockFile{ StreamingFile: virtualfs.StreamingFileFromReader( @@ -224,10 +235,10 @@ func (suite *KopiaDataCollectionUnitSuite) TestReturnsStreams() { } c := kopiaDataCollection{ - dir: getLayout(), + dir: getLayout(t), path: nil, items: items, - expectedVersion: serializationVersion, + expectedVersion: readers.DefaultSerializationVersion, } var ( @@ -291,23 +302,34 @@ func (suite *KopiaDataCollectionUnitSuite) TestFetchItemByName() { // Needs to be a function so we can switch the serialization version as // needed. 
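+	// A mismatched version now surfaces as an error from FetchItemByName
+	// itself rather than from the subsequent read (see the
+	// FileFound_VersionError case below).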
- getLayout := func(serVersion uint32) fs.Directory { + getLayout := func( + t *testing.T, + serVersion readers.SerializationVersion, + ) fs.Directory { + format := readers.SerializationFormat{Version: serVersion} + + r1, err := readers.NewVersionedBackupReader( + format, + io.NopCloser(bytes.NewReader([]byte(noErrFileData)))) + require.NoError(t, err, clues.ToCore(err)) + + r2, err := readers.NewVersionedBackupReader( + format, + errReader.ToReader()) + require.NoError(t, err, clues.ToCore(err)) + return virtualfs.NewStaticDirectory(encodeAsPath(folder2), []fs.Entry{ &mockFile{ StreamingFile: virtualfs.StreamingFileFromReader( encodeAsPath(noErrFileName), nil), - r: newBackupStreamReader( - serVersion, - io.NopCloser(bytes.NewReader([]byte(noErrFileData)))), + r: r1, }, &mockFile{ StreamingFile: virtualfs.StreamingFileFromReader( encodeAsPath(errFileName), nil), - r: newBackupStreamReader( - serVersion, - errReader.ToReader()), + r: r2, }, &mockFile{ StreamingFile: virtualfs.StreamingFileFromReader( @@ -330,7 +352,7 @@ func (suite *KopiaDataCollectionUnitSuite) TestFetchItemByName() { table := []struct { name string inputName string - inputSerializationVersion uint32 + inputSerializationVersion readers.SerializationVersion expectedData []byte lookupErr assert.ErrorAssertionFunc readErr assert.ErrorAssertionFunc @@ -339,7 +361,7 @@ func (suite *KopiaDataCollectionUnitSuite) TestFetchItemByName() { { name: "FileFound_NoError", inputName: noErrFileName, - inputSerializationVersion: serializationVersion, + inputSerializationVersion: readers.DefaultSerializationVersion, expectedData: []byte(noErrFileData), lookupErr: assert.NoError, readErr: assert.NoError, @@ -347,21 +369,20 @@ func (suite *KopiaDataCollectionUnitSuite) TestFetchItemByName() { { name: "FileFound_ReadError", inputName: errFileName, - inputSerializationVersion: serializationVersion, + inputSerializationVersion: readers.DefaultSerializationVersion, lookupErr: assert.NoError, readErr: assert.Error, }, { name: "FileFound_VersionError", inputName: noErrFileName, - inputSerializationVersion: serializationVersion + 1, - lookupErr: assert.NoError, - readErr: assert.Error, + inputSerializationVersion: readers.DefaultSerializationVersion + 1, + lookupErr: assert.Error, }, { name: "FileNotFound", inputName: "foo", - inputSerializationVersion: serializationVersion + 1, + inputSerializationVersion: readers.DefaultSerializationVersion + 1, lookupErr: assert.Error, notFoundErr: true, }, @@ -373,14 +394,14 @@ func (suite *KopiaDataCollectionUnitSuite) TestFetchItemByName() { ctx, flush := tester.NewContext(t) defer flush() - root := getLayout(test.inputSerializationVersion) + root := getLayout(t, test.inputSerializationVersion) c := &i64counter{} col := &kopiaDataCollection{ path: pth, dir: root, counter: c, - expectedVersion: serializationVersion, + expectedVersion: readers.DefaultSerializationVersion, } s, err := col.FetchItemByName(ctx, test.inputName) diff --git a/src/internal/kopia/merge_collection_test.go b/src/internal/kopia/merge_collection_test.go index f89c2dd95..fefbfbb15 100644 --- a/src/internal/kopia/merge_collection_test.go +++ b/src/internal/kopia/merge_collection_test.go @@ -12,6 +12,7 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + "github.com/alcionai/corso/src/internal/common/readers" "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/m365/service/exchange/mock" "github.com/alcionai/corso/src/internal/tester" @@ -150,20 +151,27 @@ func (suite 
*MergeCollectionUnitSuite) TestFetchItemByName() { require.NoError(suite.T(), err, clues.ToCore(err)) // Needs to be a function so the readers get refreshed each time. - layouts := []func() fs.Directory{ + layouts := []func(t *testing.T) fs.Directory{ // Has the following; // - file1: data[0] // - errOpen: (error opening file) - func() fs.Directory { + func(t *testing.T) fs.Directory { + format := readers.SerializationFormat{ + Version: readers.DefaultSerializationVersion, + } + + r1, err := readers.NewVersionedBackupReader( + format, + io.NopCloser(bytes.NewReader(fileData1))) + require.NoError(t, err, clues.ToCore(err)) + return virtualfs.NewStaticDirectory(encodeAsPath(colPaths[0]), []fs.Entry{ &mockFile{ StreamingFile: virtualfs.StreamingFileFromReader( encodeAsPath(fileName1), nil), - r: newBackupStreamReader( - serializationVersion, - io.NopCloser(bytes.NewReader(fileData1))), - size: int64(len(fileData1) + versionSize), + r: r1, + size: int64(len(fileData1) + readers.VersionFormatSize), }, &mockFile{ StreamingFile: virtualfs.StreamingFileFromReader( @@ -178,34 +186,47 @@ func (suite *MergeCollectionUnitSuite) TestFetchItemByName() { // - file1: data[1] // - file2: data[0] // - errOpen: data[2] - func() fs.Directory { + func(t *testing.T) fs.Directory { + format := readers.SerializationFormat{ + Version: readers.DefaultSerializationVersion, + } + + r1, err := readers.NewVersionedBackupReader( + format, + io.NopCloser(bytes.NewReader(fileData2))) + require.NoError(t, err, clues.ToCore(err)) + + r2, err := readers.NewVersionedBackupReader( + format, + io.NopCloser(bytes.NewReader(fileData1))) + require.NoError(t, err, clues.ToCore(err)) + + r3, err := readers.NewVersionedBackupReader( + format, + io.NopCloser(bytes.NewReader(fileData3))) + require.NoError(t, err, clues.ToCore(err)) + return virtualfs.NewStaticDirectory(encodeAsPath(colPaths[1]), []fs.Entry{ &mockFile{ StreamingFile: virtualfs.StreamingFileFromReader( encodeAsPath(fileName1), nil), - r: newBackupStreamReader( - serializationVersion, - io.NopCloser(bytes.NewReader(fileData2))), - size: int64(len(fileData2) + versionSize), + r: r1, + size: int64(len(fileData2) + readers.VersionFormatSize), }, &mockFile{ StreamingFile: virtualfs.StreamingFileFromReader( encodeAsPath(fileName2), nil), - r: newBackupStreamReader( - serializationVersion, - io.NopCloser(bytes.NewReader(fileData1))), - size: int64(len(fileData1) + versionSize), + r: r2, + size: int64(len(fileData1) + readers.VersionFormatSize), }, &mockFile{ StreamingFile: virtualfs.StreamingFileFromReader( encodeAsPath(fileOpenErrName), nil), - r: newBackupStreamReader( - serializationVersion, - io.NopCloser(bytes.NewReader(fileData3))), - size: int64(len(fileData3) + versionSize), + r: r3, + size: int64(len(fileData3) + readers.VersionFormatSize), }, }) }, @@ -257,9 +278,9 @@ func (suite *MergeCollectionUnitSuite) TestFetchItemByName() { for i, layout := range layouts { col := &kopiaDataCollection{ path: pth, - dir: layout(), + dir: layout(t), counter: c, - expectedVersion: serializationVersion, + expectedVersion: readers.DefaultSerializationVersion, } err := dc.addCollection(colPaths[i], col) diff --git a/src/internal/kopia/upload.go b/src/internal/kopia/upload.go index bc7a1f034..c1e1351e5 100644 --- a/src/internal/kopia/upload.go +++ b/src/internal/kopia/upload.go @@ -13,7 +13,6 @@ import ( "sync" "sync/atomic" "time" - "unsafe" "github.com/alcionai/clues" "github.com/kopia/kopia/fs" @@ -24,6 +23,7 @@ import ( "github.com/alcionai/corso/src/internal/common/prefixmatcher" 
"github.com/alcionai/corso/src/internal/common/ptr" + "github.com/alcionai/corso/src/internal/common/readers" "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/diagnostics" "github.com/alcionai/corso/src/internal/m365/graph" @@ -37,7 +37,7 @@ import ( const maxInflateTraversalDepth = 500 -var versionSize = int(unsafe.Sizeof(serializationVersion)) +var versionSize = readers.VersionFormatSize func newBackupStreamReader(version uint32, reader io.ReadCloser) *backupStreamReader { buf := make([]byte, versionSize) @@ -436,7 +436,7 @@ func collectionEntries( entry := virtualfs.StreamingFileWithModTimeFromReader( encodedName, modTime, - newBackupStreamReader(serializationVersion, e.ToReader())) + e.ToReader()) err = ctr(ctx, entry) if err != nil { diff --git a/src/internal/kopia/upload_test.go b/src/internal/kopia/upload_test.go index fd74cd9fa..c88da8af0 100644 --- a/src/internal/kopia/upload_test.go +++ b/src/internal/kopia/upload_test.go @@ -124,12 +124,6 @@ func expectFileData( return } - // Need to wrap with a restore stream reader to remove the version. - r = &restoreStreamReader{ - ReadCloser: io.NopCloser(r), - expectedVersion: serializationVersion, - } - got, err := io.ReadAll(r) if !assert.NoError(t, err, "reading data in file", name, clues.ToCore(err)) { return @@ -2420,9 +2414,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSelectsCorrectSubt encodeElements(inboxFileName1)[0], time.Time{}, // Wrap with a backup reader so it gets the version injected. - newBackupStreamReader( - serializationVersion, - io.NopCloser(bytes.NewReader(inboxFileData1v2)))), + io.NopCloser(bytes.NewReader(inboxFileData1v2))), }), }), virtualfs.NewStaticDirectory( @@ -2582,9 +2574,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSelectsMigrateSubt virtualfs.StreamingFileWithModTimeFromReader( encodeElements(inboxFileName1)[0], time.Time{}, - newBackupStreamReader( - serializationVersion, - io.NopCloser(bytes.NewReader(inboxFileData1)))), + io.NopCloser(bytes.NewReader(inboxFileData1))), }), }), virtualfs.NewStaticDirectory( @@ -2596,9 +2586,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSelectsMigrateSubt virtualfs.StreamingFileWithModTimeFromReader( encodeElements(contactsFileName1)[0], time.Time{}, - newBackupStreamReader( - serializationVersion, - io.NopCloser(bytes.NewReader(contactsFileData1)))), + io.NopCloser(bytes.NewReader(contactsFileData1))), }), }), }) @@ -2817,15 +2805,11 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_SelectiveSubtreeP virtualfs.StreamingFileWithModTimeFromReader( encodeElements(fileName5)[0], time.Time{}, - newBackupStreamReader( - serializationVersion, - io.NopCloser(bytes.NewReader(fileData5)))), + io.NopCloser(bytes.NewReader(fileData5))), virtualfs.StreamingFileWithModTimeFromReader( encodeElements(fileName6)[0], time.Time{}, - newBackupStreamReader( - serializationVersion, - io.NopCloser(bytes.NewReader(fileData6)))), + io.NopCloser(bytes.NewReader(fileData6))), }) counters[folderID3] = count @@ -2835,15 +2819,11 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_SelectiveSubtreeP virtualfs.StreamingFileWithModTimeFromReader( encodeElements(fileName3)[0], time.Time{}, - newBackupStreamReader( - serializationVersion, - io.NopCloser(bytes.NewReader(fileData3)))), + io.NopCloser(bytes.NewReader(fileData3))), virtualfs.StreamingFileWithModTimeFromReader( encodeElements(fileName4)[0], time.Time{}, - newBackupStreamReader( - serializationVersion, - 
io.NopCloser(bytes.NewReader(fileData4)))), + io.NopCloser(bytes.NewReader(fileData4))), folder, }) counters[folderID2] = count @@ -2859,15 +2839,11 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_SelectiveSubtreeP virtualfs.StreamingFileWithModTimeFromReader( encodeElements(fileName1)[0], time.Time{}, - newBackupStreamReader( - serializationVersion, - io.NopCloser(bytes.NewReader(fileData1)))), + io.NopCloser(bytes.NewReader(fileData1))), virtualfs.StreamingFileWithModTimeFromReader( encodeElements(fileName2)[0], time.Time{}, - newBackupStreamReader( - serializationVersion, - io.NopCloser(bytes.NewReader(fileData2)))), + io.NopCloser(bytes.NewReader(fileData2))), folder, folder4, }) @@ -2879,15 +2855,11 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_SelectiveSubtreeP virtualfs.StreamingFileWithModTimeFromReader( encodeElements(fileName7)[0], time.Time{}, - newBackupStreamReader( - serializationVersion, - io.NopCloser(bytes.NewReader(fileData7)))), + io.NopCloser(bytes.NewReader(fileData7))), virtualfs.StreamingFileWithModTimeFromReader( encodeElements(fileName8)[0], time.Time{}, - newBackupStreamReader( - serializationVersion, - io.NopCloser(bytes.NewReader(fileData8)))), + io.NopCloser(bytes.NewReader(fileData8))), }) counters[folderID5] = count diff --git a/src/internal/kopia/wrapper.go b/src/internal/kopia/wrapper.go index 24e0708b5..10523de6c 100644 --- a/src/internal/kopia/wrapper.go +++ b/src/internal/kopia/wrapper.go @@ -18,6 +18,7 @@ import ( "golang.org/x/exp/maps" "github.com/alcionai/corso/src/internal/common/prefixmatcher" + "github.com/alcionai/corso/src/internal/common/readers" "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/diagnostics" "github.com/alcionai/corso/src/internal/observe" @@ -36,8 +37,6 @@ const ( // possibly corresponding to who is making the backup. corsoHost = "corso-host" corsoUser = "corso" - - serializationVersion uint32 = 1 ) // common manifest tags @@ -447,7 +446,7 @@ func loadDirsAndItems( dir: dir, items: dirItems.items, counter: bcounter, - expectedVersion: serializationVersion, + expectedVersion: readers.DefaultSerializationVersion, } if err := mergeCol.addCollection(dirItems.dir.String(), dc); err != nil { diff --git a/src/internal/kopia/wrapper_test.go b/src/internal/kopia/wrapper_test.go index 452bc4ffa..77721fc7b 100644 --- a/src/internal/kopia/wrapper_test.go +++ b/src/internal/kopia/wrapper_test.go @@ -1268,7 +1268,10 @@ func (suite *KopiaIntegrationSuite) TestRestoreAfterCompressionChange() { ctx, []identity.Reasoner{r}, nil, - []data.BackupCollection{dc1, dc2}, + []data.BackupCollection{ + dataMock.NewVersionedBackupCollection(t, dc1), + dataMock.NewVersionedBackupCollection(t, dc2), + }, nil, nil, true, @@ -1577,12 +1580,15 @@ func (suite *KopiaSimpleRepoIntegrationSuite) SetupTest() { }) } - collections = append(collections, collection) + collections = append( + collections, + dataMock.NewVersionedBackupCollection(t, collection)) } r := NewReason(testTenant, testUser, path.ExchangeService, path.EmailCategory) - stats, deets, _, err := suite.w.ConsumeBackupCollections( + // Other tests check basic things about deets so not doing that again here. 
+ stats, _, _, err := suite.w.ConsumeBackupCollections( suite.ctx, []identity.Reasoner{r}, nil, @@ -1597,8 +1603,6 @@ func (suite *KopiaSimpleRepoIntegrationSuite) SetupTest() { require.Equal(t, stats.TotalDirectoryCount, expectedDirs) require.Equal(t, stats.IgnoredErrorCount, 0) require.False(t, stats.Incomplete) - // 6 file and 2 folder entries. - assert.Len(t, deets.Details().Entries, expectedFiles+2) suite.snapshotID = manifest.ID(stats.SnapshotID) } @@ -1629,7 +1633,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() { excludePrefix bool expectedCachedItems int expectedUncachedItems int - cols func() []data.BackupCollection + cols func(t *testing.T) []data.BackupCollection backupIDCheck require.ValueAssertionFunc restoreCheck assert.ErrorAssertionFunc }{ @@ -1638,7 +1642,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() { excludeItem: true, expectedCachedItems: len(suite.filesByPath) - 1, expectedUncachedItems: 0, - cols: func() []data.BackupCollection { + cols: func(t *testing.T) []data.BackupCollection { return nil }, backupIDCheck: require.NotEmpty, @@ -1650,7 +1654,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() { excludePrefix: true, expectedCachedItems: len(suite.filesByPath) - 1, expectedUncachedItems: 0, - cols: func() []data.BackupCollection { + cols: func(t *testing.T) []data.BackupCollection { return nil }, backupIDCheck: require.NotEmpty, @@ -1661,7 +1665,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() { // No snapshot should be made since there were no changes. expectedCachedItems: 0, expectedUncachedItems: 0, - cols: func() []data.BackupCollection { + cols: func(t *testing.T) []data.BackupCollection { return nil }, // Backup doesn't run. @@ -1671,7 +1675,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() { name: "NoExcludeItemWithChanges", expectedCachedItems: len(suite.filesByPath), expectedUncachedItems: 1, - cols: func() []data.BackupCollection { + cols: func(t *testing.T) []data.BackupCollection { c := exchMock.NewCollection( suite.testPath1, suite.testPath1, @@ -1679,7 +1683,9 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() { c.ColState = data.NotMovedState c.PrevPath = suite.testPath1 - return []data.BackupCollection{c} + return []data.BackupCollection{ + dataMock.NewVersionedBackupCollection(t, c), + } }, backupIDCheck: require.NotEmpty, restoreCheck: assert.NoError, @@ -1717,7 +1723,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() { Manifest: man, Reasons: []identity.Reasoner{r}, }), - test.cols(), + test.cols(t), excluded, nil, true, diff --git a/src/internal/m365/collection/drive/collection.go b/src/internal/m365/collection/drive/collection.go index 19de8e0dc..423c43930 100644 --- a/src/internal/m365/collection/drive/collection.go +++ b/src/internal/m365/collection/drive/collection.go @@ -584,15 +584,24 @@ func (oc *Collection) streamDriveItem( return progReader, nil }) - // We wrap the reader with a lazy reader so that the progress bar is only - // initialized if the file is read. Since we're not actually lazily reading - // data just use the eager item implementation. - oc.data <- data.NewUnindexedPrefetchedItem( + storeItem, err := data.NewUnindexedPrefetchedItem( metaReader, metaFileName+metaSuffix, // Metadata file should always use the latest time as // permissions change does not update mod time. time.Now()) + if err != nil { + errs.AddRecoverable(ctx, clues.Stack(err). 
+ WithClues(ctx). + Label(fault.LabelForceNoBackupCreation)) + + return + } + + // We wrap the reader with a lazy reader so that the progress bar is only + // initialized if the file is read. Since we're not actually lazily reading + // data just use the eager item implementation. + oc.data <- storeItem // Item read successfully, add to collection if isFile { diff --git a/src/internal/m365/collection/drive/collection_test.go b/src/internal/m365/collection/drive/collection_test.go index b99a2484e..2e2f85160 100644 --- a/src/internal/m365/collection/drive/collection_test.go +++ b/src/internal/m365/collection/drive/collection_test.go @@ -19,6 +19,7 @@ import ( "github.com/stretchr/testify/suite" "github.com/alcionai/corso/src/internal/common/ptr" + "github.com/alcionai/corso/src/internal/common/readers" "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/m365/collection/drive/metadata" metaTD "github.com/alcionai/corso/src/internal/m365/collection/drive/metadata/testdata" @@ -256,7 +257,7 @@ func (suite *CollectionUnitSuite) TestCollection() { mt := readItem.(data.ItemModTime) assert.Equal(t, now, mt.ModTime()) - readData, err := io.ReadAll(readItem.ToReader()) + rr, err := readers.NewVersionedRestoreReader(readItem.ToReader()) test.expectErr(t, err) if err != nil { @@ -267,13 +268,25 @@ func (suite *CollectionUnitSuite) TestCollection() { return } + assert.Equal(t, readers.DefaultSerializationVersion, rr.Format().Version) + assert.False(t, rr.Format().DelInFlight) + + readData, err := io.ReadAll(rr) + require.NoError(t, err, clues.ToCore(err)) + assert.Equal(t, stubItemContent, readData) readItemMeta := readItems[1] assert.Equal(t, stubItemID+metadata.MetaFileSuffix, readItemMeta.ID()) + rr, err = readers.NewVersionedRestoreReader(readItemMeta.ToReader()) + require.NoError(t, err, clues.ToCore(err)) + + assert.Equal(t, readers.DefaultSerializationVersion, rr.Format().Version) + assert.False(t, rr.Format().DelInFlight) + readMeta := metadata.Metadata{} - err = json.NewDecoder(readItemMeta.ToReader()).Decode(&readMeta) + err = json.NewDecoder(rr).Decode(&readMeta) require.NoError(t, err, clues.ToCore(err)) metaTD.AssertMetadataEqual(t, stubMeta, readMeta) @@ -485,12 +498,18 @@ func (suite *CollectionUnitSuite) TestCollectionPermissionBackupLatestModTime() for _, i := range readItems { if strings.HasSuffix(i.ID(), metadata.MetaFileSuffix) { - content, err := io.ReadAll(i.ToReader()) + rr, err := readers.NewVersionedRestoreReader(i.ToReader()) + require.NoError(t, err, clues.ToCore(err)) + + assert.Equal(t, readers.DefaultSerializationVersion, rr.Format().Version) + assert.False(t, rr.Format().DelInFlight) + + content, err := io.ReadAll(rr) require.NoError(t, err, clues.ToCore(err)) require.Equal(t, `{"filename":"Fake Item","permissionMode":1}`, string(content)) im, ok := i.(data.ItemModTime) - require.Equal(t, ok, true, "modtime interface") + require.True(t, ok, "modtime interface") require.Greater(t, im.ModTime(), mtime, "permissions time greater than mod time") } } diff --git a/src/internal/m365/collection/drive/collections_test.go b/src/internal/m365/collection/drive/collections_test.go index 1b50d074a..d0e33477f 100644 --- a/src/internal/m365/collection/drive/collections_test.go +++ b/src/internal/m365/collection/drive/collections_test.go @@ -16,6 +16,7 @@ import ( "github.com/alcionai/corso/src/internal/common/prefixmatcher" pmMock "github.com/alcionai/corso/src/internal/common/prefixmatcher/mock" "github.com/alcionai/corso/src/internal/data" + dataMock 
"github.com/alcionai/corso/src/internal/data/mock" "github.com/alcionai/corso/src/internal/m365/collection/drive/metadata" "github.com/alcionai/corso/src/internal/m365/graph" odConsts "github.com/alcionai/corso/src/internal/m365/service/onedrive/consts" @@ -1114,7 +1115,9 @@ func (suite *OneDriveCollectionsUnitSuite) TestDeserializeMetadata() { func(*support.ControllerOperationStatus) {}) require.NoError(t, err, clues.ToCore(err)) - cols = append(cols, data.NoFetchRestoreCollection{Collection: mc}) + cols = append(cols, dataMock.NewUnversionedRestoreCollection( + t, + data.NoFetchRestoreCollection{Collection: mc})) } deltas, paths, canUsePreviousBackup, err := deserializeMetadata(ctx, cols) @@ -2211,7 +2214,9 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { func(*support.ControllerOperationStatus) {}) assert.NoError(t, err, "creating metadata collection", clues.ToCore(err)) - prevMetadata := []data.RestoreCollection{data.NoFetchRestoreCollection{Collection: mc}} + prevMetadata := []data.RestoreCollection{ + dataMock.NewUnversionedRestoreCollection(t, data.NoFetchRestoreCollection{Collection: mc}), + } errs := fault.New(true) delList := prefixmatcher.NewStringSetBuilder() @@ -2238,7 +2243,9 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { deltas, paths, _, err := deserializeMetadata( ctx, []data.RestoreCollection{ - data.NoFetchRestoreCollection{Collection: baseCol}, + dataMock.NewUnversionedRestoreCollection( + t, + data.NoFetchRestoreCollection{Collection: baseCol}), }) if !assert.NoError(t, err, "deserializing metadata", clues.ToCore(err)) { continue diff --git a/src/internal/m365/collection/exchange/backup_test.go b/src/internal/m365/collection/exchange/backup_test.go index 4b046fd47..bb6aad27c 100644 --- a/src/internal/m365/collection/exchange/backup_test.go +++ b/src/internal/m365/collection/exchange/backup_test.go @@ -15,7 +15,9 @@ import ( inMock "github.com/alcionai/corso/src/internal/common/idname/mock" "github.com/alcionai/corso/src/internal/common/ptr" + "github.com/alcionai/corso/src/internal/common/readers" "github.com/alcionai/corso/src/internal/data" + dataMock "github.com/alcionai/corso/src/internal/data/mock" "github.com/alcionai/corso/src/internal/m365/graph" "github.com/alcionai/corso/src/internal/m365/support" "github.com/alcionai/corso/src/internal/operations/inject" @@ -322,7 +324,7 @@ func (suite *DataCollectionsUnitSuite) TestParseMetadataCollections() { require.NoError(t, err, clues.ToCore(err)) cdps, canUsePreviousBackup, err := ParseMetadataCollections(ctx, []data.RestoreCollection{ - data.NoFetchRestoreCollection{Collection: coll}, + dataMock.NewUnversionedRestoreCollection(t, data.NoFetchRestoreCollection{Collection: coll}), }) test.expectError(t, err, clues.ToCore(err)) @@ -591,7 +593,7 @@ func (suite *BackupIntgSuite) TestDelta() { require.NotNil(t, metadata, "collections contains a metadata collection") cdps, canUsePreviousBackup, err := ParseMetadataCollections(ctx, []data.RestoreCollection{ - data.NoFetchRestoreCollection{Collection: metadata}, + dataMock.NewUnversionedRestoreCollection(t, data.NoFetchRestoreCollection{Collection: metadata}), }) require.NoError(t, err, clues.ToCore(err)) assert.True(t, canUsePreviousBackup, "can use previous backup") @@ -666,7 +668,12 @@ func (suite *BackupIntgSuite) TestMailSerializationRegression() { for stream := range streamChannel { buf := &bytes.Buffer{} - read, err := buf.ReadFrom(stream.ToReader()) + rr, err := readers.NewVersionedRestoreReader(stream.ToReader()) + require.NoError(t, err, 
clues.ToCore(err)) + + assert.Equal(t, readers.DefaultSerializationVersion, rr.Format().Version) + + read, err := buf.ReadFrom(rr) assert.NoError(t, err, clues.ToCore(err)) assert.NotZero(t, read) @@ -744,7 +751,13 @@ func (suite *BackupIntgSuite) TestContactSerializationRegression() { for stream := range edc.Items(ctx, fault.New(true)) { buf := &bytes.Buffer{} - read, err := buf.ReadFrom(stream.ToReader()) + + rr, err := readers.NewVersionedRestoreReader(stream.ToReader()) + require.NoError(t, err, clues.ToCore(err)) + + assert.Equal(t, readers.DefaultSerializationVersion, rr.Format().Version) + + read, err := buf.ReadFrom(rr) assert.NoError(t, err, clues.ToCore(err)) assert.NotZero(t, read) @@ -878,7 +891,12 @@ func (suite *BackupIntgSuite) TestEventsSerializationRegression() { for item := range edc.Items(ctx, fault.New(true)) { buf := &bytes.Buffer{} - read, err := buf.ReadFrom(item.ToReader()) + rr, err := readers.NewVersionedRestoreReader(item.ToReader()) + require.NoError(t, err, clues.ToCore(err)) + + assert.Equal(t, readers.DefaultSerializationVersion, rr.Format().Version) + + read, err := buf.ReadFrom(rr) assert.NoError(t, err, clues.ToCore(err)) assert.NotZero(t, read) @@ -1198,7 +1216,9 @@ func checkMetadata( ) { catPaths, _, err := ParseMetadataCollections( ctx, - []data.RestoreCollection{data.NoFetchRestoreCollection{Collection: c}}) + []data.RestoreCollection{ + dataMock.NewUnversionedRestoreCollection(t, data.NoFetchRestoreCollection{Collection: c}), + }) if !assert.NoError(t, err, "getting metadata", clues.ToCore(err)) { return } diff --git a/src/internal/m365/collection/exchange/collection.go b/src/internal/m365/collection/exchange/collection.go index 30f28672d..71b9bb01b 100644 --- a/src/internal/m365/collection/exchange/collection.go +++ b/src/internal/m365/collection/exchange/collection.go @@ -278,10 +278,21 @@ func (col *prefetchCollection) streamItems( return } - stream <- data.NewPrefetchedItem( + item, err := data.NewPrefetchedItem( io.NopCloser(bytes.NewReader(itemData)), id, details.ItemInfo{Exchange: info}) + if err != nil { + el.AddRecoverable( + ctx, + clues.Stack(err). + WithClues(ctx). 
+ Label(fault.LabelForceNoBackupCreation)) + + return + } + + stream <- item atomic.AddInt64(&success, 1) atomic.AddInt64(&totalBytes, info.Size) diff --git a/src/internal/m365/collection/exchange/collection_test.go b/src/internal/m365/collection/exchange/collection_test.go index 5e1665faa..f373bd1a5 100644 --- a/src/internal/m365/collection/exchange/collection_test.go +++ b/src/internal/m365/collection/exchange/collection_test.go @@ -17,6 +17,7 @@ import ( "golang.org/x/exp/slices" "github.com/alcionai/corso/src/internal/common/ptr" + "github.com/alcionai/corso/src/internal/common/readers" "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/m365/collection/exchange/mock" "github.com/alcionai/corso/src/internal/m365/graph" @@ -55,13 +56,20 @@ func (suite *CollectionUnitSuite) TestPrefetchedItem_Reader() { suite.Run(test.name, func() { t := suite.T() - ed := data.NewPrefetchedItem( + ed, err := data.NewPrefetchedItem( io.NopCloser(bytes.NewReader(test.readData)), "itemID", details.ItemInfo{}) + require.NoError(t, err, clues.ToCore(err)) + + r, err := readers.NewVersionedRestoreReader(ed.ToReader()) + require.NoError(t, err, clues.ToCore(err)) + + assert.Equal(t, readers.DefaultSerializationVersion, r.Format().Version) + assert.False(t, r.Format().DelInFlight) buf := &bytes.Buffer{} - _, err := buf.ReadFrom(ed.ToReader()) + _, err = buf.ReadFrom(r) assert.NoError(t, err, "reading data: %v", clues.ToCore(err)) assert.Equal(t, test.readData, buf.Bytes(), "read data") assert.Equal(t, "itemID", ed.ID(), "item ID") @@ -493,11 +501,11 @@ func (suite *CollectionUnitSuite) TestLazyItem_NoRead_GetInfo_Errors() { time.Now(), fault.New(true)) - _, err := li.(data.ItemInfo).Info() + _, err := li.Info() assert.Error(suite.T(), err, "Info without reading data should error") } -func (suite *CollectionUnitSuite) TestLazyItem() { +func (suite *CollectionUnitSuite) TestLazyItem_GetDataErrors() { var ( parentPath = "inbox/private/silly cats" now = time.Now() @@ -505,44 +513,19 @@ func (suite *CollectionUnitSuite) TestLazyItem() { table := []struct { name string - modTime time.Time getErr error serializeErr error - expectModTime time.Time expectReadErrType error - dataCheck assert.ValueAssertionFunc - expectInfoErr bool - expectInfoErrType error }{ - { - name: "ReturnsEmptyReaderOnDeletedInFlight", - modTime: now, - getErr: graph.ErrDeletedInFlight, - dataCheck: assert.Empty, - expectInfoErr: true, - expectInfoErrType: data.ErrNotFound, - }, - { - name: "ReturnsValidReaderAndInfo", - modTime: now, - dataCheck: assert.NotEmpty, - expectModTime: now, - }, { name: "ReturnsErrorOnGenericGetError", - modTime: now, getErr: assert.AnError, expectReadErrType: assert.AnError, - dataCheck: assert.Empty, - expectInfoErr: true, }, { name: "ReturnsErrorOnGenericSerializeError", - modTime: now, serializeErr: assert.AnError, expectReadErrType: assert.AnError, - dataCheck: assert.Empty, - expectInfoErr: true, }, } @@ -575,47 +558,128 @@ func (suite *CollectionUnitSuite) TestLazyItem() { userID: "userID", itemID: "itemID", getter: getter, - modTime: test.modTime, + modTime: now, immutableIDs: false, parentPath: parentPath, }, "itemID", - test.modTime, + now, fault.New(true)) assert.False(t, li.Deleted(), "item shouldn't be marked deleted") - assert.Equal( - t, - test.modTime, - li.(data.ItemModTime).ModTime(), - "item mod time") + assert.Equal(t, now, li.ModTime(), "item mod time") - readData, err := io.ReadAll(li.ToReader()) - if test.expectReadErrType == nil { - assert.NoError(t, err, 
"reading item data: %v", clues.ToCore(err)) - } else { - assert.ErrorIs(t, err, test.expectReadErrType, "read error") - } - - test.dataCheck(t, readData, "read item data") - - info, err := li.(data.ItemInfo).Info() - - // Didn't expect an error getting info, it should be valid. - if !test.expectInfoErr { - assert.NoError(t, err, "getting item info: %v", clues.ToCore(err)) - assert.Equal(t, parentPath, info.Exchange.ParentPath) - assert.Equal(t, test.expectModTime, info.Modified()) - - return - } + _, err := readers.NewVersionedRestoreReader(li.ToReader()) + assert.ErrorIs(t, err, test.expectReadErrType) // Should get some form of error when trying to get info. + _, err = li.Info() assert.Error(t, err, "Info()") - - if test.expectInfoErrType != nil { - assert.ErrorIs(t, err, test.expectInfoErrType, "Info() error") - } }) } } + +func (suite *CollectionUnitSuite) TestLazyItem_ReturnsEmptyReaderOnDeletedInFlight() { + var ( + t = suite.T() + + parentPath = "inbox/private/silly cats" + now = time.Now() + ) + + ctx, flush := tester.NewContext(t) + defer flush() + + getter := &mock.ItemGetSerialize{GetErr: graph.ErrDeletedInFlight} + + li := data.NewLazyItem( + ctx, + &lazyItemGetter{ + userID: "userID", + itemID: "itemID", + getter: getter, + modTime: now, + immutableIDs: false, + parentPath: parentPath, + }, + "itemID", + now, + fault.New(true)) + + assert.False(t, li.Deleted(), "item shouldn't be marked deleted") + assert.Equal( + t, + now, + li.ModTime(), + "item mod time") + + r, err := readers.NewVersionedRestoreReader(li.ToReader()) + require.NoError(t, err, clues.ToCore(err)) + + assert.Equal(t, readers.DefaultSerializationVersion, r.Format().Version) + assert.True(t, r.Format().DelInFlight) + + readData, err := io.ReadAll(r) + assert.NoError(t, err, "reading item data: %v", clues.ToCore(err)) + + assert.Empty(t, readData, "read item data") + + _, err = li.Info() + assert.ErrorIs(t, err, data.ErrNotFound, "Info() error") +} + +func (suite *CollectionUnitSuite) TestLazyItem() { + var ( + t = suite.T() + + parentPath = "inbox/private/silly cats" + now = time.Now() + ) + + ctx, flush := tester.NewContext(t) + defer flush() + + // Exact data type doesn't really matter. 
+ testData := models.NewMessage() + testData.SetSubject(ptr.To("hello world")) + + getter := &mock.ItemGetSerialize{GetData: testData} + + li := data.NewLazyItem( + ctx, + &lazyItemGetter{ + userID: "userID", + itemID: "itemID", + getter: getter, + modTime: now, + immutableIDs: false, + parentPath: parentPath, + }, + "itemID", + now, + fault.New(true)) + + assert.False(t, li.Deleted(), "item shouldn't be marked deleted") + assert.Equal( + t, + now, + li.ModTime(), + "item mod time") + + r, err := readers.NewVersionedRestoreReader(li.ToReader()) + require.NoError(t, err, clues.ToCore(err)) + + assert.Equal(t, readers.DefaultSerializationVersion, r.Format().Version) + assert.False(t, r.Format().DelInFlight) + + readData, err := io.ReadAll(r) + assert.NoError(t, err, "reading item data: %v", clues.ToCore(err)) + + assert.NotEmpty(t, readData, "read item data") + + info, err := li.Info() + assert.NoError(t, err, "getting item info: %v", clues.ToCore(err)) + + assert.Equal(t, parentPath, info.Exchange.ParentPath) + assert.Equal(t, now, info.Modified()) +} diff --git a/src/internal/m365/collection/groups/collection.go b/src/internal/m365/collection/groups/collection.go index b8ff3b436..0a1ca7212 100644 --- a/src/internal/m365/collection/groups/collection.go +++ b/src/internal/m365/collection/groups/collection.go @@ -150,27 +150,47 @@ func (col *Collection) streamItems(ctx context.Context, errs *fault.Bus) { parentFolderID, id) if err != nil { - el.AddRecoverable(ctx, clues.Wrap(err, "writing channel message to serializer")) + el.AddRecoverable( + ctx, + clues.Wrap(err, "writing channel message to serializer").Label(fault.LabelForceNoBackupCreation)) + return } if err := writer.WriteObjectValue("", item); err != nil { - el.AddRecoverable(ctx, clues.Wrap(err, "writing channel message to serializer")) + el.AddRecoverable( + ctx, + clues.Wrap(err, "writing channel message to serializer").Label(fault.LabelForceNoBackupCreation)) + return } itemData, err := writer.GetSerializedContent() if err != nil { - el.AddRecoverable(ctx, clues.Wrap(err, "serializing channel message")) + el.AddRecoverable( + ctx, + clues.Wrap(err, "serializing channel message").Label(fault.LabelForceNoBackupCreation)) + return } info.ParentPath = col.LocationPath().String() - col.stream <- data.NewPrefetchedItem( + storeItem, err := data.NewPrefetchedItem( io.NopCloser(bytes.NewReader(itemData)), id, details.ItemInfo{Groups: info}) + if err != nil { + el.AddRecoverable( + ctx, + clues.Stack(err). + WithClues(ctx). 
+ Label(fault.LabelForceNoBackupCreation)) + + return + } + + col.stream <- storeItem atomic.AddInt64(&streamedItems, 1) atomic.AddInt64(&totalBytes, info.Size) diff --git a/src/internal/m365/collection/groups/collection_test.go b/src/internal/m365/collection/groups/collection_test.go index be4c52dc7..e0bf19d19 100644 --- a/src/internal/m365/collection/groups/collection_test.go +++ b/src/internal/m365/collection/groups/collection_test.go @@ -11,6 +11,7 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + "github.com/alcionai/corso/src/internal/common/readers" "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/m365/collection/groups/mock" "github.com/alcionai/corso/src/internal/m365/support" @@ -48,13 +49,20 @@ func (suite *CollectionUnitSuite) TestPrefetchedItem_Reader() { suite.Run(test.name, func() { t := suite.T() - ed := data.NewPrefetchedItem( + ed, err := data.NewPrefetchedItem( io.NopCloser(bytes.NewReader(test.readData)), "itemID", details.ItemInfo{}) + require.NoError(t, err, clues.ToCore(err)) + + r, err := readers.NewVersionedRestoreReader(ed.ToReader()) + require.NoError(t, err, clues.ToCore(err)) + + assert.Equal(t, readers.DefaultSerializationVersion, r.Format().Version) + assert.False(t, r.Format().DelInFlight) buf := &bytes.Buffer{} - _, err := buf.ReadFrom(ed.ToReader()) + _, err = buf.ReadFrom(r) assert.NoError(t, err, "reading data: %v", clues.ToCore(err)) assert.Equal(t, test.readData, buf.Bytes(), "read data") assert.Equal(t, "itemID", ed.ID(), "item ID") diff --git a/src/internal/m365/collection/site/collection.go b/src/internal/m365/collection/site/collection.go index 422ed4b2a..8af643d4b 100644 --- a/src/internal/m365/collection/site/collection.go +++ b/src/internal/m365/collection/site/collection.go @@ -211,11 +211,17 @@ func (sc *Collection) retrieveLists( metrics.Bytes += size metrics.Successes++ - sc.data <- data.NewPrefetchedItem( + + item, err := data.NewPrefetchedItem( io.NopCloser(bytes.NewReader(byteArray)), ptr.Val(lst.GetId()), details.ItemInfo{SharePoint: ListToSPInfo(lst, size)}) + if err != nil { + el.AddRecoverable(ctx, clues.Stack(err).WithClues(ctx).Label(fault.LabelForceNoBackupCreation)) + continue + } + sc.data <- item progress <- struct{}{} } } @@ -272,11 +278,17 @@ func (sc *Collection) retrievePages( if size > 0 { metrics.Bytes += size metrics.Successes++ - sc.data <- data.NewPrefetchedItem( + + item, err := data.NewPrefetchedItem( io.NopCloser(bytes.NewReader(byteArray)), ptr.Val(pg.GetId()), details.ItemInfo{SharePoint: pageToSPInfo(pg, root, size)}) + if err != nil { + el.AddRecoverable(ctx, clues.Stack(err).WithClues(ctx).Label(fault.LabelForceNoBackupCreation)) + continue + } + sc.data <- item progress <- struct{}{} } } diff --git a/src/internal/m365/collection/site/collection_test.go b/src/internal/m365/collection/site/collection_test.go index 3d0336217..5b53513f0 100644 --- a/src/internal/m365/collection/site/collection_test.go +++ b/src/internal/m365/collection/site/collection_test.go @@ -103,10 +103,11 @@ func (suite *SharePointCollectionSuite) TestCollection_Items() { byteArray, err := ow.GetSerializedContent() require.NoError(t, err, clues.ToCore(err)) - data := data.NewPrefetchedItem( + data, err := data.NewPrefetchedItem( io.NopCloser(bytes.NewReader(byteArray)), name, details.ItemInfo{SharePoint: ListToSPInfo(listing, int64(len(byteArray)))}) + require.NoError(t, err, clues.ToCore(err)) return data }, @@ -132,10 +133,11 @@ func (suite 
*SharePointCollectionSuite) TestCollection_Items() { page, err := betaAPI.CreatePageFromBytes(byteArray) require.NoError(t, err, clues.ToCore(err)) - data := data.NewPrefetchedItem( + data, err := data.NewPrefetchedItem( io.NopCloser(bytes.NewReader(byteArray)), itemName, details.ItemInfo{SharePoint: betaAPI.PageInfo(page, int64(len(byteArray)))}) + require.NoError(t, err, clues.ToCore(err)) return data }, @@ -194,10 +196,11 @@ func (suite *SharePointCollectionSuite) TestListCollection_Restore() { byteArray, err := service.Serialize(listing) require.NoError(t, err, clues.ToCore(err)) - listData := data.NewPrefetchedItem( + listData, err := data.NewPrefetchedItem( io.NopCloser(bytes.NewReader(byteArray)), testName, details.ItemInfo{SharePoint: ListToSPInfo(listing, int64(len(byteArray)))}) + require.NoError(t, err, clues.ToCore(err)) destName := testdata.DefaultRestoreConfig("").Location diff --git a/src/internal/m365/graph/metadata_collection.go b/src/internal/m365/graph/metadata_collection.go index 7e06faaba..1c3d1f766 100644 --- a/src/internal/m365/graph/metadata_collection.go +++ b/src/internal/m365/graph/metadata_collection.go @@ -57,11 +57,16 @@ func (mce MetadataCollectionEntry) toMetadataItem() (metadataItem, error) { return metadataItem{}, clues.Wrap(err, "serializing metadata") } + item, err := data.NewUnindexedPrefetchedItem( + io.NopCloser(buf), + mce.fileName, + time.Now()) + if err != nil { + return metadataItem{}, clues.Stack(err) + } + return metadataItem{ - Item: data.NewUnindexedPrefetchedItem( - io.NopCloser(buf), - mce.fileName, - time.Now()), + Item: item, size: int64(buf.Len()), }, nil } diff --git a/src/internal/m365/graph/metadata_collection_test.go b/src/internal/m365/graph/metadata_collection_test.go index 0423cdf40..ee9ca6b5c 100644 --- a/src/internal/m365/graph/metadata_collection_test.go +++ b/src/internal/m365/graph/metadata_collection_test.go @@ -13,6 +13,7 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + "github.com/alcionai/corso/src/internal/common/readers" "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/m365/support" "github.com/alcionai/corso/src/internal/tester" @@ -69,13 +70,16 @@ func (suite *MetadataCollectionUnitSuite) TestItems() { items := []metadataItem{} for i := 0; i < len(itemNames); i++ { + item, err := data.NewUnindexedPrefetchedItem( + io.NopCloser(bytes.NewReader(itemData[i])), + itemNames[i], + time.Time{}) + require.NoError(t, err, clues.ToCore(err)) + items = append( items, metadataItem{ - Item: data.NewUnindexedPrefetchedItem( - io.NopCloser(bytes.NewReader(itemData[i])), - itemNames[i], - time.Time{}), + Item: item, size: int64(len(itemData[i])), }) } @@ -103,7 +107,13 @@ func (suite *MetadataCollectionUnitSuite) TestItems() { for s := range c.Items(ctx, fault.New(true)) { gotNames = append(gotNames, s.ID()) - buf, err := io.ReadAll(s.ToReader()) + rr, err := readers.NewVersionedRestoreReader(s.ToReader()) + require.NoError(t, err, clues.ToCore(err)) + + assert.Equal(t, readers.DefaultSerializationVersion, rr.Format().Version) + assert.False(t, rr.Format().DelInFlight) + + buf, err := io.ReadAll(rr) if !assert.NoError(t, err, clues.ToCore(err)) { continue } @@ -204,11 +214,17 @@ func (suite *MetadataCollectionUnitSuite) TestMakeMetadataCollection() { for item := range col.Items(ctx, fault.New(true)) { assert.Equal(t, test.metadata.fileName, item.ID()) + rr, err := readers.NewVersionedRestoreReader(item.ToReader()) + require.NoError(t, err, clues.ToCore(err)) + 
+ assert.Equal(t, readers.DefaultSerializationVersion, rr.Format().Version) + assert.False(t, rr.Format().DelInFlight) + gotMap := map[string]string{} - decoder := json.NewDecoder(item.ToReader()) + decoder := json.NewDecoder(rr) itemCount++ - err := decoder.Decode(&gotMap) + err = decoder.Decode(&gotMap) if !assert.NoError(t, err, clues.ToCore(err)) { continue } diff --git a/src/internal/m365/helper_test.go b/src/internal/m365/helper_test.go index aee2c11bb..6f3907394 100644 --- a/src/internal/m365/helper_test.go +++ b/src/internal/m365/helper_test.go @@ -16,6 +16,7 @@ import ( "golang.org/x/exp/slices" "github.com/alcionai/corso/src/internal/common/ptr" + "github.com/alcionai/corso/src/internal/common/readers" "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/m365/collection/drive/metadata" odStub "github.com/alcionai/corso/src/internal/m365/service/onedrive/stub" @@ -573,7 +574,12 @@ func compareExchangeEmail( expected map[string][]byte, item data.Item, ) { - itemData, err := io.ReadAll(item.ToReader()) + rr := versionedReadWrapper(t, item.ToReader()) + if rr == nil { + return + } + + itemData, err := io.ReadAll(rr) if !assert.NoError(t, err, "reading collection item", item.ID(), clues.ToCore(err)) { return } @@ -600,7 +606,12 @@ func compareExchangeContact( expected map[string][]byte, item data.Item, ) { - itemData, err := io.ReadAll(item.ToReader()) + rr := versionedReadWrapper(t, item.ToReader()) + if rr == nil { + return + } + + itemData, err := io.ReadAll(rr) if !assert.NoError(t, err, "reading collection item", item.ID(), clues.ToCore(err)) { return } @@ -628,7 +639,12 @@ func compareExchangeEvent( expected map[string][]byte, item data.Item, ) { - itemData, err := io.ReadAll(item.ToReader()) + rr := versionedReadWrapper(t, item.ToReader()) + if rr == nil { + return + } + + itemData, err := io.ReadAll(rr) if !assert.NoError(t, err, "reading collection item", item.ID(), clues.ToCore(err)) { return } @@ -718,7 +734,12 @@ func compareDriveItem( return false } - buf, err := io.ReadAll(item.ToReader()) + rr := versionedReadWrapper(t, item.ToReader()) + if rr == nil { + return true + } + + buf, err := io.ReadAll(rr) if !assert.NoError(t, err, clues.ToCore(err)) { return true } @@ -850,6 +871,29 @@ func compareDriveItem( return true } +// versionedReaderWrapper strips out the version format header and checks it +// meets the current standard for all service types. If it doesn't meet the +// standard, returns nil. Else returns the versionedRestoreReader. +func versionedReadWrapper( + t *testing.T, + reader io.ReadCloser, +) io.ReadCloser { + rr, err := readers.NewVersionedRestoreReader(reader) + if !assert.NoError(t, err, clues.ToCore(err)) { + return nil + } + + if !assert.Equal(t, readers.DefaultSerializationVersion, rr.Format().Version) { + return nil + } + + if !assert.False(t, rr.Format().DelInFlight) { + return nil + } + + return rr +} + // compareItem compares the data returned by backup with the expected data. // Returns true if a comparison was done else false. Bool return is mostly used // to exclude OneDrive permissions for the root right now. 
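The compare helpers above, like the updated collection tests, all read item payloads through the same flow: wrap the item's reader, verify the format header, then consume the remaining bytes. Below is a minimal sketch of that consumer-side flow, assuming only the readers API already used in these diffs (NewVersionedRestoreReader, Format().Version, Format().DelInFlight, DefaultSerializationVersion); the readVersionedItem helper name and package are hypothetical, not part of the change itself.

```go
package example

import (
	"io"

	"github.com/alcionai/clues"

	"github.com/alcionai/corso/src/internal/common/readers"
	"github.com/alcionai/corso/src/internal/data"
)

// readVersionedItem strips the serialization format header from a backed-up
// item and returns the payload bytes. Items flagged as deleted in flight
// yield an empty payload and no error.
func readVersionedItem(item data.Item) ([]byte, error) {
	rr, err := readers.NewVersionedRestoreReader(item.ToReader())
	if err != nil {
		return nil, clues.Wrap(err, "reading format header")
	}
	defer rr.Close()

	// Streams written by current producers carry the default version.
	if rr.Format().Version != readers.DefaultSerializationVersion {
		return nil, clues.New("unexpected serialization version").
			With("read_version", rr.Format().Version)
	}

	// Deleted-in-flight items keep the header but have no body to read.
	if rr.Format().DelInFlight {
		return nil, nil
	}

	return io.ReadAll(rr)
}
```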
diff --git a/src/internal/m365/service/sharepoint/api/pages_test.go b/src/internal/m365/service/sharepoint/api/pages_test.go index f462805d2..792e3eda0 100644 --- a/src/internal/m365/service/sharepoint/api/pages_test.go +++ b/src/internal/m365/service/sharepoint/api/pages_test.go @@ -109,10 +109,11 @@ func (suite *SharePointPageSuite) TestRestoreSinglePage() { //nolint:lll byteArray := spMock.Page("Byte Test") - pageData := data.NewUnindexedPrefetchedItem( + pageData, err := data.NewUnindexedPrefetchedItem( io.NopCloser(bytes.NewReader(byteArray)), testName, time.Now()) + require.NoError(t, err, clues.ToCore(err)) info, err := api.RestoreSitePage( ctx, diff --git a/src/internal/streamstore/streamstore.go b/src/internal/streamstore/streamstore.go index eb5673196..9246a9325 100644 --- a/src/internal/streamstore/streamstore.go +++ b/src/internal/streamstore/streamstore.go @@ -182,12 +182,17 @@ func collect( return nil, clues.Wrap(err, "marshalling body").WithClues(ctx) } + item, err := data.NewUnindexedPrefetchedItem( + io.NopCloser(bytes.NewReader(bs)), + col.itemName, + time.Now()) + if err != nil { + return nil, clues.Stack(err).WithClues(ctx) + } + dc := streamCollection{ folderPath: p, - item: data.NewUnindexedPrefetchedItem( - io.NopCloser(bytes.NewReader(bs)), - col.itemName, - time.Now()), + item: item, } return &dc, nil From 5eaf95052dc6d9edc9d4e9637a5201f7162f46ef Mon Sep 17 00:00:00 2001 From: ashmrtn <3891298+ashmrtn@users.noreply.github.com> Date: Fri, 29 Sep 2023 14:26:15 -0700 Subject: [PATCH 16/26] Remove old serialization format code (#4410) Remove the now unused serialization format code that lived in the kopia package --- #### Does this PR need a docs update or release note? - [ ] :white_check_mark: Yes, it's included - [ ] :clock1: Yes, but in a later PR - [x] :no_entry: No #### Type of change - [ ] :sunflower: Feature - [ ] :bug: Bugfix - [ ] :world_map: Documentation - [ ] :robot: Supportability/Tests - [ ] :computer: CI/Deployment - [x] :broom: Tech Debt/Cleanup #### Issue(s) * closes #4328 #### Test Plan - [x] :muscle: Manual - [ ] :zap: Unit test - [ ] :green_heart: E2E --- src/internal/kopia/upload.go | 100 ----------------------- src/internal/kopia/upload_test.go | 130 ------------------------------ 2 files changed, 230 deletions(-) diff --git a/src/internal/kopia/upload.go b/src/internal/kopia/upload.go index c1e1351e5..6030ec838 100644 --- a/src/internal/kopia/upload.go +++ b/src/internal/kopia/upload.go @@ -1,13 +1,9 @@ package kopia import ( - "bytes" "context" "encoding/base64" - "encoding/binary" "errors" - "io" - "os" "runtime/trace" "strings" "sync" @@ -23,7 +19,6 @@ import ( "github.com/alcionai/corso/src/internal/common/prefixmatcher" "github.com/alcionai/corso/src/internal/common/ptr" - "github.com/alcionai/corso/src/internal/common/readers" "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/diagnostics" "github.com/alcionai/corso/src/internal/m365/graph" @@ -37,101 +32,6 @@ import ( const maxInflateTraversalDepth = 500 -var versionSize = readers.VersionFormatSize - -func newBackupStreamReader(version uint32, reader io.ReadCloser) *backupStreamReader { - buf := make([]byte, versionSize) - binary.BigEndian.PutUint32(buf, version) - bufReader := io.NopCloser(bytes.NewReader(buf)) - - return &backupStreamReader{ - readers: []io.ReadCloser{bufReader, reader}, - combined: io.NopCloser(io.MultiReader(bufReader, reader)), - } -} - -// backupStreamReader is a wrapper around the io.Reader that other Corso -// components return 
when backing up information. It injects a version number at -// the start of the data stream. Future versions of Corso may not need this if -// they use more complex serialization logic as serialization/version injection -// will be handled by other components. -type backupStreamReader struct { - readers []io.ReadCloser - combined io.ReadCloser -} - -func (rw *backupStreamReader) Read(p []byte) (n int, err error) { - if rw.combined == nil { - return 0, os.ErrClosed - } - - return rw.combined.Read(p) -} - -func (rw *backupStreamReader) Close() error { - if rw.combined == nil { - return nil - } - - rw.combined = nil - - var errs *clues.Err - - for _, r := range rw.readers { - err := r.Close() - if err != nil { - errs = clues.Stack(clues.Wrap(err, "closing reader"), errs) - } - } - - return errs.OrNil() -} - -// restoreStreamReader is a wrapper around the io.Reader that kopia returns when -// reading data from an item. It examines and strips off the version number of -// the restored data. Future versions of Corso may not need this if they use -// more complex serialization logic as version checking/deserialization will be -// handled by other components. A reader that returns a version error is no -// longer valid and should not be used once the version error is returned. -type restoreStreamReader struct { - io.ReadCloser - expectedVersion uint32 - readVersion bool -} - -func (rw *restoreStreamReader) checkVersion() error { - versionBuf := make([]byte, versionSize) - - for newlyRead := 0; newlyRead < versionSize; { - n, err := rw.ReadCloser.Read(versionBuf[newlyRead:]) - if err != nil { - return clues.Wrap(err, "reading data format version") - } - - newlyRead += n - } - - version := binary.BigEndian.Uint32(versionBuf) - - if version != rw.expectedVersion { - return clues.New("unexpected data format").With("read_version", version) - } - - return nil -} - -func (rw *restoreStreamReader) Read(p []byte) (n int, err error) { - if !rw.readVersion { - rw.readVersion = true - - if err := rw.checkVersion(); err != nil { - return 0, err - } - } - - return rw.ReadCloser.Read(p) -} - type itemDetails struct { infoer data.ItemInfo repoPath path.Path diff --git a/src/internal/kopia/upload_test.go b/src/internal/kopia/upload_test.go index c88da8af0..168d32617 100644 --- a/src/internal/kopia/upload_test.go +++ b/src/internal/kopia/upload_test.go @@ -14,7 +14,6 @@ import ( "github.com/kopia/kopia/repo/manifest" "github.com/kopia/kopia/snapshot" "github.com/kopia/kopia/snapshot/snapshotfs" - "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" @@ -220,135 +219,6 @@ func getDirEntriesForEntry( // --------------- // unit tests // --------------- -type limitedRangeReader struct { - readLen int - io.ReadCloser -} - -func (lrr *limitedRangeReader) Read(p []byte) (int, error) { - if len(p) == 0 { - // Not well specified behavior, defer to underlying reader. 
- return lrr.ReadCloser.Read(p) - } - - toRead := lrr.readLen - if len(p) < toRead { - toRead = len(p) - } - - return lrr.ReadCloser.Read(p[:toRead]) -} - -type VersionReadersUnitSuite struct { - tester.Suite -} - -func TestVersionReadersUnitSuite(t *testing.T) { - suite.Run(t, &VersionReadersUnitSuite{Suite: tester.NewUnitSuite(t)}) -} - -func (suite *VersionReadersUnitSuite) TestWriteAndRead() { - inputData := []byte("This is some data for the reader to test with") - table := []struct { - name string - readVersion uint32 - writeVersion uint32 - check assert.ErrorAssertionFunc - }{ - { - name: "SameVersionSucceeds", - readVersion: 42, - writeVersion: 42, - check: assert.NoError, - }, - { - name: "DifferentVersionsFail", - readVersion: 7, - writeVersion: 42, - check: assert.Error, - }, - } - - for _, test := range table { - suite.Run(test.name, func() { - t := suite.T() - - baseReader := bytes.NewReader(inputData) - - reversible := &restoreStreamReader{ - expectedVersion: test.readVersion, - ReadCloser: newBackupStreamReader( - test.writeVersion, - io.NopCloser(baseReader)), - } - - defer reversible.Close() - - allData, err := io.ReadAll(reversible) - test.check(t, err, clues.ToCore(err)) - - if err != nil { - return - } - - assert.Equal(t, inputData, allData) - }) - } -} - -func readAllInParts( - t *testing.T, - partLen int, - reader io.ReadCloser, -) ([]byte, int) { - res := []byte{} - read := 0 - tmp := make([]byte, partLen) - - for { - n, err := reader.Read(tmp) - if errors.Is(err, io.EOF) { - break - } - - require.NoError(t, err, clues.ToCore(err)) - - read += n - res = append(res, tmp[:n]...) - } - - return res, read -} - -func (suite *VersionReadersUnitSuite) TestWriteHandlesShortReads() { - t := suite.T() - inputData := []byte("This is some data for the reader to test with") - version := uint32(42) - baseReader := bytes.NewReader(inputData) - versioner := newBackupStreamReader(version, io.NopCloser(baseReader)) - expectedToWrite := len(inputData) + int(versionSize) - - // "Write" all the data. - versionedData, writtenLen := readAllInParts(t, 1, versioner) - assert.Equal(t, expectedToWrite, writtenLen) - - // Read all of the data back. - baseReader = bytes.NewReader(versionedData) - reader := &restoreStreamReader{ - expectedVersion: version, - // Be adversarial and only allow reads of length 1 from the byte reader. - ReadCloser: &limitedRangeReader{ - readLen: 1, - ReadCloser: io.NopCloser(baseReader), - }, - } - readData, readLen := readAllInParts(t, 1, reader) - // This reports the bytes read and returned to the user, excluding the version - // that is stripped off at the start. - assert.Equal(t, len(inputData), readLen) - assert.Equal(t, inputData, readData) -} - type CorsoProgressUnitSuite struct { tester.Suite targetFilePath path.Path From b15f8a6fcde07f984d4ca094fc7e551daa30cfe5 Mon Sep 17 00:00:00 2001 From: Keepers Date: Sat, 30 Sep 2023 10:56:13 -0600 Subject: [PATCH 17/26] add generic details command (#4352) centralizes details command processing in the cli --- #### Does this PR need a docs update or release note? 
- [x] :no_entry: No #### Type of change - [x] :broom: Tech Debt/Cleanup #### Issue(s) * #2025 --- src/cli/backup/backup.go | 63 ++- src/cli/backup/backup_test.go | 68 +++ src/cli/backup/exchange.go | 75 +-- src/cli/backup/exchange_test.go | 52 -- src/cli/backup/groups.go | 73 +-- src/cli/backup/helpers_test.go | 3 +- src/cli/backup/onedrive.go | 75 +-- src/cli/backup/onedrive_test.go | 52 -- src/cli/backup/sharepoint.go | 75 +-- src/cli/backup/sharepoint_test.go | 52 -- src/cli/repo/filesystem.go | 8 +- src/cli/repo/filesystem_e2e_test.go | 3 +- src/cli/repo/s3.go | 6 +- src/cli/repo/s3_e2e_test.go | 3 +- src/cli/restore/exchange_e2e_test.go | 3 +- src/cli/utils/utils.go | 8 +- src/cmd/longevity_test/longevity.go | 6 +- src/internal/m365/controller.go | 39 +- src/pkg/path/service_type.go | 6 +- src/pkg/repository/backups.go | 359 ++++++++++++ src/pkg/repository/data_providers.go | 88 +++ src/pkg/repository/exports.go | 40 ++ .../loadtest/repository_load_test.go | 3 +- src/pkg/repository/repository.go | 515 ++---------------- src/pkg/repository/repository_test.go | 52 +- src/pkg/repository/restores.go | 42 ++ src/pkg/services/m365/api/access.go | 68 +++ src/pkg/services/m365/api/access_test.go | 122 +++++ src/pkg/services/m365/api/client.go | 11 + 29 files changed, 1043 insertions(+), 927 deletions(-) create mode 100644 src/cli/backup/backup_test.go create mode 100644 src/pkg/repository/backups.go create mode 100644 src/pkg/repository/data_providers.go create mode 100644 src/pkg/repository/exports.go create mode 100644 src/pkg/repository/restores.go create mode 100644 src/pkg/services/m365/api/access.go create mode 100644 src/pkg/services/m365/api/access_test.go diff --git a/src/cli/backup/backup.go b/src/cli/backup/backup.go index 8b6808a01..5d885e059 100644 --- a/src/cli/backup/backup.go +++ b/src/cli/backup/backup.go @@ -16,6 +16,8 @@ import ( "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/m365/graph" "github.com/alcionai/corso/src/pkg/backup" + "github.com/alcionai/corso/src/pkg/backup/details" + "github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/logger" "github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/repository" @@ -163,7 +165,7 @@ func handleDeleteCmd(cmd *cobra.Command, args []string) error { // standard set of selector behavior that we want used in the cli var defaultSelectorConfig = selectors.Config{OnlyMatchItemNames: true} -func runBackups( +func genericCreateCommand( ctx context.Context, r repository.Repositoryer, serviceName string, @@ -332,6 +334,65 @@ func genericListCommand( return nil } +func genericDetailsCommand( + cmd *cobra.Command, + backupID string, + sel selectors.Selector, +) (*details.Details, error) { + ctx := cmd.Context() + + r, rdao, err := utils.GetAccountAndConnect(ctx, cmd, path.OneDriveService) + if err != nil { + return nil, clues.Stack(err) + } + + defer utils.CloseRepo(ctx, r) + + return genericDetailsCore( + ctx, + r, + backupID, + sel, + rdao.Opts) +} + +func genericDetailsCore( + ctx context.Context, + bg repository.BackupGetter, + backupID string, + sel selectors.Selector, + opts control.Options, +) (*details.Details, error) { + ctx = clues.Add(ctx, "backup_id", backupID) + + sel.Configure(selectors.Config{OnlyMatchItemNames: true}) + + d, _, errs := bg.GetBackupDetails(ctx, backupID) + // TODO: log/track recoverable errors + if errs.Failure() != nil { + if errors.Is(errs.Failure(), data.ErrNotFound) { + return nil, clues.New("no backup exists with the id " 
+ backupID) + } + + return nil, clues.Wrap(errs.Failure(), "Failed to get backup details in the repository") + } + + if opts.SkipReduce { + return d, nil + } + + d, err := sel.Reduce(ctx, d, errs) + if err != nil { + return nil, clues.Wrap(err, "filtering backup details to selection") + } + + return d, nil +} + +// --------------------------------------------------------------------------- +// helper funcs +// --------------------------------------------------------------------------- + func ifShow(flag string) bool { return strings.ToLower(strings.TrimSpace(flag)) == "show" } diff --git a/src/cli/backup/backup_test.go b/src/cli/backup/backup_test.go new file mode 100644 index 000000000..4d70702ae --- /dev/null +++ b/src/cli/backup/backup_test.go @@ -0,0 +1,68 @@ +package backup + +import ( + "testing" + + "github.com/alcionai/clues" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" + + "github.com/alcionai/corso/src/cli/utils/testdata" + "github.com/alcionai/corso/src/internal/tester" + dtd "github.com/alcionai/corso/src/pkg/backup/details/testdata" + "github.com/alcionai/corso/src/pkg/control" + "github.com/alcionai/corso/src/pkg/path" + "github.com/alcionai/corso/src/pkg/selectors" +) + +type BackupUnitSuite struct { + tester.Suite +} + +func TestBackupUnitSuite(t *testing.T) { + suite.Run(t, &BackupUnitSuite{Suite: tester.NewUnitSuite(t)}) +} + +func (suite *BackupUnitSuite) TestGenericDetailsCore() { + t := suite.T() + + expected := append( + append( + dtd.GetItemsForVersion( + t, + path.ExchangeService, + path.EmailCategory, + 0, + -1), + dtd.GetItemsForVersion( + t, + path.ExchangeService, + path.EventsCategory, + 0, + -1)...), + dtd.GetItemsForVersion( + t, + path.ExchangeService, + path.ContactsCategory, + 0, + -1)...) + + ctx, flush := tester.NewContext(t) + defer flush() + + bg := testdata.VersionedBackupGetter{ + Details: dtd.GetDetailsSetForVersion(t, 0), + } + + sel := selectors.NewExchangeBackup([]string{"user-id"}) + sel.Include(sel.AllData()) + + output, err := genericDetailsCore( + ctx, + bg, + "backup-ID", + sel.Selector, + control.DefaultOptions()) + assert.NoError(t, err, clues.ToCore(err)) + assert.ElementsMatch(t, expected, output.Entries) +} diff --git a/src/cli/backup/exchange.go b/src/cli/backup/exchange.go index d4f0d9534..d25cefff0 100644 --- a/src/cli/backup/exchange.go +++ b/src/cli/backup/exchange.go @@ -1,21 +1,15 @@ package backup import ( - "context" - "github.com/alcionai/clues" - "github.com/pkg/errors" "github.com/spf13/cobra" "github.com/spf13/pflag" "github.com/alcionai/corso/src/cli/flags" . 
"github.com/alcionai/corso/src/cli/print" "github.com/alcionai/corso/src/cli/utils" - "github.com/alcionai/corso/src/internal/data" - "github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/path" - "github.com/alcionai/corso/src/pkg/repository" "github.com/alcionai/corso/src/pkg/selectors" ) @@ -182,7 +176,7 @@ func createExchangeCmd(cmd *cobra.Command, args []string) error { selectorSet = append(selectorSet, discSel.Selector) } - return runBackups( + return genericCreateCommand( ctx, r, "Exchange", @@ -272,74 +266,31 @@ func detailsExchangeCmd(cmd *cobra.Command, args []string) error { return nil } + return runDetailsExchangeCmd(cmd) +} + +func runDetailsExchangeCmd(cmd *cobra.Command) error { ctx := cmd.Context() opts := utils.MakeExchangeOpts(cmd) - r, rdao, err := utils.GetAccountAndConnect(ctx, cmd, path.ExchangeService) + sel := utils.IncludeExchangeRestoreDataSelectors(opts) + sel.Configure(selectors.Config{OnlyMatchItemNames: true}) + utils.FilterExchangeRestoreInfoSelectors(sel, opts) + + ds, err := genericDetailsCommand(cmd, flags.BackupIDFV, sel.Selector) if err != nil { return Only(ctx, err) } - defer utils.CloseRepo(ctx, r) - - ds, err := runDetailsExchangeCmd( - ctx, - r, - flags.BackupIDFV, - opts, - rdao.Opts.SkipReduce) - if err != nil { - return Only(ctx, err) - } - - if len(ds.Entries) == 0 { + if len(ds.Entries) > 0 { + ds.PrintEntries(ctx) + } else { Info(ctx, selectors.ErrorNoMatchingItems) - return nil } - ds.PrintEntries(ctx) - return nil } -// runDetailsExchangeCmd actually performs the lookup in backup details. -// the fault.Errors return is always non-nil. Callers should check if -// errs.Failure() == nil. -func runDetailsExchangeCmd( - ctx context.Context, - r repository.BackupGetter, - backupID string, - opts utils.ExchangeOpts, - skipReduce bool, -) (*details.Details, error) { - if err := utils.ValidateExchangeRestoreFlags(backupID, opts); err != nil { - return nil, err - } - - ctx = clues.Add(ctx, "backup_id", backupID) - - d, _, errs := r.GetBackupDetails(ctx, backupID) - // TODO: log/track recoverable errors - if errs.Failure() != nil { - if errors.Is(errs.Failure(), data.ErrNotFound) { - return nil, clues.New("No backup exists with the id " + backupID) - } - - return nil, clues.Wrap(errs.Failure(), "Failed to get backup details in the repository") - } - - ctx = clues.Add(ctx, "details_entries", len(d.Entries)) - - if !skipReduce { - sel := utils.IncludeExchangeRestoreDataSelectors(opts) - sel.Configure(selectors.Config{OnlyMatchItemNames: true}) - utils.FilterExchangeRestoreInfoSelectors(sel, opts) - d = sel.Reduce(ctx, d, errs) - } - - return d, nil -} - // ------------------------------------------------------------------------------------------------ // backup delete // ------------------------------------------------------------------------------------------------ diff --git a/src/cli/backup/exchange_test.go b/src/cli/backup/exchange_test.go index 87b6f49c8..1ed8f718e 100644 --- a/src/cli/backup/exchange_test.go +++ b/src/cli/backup/exchange_test.go @@ -1,7 +1,6 @@ package backup import ( - "fmt" "strconv" "testing" @@ -15,10 +14,7 @@ import ( flagsTD "github.com/alcionai/corso/src/cli/flags/testdata" cliTD "github.com/alcionai/corso/src/cli/testdata" "github.com/alcionai/corso/src/cli/utils" - utilsTD "github.com/alcionai/corso/src/cli/utils/testdata" "github.com/alcionai/corso/src/internal/tester" - "github.com/alcionai/corso/src/internal/version" - dtd 
"github.com/alcionai/corso/src/pkg/backup/details/testdata" "github.com/alcionai/corso/src/pkg/control" ) @@ -368,51 +364,3 @@ func (suite *ExchangeUnitSuite) TestExchangeBackupCreateSelectors() { }) } } - -func (suite *ExchangeUnitSuite) TestExchangeBackupDetailsSelectors() { - for v := 0; v <= version.Backup; v++ { - suite.Run(fmt.Sprintf("version%d", v), func() { - for _, test := range utilsTD.ExchangeOptionDetailLookups { - suite.Run(test.Name, func() { - t := suite.T() - - ctx, flush := tester.NewContext(t) - defer flush() - - bg := utilsTD.VersionedBackupGetter{ - Details: dtd.GetDetailsSetForVersion(t, v), - } - - output, err := runDetailsExchangeCmd( - ctx, - bg, - "backup-ID", - test.Opts(t, v), - false) - assert.NoError(t, err, clues.ToCore(err)) - assert.ElementsMatch(t, test.Expected(t, v), output.Entries) - }) - } - }) - } -} - -func (suite *ExchangeUnitSuite) TestExchangeBackupDetailsSelectorsBadFormats() { - for _, test := range utilsTD.BadExchangeOptionsFormats { - suite.Run(test.Name, func() { - t := suite.T() - - ctx, flush := tester.NewContext(t) - defer flush() - - output, err := runDetailsExchangeCmd( - ctx, - test.BackupGetter, - "backup-ID", - test.Opts(t, version.Backup), - false) - assert.Error(t, err, clues.ToCore(err)) - assert.Empty(t, output) - }) - } -} diff --git a/src/cli/backup/groups.go b/src/cli/backup/groups.go index c8be220f3..d834e5f29 100644 --- a/src/cli/backup/groups.go +++ b/src/cli/backup/groups.go @@ -2,7 +2,6 @@ package backup import ( "context" - "errors" "fmt" "github.com/alcionai/clues" @@ -14,12 +13,9 @@ import ( . "github.com/alcionai/corso/src/cli/print" "github.com/alcionai/corso/src/cli/utils" "github.com/alcionai/corso/src/internal/common/idname" - "github.com/alcionai/corso/src/internal/data" - "github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/filters" "github.com/alcionai/corso/src/pkg/path" - "github.com/alcionai/corso/src/pkg/repository" "github.com/alcionai/corso/src/pkg/selectors" "github.com/alcionai/corso/src/pkg/services/m365" ) @@ -174,7 +170,7 @@ func createGroupsCmd(cmd *cobra.Command, args []string) error { selectorSet = append(selectorSet, discSel.Selector) } - return runBackups( + return genericCreateCommand( ctx, r, "Group", @@ -225,74 +221,31 @@ func detailsGroupsCmd(cmd *cobra.Command, args []string) error { return nil } + return runDetailsGroupsCmd(cmd) +} + +func runDetailsGroupsCmd(cmd *cobra.Command) error { ctx := cmd.Context() opts := utils.MakeGroupsOpts(cmd) - r, rdao, err := utils.GetAccountAndConnect(ctx, cmd, path.GroupsService) + sel := utils.IncludeGroupsRestoreDataSelectors(ctx, opts) + sel.Configure(selectors.Config{OnlyMatchItemNames: true}) + utils.FilterGroupsRestoreInfoSelectors(sel, opts) + + ds, err := genericDetailsCommand(cmd, flags.BackupIDFV, sel.Selector) if err != nil { return Only(ctx, err) } - defer utils.CloseRepo(ctx, r) - - ds, err := runDetailsGroupsCmd( - ctx, - r, - flags.BackupIDFV, - opts, - rdao.Opts.SkipReduce) - if err != nil { - return Only(ctx, err) - } - - if len(ds.Entries) == 0 { + if len(ds.Entries) > 0 { + ds.PrintEntries(ctx) + } else { Info(ctx, selectors.ErrorNoMatchingItems) - return nil } - ds.PrintEntries(ctx) - return nil } -// runDetailsGroupsCmd actually performs the lookup in backup details. -// the fault.Errors return is always non-nil. Callers should check if -// errs.Failure() == nil. 
-func runDetailsGroupsCmd( - ctx context.Context, - r repository.BackupGetter, - backupID string, - opts utils.GroupsOpts, - skipReduce bool, -) (*details.Details, error) { - if err := utils.ValidateGroupsRestoreFlags(backupID, opts); err != nil { - return nil, err - } - - ctx = clues.Add(ctx, "backup_id", backupID) - - d, _, errs := r.GetBackupDetails(ctx, backupID) - // TODO: log/track recoverable errors - if errs.Failure() != nil { - if errors.Is(errs.Failure(), data.ErrNotFound) { - return nil, clues.New("no backup exists with the id " + backupID) - } - - return nil, clues.Wrap(errs.Failure(), "Failed to get backup details in the repository") - } - - ctx = clues.Add(ctx, "details_entries", len(d.Entries)) - - if !skipReduce { - sel := utils.IncludeGroupsRestoreDataSelectors(ctx, opts) - sel.Configure(selectors.Config{OnlyMatchItemNames: true}) - utils.FilterGroupsRestoreInfoSelectors(sel, opts) - d = sel.Reduce(ctx, d, errs) - } - - return d, nil -} - // ------------------------------------------------------------------------------------------------ // backup delete // ------------------------------------------------------------------------------------------------ diff --git a/src/cli/backup/helpers_test.go b/src/cli/backup/helpers_test.go index 8589d70d0..14486f703 100644 --- a/src/cli/backup/helpers_test.go +++ b/src/cli/backup/helpers_test.go @@ -21,7 +21,6 @@ import ( "github.com/alcionai/corso/src/internal/tester/tconfig" "github.com/alcionai/corso/src/pkg/account" "github.com/alcionai/corso/src/pkg/control" - ctrlRepo "github.com/alcionai/corso/src/pkg/control/repository" "github.com/alcionai/corso/src/pkg/repository" "github.com/alcionai/corso/src/pkg/services/m365/api" "github.com/alcionai/corso/src/pkg/services/m365/api/mock" @@ -160,7 +159,7 @@ func prepM365Test( repository.NewRepoID) require.NoError(t, err, clues.ToCore(err)) - err = repo.Initialize(ctx, ctrlRepo.Retention{}) + err = repo.Initialize(ctx, repository.InitConfig{}) require.NoError(t, err, clues.ToCore(err)) return dependencies{ diff --git a/src/cli/backup/onedrive.go b/src/cli/backup/onedrive.go index fa8170f64..54d479b7c 100644 --- a/src/cli/backup/onedrive.go +++ b/src/cli/backup/onedrive.go @@ -1,21 +1,15 @@ package backup import ( - "context" - "github.com/alcionai/clues" - "github.com/pkg/errors" "github.com/spf13/cobra" "github.com/spf13/pflag" "github.com/alcionai/corso/src/cli/flags" . 
"github.com/alcionai/corso/src/cli/print" "github.com/alcionai/corso/src/cli/utils" - "github.com/alcionai/corso/src/internal/data" - "github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/path" - "github.com/alcionai/corso/src/pkg/repository" "github.com/alcionai/corso/src/pkg/selectors" ) @@ -162,7 +156,7 @@ func createOneDriveCmd(cmd *cobra.Command, args []string) error { selectorSet = append(selectorSet, discSel.Selector) } - return runBackups( + return genericCreateCommand( ctx, r, "OneDrive", @@ -229,74 +223,31 @@ func detailsOneDriveCmd(cmd *cobra.Command, args []string) error { return nil } + return runDetailsOneDriveCmd(cmd) +} + +func runDetailsOneDriveCmd(cmd *cobra.Command) error { ctx := cmd.Context() opts := utils.MakeOneDriveOpts(cmd) - r, rdao, err := utils.GetAccountAndConnect(ctx, cmd, path.OneDriveService) + sel := utils.IncludeOneDriveRestoreDataSelectors(opts) + sel.Configure(selectors.Config{OnlyMatchItemNames: true}) + utils.FilterOneDriveRestoreInfoSelectors(sel, opts) + + ds, err := genericDetailsCommand(cmd, flags.BackupIDFV, sel.Selector) if err != nil { return Only(ctx, err) } - defer utils.CloseRepo(ctx, r) - - ds, err := runDetailsOneDriveCmd( - ctx, - r, - flags.BackupIDFV, - opts, - rdao.Opts.SkipReduce) - if err != nil { - return Only(ctx, err) - } - - if len(ds.Entries) == 0 { + if len(ds.Entries) > 0 { + ds.PrintEntries(ctx) + } else { Info(ctx, selectors.ErrorNoMatchingItems) - return nil } - ds.PrintEntries(ctx) - return nil } -// runDetailsOneDriveCmd actually performs the lookup in backup details. -// the fault.Errors return is always non-nil. Callers should check if -// errs.Failure() == nil. -func runDetailsOneDriveCmd( - ctx context.Context, - r repository.BackupGetter, - backupID string, - opts utils.OneDriveOpts, - skipReduce bool, -) (*details.Details, error) { - if err := utils.ValidateOneDriveRestoreFlags(backupID, opts); err != nil { - return nil, err - } - - ctx = clues.Add(ctx, "backup_id", backupID) - - d, _, errs := r.GetBackupDetails(ctx, backupID) - // TODO: log/track recoverable errors - if errs.Failure() != nil { - if errors.Is(errs.Failure(), data.ErrNotFound) { - return nil, clues.New("no backup exists with the id " + backupID) - } - - return nil, clues.Wrap(errs.Failure(), "Failed to get backup details in the repository") - } - - ctx = clues.Add(ctx, "details_entries", len(d.Entries)) - - if !skipReduce { - sel := utils.IncludeOneDriveRestoreDataSelectors(opts) - sel.Configure(selectors.Config{OnlyMatchItemNames: true}) - utils.FilterOneDriveRestoreInfoSelectors(sel, opts) - d = sel.Reduce(ctx, d, errs) - } - - return d, nil -} - // `corso backup delete onedrive [...]` func oneDriveDeleteCmd() *cobra.Command { return &cobra.Command{ diff --git a/src/cli/backup/onedrive_test.go b/src/cli/backup/onedrive_test.go index 6d0e0b202..8c1bb583f 100644 --- a/src/cli/backup/onedrive_test.go +++ b/src/cli/backup/onedrive_test.go @@ -1,7 +1,6 @@ package backup import ( - "fmt" "testing" "github.com/alcionai/clues" @@ -14,10 +13,7 @@ import ( flagsTD "github.com/alcionai/corso/src/cli/flags/testdata" cliTD "github.com/alcionai/corso/src/cli/testdata" "github.com/alcionai/corso/src/cli/utils" - utilsTD "github.com/alcionai/corso/src/cli/utils/testdata" "github.com/alcionai/corso/src/internal/tester" - "github.com/alcionai/corso/src/internal/version" - dtd "github.com/alcionai/corso/src/pkg/backup/details/testdata" "github.com/alcionai/corso/src/pkg/control" ) @@ -227,51 
+223,3 @@ func (suite *OneDriveUnitSuite) TestValidateOneDriveBackupCreateFlags() { }) } } - -func (suite *OneDriveUnitSuite) TestOneDriveBackupDetailsSelectors() { - for v := 0; v <= version.Backup; v++ { - suite.Run(fmt.Sprintf("version%d", v), func() { - for _, test := range utilsTD.OneDriveOptionDetailLookups { - suite.Run(test.Name, func() { - t := suite.T() - - ctx, flush := tester.NewContext(t) - defer flush() - - bg := utilsTD.VersionedBackupGetter{ - Details: dtd.GetDetailsSetForVersion(t, v), - } - - output, err := runDetailsOneDriveCmd( - ctx, - bg, - "backup-ID", - test.Opts(t, v), - false) - assert.NoError(t, err, clues.ToCore(err)) - assert.ElementsMatch(t, test.Expected(t, v), output.Entries) - }) - } - }) - } -} - -func (suite *OneDriveUnitSuite) TestOneDriveBackupDetailsSelectorsBadFormats() { - for _, test := range utilsTD.BadOneDriveOptionsFormats { - suite.Run(test.Name, func() { - t := suite.T() - - ctx, flush := tester.NewContext(t) - defer flush() - - output, err := runDetailsOneDriveCmd( - ctx, - test.BackupGetter, - "backup-ID", - test.Opts(t, version.Backup), - false) - assert.Error(t, err, clues.ToCore(err)) - assert.Empty(t, output) - }) - } -} diff --git a/src/cli/backup/sharepoint.go b/src/cli/backup/sharepoint.go index 507a4a6d2..bfeefaa54 100644 --- a/src/cli/backup/sharepoint.go +++ b/src/cli/backup/sharepoint.go @@ -4,7 +4,6 @@ import ( "context" "github.com/alcionai/clues" - "github.com/pkg/errors" "github.com/spf13/cobra" "github.com/spf13/pflag" "golang.org/x/exp/slices" @@ -13,12 +12,9 @@ import ( . "github.com/alcionai/corso/src/cli/print" "github.com/alcionai/corso/src/cli/utils" "github.com/alcionai/corso/src/internal/common/idname" - "github.com/alcionai/corso/src/internal/data" - "github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/filters" "github.com/alcionai/corso/src/pkg/path" - "github.com/alcionai/corso/src/pkg/repository" "github.com/alcionai/corso/src/pkg/selectors" "github.com/alcionai/corso/src/pkg/services/m365" ) @@ -179,7 +175,7 @@ func createSharePointCmd(cmd *cobra.Command, args []string) error { selectorSet = append(selectorSet, discSel.Selector) } - return runBackups( + return genericCreateCommand( ctx, r, "SharePoint", @@ -303,7 +299,7 @@ func deleteSharePointCmd(cmd *cobra.Command, args []string) error { // backup details // ------------------------------------------------------------------------------------------------ -// `corso backup details onedrive [...]` +// `corso backup details SharePoint [...]` func sharePointDetailsCmd() *cobra.Command { return &cobra.Command{ Use: sharePointServiceCommand, @@ -324,70 +320,27 @@ func detailsSharePointCmd(cmd *cobra.Command, args []string) error { return nil } + return runDetailsSharePointCmd(cmd) +} + +func runDetailsSharePointCmd(cmd *cobra.Command) error { ctx := cmd.Context() opts := utils.MakeSharePointOpts(cmd) - r, rdao, err := utils.GetAccountAndConnect(ctx, cmd, path.SharePointService) + sel := utils.IncludeSharePointRestoreDataSelectors(ctx, opts) + sel.Configure(selectors.Config{OnlyMatchItemNames: true}) + utils.FilterSharePointRestoreInfoSelectors(sel, opts) + + ds, err := genericDetailsCommand(cmd, flags.BackupIDFV, sel.Selector) if err != nil { return Only(ctx, err) } - defer utils.CloseRepo(ctx, r) - - ds, err := runDetailsSharePointCmd( - ctx, - r, - flags.BackupIDFV, - opts, - rdao.Opts.SkipReduce) - if err != nil { - return Only(ctx, err) - } - - if len(ds.Entries) == 0 { + if len(ds.Entries) 
> 0 { + ds.PrintEntries(ctx) + } else { Info(ctx, selectors.ErrorNoMatchingItems) - return nil } - ds.PrintEntries(ctx) - return nil } - -// runDetailsSharePointCmd actually performs the lookup in backup details. -// the fault.Errors return is always non-nil. Callers should check if -// errs.Failure() == nil. -func runDetailsSharePointCmd( - ctx context.Context, - r repository.BackupGetter, - backupID string, - opts utils.SharePointOpts, - skipReduce bool, -) (*details.Details, error) { - if err := utils.ValidateSharePointRestoreFlags(backupID, opts); err != nil { - return nil, err - } - - ctx = clues.Add(ctx, "backup_id", backupID) - - d, _, errs := r.GetBackupDetails(ctx, backupID) - // TODO: log/track recoverable errors - if errs.Failure() != nil { - if errors.Is(errs.Failure(), data.ErrNotFound) { - return nil, clues.New("no backup exists with the id " + backupID) - } - - return nil, clues.Wrap(errs.Failure(), "Failed to get backup details in the repository") - } - - ctx = clues.Add(ctx, "details_entries", len(d.Entries)) - - if !skipReduce { - sel := utils.IncludeSharePointRestoreDataSelectors(ctx, opts) - sel.Configure(selectors.Config{OnlyMatchItemNames: true}) - utils.FilterSharePointRestoreInfoSelectors(sel, opts) - d = sel.Reduce(ctx, d, errs) - } - - return d, nil -} diff --git a/src/cli/backup/sharepoint_test.go b/src/cli/backup/sharepoint_test.go index f09bbe878..f018a7ba2 100644 --- a/src/cli/backup/sharepoint_test.go +++ b/src/cli/backup/sharepoint_test.go @@ -1,7 +1,6 @@ package backup import ( - "fmt" "strings" "testing" @@ -15,11 +14,8 @@ import ( flagsTD "github.com/alcionai/corso/src/cli/flags/testdata" cliTD "github.com/alcionai/corso/src/cli/testdata" "github.com/alcionai/corso/src/cli/utils" - utilsTD "github.com/alcionai/corso/src/cli/utils/testdata" "github.com/alcionai/corso/src/internal/common/idname" "github.com/alcionai/corso/src/internal/tester" - "github.com/alcionai/corso/src/internal/version" - dtd "github.com/alcionai/corso/src/pkg/backup/details/testdata" "github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/selectors" ) @@ -339,51 +335,3 @@ func (suite *SharePointUnitSuite) TestSharePointBackupCreateSelectors() { }) } } - -func (suite *SharePointUnitSuite) TestSharePointBackupDetailsSelectors() { - for v := 0; v <= version.Backup; v++ { - suite.Run(fmt.Sprintf("version%d", v), func() { - for _, test := range utilsTD.SharePointOptionDetailLookups { - suite.Run(test.Name, func() { - t := suite.T() - - ctx, flush := tester.NewContext(t) - defer flush() - - bg := utilsTD.VersionedBackupGetter{ - Details: dtd.GetDetailsSetForVersion(t, v), - } - - output, err := runDetailsSharePointCmd( - ctx, - bg, - "backup-ID", - test.Opts(t, v), - false) - assert.NoError(t, err, clues.ToCore(err)) - assert.ElementsMatch(t, test.Expected(t, v), output.Entries) - }) - } - }) - } -} - -func (suite *SharePointUnitSuite) TestSharePointBackupDetailsSelectorsBadFormats() { - for _, test := range utilsTD.BadSharePointOptionsFormats { - suite.Run(test.Name, func() { - t := suite.T() - - ctx, flush := tester.NewContext(t) - defer flush() - - output, err := runDetailsSharePointCmd( - ctx, - test.BackupGetter, - "backup-ID", - test.Opts(t, version.Backup), - false) - assert.Error(t, err, clues.ToCore(err)) - assert.Empty(t, output) - }) - } -} diff --git a/src/cli/repo/filesystem.go b/src/cli/repo/filesystem.go index 40e8b05a5..f6a495f21 100644 --- a/src/cli/repo/filesystem.go +++ b/src/cli/repo/filesystem.go @@ -85,7 +85,7 @@ func initFilesystemCmd(cmd 
*cobra.Command, args []string) error { opt := utils.ControlWithConfig(cfg) // Retention is not supported for filesystem repos. - retention := ctrlRepo.Retention{} + retentionOpts := ctrlRepo.Retention{} // SendStartCorsoEvent uses distict ID as tenant ID because repoID is still not generated utils.SendStartCorsoEvent( @@ -116,7 +116,9 @@ func initFilesystemCmd(cmd *cobra.Command, args []string) error { return Only(ctx, clues.Wrap(err, "Failed to construct the repository controller")) } - if err = r.Initialize(ctx, retention); err != nil { + ric := repository.InitConfig{RetentionOpts: retentionOpts} + + if err = r.Initialize(ctx, ric); err != nil { if flags.SucceedIfExistsFV && errors.Is(err, repository.ErrorRepoAlreadyExists) { return nil } @@ -207,7 +209,7 @@ func connectFilesystemCmd(cmd *cobra.Command, args []string) error { return Only(ctx, clues.Wrap(err, "Failed to create a repository controller")) } - if err := r.Connect(ctx); err != nil { + if err := r.Connect(ctx, repository.ConnConfig{}); err != nil { return Only(ctx, clues.Stack(ErrConnectingRepo, err)) } diff --git a/src/cli/repo/filesystem_e2e_test.go b/src/cli/repo/filesystem_e2e_test.go index d7a28047c..6a76e3fa8 100644 --- a/src/cli/repo/filesystem_e2e_test.go +++ b/src/cli/repo/filesystem_e2e_test.go @@ -16,7 +16,6 @@ import ( "github.com/alcionai/corso/src/internal/tester/tconfig" "github.com/alcionai/corso/src/pkg/account" "github.com/alcionai/corso/src/pkg/control" - ctrlRepo "github.com/alcionai/corso/src/pkg/control/repository" "github.com/alcionai/corso/src/pkg/repository" "github.com/alcionai/corso/src/pkg/storage" storeTD "github.com/alcionai/corso/src/pkg/storage/testdata" @@ -138,7 +137,7 @@ func (suite *FilesystemE2ESuite) TestConnectFilesystemCmd() { repository.NewRepoID) require.NoError(t, err, clues.ToCore(err)) - err = r.Initialize(ctx, ctrlRepo.Retention{}) + err = r.Initialize(ctx, repository.InitConfig{}) require.NoError(t, err, clues.ToCore(err)) // then test it diff --git a/src/cli/repo/s3.go b/src/cli/repo/s3.go index 253be0dfe..3fb0833e6 100644 --- a/src/cli/repo/s3.go +++ b/src/cli/repo/s3.go @@ -138,7 +138,9 @@ func initS3Cmd(cmd *cobra.Command, args []string) error { return Only(ctx, clues.Wrap(err, "Failed to construct the repository controller")) } - if err = r.Initialize(ctx, retentionOpts); err != nil { + ric := repository.InitConfig{RetentionOpts: retentionOpts} + + if err = r.Initialize(ctx, ric); err != nil { if flags.SucceedIfExistsFV && errors.Is(err, repository.ErrorRepoAlreadyExists) { return nil } @@ -221,7 +223,7 @@ func connectS3Cmd(cmd *cobra.Command, args []string) error { return Only(ctx, clues.Wrap(err, "Failed to create a repository controller")) } - if err := r.Connect(ctx); err != nil { + if err := r.Connect(ctx, repository.ConnConfig{}); err != nil { return Only(ctx, clues.Stack(ErrConnectingRepo, err)) } diff --git a/src/cli/repo/s3_e2e_test.go b/src/cli/repo/s3_e2e_test.go index a9f50e277..e1d65c4f3 100644 --- a/src/cli/repo/s3_e2e_test.go +++ b/src/cli/repo/s3_e2e_test.go @@ -18,7 +18,6 @@ import ( "github.com/alcionai/corso/src/internal/tester/tconfig" "github.com/alcionai/corso/src/pkg/account" "github.com/alcionai/corso/src/pkg/control" - ctrlRepo "github.com/alcionai/corso/src/pkg/control/repository" "github.com/alcionai/corso/src/pkg/repository" "github.com/alcionai/corso/src/pkg/storage" storeTD "github.com/alcionai/corso/src/pkg/storage/testdata" @@ -214,7 +213,7 @@ func (suite *S3E2ESuite) TestConnectS3Cmd() { repository.NewRepoID) require.NoError(t, err, 
clues.ToCore(err)) - err = r.Initialize(ctx, ctrlRepo.Retention{}) + err = r.Initialize(ctx, repository.InitConfig{}) require.NoError(t, err, clues.ToCore(err)) // then test it diff --git a/src/cli/restore/exchange_e2e_test.go b/src/cli/restore/exchange_e2e_test.go index 36c6b8973..67896831b 100644 --- a/src/cli/restore/exchange_e2e_test.go +++ b/src/cli/restore/exchange_e2e_test.go @@ -20,7 +20,6 @@ import ( "github.com/alcionai/corso/src/internal/tester/tconfig" "github.com/alcionai/corso/src/pkg/account" "github.com/alcionai/corso/src/pkg/control" - ctrlRepo "github.com/alcionai/corso/src/pkg/control/repository" "github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/repository" "github.com/alcionai/corso/src/pkg/selectors" @@ -92,7 +91,7 @@ func (suite *RestoreExchangeE2ESuite) SetupSuite() { repository.NewRepoID) require.NoError(t, err, clues.ToCore(err)) - err = suite.repo.Initialize(ctx, ctrlRepo.Retention{}) + err = suite.repo.Initialize(ctx, repository.InitConfig{Service: path.ExchangeService}) require.NoError(t, err, clues.ToCore(err)) suite.backupOps = make(map[path.CategoryType]operations.BackupOperation) diff --git a/src/cli/utils/utils.go b/src/cli/utils/utils.go index 2a4e3de34..2ee9ac090 100644 --- a/src/cli/utils/utils.go +++ b/src/cli/utils/utils.go @@ -78,16 +78,10 @@ func GetAccountAndConnectWithOverrides( return nil, RepoDetailsAndOpts{}, clues.Wrap(err, "creating a repository controller") } - if err := r.Connect(ctx); err != nil { + if err := r.Connect(ctx, repository.ConnConfig{Service: pst}); err != nil { return nil, RepoDetailsAndOpts{}, clues.Wrap(err, "connecting to the "+cfg.Storage.Provider.String()+" repository") } - // this initializes our graph api client configurations, - // including control options such as concurency limitations. - if _, err := r.ConnectToM365(ctx, pst); err != nil { - return nil, RepoDetailsAndOpts{}, clues.Wrap(err, "connecting to m365") - } - rdao := RepoDetailsAndOpts{ Repo: cfg, Opts: opts, diff --git a/src/cmd/longevity_test/longevity.go b/src/cmd/longevity_test/longevity.go index ec7862191..c8e9f29cf 100644 --- a/src/cmd/longevity_test/longevity.go +++ b/src/cmd/longevity_test/longevity.go @@ -72,7 +72,7 @@ func deleteBackups( // Only supported for S3 repos currently. func pitrListBackups( ctx context.Context, - service path.ServiceType, + pst path.ServiceType, pitr time.Time, backupIDs []string, ) error { @@ -113,14 +113,14 @@ func pitrListBackups( return clues.Wrap(err, "creating a repo") } - err = r.Connect(ctx) + err = r.Connect(ctx, repository.ConnConfig{Service: pst}) if err != nil { return clues.Wrap(err, "connecting to the repository") } defer r.Close(ctx) - backups, err := r.BackupsByTag(ctx, store.Service(service)) + backups, err := r.BackupsByTag(ctx, store.Service(pst)) if err != nil { return clues.Wrap(err, "listing backups").WithClues(ctx) } diff --git a/src/internal/m365/controller.go b/src/internal/m365/controller.go index 0bd15ee17..3e0b3af93 100644 --- a/src/internal/m365/controller.go +++ b/src/internal/m365/controller.go @@ -79,20 +79,29 @@ func NewController( return nil, clues.Wrap(err, "creating api client").WithClues(ctx) } - rc := resource.UnknownResource + var rCli *resourceClient - switch pst { - case path.ExchangeService, path.OneDriveService: - rc = resource.Users - case path.GroupsService: - rc = resource.Groups - case path.SharePointService: - rc = resource.Sites - } + // no failure for unknown service. 
+	// In that case we create a controller that doesn't attempt to look up any resource
+	// data. This case helps avoid unnecessary service calls when the end user is running
+	// repo init and connect commands via the CLI. All other callers should be expected
+	// to pass in a known service, or else expect downstream failures.
+	if pst != path.UnknownService {
+		rc := resource.UnknownResource
-	rCli, err := getResourceClient(rc, ac)
-	if err != nil {
-		return nil, clues.Wrap(err, "creating resource client").WithClues(ctx)
+		switch pst {
+		case path.ExchangeService, path.OneDriveService:
+			rc = resource.Users
+		case path.GroupsService:
+			rc = resource.Groups
+		case path.SharePointService:
+			rc = resource.Sites
+		}
+
+		rCli, err = getResourceClient(rc, ac)
+		if err != nil {
+			return nil, clues.Wrap(err, "creating resource client").WithClues(ctx)
+		}
 	}
 
 	ctrl := Controller{
@@ -110,6 +119,10 @@ func NewController(
 	return &ctrl, nil
 }
 
+func (ctrl *Controller) VerifyAccess(ctx context.Context) error {
+	return ctrl.AC.Access().GetToken(ctx)
+}
+
 // ---------------------------------------------------------------------------
 // Processing Status
 // ---------------------------------------------------------------------------
@@ -195,7 +208,7 @@ func getResourceClient(rc resource.Category, ac api.Client) (*resourceClient, er
 	case resource.Groups:
 		return &resourceClient{enum: rc, getter: ac.Groups()}, nil
 	default:
-		return nil, clues.New("unrecognized owner resource enum").With("resource_enum", rc)
+		return nil, clues.New("unrecognized owner resource type").With("resource_enum", rc)
 	}
 }
diff --git a/src/pkg/path/service_type.go b/src/pkg/path/service_type.go
index a4a99ec6c..14847ce35 100644
--- a/src/pkg/path/service_type.go
+++ b/src/pkg/path/service_type.go
@@ -15,9 +15,9 @@ var ErrorUnknownService = clues.New("unknown service string")
 // Metadata services are not considered valid service types for resource paths
 // though they can be used for metadata paths.
 //
-// The order of the enums below can be changed, but the string representation of
-// each enum must remain the same or migration code needs to be added to handle
-// changes to the string format.
+// The string representation of each enum _must remain the same_. In case of
+// changes to those values, we'll need migration code to handle transitions
+// across states, else we'll get marshalling/unmarshalling errors.
type ServiceType int //go:generate stringer -type=ServiceType -linecomment diff --git a/src/pkg/repository/backups.go b/src/pkg/repository/backups.go new file mode 100644 index 000000000..a4314eb01 --- /dev/null +++ b/src/pkg/repository/backups.go @@ -0,0 +1,359 @@ +package repository + +import ( + "context" + + "github.com/alcionai/clues" + "github.com/kopia/kopia/repo/manifest" + "github.com/pkg/errors" + + "github.com/alcionai/corso/src/internal/common/idname" + "github.com/alcionai/corso/src/internal/data" + "github.com/alcionai/corso/src/internal/kopia" + "github.com/alcionai/corso/src/internal/m365/collection/drive/metadata" + "github.com/alcionai/corso/src/internal/model" + "github.com/alcionai/corso/src/internal/operations" + "github.com/alcionai/corso/src/internal/streamstore" + "github.com/alcionai/corso/src/internal/version" + "github.com/alcionai/corso/src/pkg/backup" + "github.com/alcionai/corso/src/pkg/backup/details" + "github.com/alcionai/corso/src/pkg/fault" + "github.com/alcionai/corso/src/pkg/selectors" + "github.com/alcionai/corso/src/pkg/store" +) + +// BackupGetter deals with retrieving metadata about backups from the +// repository. +type BackupGetter interface { + Backup(ctx context.Context, id string) (*backup.Backup, error) + Backups(ctx context.Context, ids []string) ([]*backup.Backup, *fault.Bus) + BackupsByTag(ctx context.Context, fs ...store.FilterOption) ([]*backup.Backup, error) + GetBackupDetails( + ctx context.Context, + backupID string, + ) (*details.Details, *backup.Backup, *fault.Bus) + GetBackupErrors( + ctx context.Context, + backupID string, + ) (*fault.Errors, *backup.Backup, *fault.Bus) +} + +type Backuper interface { + NewBackup( + ctx context.Context, + self selectors.Selector, + ) (operations.BackupOperation, error) + NewBackupWithLookup( + ctx context.Context, + self selectors.Selector, + ins idname.Cacher, + ) (operations.BackupOperation, error) + DeleteBackups( + ctx context.Context, + failOnMissing bool, + ids ...string, + ) error +} + +// NewBackup generates a BackupOperation runner. +func (r repository) NewBackup( + ctx context.Context, + sel selectors.Selector, +) (operations.BackupOperation, error) { + return r.NewBackupWithLookup(ctx, sel, nil) +} + +// NewBackupWithLookup generates a BackupOperation runner. +// ownerIDToName and ownerNameToID are optional populations, in case the caller has +// already generated those values. +func (r repository) NewBackupWithLookup( + ctx context.Context, + sel selectors.Selector, + ins idname.Cacher, +) (operations.BackupOperation, error) { + err := r.ConnectDataProvider(ctx, sel.PathService()) + if err != nil { + return operations.BackupOperation{}, clues.Wrap(err, "connecting to m365") + } + + ownerID, ownerName, err := r.Provider.PopulateProtectedResourceIDAndName(ctx, sel.DiscreteOwner, ins) + if err != nil { + return operations.BackupOperation{}, clues.Wrap(err, "resolving resource owner details") + } + + // TODO: retrieve display name from gc + sel = sel.SetDiscreteOwnerIDName(ownerID, ownerName) + + return operations.NewBackupOperation( + ctx, + r.Opts, + r.dataLayer, + store.NewWrapper(r.modelStore), + r.Provider, + r.Account, + sel, + sel, // the selector acts as an IDNamer for its discrete resource owner. + r.Bus) +} + +// Backup retrieves a backup by id. +func (r repository) Backup(ctx context.Context, id string) (*backup.Backup, error) { + return getBackup(ctx, id, store.NewWrapper(r.modelStore)) +} + +// getBackup handles the processing for Backup. 
+func getBackup( + ctx context.Context, + id string, + sw store.BackupGetter, +) (*backup.Backup, error) { + b, err := sw.GetBackup(ctx, model.StableID(id)) + if err != nil { + return nil, errWrapper(err) + } + + return b, nil +} + +// Backups lists backups by ID. Returns as many backups as possible with +// errors for the backups it was unable to retrieve. +func (r repository) Backups(ctx context.Context, ids []string) ([]*backup.Backup, *fault.Bus) { + var ( + bups []*backup.Backup + errs = fault.New(false) + sw = store.NewWrapper(r.modelStore) + ) + + for _, id := range ids { + ictx := clues.Add(ctx, "backup_id", id) + + b, err := sw.GetBackup(ictx, model.StableID(id)) + if err != nil { + errs.AddRecoverable(ctx, errWrapper(err)) + } + + bups = append(bups, b) + } + + return bups, errs +} + +// BackupsByTag lists all backups in a repository that contain all the tags +// specified. +func (r repository) BackupsByTag(ctx context.Context, fs ...store.FilterOption) ([]*backup.Backup, error) { + sw := store.NewWrapper(r.modelStore) + return backupsByTag(ctx, sw, fs) +} + +// backupsByTag returns all backups matching all provided tags. +// +// TODO(ashmrtn): This exists mostly for testing, but we could restructure the +// code in this file so there's a more elegant mocking solution. +func backupsByTag( + ctx context.Context, + sw store.BackupWrapper, + fs []store.FilterOption, +) ([]*backup.Backup, error) { + bs, err := sw.GetBackups(ctx, fs...) + if err != nil { + return nil, clues.Stack(err) + } + + // Filter out assist backup bases as they're considered incomplete and we + // haven't been displaying them before now. + res := make([]*backup.Backup, 0, len(bs)) + + for _, b := range bs { + if t := b.Tags[model.BackupTypeTag]; t != model.AssistBackup { + res = append(res, b) + } + } + + return res, nil +} + +// BackupDetails returns the specified backup.Details +func (r repository) GetBackupDetails( + ctx context.Context, + backupID string, +) (*details.Details, *backup.Backup, *fault.Bus) { + errs := fault.New(false) + + deets, bup, err := getBackupDetails( + ctx, + backupID, + r.Account.ID(), + r.dataLayer, + store.NewWrapper(r.modelStore), + errs) + + return deets, bup, errs.Fail(err) +} + +// getBackupDetails handles the processing for GetBackupDetails. +func getBackupDetails( + ctx context.Context, + backupID, tenantID string, + kw *kopia.Wrapper, + sw store.BackupGetter, + errs *fault.Bus, +) (*details.Details, *backup.Backup, error) { + b, err := sw.GetBackup(ctx, model.StableID(backupID)) + if err != nil { + return nil, nil, errWrapper(err) + } + + ssid := b.StreamStoreID + if len(ssid) == 0 { + ssid = b.DetailsID + } + + if len(ssid) == 0 { + return nil, b, clues.New("no streamstore id in backup").WithClues(ctx) + } + + var ( + sstore = streamstore.NewStreamer(kw, tenantID, b.Selector.PathService()) + deets details.Details + ) + + err = sstore.Read( + ctx, + ssid, + streamstore.DetailsReader(details.UnmarshalTo(&deets)), + errs) + if err != nil { + return nil, nil, err + } + + // Retroactively fill in isMeta information for items in older + // backup versions without that info + // version.Restore2 introduces the IsMeta flag, so only v1 needs a check. 
+ if b.Version >= version.OneDrive1DataAndMetaFiles && b.Version < version.OneDrive3IsMetaMarker { + for _, d := range deets.Entries { + if d.OneDrive != nil { + d.OneDrive.IsMeta = metadata.HasMetaSuffix(d.RepoRef) + } + } + } + + deets.DetailsModel = deets.FilterMetaFiles() + + return &deets, b, nil +} + +// BackupErrors returns the specified backup's fault.Errors +func (r repository) GetBackupErrors( + ctx context.Context, + backupID string, +) (*fault.Errors, *backup.Backup, *fault.Bus) { + errs := fault.New(false) + + fe, bup, err := getBackupErrors( + ctx, + backupID, + r.Account.ID(), + r.dataLayer, + store.NewWrapper(r.modelStore), + errs) + + return fe, bup, errs.Fail(err) +} + +// getBackupErrors handles the processing for GetBackupErrors. +func getBackupErrors( + ctx context.Context, + backupID, tenantID string, + kw *kopia.Wrapper, + sw store.BackupGetter, + errs *fault.Bus, +) (*fault.Errors, *backup.Backup, error) { + b, err := sw.GetBackup(ctx, model.StableID(backupID)) + if err != nil { + return nil, nil, errWrapper(err) + } + + ssid := b.StreamStoreID + if len(ssid) == 0 { + return nil, b, clues.New("missing streamstore id in backup").WithClues(ctx) + } + + var ( + sstore = streamstore.NewStreamer(kw, tenantID, b.Selector.PathService()) + fe fault.Errors + ) + + err = sstore.Read( + ctx, + ssid, + streamstore.FaultErrorsReader(fault.UnmarshalErrorsTo(&fe)), + errs) + if err != nil { + return nil, nil, err + } + + return &fe, b, nil +} + +// DeleteBackups removes the backups from both the model store and the backup +// storage. +// +// If failOnMissing is true then returns an error if a backup model can't be +// found. Otherwise ignores missing backup models. +// +// Missing models or snapshots during the actual deletion do not cause errors. +// +// All backups are delete as an atomic unit so any failures will result in no +// deletions. +func (r repository) DeleteBackups( + ctx context.Context, + failOnMissing bool, + ids ...string, +) error { + return deleteBackups(ctx, store.NewWrapper(r.modelStore), failOnMissing, ids...) +} + +// deleteBackup handles the processing for backup deletion. +func deleteBackups( + ctx context.Context, + sw store.BackupGetterModelDeleter, + failOnMissing bool, + ids ...string, +) error { + // Although we haven't explicitly stated it, snapshots are technically + // manifests in kopia. This means we can use the same delete API to remove + // them and backup models. Deleting all of them together gives us both + // atomicity guarantees (around when data will be flushed) and helps reduce + // the number of manifest blobs that kopia will create. + var toDelete []manifest.ID + + for _, id := range ids { + b, err := sw.GetBackup(ctx, model.StableID(id)) + if err != nil { + if !failOnMissing && errors.Is(err, data.ErrNotFound) { + continue + } + + return clues.Stack(errWrapper(err)). + WithClues(ctx). + With("delete_backup_id", id) + } + + toDelete = append(toDelete, b.ModelStoreID) + + if len(b.SnapshotID) > 0 { + toDelete = append(toDelete, manifest.ID(b.SnapshotID)) + } + + ssid := b.StreamStoreID + if len(ssid) == 0 { + ssid = b.DetailsID + } + + if len(ssid) > 0 { + toDelete = append(toDelete, manifest.ID(ssid)) + } + } + + return sw.DeleteWithModelStoreIDs(ctx, toDelete...) 
+}
diff --git a/src/pkg/repository/data_providers.go b/src/pkg/repository/data_providers.go
new file mode 100644
index 000000000..f95f85b56
--- /dev/null
+++ b/src/pkg/repository/data_providers.go
@@ -0,0 +1,88 @@
+package repository
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/alcionai/clues"
+
+	"github.com/alcionai/corso/src/internal/m365"
+	"github.com/alcionai/corso/src/internal/observe"
+	"github.com/alcionai/corso/src/internal/operations/inject"
+	"github.com/alcionai/corso/src/pkg/account"
+	"github.com/alcionai/corso/src/pkg/path"
+)
+
+type DataProvider interface {
+	inject.BackupProducer
+	inject.ExportConsumer
+	inject.RestoreConsumer
+
+	VerifyAccess(ctx context.Context) error
+}
+
+type DataProviderConnector interface {
+	// ConnectDataProvider initializes configurations
+	// and establishes the client connection with the
+	// data provider for this operation.
+	ConnectDataProvider(
+		ctx context.Context,
+		pst path.ServiceType,
+	) error
+}
+
+func (r *repository) ConnectDataProvider(
+	ctx context.Context,
+	pst path.ServiceType,
+) error {
+	var (
+		provider DataProvider
+		err      error
+	)
+
+	switch r.Account.Provider {
+	case account.ProviderM365:
+		provider, err = connectToM365(ctx, *r, pst)
+	default:
+		err = clues.New("unrecognized provider").WithClues(ctx)
+	}
+
+	if err != nil {
+		return clues.Wrap(err, "connecting data provider")
+	}
+
+	if err := provider.VerifyAccess(ctx); err != nil {
+		return clues.Wrap(err, fmt.Sprintf("verifying %s account connection", r.Account.Provider))
+	}
+
+	r.Provider = provider
+
+	return nil
+}
+
+func connectToM365(
+	ctx context.Context,
+	r repository,
+	pst path.ServiceType,
+) (*m365.Controller, error) {
+	if r.Provider != nil {
+		ctrl, ok := r.Provider.(*m365.Controller)
+		if !ok {
+			// if the provider is initialized to a non-m365 controller, we should not
+			// attempt to connect to m365 afterward.
+			return nil, clues.New("Attempted to connect to multiple data providers")
+		}
+
+		return ctrl, nil
+	}
+
+	progressBar := observe.MessageWithCompletion(ctx, "Connecting to M365")
+	defer close(progressBar)
+
+	ctrl, err := m365.NewController(ctx, r.Account, pst, r.Opts)
+	if err != nil {
+		return nil, clues.Wrap(err, "creating m365 client controller")
+	}
+
+	return ctrl, nil
+}
diff --git a/src/pkg/repository/exports.go b/src/pkg/repository/exports.go
new file mode 100644
index 000000000..2aadd2bfb
--- /dev/null
+++ b/src/pkg/repository/exports.go
@@ -0,0 +1,40 @@
+package repository
+
+import (
+	"context"
+
+	"github.com/alcionai/corso/src/internal/model"
+	"github.com/alcionai/corso/src/internal/operations"
+	"github.com/alcionai/corso/src/pkg/control"
+	"github.com/alcionai/corso/src/pkg/selectors"
+	"github.com/alcionai/corso/src/pkg/store"
+)
+
+type Exporter interface {
+	NewExport(
+		ctx context.Context,
+		backupID string,
+		sel selectors.Selector,
+		exportCfg control.ExportConfig,
+	) (operations.ExportOperation, error)
+}
+
+// NewExport generates an ExportOperation runner.
+func (r repository) NewExport( + ctx context.Context, + backupID string, + sel selectors.Selector, + exportCfg control.ExportConfig, +) (operations.ExportOperation, error) { + return operations.NewExportOperation( + ctx, + r.Opts, + r.dataLayer, + store.NewWrapper(r.modelStore), + r.Provider, + r.Account, + model.StableID(backupID), + sel, + exportCfg, + r.Bus) +} diff --git a/src/pkg/repository/loadtest/repository_load_test.go b/src/pkg/repository/loadtest/repository_load_test.go index 9cfc38ffc..d65cb21e1 100644 --- a/src/pkg/repository/loadtest/repository_load_test.go +++ b/src/pkg/repository/loadtest/repository_load_test.go @@ -21,7 +21,6 @@ import ( "github.com/alcionai/corso/src/pkg/backup" "github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/control" - ctrlRepo "github.com/alcionai/corso/src/pkg/control/repository" ctrlTD "github.com/alcionai/corso/src/pkg/control/testdata" "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/path" @@ -111,7 +110,7 @@ func initM365Repo(t *testing.T) ( repository.NewRepoID) require.NoError(t, err, clues.ToCore(err)) - err = r.Initialize(ctx, ctrlRepo.Retention{}) + err = r.Initialize(ctx, repository.InitConfig{}) require.NoError(t, err, clues.ToCore(err)) return ctx, r, ac, st diff --git a/src/pkg/repository/repository.go b/src/pkg/repository/repository.go index 277eb1bba..539c3c3b1 100644 --- a/src/pkg/repository/repository.go +++ b/src/pkg/repository/repository.go @@ -6,31 +6,20 @@ import ( "github.com/alcionai/clues" "github.com/google/uuid" - "github.com/kopia/kopia/repo/manifest" "github.com/pkg/errors" "github.com/alcionai/corso/src/internal/common/crash" - "github.com/alcionai/corso/src/internal/common/idname" "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/events" "github.com/alcionai/corso/src/internal/kopia" - "github.com/alcionai/corso/src/internal/m365" - "github.com/alcionai/corso/src/internal/m365/collection/drive/metadata" "github.com/alcionai/corso/src/internal/model" "github.com/alcionai/corso/src/internal/observe" "github.com/alcionai/corso/src/internal/operations" - "github.com/alcionai/corso/src/internal/streamstore" - "github.com/alcionai/corso/src/internal/version" "github.com/alcionai/corso/src/pkg/account" - "github.com/alcionai/corso/src/pkg/backup" - "github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/control" ctrlRepo "github.com/alcionai/corso/src/pkg/control/repository" - "github.com/alcionai/corso/src/pkg/count" - "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/logger" "github.com/alcionai/corso/src/pkg/path" - "github.com/alcionai/corso/src/pkg/selectors" "github.com/alcionai/corso/src/pkg/storage" "github.com/alcionai/corso/src/pkg/store" ) @@ -42,48 +31,24 @@ var ( ErrorBackupNotFound = clues.New("no backup exists with that id") ) -// BackupGetter deals with retrieving metadata about backups from the -// repository. 
-type BackupGetter interface { - Backup(ctx context.Context, id string) (*backup.Backup, error) - Backups(ctx context.Context, ids []string) ([]*backup.Backup, *fault.Bus) - BackupsByTag(ctx context.Context, fs ...store.FilterOption) ([]*backup.Backup, error) - GetBackupDetails( - ctx context.Context, - backupID string, - ) (*details.Details, *backup.Backup, *fault.Bus) - GetBackupErrors( - ctx context.Context, - backupID string, - ) (*fault.Errors, *backup.Backup, *fault.Bus) -} - type Repositoryer interface { - Initialize(ctx context.Context, retentionOpts ctrlRepo.Retention) error - Connect(ctx context.Context) error + Backuper + BackupGetter + Restorer + Exporter + DataProviderConnector + + Initialize( + ctx context.Context, + cfg InitConfig, + ) error + Connect( + ctx context.Context, + cfg ConnConfig, + ) error GetID() string Close(context.Context) error - NewBackup( - ctx context.Context, - self selectors.Selector, - ) (operations.BackupOperation, error) - NewBackupWithLookup( - ctx context.Context, - self selectors.Selector, - ins idname.Cacher, - ) (operations.BackupOperation, error) - NewRestore( - ctx context.Context, - backupID string, - sel selectors.Selector, - restoreCfg control.RestoreConfig, - ) (operations.RestoreOperation, error) - NewExport( - ctx context.Context, - backupID string, - sel selectors.Selector, - exportCfg control.ExportConfig, - ) (operations.ExportOperation, error) + NewMaintenance( ctx context.Context, mOpts ctrlRepo.Maintenance, @@ -92,14 +57,6 @@ type Repositoryer interface { ctx context.Context, rcOpts ctrlRepo.Retention, ) (operations.RetentionConfigOperation, error) - DeleteBackups(ctx context.Context, failOnMissing bool, ids ...string) error - BackupGetter - // ConnectToM365 establishes graph api connections - // and initializes api client configurations. - ConnectToM365( - ctx context.Context, - pst path.ServiceType, - ) (*m365.Controller, error) } // Repository contains storage provider information. 
@@ -108,9 +65,10 @@ type repository struct { CreatedAt time.Time Version string // in case of future breaking changes - Account account.Account // the user's m365 account connection details - Storage storage.Storage // the storage provider details and configuration - Opts control.Options + Account account.Account // the user's m365 account connection details + Storage storage.Storage // the storage provider details and configuration + Opts control.Options + Provider DataProvider // the client controller used for external user data CRUD Bus events.Eventer dataLayer *kopia.Wrapper @@ -125,7 +83,7 @@ func (r repository) GetID() string { func New( ctx context.Context, acct account.Account, - s storage.Storage, + st storage.Storage, opts control.Options, configFileRepoID string, ) (repo *repository, err error) { @@ -133,16 +91,16 @@ func New( ctx, "acct_provider", acct.Provider.String(), "acct_id", clues.Hide(acct.ID()), - "storage_provider", s.Provider.String()) + "storage_provider", st.Provider.String()) - bus, err := events.NewBus(ctx, s, acct.ID(), opts) + bus, err := events.NewBus(ctx, st, acct.ID(), opts) if err != nil { return nil, clues.Wrap(err, "constructing event bus").WithClues(ctx) } repoID := configFileRepoID if len(configFileRepoID) == 0 { - repoID = newRepoID(s) + repoID = newRepoID(st) } bus.SetRepoID(repoID) @@ -151,7 +109,7 @@ func New( ID: repoID, Version: "v1", Account: acct, - Storage: s, + Storage: st, Bus: bus, Opts: opts, } @@ -163,17 +121,22 @@ func New( return &r, nil } +type InitConfig struct { + // tells the data provider which service to + // use for its connection pattern. Optional. + Service path.ServiceType + RetentionOpts ctrlRepo.Retention +} + // Initialize will: -// - validate the m365 account & secrets // - connect to the m365 account to ensure communication capability -// - validate the provider config & secrets // - initialize the kopia repo with the provider and retention parameters // - update maintenance retention parameters as needed // - store the configuration details // - connect to the provider func (r *repository) Initialize( ctx context.Context, - retentionOpts ctrlRepo.Retention, + cfg InitConfig, ) (err error) { ctx = clues.Add( ctx, @@ -187,10 +150,14 @@ func (r *repository) Initialize( } }() + if err := r.ConnectDataProvider(ctx, cfg.Service); err != nil { + return clues.Stack(err) + } + observe.Message(ctx, "Initializing repository") kopiaRef := kopia.NewConn(r.Storage) - if err := kopiaRef.Initialize(ctx, r.Opts.Repo, retentionOpts); err != nil { + if err := kopiaRef.Initialize(ctx, r.Opts.Repo, cfg.RetentionOpts); err != nil { // replace common internal errors so that sdk users can check results with errors.Is() if errors.Is(err, kopia.ErrorRepoAlreadyExists) { return clues.Stack(ErrorRepoAlreadyExists, err).WithClues(ctx) @@ -221,12 +188,21 @@ func (r *repository) Initialize( return nil } +type ConnConfig struct { + // tells the data provider which service to + // use for its connection pattern. Leave empty + // to skip the provider connection. 
+ Service path.ServiceType +} + // Connect will: -// - validate the m365 account details -// - connect to the m365 account to ensure communication capability +// - connect to the m365 account // - connect to the provider storage // - return the connected repository -func (r *repository) Connect(ctx context.Context) (err error) { +func (r *repository) Connect( + ctx context.Context, + cfg ConnConfig, +) (err error) { ctx = clues.Add( ctx, "acct_provider", r.Account.Provider.String(), @@ -239,6 +215,10 @@ func (r *repository) Connect(ctx context.Context) (err error) { } }() + if err := r.ConnectDataProvider(ctx, cfg.Service); err != nil { + return clues.Stack(err) + } + observe.Message(ctx, "Connecting to repository") kopiaRef := kopia.NewConn(r.Storage) @@ -297,98 +277,6 @@ func (r *repository) Close(ctx context.Context) error { return nil } -// NewBackup generates a BackupOperation runner. -func (r repository) NewBackup( - ctx context.Context, - sel selectors.Selector, -) (operations.BackupOperation, error) { - return r.NewBackupWithLookup(ctx, sel, nil) -} - -// NewBackupWithLookup generates a BackupOperation runner. -// ownerIDToName and ownerNameToID are optional populations, in case the caller has -// already generated those values. -func (r repository) NewBackupWithLookup( - ctx context.Context, - sel selectors.Selector, - ins idname.Cacher, -) (operations.BackupOperation, error) { - ctrl, err := connectToM365(ctx, sel.PathService(), r.Account, r.Opts) - if err != nil { - return operations.BackupOperation{}, clues.Wrap(err, "connecting to m365") - } - - ownerID, ownerName, err := ctrl.PopulateProtectedResourceIDAndName(ctx, sel.DiscreteOwner, ins) - if err != nil { - return operations.BackupOperation{}, clues.Wrap(err, "resolving resource owner details") - } - - // TODO: retrieve display name from gc - sel = sel.SetDiscreteOwnerIDName(ownerID, ownerName) - - return operations.NewBackupOperation( - ctx, - r.Opts, - r.dataLayer, - store.NewWrapper(r.modelStore), - ctrl, - r.Account, - sel, - sel, // the selector acts as an IDNamer for its discrete resource owner. - r.Bus) -} - -// NewExport generates a exportOperation runner. -func (r repository) NewExport( - ctx context.Context, - backupID string, - sel selectors.Selector, - exportCfg control.ExportConfig, -) (operations.ExportOperation, error) { - ctrl, err := connectToM365(ctx, sel.PathService(), r.Account, r.Opts) - if err != nil { - return operations.ExportOperation{}, clues.Wrap(err, "connecting to m365") - } - - return operations.NewExportOperation( - ctx, - r.Opts, - r.dataLayer, - store.NewWrapper(r.modelStore), - ctrl, - r.Account, - model.StableID(backupID), - sel, - exportCfg, - r.Bus) -} - -// NewRestore generates a restoreOperation runner. -func (r repository) NewRestore( - ctx context.Context, - backupID string, - sel selectors.Selector, - restoreCfg control.RestoreConfig, -) (operations.RestoreOperation, error) { - ctrl, err := connectToM365(ctx, sel.PathService(), r.Account, r.Opts) - if err != nil { - return operations.RestoreOperation{}, clues.Wrap(err, "connecting to m365") - } - - return operations.NewRestoreOperation( - ctx, - r.Opts, - r.dataLayer, - store.NewWrapper(r.modelStore), - ctrl, - r.Account, - model.StableID(backupID), - sel, - restoreCfg, - r.Bus, - count.New()) -} - func (r repository) NewMaintenance( ctx context.Context, mOpts ctrlRepo.Maintenance, @@ -414,280 +302,6 @@ func (r repository) NewRetentionConfig( r.Bus) } -// Backup retrieves a backup by id. 
-func (r repository) Backup(ctx context.Context, id string) (*backup.Backup, error) { - return getBackup(ctx, id, store.NewWrapper(r.modelStore)) -} - -// getBackup handles the processing for Backup. -func getBackup( - ctx context.Context, - id string, - sw store.BackupGetter, -) (*backup.Backup, error) { - b, err := sw.GetBackup(ctx, model.StableID(id)) - if err != nil { - return nil, errWrapper(err) - } - - return b, nil -} - -// Backups lists backups by ID. Returns as many backups as possible with -// errors for the backups it was unable to retrieve. -func (r repository) Backups(ctx context.Context, ids []string) ([]*backup.Backup, *fault.Bus) { - var ( - bups []*backup.Backup - errs = fault.New(false) - sw = store.NewWrapper(r.modelStore) - ) - - for _, id := range ids { - ictx := clues.Add(ctx, "backup_id", id) - - b, err := sw.GetBackup(ictx, model.StableID(id)) - if err != nil { - errs.AddRecoverable(ctx, errWrapper(err)) - } - - bups = append(bups, b) - } - - return bups, errs -} - -// BackupsByTag lists all backups in a repository that contain all the tags -// specified. -func (r repository) BackupsByTag(ctx context.Context, fs ...store.FilterOption) ([]*backup.Backup, error) { - sw := store.NewWrapper(r.modelStore) - return backupsByTag(ctx, sw, fs) -} - -// backupsByTag returns all backups matching all provided tags. -// -// TODO(ashmrtn): This exists mostly for testing, but we could restructure the -// code in this file so there's a more elegant mocking solution. -func backupsByTag( - ctx context.Context, - sw store.BackupWrapper, - fs []store.FilterOption, -) ([]*backup.Backup, error) { - bs, err := sw.GetBackups(ctx, fs...) - if err != nil { - return nil, clues.Stack(err) - } - - // Filter out assist backup bases as they're considered incomplete and we - // haven't been displaying them before now. - res := make([]*backup.Backup, 0, len(bs)) - - for _, b := range bs { - if t := b.Tags[model.BackupTypeTag]; t != model.AssistBackup { - res = append(res, b) - } - } - - return res, nil -} - -// BackupDetails returns the specified backup.Details -func (r repository) GetBackupDetails( - ctx context.Context, - backupID string, -) (*details.Details, *backup.Backup, *fault.Bus) { - errs := fault.New(false) - - deets, bup, err := getBackupDetails( - ctx, - backupID, - r.Account.ID(), - r.dataLayer, - store.NewWrapper(r.modelStore), - errs) - - return deets, bup, errs.Fail(err) -} - -// getBackupDetails handles the processing for GetBackupDetails. -func getBackupDetails( - ctx context.Context, - backupID, tenantID string, - kw *kopia.Wrapper, - sw store.BackupGetter, - errs *fault.Bus, -) (*details.Details, *backup.Backup, error) { - b, err := sw.GetBackup(ctx, model.StableID(backupID)) - if err != nil { - return nil, nil, errWrapper(err) - } - - ssid := b.StreamStoreID - if len(ssid) == 0 { - ssid = b.DetailsID - } - - if len(ssid) == 0 { - return nil, b, clues.New("no streamstore id in backup").WithClues(ctx) - } - - var ( - sstore = streamstore.NewStreamer(kw, tenantID, b.Selector.PathService()) - deets details.Details - ) - - err = sstore.Read( - ctx, - ssid, - streamstore.DetailsReader(details.UnmarshalTo(&deets)), - errs) - if err != nil { - return nil, nil, err - } - - // Retroactively fill in isMeta information for items in older - // backup versions without that info - // version.Restore2 introduces the IsMeta flag, so only v1 needs a check. 
- if b.Version >= version.OneDrive1DataAndMetaFiles && b.Version < version.OneDrive3IsMetaMarker { - for _, d := range deets.Entries { - if d.OneDrive != nil { - d.OneDrive.IsMeta = metadata.HasMetaSuffix(d.RepoRef) - } - } - } - - deets.DetailsModel = deets.FilterMetaFiles() - - return &deets, b, nil -} - -// BackupErrors returns the specified backup's fault.Errors -func (r repository) GetBackupErrors( - ctx context.Context, - backupID string, -) (*fault.Errors, *backup.Backup, *fault.Bus) { - errs := fault.New(false) - - fe, bup, err := getBackupErrors( - ctx, - backupID, - r.Account.ID(), - r.dataLayer, - store.NewWrapper(r.modelStore), - errs) - - return fe, bup, errs.Fail(err) -} - -// getBackupErrors handles the processing for GetBackupErrors. -func getBackupErrors( - ctx context.Context, - backupID, tenantID string, - kw *kopia.Wrapper, - sw store.BackupGetter, - errs *fault.Bus, -) (*fault.Errors, *backup.Backup, error) { - b, err := sw.GetBackup(ctx, model.StableID(backupID)) - if err != nil { - return nil, nil, errWrapper(err) - } - - ssid := b.StreamStoreID - if len(ssid) == 0 { - return nil, b, clues.New("missing streamstore id in backup").WithClues(ctx) - } - - var ( - sstore = streamstore.NewStreamer(kw, tenantID, b.Selector.PathService()) - fe fault.Errors - ) - - err = sstore.Read( - ctx, - ssid, - streamstore.FaultErrorsReader(fault.UnmarshalErrorsTo(&fe)), - errs) - if err != nil { - return nil, nil, err - } - - return &fe, b, nil -} - -// DeleteBackups removes the backups from both the model store and the backup -// storage. -// -// If failOnMissing is true then returns an error if a backup model can't be -// found. Otherwise ignores missing backup models. -// -// Missing models or snapshots during the actual deletion do not cause errors. -// -// All backups are delete as an atomic unit so any failures will result in no -// deletions. -func (r repository) DeleteBackups( - ctx context.Context, - failOnMissing bool, - ids ...string, -) error { - return deleteBackups(ctx, store.NewWrapper(r.modelStore), failOnMissing, ids...) -} - -// deleteBackup handles the processing for backup deletion. -func deleteBackups( - ctx context.Context, - sw store.BackupGetterModelDeleter, - failOnMissing bool, - ids ...string, -) error { - // Although we haven't explicitly stated it, snapshots are technically - // manifests in kopia. This means we can use the same delete API to remove - // them and backup models. Deleting all of them together gives us both - // atomicity guarantees (around when data will be flushed) and helps reduce - // the number of manifest blobs that kopia will create. - var toDelete []manifest.ID - - for _, id := range ids { - b, err := sw.GetBackup(ctx, model.StableID(id)) - if err != nil { - if !failOnMissing && errors.Is(err, data.ErrNotFound) { - continue - } - - return clues.Stack(errWrapper(err)). - WithClues(ctx). - With("delete_backup_id", id) - } - - toDelete = append(toDelete, b.ModelStoreID) - - if len(b.SnapshotID) > 0 { - toDelete = append(toDelete, manifest.ID(b.SnapshotID)) - } - - ssid := b.StreamStoreID - if len(ssid) == 0 { - ssid = b.DetailsID - } - - if len(ssid) > 0 { - toDelete = append(toDelete, manifest.ID(ssid)) - } - } - - return sw.DeleteWithModelStoreIDs(ctx, toDelete...) 
-} - -func (r repository) ConnectToM365( - ctx context.Context, - pst path.ServiceType, -) (*m365.Controller, error) { - ctrl, err := connectToM365(ctx, pst, r.Account, r.Opts) - if err != nil { - return nil, clues.Wrap(err, "connecting to m365") - } - - return ctrl, nil -} - // --------------------------------------------------------------------------- // Repository ID Model // --------------------------------------------------------------------------- @@ -736,29 +350,6 @@ func newRepoID(s storage.Storage) string { // helpers // --------------------------------------------------------------------------- -var m365nonce bool - -func connectToM365( - ctx context.Context, - pst path.ServiceType, - acct account.Account, - co control.Options, -) (*m365.Controller, error) { - if !m365nonce { - m365nonce = true - - progressBar := observe.MessageWithCompletion(ctx, "Connecting to M365") - defer close(progressBar) - } - - ctrl, err := m365.NewController(ctx, acct, pst, co) - if err != nil { - return nil, err - } - - return ctrl, nil -} - func errWrapper(err error) error { if errors.Is(err, data.ErrNotFound) { return clues.Stack(ErrorBackupNotFound, err) diff --git a/src/pkg/repository/repository_test.go b/src/pkg/repository/repository_test.go index c276f35f5..97456fe70 100644 --- a/src/pkg/repository/repository_test.go +++ b/src/pkg/repository/repository_test.go @@ -17,6 +17,7 @@ import ( ctrlRepo "github.com/alcionai/corso/src/pkg/control/repository" "github.com/alcionai/corso/src/pkg/control/testdata" "github.com/alcionai/corso/src/pkg/extensions" + "github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/selectors" "github.com/alcionai/corso/src/pkg/services/m365/api" "github.com/alcionai/corso/src/pkg/storage" @@ -69,7 +70,7 @@ func (suite *RepositoryUnitSuite) TestInitialize() { NewRepoID) require.NoError(t, err, clues.ToCore(err)) - err = r.Initialize(ctx, ctrlRepo.Retention{}) + err = r.Initialize(ctx, InitConfig{}) test.errCheck(t, err, clues.ToCore(err)) }) } @@ -85,12 +86,12 @@ func (suite *RepositoryUnitSuite) TestConnect() { errCheck assert.ErrorAssertionFunc }{ { - storage.ProviderUnknown.String(), - func() (storage.Storage, error) { + name: storage.ProviderUnknown.String(), + storage: func() (storage.Storage, error) { return storage.NewStorage(storage.ProviderUnknown) }, - account.Account{}, - assert.Error, + account: account.Account{}, + errCheck: assert.Error, }, } for _, test := range table { @@ -111,7 +112,7 @@ func (suite *RepositoryUnitSuite) TestConnect() { NewRepoID) require.NoError(t, err, clues.ToCore(err)) - err = r.Connect(ctx) + err = r.Connect(ctx, ConnConfig{}) test.errCheck(t, err, clues.ToCore(err)) }) } @@ -136,12 +137,13 @@ func TestRepositoryIntegrationSuite(t *testing.T) { func (suite *RepositoryIntegrationSuite) TestInitialize() { table := []struct { name string - account account.Account + account func(*testing.T) account.Account storage func(tester.TestT) storage.Storage errCheck assert.ErrorAssertionFunc }{ { name: "success", + account: tconfig.NewM365Account, storage: storeTD.NewPrefixedS3Storage, errCheck: assert.NoError, }, @@ -156,13 +158,13 @@ func (suite *RepositoryIntegrationSuite) TestInitialize() { st := test.storage(t) r, err := New( ctx, - test.account, + test.account(t), st, control.DefaultOptions(), NewRepoID) require.NoError(t, err, clues.ToCore(err)) - err = r.Initialize(ctx, ctrlRepo.Retention{}) + err = r.Initialize(ctx, InitConfig{}) if err == nil { defer func() { err := r.Close(ctx) @@ -204,7 +206,7 @@ func (suite 
*RepositoryIntegrationSuite) TestInitializeWithRole() { NewRepoID) require.NoError(t, err, clues.ToCore(err)) - err = r.Initialize(ctx, ctrlRepo.Retention{}) + err = r.Initialize(ctx, InitConfig{}) require.NoError(t, err) defer func() { @@ -218,21 +220,23 @@ func (suite *RepositoryIntegrationSuite) TestConnect() { ctx, flush := tester.NewContext(t) defer flush() + acct := tconfig.NewM365Account(t) + // need to initialize the repository before we can test connecting to it. st := storeTD.NewPrefixedS3Storage(t) r, err := New( ctx, - account.Account{}, + acct, st, control.DefaultOptions(), NewRepoID) require.NoError(t, err, clues.ToCore(err)) - err = r.Initialize(ctx, ctrlRepo.Retention{}) + err = r.Initialize(ctx, InitConfig{}) require.NoError(t, err, clues.ToCore(err)) // now re-connect - err = r.Connect(ctx) + err = r.Connect(ctx, ConnConfig{}) assert.NoError(t, err, clues.ToCore(err)) } @@ -242,17 +246,19 @@ func (suite *RepositoryIntegrationSuite) TestConnect_sameID() { ctx, flush := tester.NewContext(t) defer flush() + acct := tconfig.NewM365Account(t) + // need to initialize the repository before we can test connecting to it. st := storeTD.NewPrefixedS3Storage(t) r, err := New( ctx, - account.Account{}, + acct, st, control.DefaultOptions(), NewRepoID) require.NoError(t, err, clues.ToCore(err)) - err = r.Initialize(ctx, ctrlRepo.Retention{}) + err = r.Initialize(ctx, InitConfig{}) require.NoError(t, err, clues.ToCore(err)) oldID := r.GetID() @@ -261,7 +267,7 @@ func (suite *RepositoryIntegrationSuite) TestConnect_sameID() { require.NoError(t, err, clues.ToCore(err)) // now re-connect - err = r.Connect(ctx) + err = r.Connect(ctx, ConnConfig{}) require.NoError(t, err, clues.ToCore(err)) assert.Equal(t, oldID, r.GetID()) } @@ -284,7 +290,8 @@ func (suite *RepositoryIntegrationSuite) TestNewBackup() { NewRepoID) require.NoError(t, err, clues.ToCore(err)) - err = r.Initialize(ctx, ctrlRepo.Retention{}) + // service doesn't matter here, we just need a valid value. + err = r.Initialize(ctx, InitConfig{Service: path.ExchangeService}) require.NoError(t, err, clues.ToCore(err)) userID := tconfig.M365UserID(t) @@ -313,7 +320,7 @@ func (suite *RepositoryIntegrationSuite) TestNewRestore() { "") require.NoError(t, err, clues.ToCore(err)) - err = r.Initialize(ctx, ctrlRepo.Retention{}) + err = r.Initialize(ctx, InitConfig{}) require.NoError(t, err, clues.ToCore(err)) ro, err := r.NewRestore( @@ -343,7 +350,8 @@ func (suite *RepositoryIntegrationSuite) TestNewBackupAndDelete() { NewRepoID) require.NoError(t, err, clues.ToCore(err)) - err = r.Initialize(ctx, ctrlRepo.Retention{}) + // service doesn't matter here, we just need a valid value. 
+ err = r.Initialize(ctx, InitConfig{Service: path.ExchangeService}) require.NoError(t, err, clues.ToCore(err)) userID := tconfig.M365UserID(t) @@ -396,7 +404,7 @@ func (suite *RepositoryIntegrationSuite) TestNewMaintenance() { NewRepoID) require.NoError(t, err, clues.ToCore(err)) - err = r.Initialize(ctx, ctrlRepo.Retention{}) + err = r.Initialize(ctx, InitConfig{}) require.NoError(t, err, clues.ToCore(err)) mo, err := r.NewMaintenance(ctx, ctrlRepo.Maintenance{}) @@ -465,11 +473,11 @@ func (suite *RepositoryIntegrationSuite) Test_Options() { NewRepoID) require.NoError(t, err, clues.ToCore(err)) - err = r.Initialize(ctx, ctrlRepo.Retention{}) + err = r.Initialize(ctx, InitConfig{}) require.NoError(t, err) assert.Equal(t, test.expectedLen, len(r.Opts.ItemExtensionFactory)) - err = r.Connect(ctx) + err = r.Connect(ctx, ConnConfig{}) assert.NoError(t, err) assert.Equal(t, test.expectedLen, len(r.Opts.ItemExtensionFactory)) }) diff --git a/src/pkg/repository/restores.go b/src/pkg/repository/restores.go new file mode 100644 index 000000000..6fe121e76 --- /dev/null +++ b/src/pkg/repository/restores.go @@ -0,0 +1,42 @@ +package repository + +import ( + "context" + + "github.com/alcionai/corso/src/internal/model" + "github.com/alcionai/corso/src/internal/operations" + "github.com/alcionai/corso/src/pkg/control" + "github.com/alcionai/corso/src/pkg/count" + "github.com/alcionai/corso/src/pkg/selectors" + "github.com/alcionai/corso/src/pkg/store" +) + +type Restorer interface { + NewRestore( + ctx context.Context, + backupID string, + sel selectors.Selector, + restoreCfg control.RestoreConfig, + ) (operations.RestoreOperation, error) +} + +// NewRestore generates a restoreOperation runner. +func (r repository) NewRestore( + ctx context.Context, + backupID string, + sel selectors.Selector, + restoreCfg control.RestoreConfig, +) (operations.RestoreOperation, error) { + return operations.NewRestoreOperation( + ctx, + r.Opts, + r.dataLayer, + store.NewWrapper(r.modelStore), + r.Provider, + r.Account, + model.StableID(backupID), + sel, + restoreCfg, + r.Bus, + count.New()) +} diff --git a/src/pkg/services/m365/api/access.go b/src/pkg/services/m365/api/access.go new file mode 100644 index 000000000..956f9db05 --- /dev/null +++ b/src/pkg/services/m365/api/access.go @@ -0,0 +1,68 @@ +package api + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/alcionai/clues" + + "github.com/alcionai/corso/src/internal/m365/graph" +) + +// --------------------------------------------------------------------------- +// controller +// --------------------------------------------------------------------------- + +func (c Client) Access() Access { + return Access{c} +} + +// Access is an interface-compliant provider of the client. +type Access struct { + Client +} + +// GetToken retrieves a m365 application auth token using client id and secret credentials. +// This token is not normally needed in order for corso to function, and is implemented +// primarily as a way to exercise the validity of those credentials without need of specific +// permissions. 
+func (c Access) GetToken( + ctx context.Context, +) error { + var ( + //nolint:lll + // https://learn.microsoft.com/en-us/graph/connecting-external-content-connectors-api-postman#step-5-get-an-authentication-token + rawURL = fmt.Sprintf( + "https://login.microsoftonline.com/%s/oauth2/v2.0/token", + c.Credentials.AzureTenantID) + headers = map[string]string{ + "Content-Type": "application/x-www-form-urlencoded", + } + body = strings.NewReader(fmt.Sprintf( + "client_id=%s"+ + "&client_secret=%s"+ + "&scope=https://graph.microsoft.com/.default"+ + "&grant_type=client_credentials", + c.Credentials.AzureClientID, + c.Credentials.AzureClientSecret)) + ) + + resp, err := c.Post(ctx, rawURL, headers, body) + if err != nil { + return graph.Stack(ctx, err) + } + + if resp.StatusCode == http.StatusBadRequest { + return clues.New("incorrect tenant or application parameters") + } + + if resp.StatusCode/100 == 4 || resp.StatusCode/100 == 5 { + return clues.New("non-2xx response: " + resp.Status) + } + + defer resp.Body.Close() + + return nil +} diff --git a/src/pkg/services/m365/api/access_test.go b/src/pkg/services/m365/api/access_test.go new file mode 100644 index 000000000..c903fcde1 --- /dev/null +++ b/src/pkg/services/m365/api/access_test.go @@ -0,0 +1,122 @@ +package api_test + +import ( + "testing" + + "github.com/alcionai/clues" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + "github.com/alcionai/corso/src/internal/tester" + "github.com/alcionai/corso/src/internal/tester/tconfig" + "github.com/alcionai/corso/src/pkg/account" + "github.com/alcionai/corso/src/pkg/control" + "github.com/alcionai/corso/src/pkg/services/m365/api" +) + +type AccessAPIIntgSuite struct { + tester.Suite + its intgTesterSetup +} + +func TestAccessAPIIntgSuite(t *testing.T) { + suite.Run(t, &AccessAPIIntgSuite{ + Suite: tester.NewIntegrationSuite( + t, + [][]string{tconfig.M365AcctCredEnvs}), + }) +} + +func (suite *AccessAPIIntgSuite) SetupSuite() { + suite.its = newIntegrationTesterSetup(suite.T()) +} + +func (suite *AccessAPIIntgSuite) TestGetToken() { + tests := []struct { + name string + creds func() account.M365Config + expectErr require.ErrorAssertionFunc + }{ + { + name: "good", + creds: func() account.M365Config { return suite.its.ac.Credentials }, + expectErr: require.NoError, + }, + { + name: "bad tenant ID", + creds: func() account.M365Config { + creds := suite.its.ac.Credentials + creds.AzureTenantID = "ZIM" + + return creds + }, + expectErr: require.Error, + }, + { + name: "missing tenant ID", + creds: func() account.M365Config { + creds := suite.its.ac.Credentials + creds.AzureTenantID = "" + + return creds + }, + expectErr: require.Error, + }, + { + name: "bad client ID", + creds: func() account.M365Config { + creds := suite.its.ac.Credentials + creds.AzureClientID = "GIR" + + return creds + }, + expectErr: require.Error, + }, + { + name: "missing client ID", + creds: func() account.M365Config { + creds := suite.its.ac.Credentials + creds.AzureClientID = "" + + return creds + }, + expectErr: require.Error, + }, + { + name: "bad client secret", + creds: func() account.M365Config { + creds := suite.its.ac.Credentials + creds.AzureClientSecret = "MY TALLEST" + + return creds + }, + expectErr: require.Error, + }, + { + name: "missing client secret", + creds: func() account.M365Config { + creds := suite.its.ac.Credentials + creds.AzureClientSecret = "" + + return creds + }, + expectErr: require.Error, + }, + } + for _, test := range tests { + suite.Run(test.name, func() { + t 
:= suite.T() + + ctx, flush := tester.NewContext(t) + defer flush() + + ac, err := api.NewClient(suite.its.ac.Credentials, control.DefaultOptions()) + require.NoError(t, err, clues.ToCore(err)) + + ac.Credentials = test.creds() + + err = ac.Access().GetToken(ctx) + test.expectErr(t, err, clues.ToCore(err)) + }) + } +} diff --git a/src/pkg/services/m365/api/client.go b/src/pkg/services/m365/api/client.go index a3f1fcee7..a0d90eb46 100644 --- a/src/pkg/services/m365/api/client.go +++ b/src/pkg/services/m365/api/client.go @@ -2,6 +2,7 @@ package api import ( "context" + "io" "net/http" "github.com/alcionai/clues" @@ -119,6 +120,16 @@ func (c Client) Get( return c.Requester.Request(ctx, http.MethodGet, url, nil, headers) } +// Get performs an ad-hoc get request using its graph.Requester +func (c Client) Post( + ctx context.Context, + url string, + headers map[string]string, + body io.Reader, +) (*http.Response, error) { + return c.Requester.Request(ctx, http.MethodGet, url, body, headers) +} + // --------------------------------------------------------------------------- // per-call config // --------------------------------------------------------------------------- From 363cbca86fd4ff0d73369be09ebca5725c17338f Mon Sep 17 00:00:00 2001 From: Abin Simon Date: Mon, 2 Oct 2023 18:47:15 +0530 Subject: [PATCH 18/26] Backup/Restore/Export multiples sites in Groups (#4344) I've updated the Team used in the CI to include private and shared channels. Sanity tests should ideally do the e2e tests for multi site backups. --- #### Does this PR need a docs update or release note? - [x] :white_check_mark: Yes, it's included - [ ] :clock1: Yes, but in a later PR - [ ] :no_entry: No #### Type of change - [x] :sunflower: Feature - [ ] :bug: Bugfix - [ ] :world_map: Documentation - [ ] :robot: Supportability/Tests - [ ] :computer: CI/Deployment - [ ] :broom: Tech Debt/Cleanup #### Issue(s) * # #### Test Plan - [ ] :muscle: Manual - [x] :zap: Unit test - [x] :green_heart: E2E --- CHANGELOG.md | 1 + src/cli/flags/testdata/backup_list.go | 3 +- src/internal/m365/graph/errors.go | 5 + src/internal/m365/graph/errors_test.go | 45 +++++++ src/internal/m365/restore.go | 1 + src/internal/m365/service/groups/backup.go | 71 ++++++----- src/internal/m365/service/groups/restore.go | 79 +++++++++--- .../m365/service/groups/restore_test.go | 114 ++++++++++++++++++ src/internal/operations/test/onedrive_test.go | 16 ++- src/pkg/services/m365/api/groups.go | 86 ++++++++++++- src/pkg/services/m365/api/groups_test.go | 27 +++++ src/pkg/services/m365/api/sites.go | 2 + 12 files changed, 393 insertions(+), 57 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0e8c12dfd..5e4fca312 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Reduce backup runtime for OneDrive and SharePoint incremental backups that have no file changes. - Increase Exchange backup performance by lazily fetching data only for items whose content changed. - Added `--backups` flag to delete multiple backups in `corso backup delete` command. +- Backup now includes all sites that belongs to a team, not just the root site. ## Fixed - Teams Channels that cannot support delta tokens (those without messages) fall back to non-delta enumeration and no longer fail a backup. 
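The `GetAllSites` helper added to `groups.go` later in this patch backs the changelog entry above: it keeps the group's root site and then discovers one additional site per private or shared channel by fetching the channel's files folder and trimming the returned documents-folder web URL down to the site's web URL. A minimal sketch of that trimming step, using only the standard library and a made-up contoso URL (not taken from the patch):

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

// siteWebURL trims a channel documents-folder URL, such as
// https://contoso.sharepoint.com/sites/team-private/Shared%20Documents,
// down to the site web URL: https://contoso.sharepoint.com/sites/team-private.
func siteWebURL(documentWebURL string) (string, error) {
	u, err := url.Parse(documentWebURL)
	if err != nil {
		return "", err
	}

	// u.Path begins with "/", so segs[0] is always the empty string.
	segs := strings.Split(u.Path, "/")
	if len(segs) < 3 {
		return "", fmt.Errorf("unexpected documents path %q", u.Path)
	}

	return fmt.Sprintf("%s://%s/%s/%s", u.Scheme, u.Host, segs[1], segs[2]), nil
}

func main() {
	s, err := siteWebURL("https://contoso.sharepoint.com/sites/team-private/Shared%20Documents")
	if err != nil {
		panic(err)
	}

	fmt.Println(s) // https://contoso.sharepoint.com/sites/team-private
}
```

The resolved site web URL is then handed to `Sites().GetByID`, which accepts a web URL as the identifier, to fetch the `models.Siteable` used for backup.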
diff --git a/src/cli/flags/testdata/backup_list.go b/src/cli/flags/testdata/backup_list.go index 911a6b450..82b08646f 100644 --- a/src/cli/flags/testdata/backup_list.go +++ b/src/cli/flags/testdata/backup_list.go @@ -3,9 +3,10 @@ package testdata import ( "testing" - "github.com/alcionai/corso/src/cli/flags" "github.com/spf13/cobra" "gotest.tools/v3/assert" + + "github.com/alcionai/corso/src/cli/flags" ) func PreparedBackupListFlags() []string { diff --git a/src/internal/m365/graph/errors.go b/src/internal/m365/graph/errors.go index f5c7824ab..6a758977e 100644 --- a/src/internal/m365/graph/errors.go +++ b/src/internal/m365/graph/errors.go @@ -70,6 +70,7 @@ const ( NoSPLicense errorMessage = "Tenant does not have a SPO license" parameterDeltaTokenNotSupported errorMessage = "Parameter 'DeltaToken' not supported for this request" usersCannotBeResolved errorMessage = "One or more users could not be resolved" + requestedSiteCouldNotBeFound errorMessage = "Requested site could not be found" ) const ( @@ -259,6 +260,10 @@ func IsErrUsersCannotBeResolved(err error) bool { return hasErrorCode(err, noResolvedUsers) || hasErrorMessage(err, usersCannotBeResolved) } +func IsErrSiteNotFound(err error) bool { + return hasErrorMessage(err, requestedSiteCouldNotBeFound) +} + // --------------------------------------------------------------------------- // error parsers // --------------------------------------------------------------------------- diff --git a/src/internal/m365/graph/errors_test.go b/src/internal/m365/graph/errors_test.go index cf9f2f99d..cd0057fda 100644 --- a/src/internal/m365/graph/errors_test.go +++ b/src/internal/m365/graph/errors_test.go @@ -628,6 +628,51 @@ func (suite *GraphErrorsUnitSuite) TestIsErrUsersCannotBeResolved() { } } +func (suite *GraphErrorsUnitSuite) TestIsErrSiteCouldNotBeFound() { + table := []struct { + name string + err error + expect assert.BoolAssertionFunc + }{ + { + name: "nil", + err: nil, + expect: assert.False, + }, + { + name: "non-matching", + err: assert.AnError, + expect: assert.False, + }, + { + name: "non-matching oDataErr", + err: odErrMsg("InvalidRequest", "cant resolve sites"), + expect: assert.False, + }, + { + name: "matching oDataErr msg", + err: odErrMsg("InvalidRequest", string(requestedSiteCouldNotBeFound)), + expect: assert.True, + }, + // next two tests are to make sure the checks are case insensitive + { + name: "oDataErr uppercase", + err: odErrMsg("InvalidRequest", strings.ToUpper(string(requestedSiteCouldNotBeFound))), + expect: assert.True, + }, + { + name: "oDataErr lowercase", + err: odErrMsg("InvalidRequest", strings.ToLower(string(requestedSiteCouldNotBeFound))), + expect: assert.True, + }, + } + for _, test := range table { + suite.Run(test.name, func() { + test.expect(suite.T(), IsErrSiteNotFound(test.err)) + }) + } +} + func (suite *GraphErrorsUnitSuite) TestIsErrCannotOpenFileAttachment() { table := []struct { name string diff --git a/src/internal/m365/restore.go b/src/internal/m365/restore.go index 616fd6f2b..d6237c072 100644 --- a/src/internal/m365/restore.go +++ b/src/internal/m365/restore.go @@ -84,6 +84,7 @@ func (ctrl *Controller) ConsumeRestoreCollections( rcc, ctrl.AC, ctrl.backupDriveIDNames, + ctrl.backupSiteIDWebURL, dcs, deets, errs, diff --git a/src/internal/m365/service/groups/backup.go b/src/internal/m365/service/groups/backup.go index 7dbbf8e13..1943b8fb4 100644 --- a/src/internal/m365/service/groups/backup.go +++ b/src/internal/m365/service/groups/backup.go @@ -79,10 +79,7 @@ func ProduceBackupCollections( 
switch scope.Category().PathType() { case path.LibrariesCategory: - // TODO(meain): Private channels get a separate SharePoint - // site. We should also back those up and not just the - // default one. - resp, err := ac.Groups().GetRootSite(ctx, bpc.ProtectedResource.ID()) + sites, err := ac.Groups().GetAllSites(ctx, bpc.ProtectedResource.ID(), errs) if err != nil { return nil, nil, false, err } @@ -95,39 +92,47 @@ func ProduceBackupCollections( siteMetadataCollection[siteID] = append(siteMetadataCollection[siteID], c) } - pr := idname.NewProvider(ptr.Val(resp.GetId()), ptr.Val(resp.GetName())) - sbpc := inject.BackupProducerConfig{ - LastBackupVersion: bpc.LastBackupVersion, - Options: bpc.Options, - ProtectedResource: pr, - Selector: bpc.Selector, - MetadataCollections: siteMetadataCollection[ptr.Val(resp.GetId())], - } + for _, s := range sites { + pr := idname.NewProvider(ptr.Val(s.GetId()), ptr.Val(s.GetName())) + sbpc := inject.BackupProducerConfig{ + LastBackupVersion: bpc.LastBackupVersion, + Options: bpc.Options, + ProtectedResource: pr, + Selector: bpc.Selector, + MetadataCollections: siteMetadataCollection[ptr.Val(s.GetId())], + } - bh := drive.NewGroupBackupHandler( - bpc.ProtectedResource.ID(), - ptr.Val(resp.GetId()), - ac.Drives(), - scope) + bh := drive.NewGroupBackupHandler( + bpc.ProtectedResource.ID(), + ptr.Val(s.GetId()), + ac.Drives(), + scope) - cp, err := bh.SitePathPrefix(creds.AzureTenantID) - if err != nil { - return nil, nil, false, clues.Wrap(err, "getting canonical path") - } + cp, err := bh.SitePathPrefix(creds.AzureTenantID) + if err != nil { + return nil, nil, false, clues.Wrap(err, "getting canonical path") + } - sitesPreviousPaths[ptr.Val(resp.GetId())] = cp.String() + sitesPreviousPaths[ptr.Val(s.GetId())] = cp.String() - dbcs, canUsePreviousBackup, err = site.CollectLibraries( - ctx, - sbpc, - bh, - creds.AzureTenantID, - ssmb, - su, - errs) - if err != nil { - el.AddRecoverable(ctx, err) - continue + cs, cupb, err := site.CollectLibraries( + ctx, + sbpc, + bh, + creds.AzureTenantID, + ssmb, + su, + errs) + if err != nil { + el.AddRecoverable(ctx, err) + continue + } + + dbcs = append(dbcs, cs...) 
+ + // FIXME(meain): This can cause incorrect backup + // https://github.com/alcionai/corso/issues/4371 + canUsePreviousBackup = canUsePreviousBackup || cupb } case path.ChannelMessagesCategory: diff --git a/src/internal/m365/service/groups/restore.go b/src/internal/m365/service/groups/restore.go index 9a94a921b..fc09088e4 100644 --- a/src/internal/m365/service/groups/restore.go +++ b/src/internal/m365/service/groups/restore.go @@ -12,6 +12,7 @@ import ( "github.com/alcionai/corso/src/internal/common/ptr" "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/m365/collection/drive" + "github.com/alcionai/corso/src/internal/m365/graph" "github.com/alcionai/corso/src/internal/m365/support" "github.com/alcionai/corso/src/internal/operations/inject" "github.com/alcionai/corso/src/pkg/backup/details" @@ -29,24 +30,20 @@ func ConsumeRestoreCollections( rcc inject.RestoreConsumerConfig, ac api.Client, backupDriveIDNames idname.Cacher, + backupSiteIDWebURL idname.Cacher, dcs []data.RestoreCollection, deets *details.Builder, errs *fault.Bus, ctr *count.Bus, ) (*support.ControllerOperationStatus, error) { var ( - restoreMetrics support.CollectionMetrics - caches = drive.NewRestoreCaches(backupDriveIDNames) - lrh = drive.NewLibraryRestoreHandler(ac, rcc.Selector.PathService()) - el = errs.Local() + restoreMetrics support.CollectionMetrics + caches = drive.NewRestoreCaches(backupDriveIDNames) + lrh = drive.NewLibraryRestoreHandler(ac, rcc.Selector.PathService()) + el = errs.Local() + webURLToSiteNames = map[string]string{} ) - // TODO: uncomment when a handler is available - // err := caches.Populate(ctx, lrh, rcc.ProtectedResource.ID()) - // if err != nil { - // return nil, clues.Wrap(err, "initializing restore caches") - // } - // Reorder collections so that the parents directories are created // before the child directories; a requirement for permissions. data.SortRestoreCollections(dcs) @@ -59,7 +56,7 @@ func ConsumeRestoreCollections( var ( err error - resp models.Siteable + siteName string category = dc.FullPath().Category() metrics support.CollectionMetrics ictx = clues.Add(ctx, @@ -71,16 +68,25 @@ func ConsumeRestoreCollections( switch dc.FullPath().Category() { case path.LibrariesCategory: - // TODO(meain): As of now we only restore the root site - // and that too to whatever is currently the root site of the - // group and not the original one. Not sure if the - // original can be changed. - resp, err = ac.Groups().GetRootSite(ctx, rcc.ProtectedResource.ID()) - if err != nil { - return nil, err + siteID := dc.FullPath().Folders()[1] + + webURL, ok := backupSiteIDWebURL.NameOf(siteID) + if !ok { + // This should not happen, but just in case + logger.Ctx(ctx).With("site_id", siteID).Info("site weburl not found, using site id") } - pr := idname.NewProvider(ptr.Val(resp.GetId()), ptr.Val(resp.GetName())) + siteName, err = getSiteName(ctx, siteID, webURL, ac.Sites(), webURLToSiteNames) + if err != nil { + el.AddRecoverable(ctx, clues.Wrap(err, "getting site"). + With("web_url", webURL, "site_id", siteID)) + } else if len(siteName) == 0 { + // Site was deleted in between and restore and is not + // available anymore. 
+ continue + } + + pr := idname.NewProvider(siteID, siteName) srcc := inject.RestoreConsumerConfig{ BackupVersion: rcc.BackupVersion, Options: rcc.Options, @@ -133,3 +139,38 @@ func ConsumeRestoreCollections( return status, el.Failure() } + +func getSiteName( + ctx context.Context, + siteID string, + webURL string, + ac api.GetByIDer[models.Siteable], + webURLToSiteNames map[string]string, +) (string, error) { + siteName, ok := webURLToSiteNames[webURL] + if ok { + return siteName, nil + } + + site, err := ac.GetByID(ctx, siteID, api.CallConfig{}) + if err != nil { + webURLToSiteNames[webURL] = "" + + if graph.IsErrSiteNotFound(err) { + // TODO(meain): Should we surface this to the user somehow? + // In case a site that we had previously backed up was + // deleted, skip that site with a warning. + logger.Ctx(ctx).With("web_url", webURL, "site_id", siteID). + Info("Site does not exist, skipping restore.") + + return "", nil + } + + return "", err + } + + siteName = ptr.Val(site.GetDisplayName()) + webURLToSiteNames[webURL] = siteName + + return siteName, nil +} diff --git a/src/internal/m365/service/groups/restore_test.go b/src/internal/m365/service/groups/restore_test.go index d87000fc5..262bc3159 100644 --- a/src/internal/m365/service/groups/restore_test.go +++ b/src/internal/m365/service/groups/restore_test.go @@ -7,12 +7,17 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + "golang.org/x/exp/slices" "github.com/alcionai/corso/src/internal/common/idname" + "github.com/alcionai/corso/src/internal/common/ptr" "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/data/mock" + "github.com/alcionai/corso/src/internal/m365/graph" "github.com/alcionai/corso/src/internal/operations/inject" "github.com/alcionai/corso/src/internal/tester" + "github.com/alcionai/corso/src/internal/tester/tconfig" + "github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/services/m365/api" @@ -52,9 +57,118 @@ func (suite *GroupsUnitSuite) TestConsumeRestoreCollections_noErrorOnGroups() { rcc, api.Client{}, idname.NewCache(map[string]string{}), + idname.NewCache(map[string]string{}), dcs, nil, fault.New(false), nil) assert.NoError(t, err, "Groups Channels restore") } + +type groupsIntegrationSuite struct { + tester.Suite + resource string + tenantID string + ac api.Client +} + +func TestGroupsIntegrationSuite(t *testing.T) { + suite.Run(t, &groupsIntegrationSuite{ + Suite: tester.NewIntegrationSuite( + t, + [][]string{tconfig.M365AcctCredEnvs}), + }) +} + +func (suite *groupsIntegrationSuite) SetupSuite() { + t := suite.T() + + ctx, flush := tester.NewContext(t) + defer flush() + + graph.InitializeConcurrencyLimiter(ctx, true, 4) + + suite.resource = tconfig.M365TeamID(t) + + acct := tconfig.NewM365Account(t) + creds, err := acct.M365Config() + require.NoError(t, err, clues.ToCore(err)) + + suite.ac, err = api.NewClient(creds, control.DefaultOptions()) + require.NoError(t, err, clues.ToCore(err)) + + suite.tenantID = creds.AzureTenantID +} + +// test for getSiteName +func (suite *groupsIntegrationSuite) TestGetSiteName() { + t := suite.T() + + ctx, flush := tester.NewContext(t) + defer flush() + + rootSite, err := suite.ac.Groups().GetRootSite(ctx, suite.resource) + require.NoError(t, err, clues.ToCore(err)) + + // Generate a fake site ID that appears valid to graph API but doesn't actually exist. 
+ // This "could" be flaky, but highly unlikely + unavailableSiteID := []rune(ptr.Val(rootSite.GetId())) + firstIDChar := slices.Index(unavailableSiteID, ',') + 1 + + if unavailableSiteID[firstIDChar] != '2' { + unavailableSiteID[firstIDChar] = '2' + } else { + unavailableSiteID[firstIDChar] = '1' + } + + tests := []struct { + name string + siteID string + webURL string + siteName string + webURLToSiteNames map[string]string + expectErr assert.ErrorAssertionFunc + }{ + { + name: "valid", + siteID: ptr.Val(rootSite.GetId()), + webURL: ptr.Val(rootSite.GetWebUrl()), + siteName: *rootSite.GetDisplayName(), + webURLToSiteNames: map[string]string{}, + expectErr: assert.NoError, + }, + { + name: "unavailable", + siteID: string(unavailableSiteID), + webURL: "https://does-not-matter", + siteName: "", + webURLToSiteNames: map[string]string{}, + expectErr: assert.NoError, + }, + { + name: "previously found", + siteID: "random-id", + webURL: "https://random-url", + siteName: "random-name", + webURLToSiteNames: map[string]string{"https://random-url": "random-name"}, + expectErr: assert.NoError, + }, + } + + for _, test := range tests { + suite.Run(test.name, func() { + t := suite.T() + + siteName, err := getSiteName( + ctx, + test.siteID, + test.webURL, + suite.ac.Sites(), + test.webURLToSiteNames) + require.NoError(t, err, clues.ToCore(err)) + + test.expectErr(t, err) + assert.Equal(t, test.siteName, siteName) + }) + } +} diff --git a/src/internal/operations/test/onedrive_test.go b/src/internal/operations/test/onedrive_test.go index 8b4ac9b81..6e53566c9 100644 --- a/src/internal/operations/test/onedrive_test.go +++ b/src/internal/operations/test/onedrive_test.go @@ -762,11 +762,10 @@ func runDriveIncrementalTest( true) // do some additional checks to ensure the incremental dealt with fewer items. - // +2 on read/writes to account for metadata: 1 delta and 1 path. var ( - expectWrites = test.itemsWritten + 2 + expectWrites = test.itemsWritten expectNonMetaWrites = test.nonMetaItemsWritten - expectReads = test.itemsRead + 2 + expectReads = test.itemsRead assertReadWrite = assert.Equal ) @@ -775,6 +774,17 @@ func runDriveIncrementalTest( // /libraries/sites/previouspath expectWrites++ expectReads++ + + // +2 on read/writes to account for metadata: 1 delta and 1 path (for each site) + sites, err := ac.Groups().GetAllSites(ctx, owner, fault.New(true)) + require.NoError(t, err, clues.ToCore(err)) + + expectWrites += len(sites) * 2 + expectReads += len(sites) * 2 + } else { + // +2 on read/writes to account for metadata: 1 delta and 1 path. + expectWrites += 2 + expectReads += 2 } // Sharepoint can produce a superset of permissions by nature of diff --git a/src/pkg/services/m365/api/groups.go b/src/pkg/services/m365/api/groups.go index 2aacdedf0..b6223dbdd 100644 --- a/src/pkg/services/m365/api/groups.go +++ b/src/pkg/services/m365/api/groups.go @@ -3,6 +3,8 @@ package api import ( "context" "fmt" + "net/url" + "strings" "github.com/alcionai/clues" msgraphgocore "github.com/microsoftgraph/msgraph-sdk-go-core" @@ -154,6 +156,88 @@ func (c Groups) GetByID( return group, nil } +// GetAllSites gets all the sites that belong to a group. This is +// necessary as private and shared channels gets their on individual +// sites. All the other channels make use of the root site. 
+func (c Groups) GetAllSites( + ctx context.Context, + identifier string, + errs *fault.Bus, +) ([]models.Siteable, error) { + el := errs.Local() + + root, err := c.GetRootSite(ctx, identifier) + if err != nil { + return nil, clues.Wrap(err, "getting root site"). + With("group_id", identifier) + } + + sites := []models.Siteable{root} + + channels, err := Channels(c).GetChannels(ctx, identifier) + if err != nil { + return nil, clues.Wrap(err, "getting channels") + } + + service, err := c.Service() + if err != nil { + return nil, graph.Stack(ctx, err) + } + + for _, ch := range channels { + if ptr.Val(ch.GetMembershipType()) == models.STANDARD_CHANNELMEMBERSHIPTYPE { + // Standard channels use root site + continue + } + + ictx := clues.Add( + ctx, + "channel_id", + ptr.Val(ch.GetId()), + "channel_name", + clues.Hide(ptr.Val(ch.GetDisplayName()))) + + resp, err := service. + Client(). + Teams(). + ByTeamId(identifier). + Channels(). + ByChannelId(ptr.Val(ch.GetId())). + FilesFolder(). + Get(ictx, nil) + if err != nil { + return nil, clues.Wrap(err, "getting files folder for channel"). + WithClues(ictx) + } + + // WebURL returned here is the url to the documents folder, we + // have to trim that out to get the actual site's webURL + // https://example.sharepoint.com/sites//Shared%20Documents/ + documentWebURL := ptr.Val(resp.GetWebUrl()) + + u, err := url.Parse(documentWebURL) + if err != nil { + return nil, clues.Wrap(err, "parsing document web url"). + WithClues(ictx) + } + + pathSegments := strings.Split(u.Path, "/") // pathSegments[0] == "" + siteWebURL := fmt.Sprintf("%s://%s/%s/%s", u.Scheme, u.Host, pathSegments[1], pathSegments[2]) + + ictx = clues.Add(ictx, "document_web_url", documentWebURL, "site_web_url", siteWebURL) + + site, err := Sites(c).GetByID(ictx, siteWebURL, CallConfig{}) + if err != nil { + el.AddRecoverable(ctx, clues.Wrap(err, "getting site")) + continue + } + + sites = append(sites, site) + } + + return sites, el.Failure() +} + func (c Groups) GetRootSite( ctx context.Context, identifier string, @@ -171,7 +255,7 @@ func (c Groups) GetRootSite( BySiteId("root"). Get(ctx, nil) if err != nil { - return nil, clues.Wrap(err, "getting root site for group") + return nil, graph.Stack(ctx, err) } return resp, graph.Stack(ctx, err).OrNil() diff --git a/src/pkg/services/m365/api/groups_test.go b/src/pkg/services/m365/api/groups_test.go index b60240cff..213bb5d81 100644 --- a/src/pkg/services/m365/api/groups_test.go +++ b/src/pkg/services/m365/api/groups_test.go @@ -110,6 +110,33 @@ func (suite *GroupsIntgSuite) TestGetAll() { require.NotZero(t, len(groups), "must have at least one group") } +func (suite *GroupsIntgSuite) TestGetAllSites() { + t := suite.T() + + ctx, flush := tester.NewContext(t) + defer flush() + + channels, err := suite.its.ac. + Channels().GetChannels(ctx, suite.its.group.id) + require.NoError(t, err, "getting channels") + require.NotZero(t, len(channels), "must have at least one channel") + + siteCount := 1 + + for _, c := range channels { + if ptr.Val(c.GetMembershipType()) != models.STANDARD_CHANNELMEMBERSHIPTYPE { + siteCount++ + } + } + + sites, err := suite.its.ac. + Groups(). 
+ GetAllSites(ctx, suite.its.group.id, fault.New(true)) + require.NoError(t, err) + require.NotZero(t, len(sites), "must have at least one site") + require.Equal(t, siteCount, len(sites), "incorrect number of sites") +} + func (suite *GroupsIntgSuite) TestGroups_GetByID() { t := suite.T() diff --git a/src/pkg/services/m365/api/sites.go b/src/pkg/services/m365/api/sites.go index 0865a4f47..813f1c1fa 100644 --- a/src/pkg/services/m365/api/sites.go +++ b/src/pkg/services/m365/api/sites.go @@ -142,6 +142,8 @@ func (c Sites) GetByID( options.QueryParameters.Expand = cc.Expand } + // NOTE: `/sites` sends `displayName` as name, but + // `/sites/` send base of `webURL` as name resp, err = c.Stable. Client(). Sites(). From 8bdef88a8bb7e8d155795d1770e99548e00dd1ee Mon Sep 17 00:00:00 2001 From: Abin Simon Date: Mon, 2 Oct 2023 20:03:15 +0530 Subject: [PATCH 19/26] Return tombstone instead of canUsePreviousBackup (#4394) This is necessary to fix the correctness of the backup. --- #### Does this PR need a docs update or release note? - [ ] :white_check_mark: Yes, it's included - [ ] :clock1: Yes, but in a later PR - [x] :no_entry: No #### Type of change - [ ] :sunflower: Feature - [x] :bug: Bugfix - [ ] :world_map: Documentation - [ ] :robot: Supportability/Tests - [ ] :computer: CI/Deployment - [ ] :broom: Tech Debt/Cleanup #### Issue(s) * fixes https://github.com/alcionai/corso/issues/4371 #### Test Plan - [ ] :muscle: Manual - [ ] :zap: Unit test - [x] :green_heart: E2E --- src/internal/m365/backup.go | 6 +- src/internal/m365/backup_test.go | 123 ++++++++++++++++++ .../m365/collection/drive/collections.go | 9 +- .../m365/collection/drive/collections_test.go | 18 +-- .../m365/collection/groups/channel_handler.go | 9 ++ src/internal/m365/service/groups/backup.go | 59 +++++---- 6 files changed, 186 insertions(+), 38 deletions(-) diff --git a/src/internal/m365/backup.go b/src/internal/m365/backup.go index f916c7257..7b54c36cb 100644 --- a/src/internal/m365/backup.go +++ b/src/internal/m365/backup.go @@ -100,7 +100,7 @@ func (ctrl *Controller) ProduceBackupCollections( } case path.GroupsService: - colls, ssmb, canUsePreviousBackup, err = groups.ProduceBackupCollections( + colls, ssmb, err = groups.ProduceBackupCollections( ctx, bpc, ctrl.AC, @@ -111,6 +111,10 @@ func (ctrl *Controller) ProduceBackupCollections( return nil, nil, false, err } + // canUsePreviousBacukp can be always returned true for groups as we + // return a tombstone collection in case the metadata read fails + canUsePreviousBackup = true + default: return nil, nil, false, clues.Wrap(clues.New(service.String()), "service not supported").WithClues(ctx) } diff --git a/src/internal/m365/backup_test.go b/src/internal/m365/backup_test.go index 88708aa14..f7e51f89d 100644 --- a/src/internal/m365/backup_test.go +++ b/src/internal/m365/backup_test.go @@ -11,6 +11,9 @@ import ( "github.com/stretchr/testify/suite" inMock "github.com/alcionai/corso/src/internal/common/idname/mock" + "github.com/alcionai/corso/src/internal/common/ptr" + "github.com/alcionai/corso/src/internal/data" + "github.com/alcionai/corso/src/internal/data/mock" "github.com/alcionai/corso/src/internal/m365/service/exchange" odConsts "github.com/alcionai/corso/src/internal/m365/service/onedrive/consts" "github.com/alcionai/corso/src/internal/m365/service/sharepoint" @@ -574,3 +577,123 @@ func (suite *GroupsCollectionIntgSuite) TestCreateGroupsCollection_SharePoint() assert.NotZero(t, status.Successes) t.Log(status.String()) } + +func (suite *GroupsCollectionIntgSuite) 
TestCreateGroupsCollection_SharePoint_InvalidMetadata() { + t := suite.T() + + ctx, flush := tester.NewContext(t) + defer flush() + + var ( + groupID = tconfig.M365GroupID(t) + ctrl = newController(ctx, t, path.GroupsService) + groupIDs = []string{groupID} + ) + + id, name, err := ctrl.PopulateProtectedResourceIDAndName(ctx, groupID, nil) + require.NoError(t, err, clues.ToCore(err)) + + sel := selectors.NewGroupsBackup(groupIDs) + sel.Include(sel.LibraryFolders([]string{"test"}, selectors.PrefixMatch())) + + sel.SetDiscreteOwnerIDName(id, name) + + site, err := suite.connector.AC.Groups().GetRootSite(ctx, groupID) + require.NoError(t, err, clues.ToCore(err)) + + pth, err := path.Build( + suite.tenantID, + groupID, + path.GroupsService, + path.LibrariesCategory, + true, + odConsts.SitesPathDir, + ptr.Val(site.GetId())) + require.NoError(t, err, clues.ToCore(err)) + + mmc := []data.RestoreCollection{ + mock.Collection{ + Path: pth, + ItemData: []data.Item{ + &mock.Item{ + ItemID: "previouspath", + Reader: io.NopCloser(bytes.NewReader([]byte("invalid"))), + }, + }, + }, + } + + bpc := inject.BackupProducerConfig{ + LastBackupVersion: version.NoBackup, + Options: control.DefaultOptions(), + ProtectedResource: inMock.NewProvider(id, name), + Selector: sel.Selector, + MetadataCollections: mmc, + } + + collections, excludes, canUsePreviousBackup, err := ctrl.ProduceBackupCollections( + ctx, + bpc, + fault.New(true)) + require.NoError(t, err, clues.ToCore(err)) + assert.True(t, canUsePreviousBackup, "can use previous backup") + // No excludes yet as this isn't an incremental backup. + assert.True(t, excludes.Empty()) + + // we don't know an exact count of drives this will produce, + // but it should be more than one. + assert.Greater(t, len(collections), 1) + + p, err := path.BuildMetadata( + suite.tenantID, + groupID, + path.GroupsService, + path.LibrariesCategory, + false) + require.NoError(t, err, clues.ToCore(err)) + + p, err = p.Append(false, odConsts.SitesPathDir) + require.NoError(t, err, clues.ToCore(err)) + + foundSitesMetadata := false + foundRootTombstone := false + + sp, err := path.BuildPrefix( + suite.tenantID, + groupID, + path.GroupsService, + path.LibrariesCategory) + require.NoError(t, err, clues.ToCore(err)) + + sp, err = sp.Append(false, odConsts.SitesPathDir, ptr.Val(site.GetId())) + require.NoError(t, err, clues.ToCore(err)) + + for _, coll := range collections { + if coll.State() == data.DeletedState { + if coll.PreviousPath() != nil && coll.PreviousPath().String() == sp.String() { + foundRootTombstone = true + } + + continue + } + + sitesMetadataCollection := coll.FullPath().String() == p.String() + + for object := range coll.Items(ctx, fault.New(true)) { + if object.ID() == "previouspath" && sitesMetadataCollection { + foundSitesMetadata = true + } + + buf := &bytes.Buffer{} + _, err := buf.ReadFrom(object.ToReader()) + assert.NoError(t, err, "reading item", clues.ToCore(err)) + } + } + + assert.True(t, foundSitesMetadata, "missing sites metadata") + assert.True(t, foundRootTombstone, "missing root tombstone") + + status := ctrl.Wait() + assert.NotZero(t, status.Successes) + t.Log(status.String()) +} diff --git a/src/internal/m365/collection/drive/collections.go b/src/internal/m365/collection/drive/collections.go index 2f54b0429..35c11d1be 100644 --- a/src/internal/m365/collection/drive/collections.go +++ b/src/internal/m365/collection/drive/collections.go @@ -135,11 +135,6 @@ func deserializeMetadata( continue } - if err == nil { - // Successful decode. 
- continue - } - // This is conservative, but report an error if either any of the items // for any of the deserialized maps have duplicate drive IDs or there's // some other problem deserializing things. This will cause the entire @@ -147,7 +142,9 @@ func deserializeMetadata( // these cases. We can make the logic for deciding when to continue vs. // when to fail less strict in the future if needed. if err != nil { - return nil, nil, false, clues.Stack(err).WithClues(ictx) + errs.Fail(clues.Stack(err).WithClues(ictx)) + + return map[string]string{}, map[string]map[string]string{}, false, nil } } } diff --git a/src/internal/m365/collection/drive/collections_test.go b/src/internal/m365/collection/drive/collections_test.go index d0e33477f..622f5029c 100644 --- a/src/internal/m365/collection/drive/collections_test.go +++ b/src/internal/m365/collection/drive/collections_test.go @@ -978,7 +978,9 @@ func (suite *OneDriveCollectionsUnitSuite) TestDeserializeMetadata() { { // Bad formats are logged but skip adding entries to the maps and don't // return an error. - name: "BadFormat", + name: "BadFormat", + expectedDeltas: map[string]string{}, + expectedPaths: map[string]map[string]string{}, cols: []func() []graph.MetadataCollectionEntry{ func() []graph.MetadataCollectionEntry { return []graph.MetadataCollectionEntry{ @@ -989,7 +991,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestDeserializeMetadata() { }, }, canUsePreviousBackup: false, - errCheck: assert.Error, + errCheck: assert.NoError, }, { // Unexpected files are logged and skipped. They don't cause an error to @@ -1054,10 +1056,10 @@ func (suite *OneDriveCollectionsUnitSuite) TestDeserializeMetadata() { } }, }, - expectedDeltas: nil, - expectedPaths: nil, + expectedDeltas: map[string]string{}, + expectedPaths: map[string]map[string]string{}, canUsePreviousBackup: false, - errCheck: assert.Error, + errCheck: assert.NoError, }, { name: "DriveAlreadyFound_Deltas", @@ -1084,10 +1086,10 @@ func (suite *OneDriveCollectionsUnitSuite) TestDeserializeMetadata() { } }, }, - expectedDeltas: nil, - expectedPaths: nil, + expectedDeltas: map[string]string{}, + expectedPaths: map[string]map[string]string{}, canUsePreviousBackup: false, - errCheck: assert.Error, + errCheck: assert.NoError, }, } diff --git a/src/internal/m365/collection/groups/channel_handler.go b/src/internal/m365/collection/groups/channel_handler.go index 80c36cbef..db50446ca 100644 --- a/src/internal/m365/collection/groups/channel_handler.go +++ b/src/internal/m365/collection/groups/channel_handler.go @@ -67,6 +67,15 @@ func (bh channelsBackupHandler) canonicalPath( false) } +func (bh channelsBackupHandler) PathPrefix(tenantID string) (path.Path, error) { + return path.Build( + tenantID, + bh.protectedResource, + path.GroupsService, + path.ChannelMessagesCategory, + false) +} + func (bh channelsBackupHandler) GetChannelMessage( ctx context.Context, teamID, channelID, itemID string, diff --git a/src/internal/m365/service/groups/backup.go b/src/internal/m365/service/groups/backup.go index 1943b8fb4..25210ade3 100644 --- a/src/internal/m365/service/groups/backup.go +++ b/src/internal/m365/service/groups/backup.go @@ -22,6 +22,7 @@ import ( "github.com/alcionai/corso/src/pkg/account" "github.com/alcionai/corso/src/pkg/backup/identity" "github.com/alcionai/corso/src/pkg/backup/metadata" + "github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/logger" "github.com/alcionai/corso/src/pkg/path" @@ -35,19 +36,18 @@ func 
ProduceBackupCollections( creds account.M365Config, su support.StatusUpdater, errs *fault.Bus, -) ([]data.BackupCollection, *prefixmatcher.StringSetMatcher, bool, error) { +) ([]data.BackupCollection, *prefixmatcher.StringSetMatcher, error) { b, err := bpc.Selector.ToGroupsBackup() if err != nil { - return nil, nil, false, clues.Wrap(err, "groupsDataCollection: parsing selector") + return nil, nil, clues.Wrap(err, "groupsDataCollection: parsing selector") } var ( - el = errs.Local() - collections = []data.BackupCollection{} - categories = map[path.CategoryType]struct{}{} - ssmb = prefixmatcher.NewStringSetBuilder() - canUsePreviousBackup bool - sitesPreviousPaths = map[string]string{} + el = errs.Local() + collections = []data.BackupCollection{} + categories = map[path.CategoryType]struct{}{} + ssmb = prefixmatcher.NewStringSetBuilder() + sitesPreviousPaths = map[string]string{} ) ctx = clues.Add( @@ -60,7 +60,7 @@ func ProduceBackupCollections( bpc.ProtectedResource.ID(), api.CallConfig{}) if err != nil { - return nil, nil, false, clues.Wrap(err, "getting group").WithClues(ctx) + return nil, nil, clues.Wrap(err, "getting group").WithClues(ctx) } isTeam := api.IsTeam(ctx, group) @@ -81,7 +81,7 @@ func ProduceBackupCollections( case path.LibrariesCategory: sites, err := ac.Groups().GetAllSites(ctx, bpc.ProtectedResource.ID(), errs) if err != nil { - return nil, nil, false, err + return nil, nil, err } siteMetadataCollection := map[string][]data.RestoreCollection{} @@ -108,14 +108,14 @@ func ProduceBackupCollections( ac.Drives(), scope) - cp, err := bh.SitePathPrefix(creds.AzureTenantID) + sp, err := bh.SitePathPrefix(creds.AzureTenantID) if err != nil { - return nil, nil, false, clues.Wrap(err, "getting canonical path") + return nil, nil, clues.Wrap(err, "getting site path") } - sitesPreviousPaths[ptr.Val(s.GetId())] = cp.String() + sitesPreviousPaths[ptr.Val(s.GetId())] = sp.String() - cs, cupb, err := site.CollectLibraries( + cs, canUsePreviousBackup, err := site.CollectLibraries( ctx, sbpc, bh, @@ -128,11 +128,11 @@ func ProduceBackupCollections( continue } - dbcs = append(dbcs, cs...) + if !canUsePreviousBackup { + dbcs = append(dbcs, data.NewTombstoneCollection(sp, control.Options{})) + } - // FIXME(meain): This can cause incorrect backup - // https://github.com/alcionai/corso/issues/4371 - canUsePreviousBackup = canUsePreviousBackup || cupb + dbcs = append(dbcs, cs...) } case path.ChannelMessagesCategory: @@ -140,10 +140,12 @@ func ProduceBackupCollections( continue } - dbcs, canUsePreviousBackup, err = groups.CreateCollections( + bh := groups.NewChannelBackupHandler(bpc.ProtectedResource.ID(), ac.Channels()) + + cs, canUsePreviousBackup, err := groups.CreateCollections( ctx, bpc, - groups.NewChannelBackupHandler(bpc.ProtectedResource.ID(), ac.Channels()), + bh, creds.AzureTenantID, scope, su, @@ -152,6 +154,17 @@ func ProduceBackupCollections( el.AddRecoverable(ctx, err) continue } + + if !canUsePreviousBackup { + tp, err := bh.PathPrefix(creds.AzureTenantID) + if err != nil { + return nil, nil, clues.Wrap(err, "getting message path") + } + + dbcs = append(dbcs, data.NewTombstoneCollection(tp, control.Options{})) + } + + dbcs = append(dbcs, cs...) } collections = append(collections, dbcs...) @@ -170,7 +183,7 @@ func ProduceBackupCollections( su, errs) if err != nil { - return nil, nil, false, err + return nil, nil, err } collections = append(collections, baseCols...) 
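The pattern this patch applies is worth spelling out: when a category's previous-backup metadata can't be reused, the producer now appends a tombstone collection for that category's old path prefix (via `data.NewTombstoneCollection` and the handler's path-prefix helpers) instead of reporting a backup-wide `canUsePreviousBackup = false`. A rough sketch of that decision using hypothetical stand-in types, not corso's real interfaces:

```go
package main

import "fmt"

// backupCollection is a hypothetical stand-in for a produced collection.
type backupCollection struct {
	pathPrefix string
	tombstone  bool
	items      []string
}

// collectCategory sketches the per-category flow: always emit the freshly
// collected items, and when the previous metadata couldn't be trusted, also
// emit a tombstone for the old prefix so stale state under it gets cleared.
func collectCategory(prefix string, items []string, canUsePreviousBackup bool) []backupCollection {
	out := []backupCollection{}

	if !canUsePreviousBackup {
		out = append(out, backupCollection{pathPrefix: prefix, tombstone: true})
	}

	return append(out, backupCollection{pathPrefix: prefix, items: items})
}

func main() {
	for _, c := range collectCategory("tenant/group-id/libraries/sites/site-id", []string{"report.docx"}, false) {
		fmt.Printf("%+v\n", c)
	}
}
```

Because each category handles its own tombstone, the caller in `backup.go` can unconditionally report that the previous backup is usable, which is what the comment added near the top of this patch states.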
@@ -183,12 +196,12 @@ func ProduceBackupCollections( sitesPreviousPaths, su) if err != nil { - return nil, nil, false, err + return nil, nil, err } collections = append(collections, md) - return collections, ssmb.ToReader(), canUsePreviousBackup, el.Failure() + return collections, ssmb.ToReader(), el.Failure() } func getSitesMetadataCollection( From 3d78183651289e2051b8690850069c9b41df6bd0 Mon Sep 17 00:00:00 2001 From: Keepers Date: Mon, 2 Oct 2023 10:19:39 -0600 Subject: [PATCH 20/26] Revert "move drive pagers to pager pattern (#4316)" (#4412) This reverts commit c3f94fd7f76f377e4728c715abbb8c7846e9fb25. The specified commit is working fine for CI and development, but contains performance degredation (solved in a follow-up pr) that we want to avoid for the next release. This rever is temporary, and the changes will be re-instated after release. --- #### Does this PR need a docs update or release note? - [x] :no_entry: No #### Type of change - [x] :robot: Supportability/Tests #### Test Plan - [x] :zap: Unit test - [x] :green_heart: E2E --- src/cmd/purge/scripts/onedrivePurge.ps1 | 2 +- .../common/prefixmatcher/mock/mock.go | 2 +- .../m365/collection/drive/collections.go | 208 ++++----- .../m365/collection/drive/collections_test.go | 416 +++++++++++++----- .../m365/collection/drive/handlers.go | 14 +- .../m365/collection/drive/item_collector.go | 142 ++++++ .../m365/collection/drive/item_handler.go | 14 +- .../m365/collection/drive/item_test.go | 97 +++- .../m365/collection/drive/library_handler.go | 14 +- src/internal/m365/collection/drive/restore.go | 6 +- .../m365/collection/drive/url_cache.go | 61 ++- .../m365/collection/drive/url_cache_test.go | 194 +++++--- .../m365/collection/groups/backup_test.go | 5 + .../m365/service/onedrive/mock/handlers.go | 75 +--- .../m365/service/sharepoint/backup_test.go | 12 +- src/pkg/fault/fault.go | 12 +- src/pkg/selectors/exchange.go | 2 +- src/pkg/selectors/groups.go | 2 +- src/pkg/selectors/onedrive.go | 2 +- src/pkg/selectors/scopes.go | 4 +- src/pkg/selectors/scopes_test.go | 12 +- src/pkg/selectors/sharepoint.go | 2 +- src/pkg/services/m365/api/config.go | 2 +- src/pkg/services/m365/api/delta.go | 11 + src/pkg/services/m365/api/drive.go | 18 - src/pkg/services/m365/api/drive_pager.go | 75 ++-- src/pkg/services/m365/api/drive_pager_test.go | 15 - src/pkg/services/m365/api/drive_test.go | 27 +- src/pkg/services/m365/api/item_pager.go | 14 - src/pkg/services/m365/api/mock/pager.go | 9 +- 30 files changed, 918 insertions(+), 551 deletions(-) create mode 100644 src/internal/m365/collection/drive/item_collector.go create mode 100644 src/pkg/services/m365/api/delta.go diff --git a/src/cmd/purge/scripts/onedrivePurge.ps1 b/src/cmd/purge/scripts/onedrivePurge.ps1 index 4204d5596..e8f258b95 100644 --- a/src/cmd/purge/scripts/onedrivePurge.ps1 +++ b/src/cmd/purge/scripts/onedrivePurge.ps1 @@ -229,7 +229,7 @@ elseif (![string]::IsNullOrEmpty($Site)) { } } else { - Write-Host "User (for OneDrive) or Site (for Sharepoint) is required" + Write-Host "User (for OneDrvie) or Site (for Sharpeoint) is required" Exit } diff --git a/src/internal/common/prefixmatcher/mock/mock.go b/src/internal/common/prefixmatcher/mock/mock.go index 4516f8665..ad4568114 100644 --- a/src/internal/common/prefixmatcher/mock/mock.go +++ b/src/internal/common/prefixmatcher/mock/mock.go @@ -27,7 +27,7 @@ func NewPrefixMap(m map[string]map[string]struct{}) *PrefixMap { func (pm PrefixMap) AssertEqual(t *testing.T, r prefixmatcher.StringSetReader) { if pm.Empty() { - require.True(t, 
r.Empty(), "result prefixMap should be empty but contains keys: %+v", r.Keys()) + require.True(t, r.Empty(), "both prefix maps are empty") return } diff --git a/src/internal/m365/collection/drive/collections.go b/src/internal/m365/collection/drive/collections.go index 35c11d1be..7d94156ea 100644 --- a/src/internal/m365/collection/drive/collections.go +++ b/src/internal/m365/collection/drive/collections.go @@ -227,16 +227,16 @@ func (c *Collections) Get( ssmb *prefixmatcher.StringSetMatchBuilder, errs *fault.Bus, ) ([]data.BackupCollection, bool, error) { - prevDriveIDToDelta, oldPrevPathsByDriveID, canUsePrevBackup, err := deserializeMetadata(ctx, prevMetadata) + prevDeltas, oldPathsByDriveID, canUsePreviousBackup, err := deserializeMetadata(ctx, prevMetadata) if err != nil { return nil, false, err } - ctx = clues.Add(ctx, "can_use_previous_backup", canUsePrevBackup) + ctx = clues.Add(ctx, "can_use_previous_backup", canUsePreviousBackup) driveTombstones := map[string]struct{}{} - for driveID := range oldPrevPathsByDriveID { + for driveID := range oldPathsByDriveID { driveTombstones[driveID] = struct{}{} } @@ -254,88 +254,76 @@ func (c *Collections) Get( } var ( - driveIDToDeltaLink = map[string]string{} - driveIDToPrevPaths = map[string]map[string]string{} - numPrevItems = 0 + // Drive ID -> delta URL for drive + deltaURLs = map[string]string{} + // Drive ID -> folder ID -> folder path + folderPaths = map[string]map[string]string{} + numPrevItems = 0 ) for _, d := range drives { var ( - driveID = ptr.Val(d.GetId()) - driveName = ptr.Val(d.GetName()) - ictx = clues.Add( - ctx, - "drive_id", driveID, - "drive_name", clues.Hide(driveName)) - - excludedItemIDs = map[string]struct{}{} - oldPrevPaths = oldPrevPathsByDriveID[driveID] - prevDeltaLink = prevDriveIDToDelta[driveID] - - // itemCollection is used to identify which collection a - // file belongs to. This is useful to delete a file from the - // collection it was previously in, in case it was moved to a - // different collection within the same delta query - // item ID -> item ID - itemCollection = map[string]string{} + driveID = ptr.Val(d.GetId()) + driveName = ptr.Val(d.GetName()) + prevDelta = prevDeltas[driveID] + oldPaths = oldPathsByDriveID[driveID] + numOldDelta = 0 + ictx = clues.Add(ctx, "drive_id", driveID, "drive_name", driveName) ) delete(driveTombstones, driveID) - if _, ok := driveIDToPrevPaths[driveID]; !ok { - driveIDToPrevPaths[driveID] = map[string]string{} - } - if _, ok := c.CollectionMap[driveID]; !ok { c.CollectionMap[driveID] = map[string]*Collection{} } + if len(prevDelta) > 0 { + numOldDelta++ + } + logger.Ctx(ictx).Infow( "previous metadata for drive", - "num_paths_entries", len(oldPrevPaths)) + "num_paths_entries", len(oldPaths), + "num_deltas_entries", numOldDelta) - items, du, err := c.handler.EnumerateDriveItemsDelta( + delta, paths, excluded, err := collectItems( ictx, + c.handler.NewItemPager(driveID, "", api.DriveItemSelectDefault()), driveID, - prevDeltaLink) + driveName, + c.UpdateCollections, + oldPaths, + prevDelta, + errs) if err != nil { return nil, false, err } + // Used for logging below. + numDeltas := 0 + // It's alright to have an empty folders map (i.e. no folders found) but not // an empty delta token. This is because when deserializing the metadata we // remove entries for which there is no corresponding delta token/folder. If // we leave empty delta tokens then we may end up setting the State field // for collections when not actually getting delta results. 
- if len(du.URL) > 0 { - driveIDToDeltaLink[driveID] = du.URL - } - - newPrevPaths, err := c.UpdateCollections( - ctx, - driveID, - driveName, - items, - oldPrevPaths, - itemCollection, - excludedItemIDs, - du.Reset, - errs) - if err != nil { - return nil, false, clues.Stack(err) + if len(delta.URL) > 0 { + deltaURLs[driveID] = delta.URL + numDeltas++ } // Avoid the edge case where there's no paths but we do have a valid delta // token. We can accomplish this by adding an empty paths map for this // drive. If we don't have this then the next backup won't use the delta // token because it thinks the folder paths weren't persisted. - driveIDToPrevPaths[driveID] = map[string]string{} - maps.Copy(driveIDToPrevPaths[driveID], newPrevPaths) + folderPaths[driveID] = map[string]string{} + maps.Copy(folderPaths[driveID], paths) logger.Ctx(ictx).Infow( "persisted metadata for drive", - "num_new_paths_entries", len(newPrevPaths), - "delta_reset", du.Reset) + "num_paths_entries", len(paths), + "num_deltas_entries", numDeltas, + "delta_reset", delta.Reset) numDriveItems := c.NumItems - numPrevItems numPrevItems = c.NumItems @@ -347,7 +335,7 @@ func (c *Collections) Get( err = c.addURLCacheToDriveCollections( ictx, driveID, - prevDeltaLink, + prevDelta, errs) if err != nil { return nil, false, err @@ -356,8 +344,8 @@ func (c *Collections) Get( // For both cases we don't need to do set difference on folder map if the // delta token was valid because we should see all the changes. - if !du.Reset { - if len(excludedItemIDs) == 0 { + if !delta.Reset { + if len(excluded) == 0 { continue } @@ -366,7 +354,7 @@ func (c *Collections) Get( return nil, false, clues.Wrap(err, "making exclude prefix").WithClues(ictx) } - ssmb.Add(p.String(), excludedItemIDs) + ssmb.Add(p.String(), excluded) continue } @@ -381,11 +369,13 @@ func (c *Collections) Get( foundFolders[id] = struct{}{} } - for fldID, p := range oldPrevPaths { + for fldID, p := range oldPaths { if _, ok := foundFolders[fldID]; ok { continue } + delete(paths, fldID) + prevPath, err := path.FromDataLayerPath(p, false) if err != nil { err = clues.Wrap(err, "invalid previous path").WithClues(ictx).With("deleted_path", p) @@ -453,14 +443,14 @@ func (c *Collections) Get( // empty/missing and default to a full backup. 
logger.CtxErr(ctx, err).Info("making metadata collection path prefixes") - return collections, canUsePrevBackup, nil + return collections, canUsePreviousBackup, nil } md, err := graph.MakeMetadataCollection( pathPrefix, []graph.MetadataCollectionEntry{ - graph.NewMetadataEntry(bupMD.PreviousPathFileName, driveIDToPrevPaths), - graph.NewMetadataEntry(bupMD.DeltaURLsFileName, driveIDToDeltaLink), + graph.NewMetadataEntry(bupMD.PreviousPathFileName, folderPaths), + graph.NewMetadataEntry(bupMD.DeltaURLsFileName, deltaURLs), }, c.statusUpdater) @@ -473,7 +463,7 @@ func (c *Collections) Get( collections = append(collections, md) } - return collections, canUsePrevBackup, nil + return collections, canUsePreviousBackup, nil } // addURLCacheToDriveCollections adds an URL cache to all collections belonging to @@ -487,7 +477,7 @@ func (c *Collections) addURLCacheToDriveCollections( driveID, prevDelta, urlCacheRefreshInterval, - c.handler, + c.handler.NewItemPager(driveID, "", api.DriveItemSelectURLCache()), errs) if err != nil { return err @@ -543,21 +533,22 @@ func updateCollectionPaths( func (c *Collections) handleDelete( itemID, driveID string, - oldPrevPaths, currPrevPaths, newPrevPaths map[string]string, + oldPaths, newPaths map[string]string, isFolder bool, excluded map[string]struct{}, + itemCollection map[string]map[string]string, invalidPrevDelta bool, ) error { if !isFolder { // Try to remove the item from the Collection if an entry exists for this // item. This handles cases where an item was created and deleted during the // same delta query. - if parentID, ok := currPrevPaths[itemID]; ok { + if parentID, ok := itemCollection[driveID][itemID]; ok { if col := c.CollectionMap[driveID][parentID]; col != nil { col.Remove(itemID) } - delete(currPrevPaths, itemID) + delete(itemCollection[driveID], itemID) } // Don't need to add to exclude list if the delta is invalid since the @@ -578,7 +569,7 @@ func (c *Collections) handleDelete( var prevPath path.Path - prevPathStr, ok := oldPrevPaths[itemID] + prevPathStr, ok := oldPaths[itemID] if ok { var err error @@ -595,7 +586,7 @@ func (c *Collections) handleDelete( // Nested folders also return deleted delta results so we don't have to // worry about doing a prefix search in the map to remove the subtree of // the deleted folder/package. - delete(newPrevPaths, itemID) + delete(newPaths, itemID) if prevPath == nil || invalidPrevDelta { // It is possible that an item was created and deleted between two delta @@ -685,29 +676,21 @@ func (c *Collections) getCollectionPath( // UpdateCollections initializes and adds the provided drive items to Collections // A new collection is created for every drive folder (or package). -// oldPrevPaths is the unchanged data that was loaded from the metadata file. -// This map is not modified during the call. -// currPrevPaths starts as a copy of oldPaths and is updated as changes are found in -// the returned results. Items are added to this collection throughout the call. -// newPrevPaths, ie: the items added during this call, get returned as a map. +// oldPaths is the unchanged data that was loaded from the metadata file. +// newPaths starts as a copy of oldPaths and is updated as changes are found in +// the returned results. 
func (c *Collections) UpdateCollections( ctx context.Context, driveID, driveName string, items []models.DriveItemable, - oldPrevPaths map[string]string, - currPrevPaths map[string]string, + oldPaths map[string]string, + newPaths map[string]string, excluded map[string]struct{}, + itemCollection map[string]map[string]string, invalidPrevDelta bool, errs *fault.Bus, -) (map[string]string, error) { - var ( - el = errs.Local() - newPrevPaths = map[string]string{} - ) - - if !invalidPrevDelta { - maps.Copy(newPrevPaths, oldPrevPaths) - } +) error { + el := errs.Local() for _, item := range items { if el.Failure() != nil { @@ -717,12 +700,8 @@ func (c *Collections) UpdateCollections( var ( itemID = ptr.Val(item.GetId()) itemName = ptr.Val(item.GetName()) + ictx = clues.Add(ctx, "item_id", itemID, "item_name", clues.Hide(itemName)) isFolder = item.GetFolder() != nil || item.GetPackageEscaped() != nil - ictx = clues.Add( - ctx, - "item_id", itemID, - "item_name", clues.Hide(itemName), - "item_is_folder", isFolder) ) if item.GetMalware() != nil { @@ -744,13 +723,13 @@ func (c *Collections) UpdateCollections( if err := c.handleDelete( itemID, driveID, - oldPrevPaths, - currPrevPaths, - newPrevPaths, + oldPaths, + newPaths, isFolder, excluded, + itemCollection, invalidPrevDelta); err != nil { - return nil, clues.Stack(err).WithClues(ictx) + return clues.Stack(err).WithClues(ictx) } continue @@ -776,13 +755,13 @@ func (c *Collections) UpdateCollections( // Deletions are handled above so this is just moves/renames. var prevPath path.Path - prevPathStr, ok := oldPrevPaths[itemID] + prevPathStr, ok := oldPaths[itemID] if ok { prevPath, err = path.FromDataLayerPath(prevPathStr, false) if err != nil { el.AddRecoverable(ctx, clues.Wrap(err, "invalid previous path"). WithClues(ictx). - With("prev_path_string", path.LoggableDir(prevPathStr))) + With("path_string", prevPathStr)) } } else if item.GetRoot() != nil { // Root doesn't move or get renamed. @@ -792,11 +771,11 @@ func (c *Collections) UpdateCollections( // Moved folders don't cause delta results for any subfolders nested in // them. We need to go through and update paths to handle that. We only // update newPaths so we don't accidentally clobber previous deletes. - updatePath(newPrevPaths, itemID, collectionPath.String()) + updatePath(newPaths, itemID, collectionPath.String()) found, err := updateCollectionPaths(driveID, itemID, c.CollectionMap, collectionPath) if err != nil { - return nil, clues.Stack(err).WithClues(ictx) + return clues.Stack(err).WithClues(ictx) } if found { @@ -819,7 +798,7 @@ func (c *Collections) UpdateCollections( invalidPrevDelta, nil) if err != nil { - return nil, clues.Stack(err).WithClues(ictx) + return clues.Stack(err).WithClues(ictx) } col.driveName = driveName @@ -841,38 +820,35 @@ func (c *Collections) UpdateCollections( case item.GetFile() != nil: // Deletions are handled above so this is just moves/renames. if len(ptr.Val(item.GetParentReference().GetId())) == 0 { - return nil, clues.New("file without parent ID").WithClues(ictx) + return clues.New("file without parent ID").WithClues(ictx) } // Get the collection for this item. 
parentID := ptr.Val(item.GetParentReference().GetId()) ictx = clues.Add(ictx, "parent_id", parentID) - collection, ok := c.CollectionMap[driveID][parentID] - if !ok { - return nil, clues.New("item seen before parent folder").WithClues(ictx) + collection, found := c.CollectionMap[driveID][parentID] + if !found { + return clues.New("item seen before parent folder").WithClues(ictx) } - // This will only kick in if the file was moved multiple times - // within a single delta query. We delete the file from the previous - // collection so that it doesn't appear in two places. - prevParentContainerID, ok := currPrevPaths[itemID] - if ok { - prevColl, found := c.CollectionMap[driveID][prevParentContainerID] + // Delete the file from previous collection. This will + // only kick in if the file was moved multiple times + // within a single delta query + icID, found := itemCollection[driveID][itemID] + if found { + pcollection, found := c.CollectionMap[driveID][icID] if !found { - return nil, clues.New("previous collection not found"). - With("prev_parent_container_id", prevParentContainerID). - WithClues(ictx) + return clues.New("previous collection not found").WithClues(ictx) } - if ok := prevColl.Remove(itemID); !ok { - return nil, clues.New("removing item from prev collection"). - With("prev_parent_container_id", prevParentContainerID). - WithClues(ictx) + removed := pcollection.Remove(itemID) + if !removed { + return clues.New("removing from prev collection").WithClues(ictx) } } - currPrevPaths[itemID] = parentID + itemCollection[driveID][itemID] = parentID if collection.Add(item) { c.NumItems++ @@ -893,13 +869,11 @@ func (c *Collections) UpdateCollections( } default: - el.AddRecoverable(ictx, clues.New("item is neither folder nor file"). - WithClues(ictx). 
- Label(fault.LabelForceNoBackupCreation)) + return clues.New("item type not supported").WithClues(ictx) } } - return newPrevPaths, el.Failure() + return el.Failure() } type dirScopeChecker interface { diff --git a/src/internal/m365/collection/drive/collections_test.go b/src/internal/m365/collection/drive/collections_test.go index 622f5029c..1e25d16c0 100644 --- a/src/internal/m365/collection/drive/collections_test.go +++ b/src/internal/m365/collection/drive/collections_test.go @@ -8,6 +8,7 @@ import ( "github.com/alcionai/clues" "github.com/google/uuid" "github.com/microsoftgraph/msgraph-sdk-go/models" + "github.com/microsoftgraph/msgraph-sdk-go/models/odataerrors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" @@ -136,7 +137,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedStatePath := getExpectedStatePathGenerator(suite.T(), bh, tenant, testBaseDrivePath) tests := []struct { - name string + testCase string items []models.DriveItemable inputFolderMap map[string]string scope selectors.OneDriveScope @@ -146,11 +147,11 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedContainerCount int expectedFileCount int expectedSkippedCount int - expectedPrevPaths map[string]string + expectedMetadataPaths map[string]string expectedExcludes map[string]struct{} }{ { - name: "Invalid item", + testCase: "Invalid item", items: []models.DriveItemable{ driveRootItem("root"), driveItem("item", "item", testBaseDrivePath, "root", false, false, false), @@ -162,13 +163,13 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { "root": expectedStatePath(data.NotMovedState, ""), }, expectedContainerCount: 1, - expectedPrevPaths: map[string]string{ + expectedMetadataPaths: map[string]string{ "root": expectedPath(""), }, expectedExcludes: map[string]struct{}{}, }, { - name: "Single File", + testCase: "Single File", items: []models.DriveItemable{ driveRootItem("root"), driveItem("file", "file", testBaseDrivePath, "root", true, false, false), @@ -183,13 +184,13 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedFileCount: 1, expectedContainerCount: 1, // Root folder is skipped since it's always present. 
- expectedPrevPaths: map[string]string{ + expectedMetadataPaths: map[string]string{ "root": expectedPath(""), }, expectedExcludes: getDelList("file"), }, { - name: "Single Folder", + testCase: "Single Folder", items: []models.DriveItemable{ driveRootItem("root"), driveItem("folder", "folder", testBaseDrivePath, "root", false, true, false), @@ -201,7 +202,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { "root": expectedStatePath(data.NotMovedState, ""), "folder": expectedStatePath(data.NewState, folder), }, - expectedPrevPaths: map[string]string{ + expectedMetadataPaths: map[string]string{ "root": expectedPath(""), "folder": expectedPath("/folder"), }, @@ -210,7 +211,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedExcludes: map[string]struct{}{}, }, { - name: "Single Package", + testCase: "Single Package", items: []models.DriveItemable{ driveRootItem("root"), driveItem("package", "package", testBaseDrivePath, "root", false, false, true), @@ -222,7 +223,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { "root": expectedStatePath(data.NotMovedState, ""), "package": expectedStatePath(data.NewState, pkg), }, - expectedPrevPaths: map[string]string{ + expectedMetadataPaths: map[string]string{ "root": expectedPath(""), "package": expectedPath("/package"), }, @@ -231,7 +232,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedExcludes: map[string]struct{}{}, }, { - name: "1 root file, 1 folder, 1 package, 2 files, 3 collections", + testCase: "1 root file, 1 folder, 1 package, 2 files, 3 collections", items: []models.DriveItemable{ driveRootItem("root"), driveItem("fileInRoot", "fileInRoot", testBaseDrivePath, "root", true, false, false), @@ -251,7 +252,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedItemCount: 5, expectedFileCount: 3, expectedContainerCount: 3, - expectedPrevPaths: map[string]string{ + expectedMetadataPaths: map[string]string{ "root": expectedPath(""), "folder": expectedPath("/folder"), "package": expectedPath("/package"), @@ -259,7 +260,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedExcludes: getDelList("fileInRoot", "fileInFolder", "fileInPackage"), }, { - name: "contains folder selector", + testCase: "contains folder selector", items: []models.DriveItemable{ driveRootItem("root"), driveItem("fileInRoot", "fileInRoot", testBaseDrivePath, "root", true, false, false), @@ -284,7 +285,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedContainerCount: 3, // just "folder" isn't added here because the include check is done on the // parent path since we only check later if something is a folder or not. 
- expectedPrevPaths: map[string]string{ + expectedMetadataPaths: map[string]string{ "folder": expectedPath(folder), "subfolder": expectedPath(folderSub), "folder2": expectedPath(folderSub + folder), @@ -292,7 +293,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedExcludes: getDelList("fileInFolder", "fileInFolder2"), }, { - name: "prefix subfolder selector", + testCase: "prefix subfolder selector", items: []models.DriveItemable{ driveRootItem("root"), driveItem("fileInRoot", "fileInRoot", testBaseDrivePath, "root", true, false, false), @@ -315,14 +316,14 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedItemCount: 3, expectedFileCount: 1, expectedContainerCount: 2, - expectedPrevPaths: map[string]string{ + expectedMetadataPaths: map[string]string{ "subfolder": expectedPath(folderSub), "folder2": expectedPath(folderSub + folder), }, expectedExcludes: getDelList("fileInFolder2"), }, { - name: "match subfolder selector", + testCase: "match subfolder selector", items: []models.DriveItemable{ driveRootItem("root"), driveItem("fileInRoot", "fileInRoot", testBaseDrivePath, "root", true, false, false), @@ -343,13 +344,13 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedFileCount: 1, expectedContainerCount: 1, // No child folders for subfolder so nothing here. - expectedPrevPaths: map[string]string{ + expectedMetadataPaths: map[string]string{ "subfolder": expectedPath(folderSub), }, expectedExcludes: getDelList("fileInSubfolder"), }, { - name: "not moved folder tree", + testCase: "not moved folder tree", items: []models.DriveItemable{ driveRootItem("root"), driveItem("folder", "folder", testBaseDrivePath, "root", false, true, false), @@ -367,7 +368,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedItemCount: 1, expectedFileCount: 0, expectedContainerCount: 2, - expectedPrevPaths: map[string]string{ + expectedMetadataPaths: map[string]string{ "root": expectedPath(""), "folder": expectedPath(folder), "subfolder": expectedPath(folderSub), @@ -375,7 +376,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedExcludes: map[string]struct{}{}, }, { - name: "moved folder tree", + testCase: "moved folder tree", items: []models.DriveItemable{ driveRootItem("root"), driveItem("folder", "folder", testBaseDrivePath, "root", false, true, false), @@ -393,7 +394,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedItemCount: 1, expectedFileCount: 0, expectedContainerCount: 2, - expectedPrevPaths: map[string]string{ + expectedMetadataPaths: map[string]string{ "root": expectedPath(""), "folder": expectedPath(folder), "subfolder": expectedPath(folderSub), @@ -401,7 +402,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedExcludes: map[string]struct{}{}, }, { - name: "moved folder tree with file no previous", + testCase: "moved folder tree with file no previous", items: []models.DriveItemable{ driveRootItem("root"), driveItem("folder", "folder", testBaseDrivePath, "root", false, true, false), @@ -418,14 +419,14 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedItemCount: 2, expectedFileCount: 1, expectedContainerCount: 2, - expectedPrevPaths: map[string]string{ + expectedMetadataPaths: map[string]string{ "root": expectedPath(""), "folder": expectedPath("/folder2"), }, expectedExcludes: getDelList("file"), }, { - name: "moved folder tree with file no previous 1", + testCase: "moved 
folder tree with file no previous 1", items: []models.DriveItemable{ driveRootItem("root"), driveItem("folder", "folder", testBaseDrivePath, "root", false, true, false), @@ -441,14 +442,14 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedItemCount: 2, expectedFileCount: 1, expectedContainerCount: 2, - expectedPrevPaths: map[string]string{ + expectedMetadataPaths: map[string]string{ "root": expectedPath(""), "folder": expectedPath(folder), }, expectedExcludes: getDelList("file"), }, { - name: "moved folder tree and subfolder 1", + testCase: "moved folder tree and subfolder 1", items: []models.DriveItemable{ driveRootItem("root"), driveItem("folder", "folder", testBaseDrivePath, "root", false, true, false), @@ -468,7 +469,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedItemCount: 2, expectedFileCount: 0, expectedContainerCount: 3, - expectedPrevPaths: map[string]string{ + expectedMetadataPaths: map[string]string{ "root": expectedPath(""), "folder": expectedPath(folder), "subfolder": expectedPath("/subfolder"), @@ -476,7 +477,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedExcludes: map[string]struct{}{}, }, { - name: "moved folder tree and subfolder 2", + testCase: "moved folder tree and subfolder 2", items: []models.DriveItemable{ driveRootItem("root"), driveItem("subfolder", "subfolder", testBaseDrivePath, "root", false, true, false), @@ -496,7 +497,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedItemCount: 2, expectedFileCount: 0, expectedContainerCount: 3, - expectedPrevPaths: map[string]string{ + expectedMetadataPaths: map[string]string{ "root": expectedPath(""), "folder": expectedPath(folder), "subfolder": expectedPath("/subfolder"), @@ -504,7 +505,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedExcludes: map[string]struct{}{}, }, { - name: "move subfolder when moving parent", + testCase: "move subfolder when moving parent", items: []models.DriveItemable{ driveRootItem("root"), driveItem("folder2", "folder2", testBaseDrivePath, "root", false, true, false), @@ -538,7 +539,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedItemCount: 5, expectedFileCount: 2, expectedContainerCount: 4, - expectedPrevPaths: map[string]string{ + expectedMetadataPaths: map[string]string{ "root": expectedPath(""), "folder": expectedPath("/folder"), "folder2": expectedPath("/folder2"), @@ -547,7 +548,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedExcludes: getDelList("itemInSubfolder", "itemInFolder2"), }, { - name: "moved folder tree multiple times", + testCase: "moved folder tree multiple times", items: []models.DriveItemable{ driveRootItem("root"), driveItem("folder", "folder", testBaseDrivePath, "root", false, true, false), @@ -567,7 +568,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedItemCount: 2, expectedFileCount: 1, expectedContainerCount: 2, - expectedPrevPaths: map[string]string{ + expectedMetadataPaths: map[string]string{ "root": expectedPath(""), "folder": expectedPath("/folder2"), "subfolder": expectedPath("/folder2/subfolder"), @@ -575,7 +576,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedExcludes: getDelList("file"), }, { - name: "deleted folder and package", + testCase: "deleted folder and package", items: []models.DriveItemable{ driveRootItem("root"), // root is always present, but not necessary 
here delItem("folder", testBaseDrivePath, "root", false, true, false), @@ -596,13 +597,13 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedItemCount: 0, expectedFileCount: 0, expectedContainerCount: 1, - expectedPrevPaths: map[string]string{ + expectedMetadataPaths: map[string]string{ "root": expectedPath(""), }, expectedExcludes: map[string]struct{}{}, }, { - name: "delete folder without previous", + testCase: "delete folder without previous", items: []models.DriveItemable{ driveRootItem("root"), delItem("folder", testBaseDrivePath, "root", false, true, false), @@ -618,13 +619,13 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedItemCount: 0, expectedFileCount: 0, expectedContainerCount: 1, - expectedPrevPaths: map[string]string{ + expectedMetadataPaths: map[string]string{ "root": expectedPath(""), }, expectedExcludes: map[string]struct{}{}, }, { - name: "delete folder tree move subfolder", + testCase: "delete folder tree move subfolder", items: []models.DriveItemable{ driveRootItem("root"), delItem("folder", testBaseDrivePath, "root", false, true, false), @@ -645,14 +646,14 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedItemCount: 1, expectedFileCount: 0, expectedContainerCount: 2, - expectedPrevPaths: map[string]string{ + expectedMetadataPaths: map[string]string{ "root": expectedPath(""), "subfolder": expectedPath("/subfolder"), }, expectedExcludes: map[string]struct{}{}, }, { - name: "delete file", + testCase: "delete file", items: []models.DriveItemable{ driveRootItem("root"), delItem("item", testBaseDrivePath, "root", true, false, false), @@ -668,13 +669,13 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedItemCount: 1, expectedFileCount: 1, expectedContainerCount: 1, - expectedPrevPaths: map[string]string{ + expectedMetadataPaths: map[string]string{ "root": expectedPath(""), }, expectedExcludes: getDelList("item"), }, { - name: "item before parent errors", + testCase: "item before parent errors", items: []models.DriveItemable{ driveRootItem("root"), driveItem("file", "file", testBaseDrivePath+"/folder", "folder", true, false, false), @@ -689,11 +690,13 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedItemCount: 0, expectedFileCount: 0, expectedContainerCount: 1, - expectedPrevPaths: nil, - expectedExcludes: map[string]struct{}{}, + expectedMetadataPaths: map[string]string{ + "root": expectedPath(""), + }, + expectedExcludes: map[string]struct{}{}, }, { - name: "1 root file, 1 folder, 1 package, 1 good file, 1 malware", + testCase: "1 root file, 1 folder, 1 package, 1 good file, 1 malware", items: []models.DriveItemable{ driveRootItem("root"), driveItem("fileInRoot", "fileInRoot", testBaseDrivePath, "root", true, false, false), @@ -714,7 +717,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedFileCount: 2, expectedContainerCount: 3, expectedSkippedCount: 1, - expectedPrevPaths: map[string]string{ + expectedMetadataPaths: map[string]string{ "root": expectedPath(""), "folder": expectedPath("/folder"), "package": expectedPath("/package"), @@ -723,23 +726,26 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { }, } - for _, test := range tests { - suite.Run(test.name, func() { + for _, tt := range tests { + suite.Run(tt.testCase, func() { t := suite.T() ctx, flush := tester.NewContext(t) defer flush() var ( - excludes = map[string]struct{}{} - currPrevPaths = map[string]string{} - errs 
= fault.New(true) + excludes = map[string]struct{}{} + outputFolderMap = map[string]string{} + itemCollection = map[string]map[string]string{ + driveID: {}, + } + errs = fault.New(true) ) - maps.Copy(currPrevPaths, test.inputFolderMap) + maps.Copy(outputFolderMap, tt.inputFolderMap) c := NewCollections( - &itemBackupHandler{api.Drives{}, user, test.scope}, + &itemBackupHandler{api.Drives{}, user, tt.scope}, tenant, user, nil, @@ -747,24 +753,25 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { c.CollectionMap[driveID] = map[string]*Collection{} - newPrevPaths, err := c.UpdateCollections( + err := c.UpdateCollections( ctx, driveID, "General", - test.items, - test.inputFolderMap, - currPrevPaths, + tt.items, + tt.inputFolderMap, + outputFolderMap, excludes, + itemCollection, false, errs) - test.expect(t, err, clues.ToCore(err)) - assert.Equal(t, len(test.expectedCollectionIDs), len(c.CollectionMap[driveID]), "total collections") - assert.Equal(t, test.expectedItemCount, c.NumItems, "item count") - assert.Equal(t, test.expectedFileCount, c.NumFiles, "file count") - assert.Equal(t, test.expectedContainerCount, c.NumContainers, "container count") - assert.Equal(t, test.expectedSkippedCount, len(errs.Skipped()), "skipped items") + tt.expect(t, err, clues.ToCore(err)) + assert.Equal(t, len(tt.expectedCollectionIDs), len(c.CollectionMap[driveID]), "total collections") + assert.Equal(t, tt.expectedItemCount, c.NumItems, "item count") + assert.Equal(t, tt.expectedFileCount, c.NumFiles, "file count") + assert.Equal(t, tt.expectedContainerCount, c.NumContainers, "container count") + assert.Equal(t, tt.expectedSkippedCount, len(errs.Skipped()), "skipped items") - for id, sp := range test.expectedCollectionIDs { + for id, sp := range tt.expectedCollectionIDs { if !assert.Containsf(t, c.CollectionMap[driveID], id, "missing collection with id %s", id) { // Skip collections we don't find so we don't get an NPE. 
continue @@ -775,8 +782,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { assert.Equalf(t, sp.prevPath, c.CollectionMap[driveID][id].PreviousPath(), "prev path for collection %s", id) } - assert.Equal(t, test.expectedPrevPaths, newPrevPaths, "metadata paths") - assert.Equal(t, test.expectedExcludes, excludes, "exclude list") + assert.Equal(t, tt.expectedMetadataPaths, outputFolderMap, "metadata paths") + assert.Equal(t, tt.expectedExcludes, excludes, "exclude list") }) } } @@ -1298,8 +1305,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { driveItem("folder", "folder", driveBasePath1, "root", false, true, false), driveItem("file", "file", driveBasePath1+"/folder", "folder", true, false, false), }, - DeltaLink: &delta, - ResetDelta: true, + DeltaLink: &delta, }, }, }, @@ -1337,8 +1343,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { driveItem("file", "file", driveBasePath1+"/folder", "folder", true, false, false), driveItem("file", "file2", driveBasePath1+"/folder", "folder", true, false, false), }, - DeltaLink: &delta, - ResetDelta: true, + DeltaLink: &delta, }, }, }, @@ -1415,8 +1420,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { driveItem("folder", "folder", driveBasePath1, "root", false, true, false), driveItem("file", "file", driveBasePath1+"/folder", "folder", true, false, false), }, - DeltaLink: &empty, // probably will never happen with graph - ResetDelta: true, + DeltaLink: &empty, // probably will never happen with graph }, }, }, @@ -1453,8 +1457,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { driveItem("folder", "folder", driveBasePath1, "root", false, true, false), driveItem("file", "file", driveBasePath1+"/folder", "folder", true, false, false), }, - NextLink: &next, - ResetDelta: true, + NextLink: &next, }, { Values: []models.DriveItemable{ @@ -1462,8 +1465,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { driveItem("folder", "folder", driveBasePath1, "root", false, true, false), driveItem("file2", "file2", driveBasePath1+"/folder", "folder", true, false, false), }, - DeltaLink: &delta, - ResetDelta: true, + DeltaLink: &delta, }, }, }, @@ -1505,8 +1507,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { driveItem("folder", "folder", driveBasePath1, "root", false, true, false), driveItem("file", "file", driveBasePath1+"/folder", "folder", true, false, false), }, - DeltaLink: &delta, - ResetDelta: true, + DeltaLink: &delta, }, }, driveID2: { @@ -1516,8 +1517,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { driveItem("folder2", "folder", driveBasePath2, "root2", false, true, false), driveItem("file2", "file", driveBasePath2+"/folder", "folder2", true, false, false), }, - DeltaLink: &delta2, - ResetDelta: true, + DeltaLink: &delta2, }, }, }, @@ -1569,8 +1569,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { driveItem("folder", "folder", driveBasePath1, "root", false, true, false), driveItem("file", "file", driveBasePath1+"/folder", "folder", true, false, false), }, - DeltaLink: &delta, - ResetDelta: true, + DeltaLink: &delta, }, }, driveID2: { @@ -1580,8 +1579,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { driveItem("folder", "folder", driveBasePath2, "root", false, true, false), driveItem("file2", "file", driveBasePath2+"/folder", "folder", true, false, false), }, - DeltaLink: &delta2, - ResetDelta: true, + DeltaLink: &delta2, }, }, }, @@ -1639,6 +1637,87 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { expectedFolderPaths: nil, expectedDelList: nil, 
}, + { + name: "OneDrive_OneItemPage_DeltaError", + drives: []models.Driveable{drive1}, + items: map[string][]apiMock.PagerResult[models.DriveItemable]{ + driveID1: { + { + Err: getDeltaError(), + }, + { + Values: []models.DriveItemable{ + driveRootItem("root"), + driveItem("file", "file", driveBasePath1, "root", true, false, false), + }, + DeltaLink: &delta, + }, + }, + }, + canUsePreviousBackup: true, + errCheck: assert.NoError, + expectedCollections: map[string]map[data.CollectionState][]string{ + rootFolderPath1: {data.NotMovedState: {"file"}}, + }, + expectedDeltaURLs: map[string]string{ + driveID1: delta, + }, + expectedFolderPaths: map[string]map[string]string{ + driveID1: { + "root": rootFolderPath1, + }, + }, + expectedDelList: pmMock.NewPrefixMap(map[string]map[string]struct{}{}), + doNotMergeItems: map[string]bool{ + rootFolderPath1: true, + }, + }, + { + name: "OneDrive_TwoItemPage_DeltaError", + drives: []models.Driveable{drive1}, + items: map[string][]apiMock.PagerResult[models.DriveItemable]{ + driveID1: { + { + Err: getDeltaError(), + }, + { + Values: []models.DriveItemable{ + driveRootItem("root"), + driveItem("file", "file", driveBasePath1, "root", true, false, false), + }, + NextLink: &next, + }, + { + Values: []models.DriveItemable{ + driveRootItem("root"), + driveItem("folder", "folder", driveBasePath1, "root", false, true, false), + driveItem("file2", "file", driveBasePath1+"/folder", "folder", true, false, false), + }, + DeltaLink: &delta, + }, + }, + }, + canUsePreviousBackup: true, + errCheck: assert.NoError, + expectedCollections: map[string]map[data.CollectionState][]string{ + rootFolderPath1: {data.NotMovedState: {"file"}}, + expectedPath1("/folder"): {data.NewState: {"folder", "file2"}}, + }, + expectedDeltaURLs: map[string]string{ + driveID1: delta, + }, + expectedFolderPaths: map[string]map[string]string{ + driveID1: { + "root": rootFolderPath1, + "folder": folderPath1, + }, + }, + expectedDelList: pmMock.NewPrefixMap(map[string]map[string]struct{}{}), + doNotMergeItems: map[string]bool{ + rootFolderPath1: true, + folderPath1: true, + }, + }, { name: "OneDrive_TwoItemPage_NoDeltaError", drives: []models.Driveable{drive1}, @@ -1691,14 +1770,16 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { drives: []models.Driveable{drive1}, items: map[string][]apiMock.PagerResult[models.DriveItemable]{ driveID1: { + { + Err: getDeltaError(), + }, { Values: []models.DriveItemable{ driveRootItem("root"), driveItem("folder2", "folder2", driveBasePath1, "root", false, true, false), driveItem("file", "file", driveBasePath1+"/folder2", "folder2", true, false, false), }, - DeltaLink: &delta, - ResetDelta: true, + DeltaLink: &delta, }, }, }, @@ -1736,14 +1817,16 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { drives: []models.Driveable{drive1}, items: map[string][]apiMock.PagerResult[models.DriveItemable]{ driveID1: { + { + Err: getDeltaError(), + }, { Values: []models.DriveItemable{ driveRootItem("root"), driveItem("folder2", "folder", driveBasePath1, "root", false, true, false), driveItem("file", "file", driveBasePath1+"/folder", "folder2", true, false, false), }, - DeltaLink: &delta, - ResetDelta: true, + DeltaLink: &delta, }, }, }, @@ -1800,8 +1883,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { driveItem("file2", "file2", driveBasePath1+"/folder", "folder", true, false, false), malwareItem("malware2", "malware2", driveBasePath1+"/folder", "folder", true, false, false), }, - DeltaLink: &delta, - ResetDelta: true, + DeltaLink: &delta, }, }, }, @@ 
-1831,10 +1913,13 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { expectedSkippedCount: 2, }, { - name: "One Drive Deleted Folder In New Results", + name: "One Drive Delta Error Deleted Folder In New Results", drives: []models.Driveable{drive1}, items: map[string][]apiMock.PagerResult[models.DriveItemable]{ driveID1: { + { + Err: getDeltaError(), + }, { Values: []models.DriveItemable{ driveRootItem("root"), @@ -1851,8 +1936,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { delItem("folder2", driveBasePath1, "root", false, true, false), delItem("file2", driveBasePath1, "root", true, false, false), }, - DeltaLink: &delta2, - ResetDelta: true, + DeltaLink: &delta2, }, }, }, @@ -1887,17 +1971,19 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { }, }, { - name: "One Drive Random Folder Delete", + name: "One Drive Delta Error Random Folder Delete", drives: []models.Driveable{drive1}, items: map[string][]apiMock.PagerResult[models.DriveItemable]{ driveID1: { + { + Err: getDeltaError(), + }, { Values: []models.DriveItemable{ driveRootItem("root"), delItem("folder", driveBasePath1, "root", false, true, false), }, - DeltaLink: &delta, - ResetDelta: true, + DeltaLink: &delta, }, }, }, @@ -1928,17 +2014,19 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { }, }, { - name: "One Drive Random Item Delete", + name: "One Drive Delta Error Random Item Delete", drives: []models.Driveable{drive1}, items: map[string][]apiMock.PagerResult[models.DriveItemable]{ driveID1: { + { + Err: getDeltaError(), + }, { Values: []models.DriveItemable{ driveRootItem("root"), delItem("file", driveBasePath1, "root", true, false, false), }, - DeltaLink: &delta, - ResetDelta: true, + DeltaLink: &delta, }, }, }, @@ -1984,8 +2072,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { delItem("folder", driveBasePath1, "root", false, true, false), delItem("file", driveBasePath1, "root", true, false, false), }, - DeltaLink: &delta2, - ResetDelta: true, + DeltaLink: &delta2, }, }, }, @@ -2028,8 +2115,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { driveRootItem("root"), delItem("file", driveBasePath1, "root", true, false, false), }, - DeltaLink: &delta, - ResetDelta: true, + DeltaLink: &delta, }, }, }, @@ -2067,8 +2153,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { driveRootItem("root"), delItem("folder", driveBasePath1, "root", false, true, false), }, - DeltaLink: &delta, - ResetDelta: true, + DeltaLink: &delta, }, }, }, @@ -2103,8 +2188,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { driveRootItem("root"), delItem("file", driveBasePath1, "root", true, false, false), }, - DeltaLink: &delta, - ResetDelta: true, + DeltaLink: &delta, }, }, }, @@ -2186,7 +2270,6 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { mbh := mock.DefaultOneDriveBH("a-user") mbh.DrivePagerV = mockDrivePager mbh.ItemPagerV = itemPagers - mbh.DriveItemEnumeration = mock.PagerResultToEDID(test.items) c := NewCollections( mbh, @@ -2417,6 +2500,121 @@ func delItem( return item } +func getDeltaError() error { + syncStateNotFound := "SyncStateNotFound" + me := odataerrors.NewMainError() + me.SetCode(&syncStateNotFound) + + deltaError := odataerrors.NewODataError() + deltaError.SetErrorEscaped(me) + + return deltaError +} + +func (suite *OneDriveCollectionsUnitSuite) TestCollectItems() { + next := "next" + delta := "delta" + prevDelta := "prev-delta" + + table := []struct { + name string + items []apiMock.PagerResult[models.DriveItemable] + deltaURL string + prevDeltaSuccess bool + 
prevDelta string + err error + }{ + { + name: "delta on first run", + deltaURL: delta, + items: []apiMock.PagerResult[models.DriveItemable]{ + {DeltaLink: &delta}, + }, + prevDeltaSuccess: true, + prevDelta: prevDelta, + }, + { + name: "empty prev delta", + deltaURL: delta, + items: []apiMock.PagerResult[models.DriveItemable]{ + {DeltaLink: &delta}, + }, + prevDeltaSuccess: false, + prevDelta: "", + }, + { + name: "next then delta", + deltaURL: delta, + items: []apiMock.PagerResult[models.DriveItemable]{ + {NextLink: &next}, + {DeltaLink: &delta}, + }, + prevDeltaSuccess: true, + prevDelta: prevDelta, + }, + { + name: "invalid prev delta", + deltaURL: delta, + items: []apiMock.PagerResult[models.DriveItemable]{ + {Err: getDeltaError()}, + {DeltaLink: &delta}, // works on retry + }, + prevDelta: prevDelta, + prevDeltaSuccess: false, + }, + { + name: "fail a normal delta query", + items: []apiMock.PagerResult[models.DriveItemable]{ + {NextLink: &next}, + {Err: assert.AnError}, + }, + prevDelta: prevDelta, + prevDeltaSuccess: true, + err: assert.AnError, + }, + } + for _, test := range table { + suite.Run(test.name, func() { + t := suite.T() + + ctx, flush := tester.NewContext(t) + defer flush() + + itemPager := &apiMock.DeltaPager[models.DriveItemable]{ + ToReturn: test.items, + } + + collectorFunc := func( + ctx context.Context, + driveID, driveName string, + driveItems []models.DriveItemable, + oldPaths map[string]string, + newPaths map[string]string, + excluded map[string]struct{}, + itemCollection map[string]map[string]string, + doNotMergeItems bool, + errs *fault.Bus, + ) error { + return nil + } + + delta, _, _, err := collectItems( + ctx, + itemPager, + "", + "General", + collectorFunc, + map[string]string{}, + test.prevDelta, + fault.New(true)) + + require.ErrorIs(t, err, test.err, "delta fetch err", clues.ToCore(err)) + require.Equal(t, test.deltaURL, delta.URL, "delta url") + require.Equal(t, !test.prevDeltaSuccess, delta.Reset, "delta reset") + }) + } +} + func (suite *OneDriveCollectionsUnitSuite) TestAddURLCacheToDriveCollections() { driveID := "test-drive" collCount := 3 diff --git a/src/internal/m365/collection/drive/handlers.go b/src/internal/m365/collection/drive/handlers.go index d341cb1ba..7b0064546 100644 --- a/src/internal/m365/collection/drive/handlers.go +++ b/src/internal/m365/collection/drive/handlers.go @@ -36,7 +36,6 @@ type BackupHandler interface { GetItemPermissioner GetItemer NewDrivePagerer - EnumerateDriveItemsDeltaer // PathPrefix constructs the service and category specific path prefix for // the given values. @@ -51,7 +50,7 @@ type BackupHandler interface { // ServiceCat returns the service and category used by this implementation. ServiceCat() (path.ServiceType, path.CategoryType) - + NewItemPager(driveID, link string, fields []string) api.DeltaPager[models.DriveItemable] // FormatDisplayPath creates a human-readable string to represent the // provided path. 
FormatDisplayPath(driveName string, parentPath *path.Builder) string @@ -80,17 +79,6 @@ type GetItemer interface { ) (models.DriveItemable, error) } -type EnumerateDriveItemsDeltaer interface { - EnumerateDriveItemsDelta( - ctx context.Context, - driveID, prevDeltaLink string, - ) ( - []models.DriveItemable, - api.DeltaUpdate, - error, - ) -} - // --------------------------------------------------------------------------- // restore // --------------------------------------------------------------------------- diff --git a/src/internal/m365/collection/drive/item_collector.go b/src/internal/m365/collection/drive/item_collector.go new file mode 100644 index 000000000..b2ff41831 --- /dev/null +++ b/src/internal/m365/collection/drive/item_collector.go @@ -0,0 +1,142 @@ +package drive + +import ( + "context" + + "github.com/microsoftgraph/msgraph-sdk-go/models" + "golang.org/x/exp/maps" + + "github.com/alcionai/corso/src/internal/m365/graph" + "github.com/alcionai/corso/src/pkg/fault" + "github.com/alcionai/corso/src/pkg/logger" + "github.com/alcionai/corso/src/pkg/services/m365/api" +) + +// DeltaUpdate holds the results of a current delta token. It normally +// gets produced when aggregating the addition and removal of items in +// a delta-queryable folder. +// FIXME: This is same as exchange.api.DeltaUpdate +type DeltaUpdate struct { + // the deltaLink itself + URL string + // true if the old delta was marked as invalid + Reset bool +} + +// itemCollector functions collect the items found in a drive +type itemCollector func( + ctx context.Context, + driveID, driveName string, + driveItems []models.DriveItemable, + oldPaths map[string]string, + newPaths map[string]string, + excluded map[string]struct{}, + itemCollections map[string]map[string]string, + validPrevDelta bool, + errs *fault.Bus, +) error + +// collectItems will enumerate all items in the specified drive and hand them to the +// provided `collector` method +func collectItems( + ctx context.Context, + pager api.DeltaPager[models.DriveItemable], + driveID, driveName string, + collector itemCollector, + oldPaths map[string]string, + prevDelta string, + errs *fault.Bus, +) ( + DeltaUpdate, + map[string]string, // newPaths + map[string]struct{}, // excluded + error, +) { + var ( + newDeltaURL = "" + newPaths = map[string]string{} + excluded = map[string]struct{}{} + invalidPrevDelta = len(prevDelta) == 0 + + // itemCollection is used to identify which collection a + // file belongs to. 
This is useful to delete a file from the + // collection it was previously in, in case it was moved to a + // different collection within the same delta query + // drive ID -> item ID -> item ID + itemCollection = map[string]map[string]string{ + driveID: {}, + } + ) + + if !invalidPrevDelta { + maps.Copy(newPaths, oldPaths) + pager.SetNextLink(prevDelta) + } + + for { + // assume delta urls here, which allows single-token consumption + page, err := pager.GetPage(graph.ConsumeNTokens(ctx, graph.SingleGetOrDeltaLC)) + + if graph.IsErrInvalidDelta(err) { + logger.Ctx(ctx).Infow("Invalid previous delta link", "link", prevDelta) + + invalidPrevDelta = true + newPaths = map[string]string{} + + pager.Reset(ctx) + + continue + } + + if err != nil { + return DeltaUpdate{}, nil, nil, graph.Wrap(ctx, err, "getting page") + } + + vals := page.GetValue() + + err = collector( + ctx, + driveID, + driveName, + vals, + oldPaths, + newPaths, + excluded, + itemCollection, + invalidPrevDelta, + errs) + if err != nil { + return DeltaUpdate{}, nil, nil, err + } + + nextLink, deltaLink := api.NextAndDeltaLink(page) + + if len(deltaLink) > 0 { + newDeltaURL = deltaLink + } + + // Check if there are more items + if len(nextLink) == 0 { + break + } + + logger.Ctx(ctx).Debugw("Found nextLink", "link", nextLink) + pager.SetNextLink(nextLink) + } + + return DeltaUpdate{URL: newDeltaURL, Reset: invalidPrevDelta}, newPaths, excluded, nil +} + +// newItem initializes a `models.DriveItemable` that can be used as input to `createItem` +func newItem(name string, folder bool) *models.DriveItem { + itemToCreate := models.NewDriveItem() + itemToCreate.SetName(&name) + + if folder { + itemToCreate.SetFolder(models.NewFolder()) + } else { + itemToCreate.SetFile(models.NewFile()) + } + + return itemToCreate +} diff --git a/src/internal/m365/collection/drive/item_handler.go b/src/internal/m365/collection/drive/item_handler.go index 5f48d313e..4a62f35e3 100644 --- a/src/internal/m365/collection/drive/item_handler.go +++ b/src/internal/m365/collection/drive/item_handler.go @@ -87,6 +87,13 @@ func (h itemBackupHandler) NewDrivePager( return h.ac.NewUserDrivePager(resourceOwner, fields) } +func (h itemBackupHandler) NewItemPager( + driveID, link string, + fields []string, +) api.DeltaPager[models.DriveItemable] { + return h.ac.NewDriveItemDeltaPager(driveID, link, fields) +} + func (h itemBackupHandler) AugmentItemInfo( dii details.ItemInfo, item models.DriveItemable, @@ -132,13 +139,6 @@ func (h itemBackupHandler) IncludesDir(dir string) bool { return h.scope.Matches(selectors.OneDriveFolder, dir) } -func (h itemBackupHandler) EnumerateDriveItemsDelta( - ctx context.Context, - driveID, prevDeltaLink string, -) ([]models.DriveItemable, api.DeltaUpdate, error) { - return h.ac.EnumerateDriveItemsDelta(ctx, driveID, prevDeltaLink) -} - // --------------------------------------------------------------------------- // Restore // --------------------------------------------------------------------------- diff --git a/src/internal/m365/collection/drive/item_test.go b/src/internal/m365/collection/drive/item_test.go index aaf6362db..05dcf9e5a 100644 --- a/src/internal/m365/collection/drive/item_test.go +++ b/src/internal/m365/collection/drive/item_test.go @@ -20,6 +20,8 @@ import ( "github.com/alcionai/corso/src/internal/tester/tconfig" "github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/control/testdata" + "github.com/alcionai/corso/src/pkg/fault" + "github.com/alcionai/corso/src/pkg/selectors" 
"github.com/alcionai/corso/src/pkg/services/m365/api" ) @@ -58,6 +60,83 @@ func (suite *ItemIntegrationSuite) SetupSuite() { suite.userDriveID = ptr.Val(odDrives[0].GetId()) } +// TestItemReader is an integration test that makes a few assumptions +// about the test environment +// 1) It assumes the test user has a drive +// 2) It assumes the drive has a file it can use to test `driveItemReader` +// The test checks these in below +func (suite *ItemIntegrationSuite) TestItemReader_oneDrive() { + t := suite.T() + + ctx, flush := tester.NewContext(t) + defer flush() + + var driveItem models.DriveItemable + // This item collector tries to find "a" drive item that is a non-empty + // file to test the reader function + itemCollector := func( + _ context.Context, + _, _ string, + items []models.DriveItemable, + _ map[string]string, + _ map[string]string, + _ map[string]struct{}, + _ map[string]map[string]string, + _ bool, + _ *fault.Bus, + ) error { + if driveItem != nil { + return nil + } + + for _, item := range items { + if item.GetFile() != nil && ptr.Val(item.GetSize()) > 0 { + driveItem = item + break + } + } + + return nil + } + + ip := suite.service.ac. + Drives(). + NewDriveItemDeltaPager(suite.userDriveID, "", api.DriveItemSelectDefault()) + + _, _, _, err := collectItems( + ctx, + ip, + suite.userDriveID, + "General", + itemCollector, + map[string]string{}, + "", + fault.New(true)) + require.NoError(t, err, clues.ToCore(err)) + + // Test Requirement 2: Need a file + require.NotEmpty( + t, + driveItem, + "no file item found for user %s drive %s", + suite.user, + suite.userDriveID) + + bh := itemBackupHandler{ + suite.service.ac.Drives(), + suite.user, + (&selectors.OneDriveBackup{}).Folders(selectors.Any())[0], + } + + // Read data for the file + itemData, err := downloadItem(ctx, bh, driveItem) + require.NoError(t, err, clues.ToCore(err)) + + size, err := io.Copy(io.Discard, itemData) + require.NoError(t, err, clues.ToCore(err)) + require.NotZero(t, size) +} + // TestItemWriter is an integration test for uploading data to OneDrive // It creates a new folder with a new item and writes data to it func (suite *ItemIntegrationSuite) TestItemWriter() { @@ -92,7 +171,7 @@ func (suite *ItemIntegrationSuite) TestItemWriter() { ctx, test.driveID, ptr.Val(root.GetId()), - api.NewDriveItem(newFolderName, true), + newItem(newFolderName, true), control.Copy) require.NoError(t, err, clues.ToCore(err)) require.NotNil(t, newFolder.GetId()) @@ -104,7 +183,7 @@ func (suite *ItemIntegrationSuite) TestItemWriter() { ctx, test.driveID, ptr.Val(newFolder.GetId()), - api.NewDriveItem(newItemName, false), + newItem(newItemName, false), control.Copy) require.NoError(t, err, clues.ToCore(err)) require.NotNil(t, newItem.GetId()) @@ -238,7 +317,7 @@ func (suite *ItemUnitTestSuite) TestDownloadItem() { { name: "success", itemFunc: func() models.DriveItemable { - di := api.NewDriveItem("test", false) + di := newItem("test", false) di.SetAdditionalData(map[string]any{ "@microsoft.graph.downloadUrl": url, }) @@ -257,7 +336,7 @@ func (suite *ItemUnitTestSuite) TestDownloadItem() { { name: "success, content url set instead of download url", itemFunc: func() models.DriveItemable { - di := api.NewDriveItem("test", false) + di := newItem("test", false) di.SetAdditionalData(map[string]any{ "@content.downloadUrl": url, }) @@ -276,7 +355,7 @@ func (suite *ItemUnitTestSuite) TestDownloadItem() { { name: "api getter returns error", itemFunc: func() models.DriveItemable { - di := api.NewDriveItem("test", false) + di := 
newItem("test", false) di.SetAdditionalData(map[string]any{ "@microsoft.graph.downloadUrl": url, }) @@ -292,7 +371,7 @@ func (suite *ItemUnitTestSuite) TestDownloadItem() { { name: "download url is empty", itemFunc: func() models.DriveItemable { - di := api.NewDriveItem("test", false) + di := newItem("test", false) return di }, GetFunc: func(ctx context.Context, url string) (*http.Response, error) { @@ -307,7 +386,7 @@ func (suite *ItemUnitTestSuite) TestDownloadItem() { { name: "malware", itemFunc: func() models.DriveItemable { - di := api.NewDriveItem("test", false) + di := newItem("test", false) di.SetAdditionalData(map[string]any{ "@microsoft.graph.downloadUrl": url, }) @@ -329,7 +408,7 @@ func (suite *ItemUnitTestSuite) TestDownloadItem() { { name: "non-2xx http response", itemFunc: func() models.DriveItemable { - di := api.NewDriveItem("test", false) + di := newItem("test", false) di.SetAdditionalData(map[string]any{ "@microsoft.graph.downloadUrl": url, }) @@ -378,7 +457,7 @@ func (suite *ItemUnitTestSuite) TestDownloadItem_ConnectionResetErrorOnFirstRead url = "https://example.com" itemFunc = func() models.DriveItemable { - di := api.NewDriveItem("test", false) + di := newItem("test", false) di.SetAdditionalData(map[string]any{ "@microsoft.graph.downloadUrl": url, }) diff --git a/src/internal/m365/collection/drive/library_handler.go b/src/internal/m365/collection/drive/library_handler.go index e5ee109ec..74ec182d9 100644 --- a/src/internal/m365/collection/drive/library_handler.go +++ b/src/internal/m365/collection/drive/library_handler.go @@ -92,6 +92,13 @@ func (h libraryBackupHandler) NewDrivePager( return h.ac.NewSiteDrivePager(resourceOwner, fields) } +func (h libraryBackupHandler) NewItemPager( + driveID, link string, + fields []string, +) api.DeltaPager[models.DriveItemable] { + return h.ac.NewDriveItemDeltaPager(driveID, link, fields) +} + func (h libraryBackupHandler) AugmentItemInfo( dii details.ItemInfo, item models.DriveItemable, @@ -170,13 +177,6 @@ func (h libraryBackupHandler) IncludesDir(dir string) bool { return h.scope.Matches(selectors.SharePointLibraryFolder, dir) } -func (h libraryBackupHandler) EnumerateDriveItemsDelta( - ctx context.Context, - driveID, prevDeltaLink string, -) ([]models.DriveItemable, api.DeltaUpdate, error) { - return h.ac.EnumerateDriveItemsDelta(ctx, driveID, prevDeltaLink) -} - // --------------------------------------------------------------------------- // Restore // --------------------------------------------------------------------------- diff --git a/src/internal/m365/collection/drive/restore.go b/src/internal/m365/collection/drive/restore.go index 4718552d1..7a9017744 100644 --- a/src/internal/m365/collection/drive/restore.go +++ b/src/internal/m365/collection/drive/restore.go @@ -671,7 +671,7 @@ func createFolder( ctx, driveID, parentFolderID, - api.NewDriveItem(folderName, true), + newItem(folderName, true), control.Replace) // ErrItemAlreadyExistsConflict can only occur for folders if the @@ -692,7 +692,7 @@ func createFolder( ctx, driveID, parentFolderID, - api.NewDriveItem(folderName, true), + newItem(folderName, true), control.Copy) if err != nil { return nil, clues.Wrap(err, "creating folder") @@ -733,7 +733,7 @@ func restoreFile( } var ( - item = api.NewDriveItem(name, false) + item = newItem(name, false) collisionKey = api.DriveItemCollisionKey(item) collision api.DriveItemIDType shouldDeleteOriginal bool diff --git a/src/internal/m365/collection/drive/url_cache.go b/src/internal/m365/collection/drive/url_cache.go index 
ef78d48f5..1a8cc7899 100644 --- a/src/internal/m365/collection/drive/url_cache.go +++ b/src/internal/m365/collection/drive/url_cache.go @@ -12,6 +12,7 @@ import ( "github.com/alcionai/corso/src/internal/common/str" "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/logger" + "github.com/alcionai/corso/src/pkg/services/m365/api" ) const ( @@ -46,7 +47,7 @@ type urlCache struct { refreshMu sync.Mutex deltaQueryCount int - edid EnumerateDriveItemsDeltaer + itemPager api.DeltaPager[models.DriveItemable] errs *fault.Bus } @@ -55,10 +56,13 @@ type urlCache struct { func newURLCache( driveID, prevDelta string, refreshInterval time.Duration, - edid EnumerateDriveItemsDeltaer, + itemPager api.DeltaPager[models.DriveItemable], errs *fault.Bus, ) (*urlCache, error) { - err := validateCacheParams(driveID, refreshInterval, edid) + err := validateCacheParams( + driveID, + refreshInterval, + itemPager) if err != nil { return nil, clues.Wrap(err, "cache params") } @@ -67,9 +71,9 @@ func newURLCache( idToProps: make(map[string]itemProps), lastRefreshTime: time.Time{}, driveID: driveID, - edid: edid, prevDelta: prevDelta, refreshInterval: refreshInterval, + itemPager: itemPager, errs: errs, }, nil @@ -79,7 +83,7 @@ func newURLCache( func validateCacheParams( driveID string, refreshInterval time.Duration, - edid EnumerateDriveItemsDeltaer, + itemPager api.DeltaPager[models.DriveItemable], ) error { if len(driveID) == 0 { return clues.New("drive id is empty") @@ -89,8 +93,8 @@ func validateCacheParams( return clues.New("invalid refresh interval") } - if edid == nil { - return clues.New("nil item enumerator") + if itemPager == nil { + return clues.New("nil item pager") } return nil @@ -156,23 +160,44 @@ func (uc *urlCache) refreshCache( // Issue a delta query to graph logger.Ctx(ctx).Info("refreshing url cache") - items, du, err := uc.edid.EnumerateDriveItemsDelta(ctx, uc.driveID, uc.prevDelta) + err := uc.deltaQuery(ctx) if err != nil { + // clear cache uc.idToProps = make(map[string]itemProps) - return clues.Stack(err) - } - uc.deltaQueryCount++ - - if err := uc.updateCache(ctx, items, uc.errs); err != nil { - return clues.Stack(err) + return err } logger.Ctx(ctx).Info("url cache refreshed") // Update last refresh time uc.lastRefreshTime = time.Now() - uc.prevDelta = du.URL + + return nil +} + +// deltaQuery performs a delta query on the drive and update the cache +func (uc *urlCache) deltaQuery( + ctx context.Context, +) error { + logger.Ctx(ctx).Debug("starting delta query") + // Reset item pager to remove any previous state + uc.itemPager.Reset(ctx) + + _, _, _, err := collectItems( + ctx, + uc.itemPager, + uc.driveID, + "", + uc.updateCache, + map[string]string{}, + uc.prevDelta, + uc.errs) + if err != nil { + return clues.Wrap(err, "delta query") + } + + uc.deltaQueryCount++ return nil } @@ -199,7 +224,13 @@ func (uc *urlCache) readCache( // It assumes that cacheMu is held by caller in write mode func (uc *urlCache) updateCache( ctx context.Context, + _, _ string, items []models.DriveItemable, + _ map[string]string, + _ map[string]string, + _ map[string]struct{}, + _ map[string]map[string]string, + _ bool, errs *fault.Bus, ) error { el := errs.Local() diff --git a/src/internal/m365/collection/drive/url_cache_test.go b/src/internal/m365/collection/drive/url_cache_test.go index c8e23864f..5b35ddff2 100644 --- a/src/internal/m365/collection/drive/url_cache_test.go +++ b/src/internal/m365/collection/drive/url_cache_test.go @@ -1,6 +1,7 @@ package drive import ( + "context" "errors" 
"io" "math/rand" @@ -17,19 +18,15 @@ import ( "github.com/alcionai/corso/src/internal/common/dttm" "github.com/alcionai/corso/src/internal/common/ptr" "github.com/alcionai/corso/src/internal/m365/graph" - "github.com/alcionai/corso/src/internal/m365/service/onedrive/mock" "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/internal/tester/tconfig" "github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/control/testdata" "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/services/m365/api" + apiMock "github.com/alcionai/corso/src/pkg/services/m365/api/mock" ) -// --------------------------------------------------------------------------- -// integration -// --------------------------------------------------------------------------- - type URLCacheIntegrationSuite struct { tester.Suite ac api.Client @@ -71,10 +68,11 @@ func (suite *URLCacheIntegrationSuite) SetupSuite() { // url cache func (suite *URLCacheIntegrationSuite) TestURLCacheBasic() { var ( - t = suite.T() - ac = suite.ac.Drives() - driveID = suite.driveID - newFolderName = testdata.DefaultRestoreConfig("folder").Location + t = suite.T() + ac = suite.ac.Drives() + driveID = suite.driveID + newFolderName = testdata.DefaultRestoreConfig("folder").Location + driveItemPager = suite.ac.Drives().NewDriveItemDeltaPager(driveID, "", api.DriveItemSelectDefault()) ) ctx, flush := tester.NewContext(t) @@ -84,11 +82,11 @@ func (suite *URLCacheIntegrationSuite) TestURLCacheBasic() { root, err := ac.GetRootFolder(ctx, driveID) require.NoError(t, err, clues.ToCore(err)) - newFolder, err := ac.PostItemInContainer( + newFolder, err := ac.Drives().PostItemInContainer( ctx, driveID, ptr.Val(root.GetId()), - api.NewDriveItem(newFolderName, true), + newItem(newFolderName, true), control.Copy) require.NoError(t, err, clues.ToCore(err)) @@ -96,10 +94,33 @@ func (suite *URLCacheIntegrationSuite) TestURLCacheBasic() { nfid := ptr.Val(newFolder.GetId()) + collectorFunc := func( + context.Context, + string, + string, + []models.DriveItemable, + map[string]string, + map[string]string, + map[string]struct{}, + map[string]map[string]string, + bool, + *fault.Bus, + ) error { + return nil + } + // Get the previous delta to feed into url cache - _, du, err := ac.EnumerateDriveItemsDelta(ctx, suite.driveID, "") + prevDelta, _, _, err := collectItems( + ctx, + suite.ac.Drives().NewDriveItemDeltaPager(driveID, "", api.DriveItemSelectURLCache()), + suite.driveID, + "drive-name", + collectorFunc, + map[string]string{}, + "", + fault.New(true)) require.NoError(t, err, clues.ToCore(err)) - require.NotEmpty(t, du.URL) + require.NotNil(t, prevDelta.URL) // Create a bunch of files in the new folder var items []models.DriveItemable @@ -107,11 +128,11 @@ func (suite *URLCacheIntegrationSuite) TestURLCacheBasic() { for i := 0; i < 5; i++ { newItemName := "test_url_cache_basic_" + dttm.FormatNow(dttm.SafeForTesting) - item, err := ac.PostItemInContainer( + item, err := ac.Drives().PostItemInContainer( ctx, driveID, nfid, - api.NewDriveItem(newItemName, false), + newItem(newItemName, false), control.Copy) require.NoError(t, err, clues.ToCore(err)) @@ -121,9 +142,9 @@ func (suite *URLCacheIntegrationSuite) TestURLCacheBasic() { // Create a new URL cache with a long TTL uc, err := newURLCache( suite.driveID, - du.URL, + prevDelta.URL, 1*time.Hour, - suite.ac.Drives(), + driveItemPager, fault.New(true)) require.NoError(t, err, clues.ToCore(err)) @@ -174,10 +195,6 @@ func (suite *URLCacheIntegrationSuite) 
TestURLCacheBasic() { require.Equal(t, 1, uc.deltaQueryCount) } -// --------------------------------------------------------------------------- -// unit -// --------------------------------------------------------------------------- - type URLCacheUnitSuite struct { tester.Suite } @@ -188,20 +205,27 @@ func TestURLCacheUnitSuite(t *testing.T) { func (suite *URLCacheUnitSuite) TestGetItemProperties() { deltaString := "delta" + next := "next" driveID := "drive1" table := []struct { name string - pagerItems map[string][]models.DriveItemable - pagerErr map[string]error + pagerResult map[string][]apiMock.PagerResult[models.DriveItemable] expectedItemProps map[string]itemProps expectedErr require.ErrorAssertionFunc cacheAssert func(*urlCache, time.Time) }{ { name: "single item in cache", - pagerItems: map[string][]models.DriveItemable{ - driveID: {fileItem("1", "file1", "root", "root", "https://dummy1.com", false)}, + pagerResult: map[string][]apiMock.PagerResult[models.DriveItemable]{ + driveID: { + { + Values: []models.DriveItemable{ + fileItem("1", "file1", "root", "root", "https://dummy1.com", false), + }, + DeltaLink: &deltaString, + }, + }, }, expectedItemProps: map[string]itemProps{ "1": { @@ -218,13 +242,18 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() { }, { name: "multiple items in cache", - pagerItems: map[string][]models.DriveItemable{ + pagerResult: map[string][]apiMock.PagerResult[models.DriveItemable]{ driveID: { - fileItem("1", "file1", "root", "root", "https://dummy1.com", false), - fileItem("2", "file2", "root", "root", "https://dummy2.com", false), - fileItem("3", "file3", "root", "root", "https://dummy3.com", false), - fileItem("4", "file4", "root", "root", "https://dummy4.com", false), - fileItem("5", "file5", "root", "root", "https://dummy5.com", false), + { + Values: []models.DriveItemable{ + fileItem("1", "file1", "root", "root", "https://dummy1.com", false), + fileItem("2", "file2", "root", "root", "https://dummy2.com", false), + fileItem("3", "file3", "root", "root", "https://dummy3.com", false), + fileItem("4", "file4", "root", "root", "https://dummy4.com", false), + fileItem("5", "file5", "root", "root", "https://dummy5.com", false), + }, + DeltaLink: &deltaString, + }, }, }, expectedItemProps: map[string]itemProps{ @@ -258,13 +287,18 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() { }, { name: "duplicate items with potentially new urls", - pagerItems: map[string][]models.DriveItemable{ + pagerResult: map[string][]apiMock.PagerResult[models.DriveItemable]{ driveID: { - fileItem("1", "file1", "root", "root", "https://dummy1.com", false), - fileItem("2", "file2", "root", "root", "https://dummy2.com", false), - fileItem("3", "file3", "root", "root", "https://dummy3.com", false), - fileItem("1", "file1", "root", "root", "https://test1.com", false), - fileItem("2", "file2", "root", "root", "https://test2.com", false), + { + Values: []models.DriveItemable{ + fileItem("1", "file1", "root", "root", "https://dummy1.com", false), + fileItem("2", "file2", "root", "root", "https://dummy2.com", false), + fileItem("3", "file3", "root", "root", "https://dummy3.com", false), + fileItem("1", "file1", "root", "root", "https://test1.com", false), + fileItem("2", "file2", "root", "root", "https://test2.com", false), + }, + DeltaLink: &deltaString, + }, }, }, expectedItemProps: map[string]itemProps{ @@ -290,11 +324,16 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() { }, { name: "deleted items", - pagerItems: map[string][]models.DriveItemable{ + 
pagerResult: map[string][]apiMock.PagerResult[models.DriveItemable]{ driveID: { - fileItem("1", "file1", "root", "root", "https://dummy1.com", false), - fileItem("2", "file2", "root", "root", "https://dummy2.com", false), - fileItem("1", "file1", "root", "root", "https://dummy1.com", true), + { + Values: []models.DriveItemable{ + fileItem("1", "file1", "root", "root", "https://dummy1.com", false), + fileItem("2", "file2", "root", "root", "https://dummy2.com", false), + fileItem("1", "file1", "root", "root", "https://dummy1.com", true), + }, + DeltaLink: &deltaString, + }, }, }, expectedItemProps: map[string]itemProps{ @@ -316,8 +355,15 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() { }, { name: "item not found in cache", - pagerItems: map[string][]models.DriveItemable{ - driveID: {fileItem("1", "file1", "root", "root", "https://dummy1.com", false)}, + pagerResult: map[string][]apiMock.PagerResult[models.DriveItemable]{ + driveID: { + { + Values: []models.DriveItemable{ + fileItem("1", "file1", "root", "root", "https://dummy1.com", false), + }, + DeltaLink: &deltaString, + }, + }, }, expectedItemProps: map[string]itemProps{ "2": {}, @@ -330,10 +376,23 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() { }, }, { - name: "delta query error", - pagerItems: map[string][]models.DriveItemable{}, - pagerErr: map[string]error{ - driveID: errors.New("delta query error"), + name: "multi-page delta query error", + pagerResult: map[string][]apiMock.PagerResult[models.DriveItemable]{ + driveID: { + { + Values: []models.DriveItemable{ + fileItem("1", "file1", "root", "root", "https://dummy1.com", false), + }, + NextLink: &next, + }, + { + Values: []models.DriveItemable{ + fileItem("2", "file2", "root", "root", "https://dummy2.com", false), + }, + DeltaLink: &deltaString, + Err: errors.New("delta query error"), + }, + }, }, expectedItemProps: map[string]itemProps{ "1": {}, @@ -349,10 +408,15 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() { { name: "folder item", - pagerItems: map[string][]models.DriveItemable{ + pagerResult: map[string][]apiMock.PagerResult[models.DriveItemable]{ driveID: { - fileItem("1", "file1", "root", "root", "https://dummy1.com", false), - driveItem("2", "folder2", "root", "root", false, true, false), + { + Values: []models.DriveItemable{ + fileItem("1", "file1", "root", "root", "https://dummy1.com", false), + driveItem("2", "folder2", "root", "root", false, true, false), + }, + DeltaLink: &deltaString, + }, }, }, expectedItemProps: map[string]itemProps{ @@ -373,17 +437,15 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() { ctx, flush := tester.NewContext(t) defer flush() - medi := mock.EnumeratesDriveItemsDelta{ - Items: test.pagerItems, - Err: test.pagerErr, - DeltaUpdate: map[string]api.DeltaUpdate{driveID: {URL: deltaString}}, + itemPager := &apiMock.DeltaPager[models.DriveItemable]{ + ToReturn: test.pagerResult[driveID], } cache, err := newURLCache( driveID, "", 1*time.Hour, - &medi, + itemPager, fault.New(true)) require.NoError(suite.T(), err, clues.ToCore(err)) @@ -418,17 +480,15 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() { // Test needsRefresh func (suite *URLCacheUnitSuite) TestNeedsRefresh() { - var ( - t = suite.T() - driveID = "drive1" - refreshInterval = 1 * time.Second - ) + driveID := "drive1" + t := suite.T() + refreshInterval := 1 * time.Second cache, err := newURLCache( driveID, "", refreshInterval, - &mock.EnumeratesDriveItemsDelta{}, + &apiMock.DeltaPager[models.DriveItemable]{}, fault.New(true)) 
require.NoError(t, err, clues.ToCore(err)) @@ -450,12 +510,14 @@ func (suite *URLCacheUnitSuite) TestNeedsRefresh() { require.False(t, cache.needsRefresh()) } +// Test newURLCache func (suite *URLCacheUnitSuite) TestNewURLCache() { + // table driven tests table := []struct { name string driveID string refreshInt time.Duration - itemPager EnumerateDriveItemsDeltaer + itemPager api.DeltaPager[models.DriveItemable] errors *fault.Bus expectedErr require.ErrorAssertionFunc }{ @@ -463,7 +525,7 @@ func (suite *URLCacheUnitSuite) TestNewURLCache() { name: "invalid driveID", driveID: "", refreshInt: 1 * time.Hour, - itemPager: &mock.EnumeratesDriveItemsDelta{}, + itemPager: &apiMock.DeltaPager[models.DriveItemable]{}, errors: fault.New(true), expectedErr: require.Error, }, @@ -471,12 +533,12 @@ func (suite *URLCacheUnitSuite) TestNewURLCache() { name: "invalid refresh interval", driveID: "drive1", refreshInt: 100 * time.Millisecond, - itemPager: &mock.EnumeratesDriveItemsDelta{}, + itemPager: &apiMock.DeltaPager[models.DriveItemable]{}, errors: fault.New(true), expectedErr: require.Error, }, { - name: "invalid item enumerator", + name: "invalid itemPager", driveID: "drive1", refreshInt: 1 * time.Hour, itemPager: nil, @@ -487,7 +549,7 @@ func (suite *URLCacheUnitSuite) TestNewURLCache() { name: "valid", driveID: "drive1", refreshInt: 1 * time.Hour, - itemPager: &mock.EnumeratesDriveItemsDelta{}, + itemPager: &apiMock.DeltaPager[models.DriveItemable]{}, errors: fault.New(true), expectedErr: require.NoError, }, diff --git a/src/internal/m365/collection/groups/backup_test.go b/src/internal/m365/collection/groups/backup_test.go index a372922ba..899b6ceea 100644 --- a/src/internal/m365/collection/groups/backup_test.go +++ b/src/internal/m365/collection/groups/backup_test.go @@ -2,6 +2,7 @@ package groups import ( "context" + "fmt" "testing" "time" @@ -526,6 +527,8 @@ func (suite *BackupIntgSuite) TestCreateCollections() { require.NotEmpty(t, c.FullPath().Folder(false)) + fmt.Printf("\n-----\nfolder %+v\n-----\n", c.FullPath().Folder(false)) + // TODO(ashmrtn): Remove when LocationPath is made part of BackupCollection // interface. 
if !assert.Implements(t, (*data.LocationPather)(nil), c) { @@ -534,6 +537,8 @@ func (suite *BackupIntgSuite) TestCreateCollections() { loc := c.(data.LocationPather).LocationPath().String() + fmt.Printf("\n-----\nloc %+v\n-----\n", c.(data.LocationPather).LocationPath().String()) + require.NotEmpty(t, loc) delete(test.channelNames, loc) diff --git a/src/internal/m365/service/onedrive/mock/handlers.go b/src/internal/m365/service/onedrive/mock/handlers.go index f7d9ce293..f0e0286d5 100644 --- a/src/internal/m365/service/onedrive/mock/handlers.go +++ b/src/internal/m365/service/onedrive/mock/handlers.go @@ -8,13 +8,11 @@ import ( "github.com/microsoftgraph/msgraph-sdk-go/drives" "github.com/microsoftgraph/msgraph-sdk-go/models" - "github.com/alcionai/corso/src/internal/common/ptr" odConsts "github.com/alcionai/corso/src/internal/m365/service/onedrive/consts" "github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/services/m365/api" - apiMock "github.com/alcionai/corso/src/pkg/services/m365/api/mock" ) // --------------------------------------------------------------------------- @@ -24,8 +22,6 @@ import ( type BackupHandler struct { ItemInfo details.ItemInfo - DriveItemEnumeration EnumeratesDriveItemsDelta - GI GetsItem GIP GetsItemPermission @@ -59,7 +55,6 @@ func DefaultOneDriveBH(resourceOwner string) *BackupHandler { OneDrive: &details.OneDriveInfo{}, Extension: &details.ExtensionData{}, }, - DriveItemEnumeration: EnumeratesDriveItemsDelta{}, GI: GetsItem{Err: clues.New("not defined")}, GIP: GetsItemPermission{Err: clues.New("not defined")}, PathPrefixFn: defaultOneDrivePathPrefixer, @@ -129,6 +124,10 @@ func (h BackupHandler) NewDrivePager(string, []string) api.Pager[models.Driveabl return h.DrivePagerV } +func (h BackupHandler) NewItemPager(driveID string, _ string, _ []string) api.DeltaPager[models.DriveItemable] { + return h.ItemPagerV[driveID] +} + func (h BackupHandler) FormatDisplayPath(_ string, pb *path.Builder) string { return "/" + pb.String() } @@ -153,13 +152,6 @@ func (h *BackupHandler) Get(context.Context, string, map[string]string) (*http.R return h.GetResps[c], h.GetErrs[c] } -func (h BackupHandler) EnumerateDriveItemsDelta( - ctx context.Context, - driveID, prevDeltaLink string, -) ([]models.DriveItemable, api.DeltaUpdate, error) { - return h.DriveItemEnumeration.EnumerateDriveItemsDelta(ctx, driveID, prevDeltaLink) -} - func (h BackupHandler) GetItem(ctx context.Context, _, _ string) (models.DriveItemable, error) { return h.GI.GetItem(ctx, "", "") } @@ -262,65 +254,6 @@ func (m GetsItem) GetItem( return m.Item, m.Err } -// --------------------------------------------------------------------------- -// Enumerates Drive Items -// --------------------------------------------------------------------------- - -type EnumeratesDriveItemsDelta struct { - Items map[string][]models.DriveItemable - DeltaUpdate map[string]api.DeltaUpdate - Err map[string]error -} - -func (edi EnumeratesDriveItemsDelta) EnumerateDriveItemsDelta( - _ context.Context, - driveID, _ string, -) ( - []models.DriveItemable, - api.DeltaUpdate, - error, -) { - return edi.Items[driveID], edi.DeltaUpdate[driveID], edi.Err[driveID] -} - -func PagerResultToEDID( - m map[string][]apiMock.PagerResult[models.DriveItemable], -) EnumeratesDriveItemsDelta { - edi := EnumeratesDriveItemsDelta{ - Items: map[string][]models.DriveItemable{}, - DeltaUpdate: map[string]api.DeltaUpdate{}, - Err: 
map[string]error{}, - } - - for driveID, results := range m { - var ( - err error - items = []models.DriveItemable{} - deltaUpdate api.DeltaUpdate - ) - - for _, pr := range results { - items = append(items, pr.Values...) - - if pr.DeltaLink != nil { - deltaUpdate = api.DeltaUpdate{URL: ptr.Val(pr.DeltaLink)} - } - - if pr.Err != nil { - err = pr.Err - } - - deltaUpdate.Reset = deltaUpdate.Reset || pr.ResetDelta - } - - edi.Items[driveID] = items - edi.Err[driveID] = err - edi.DeltaUpdate[driveID] = deltaUpdate - } - - return edi -} - // --------------------------------------------------------------------------- // Get Item Permissioner // --------------------------------------------------------------------------- diff --git a/src/internal/m365/service/sharepoint/backup_test.go b/src/internal/m365/service/sharepoint/backup_test.go index 12acf2dcd..bcd37dd6b 100644 --- a/src/internal/m365/service/sharepoint/backup_test.go +++ b/src/internal/m365/service/sharepoint/backup_test.go @@ -90,9 +90,12 @@ func (suite *LibrariesBackupUnitSuite) TestUpdateCollections() { var ( paths = map[string]string{} - currPaths = map[string]string{} + newPaths = map[string]string{} excluded = map[string]struct{}{} - collMap = map[string]map[string]*drive.Collection{ + itemColls = map[string]map[string]string{ + driveID: {}, + } + collMap = map[string]map[string]*drive.Collection{ driveID: {}, } ) @@ -106,14 +109,15 @@ func (suite *LibrariesBackupUnitSuite) TestUpdateCollections() { c.CollectionMap = collMap - _, err := c.UpdateCollections( + err := c.UpdateCollections( ctx, driveID, "General", test.items, paths, - currPaths, + newPaths, excluded, + itemColls, true, fault.New(true)) diff --git a/src/pkg/fault/fault.go b/src/pkg/fault/fault.go index 1ce6162ce..488656fa4 100644 --- a/src/pkg/fault/fault.go +++ b/src/pkg/fault/fault.go @@ -384,20 +384,20 @@ func (pec printableErrCore) Values() []string { // funcs, and the function that spawned the local bus should always // return `local.Failure()` to ensure that hard failures are propagated // back upstream. -func (e *Bus) Local() *LocalBus { - return &LocalBus{ +func (e *Bus) Local() *localBus { + return &localBus{ mu: &sync.Mutex{}, bus: e, } } -type LocalBus struct { +type localBus struct { mu *sync.Mutex bus *Bus current error } -func (e *LocalBus) AddRecoverable(ctx context.Context, err error) { +func (e *localBus) AddRecoverable(ctx context.Context, err error) { if err == nil { return } @@ -422,7 +422,7 @@ func (e *LocalBus) AddRecoverable(ctx context.Context, err error) { // 2. Skipping avoids a permanent and consistent failure. If // the underlying reason is transient or otherwise recoverable, // the item should not be skipped. -func (e *LocalBus) AddSkip(ctx context.Context, s *Skipped) { +func (e *localBus) AddSkip(ctx context.Context, s *Skipped) { if s == nil { return } @@ -437,7 +437,7 @@ func (e *LocalBus) AddSkip(ctx context.Context, s *Skipped) { // It does not return the underlying bus.Failure(), only the failure // that was recorded within the local bus instance. This error should // get returned by any func which created a local bus. 
-func (e *LocalBus) Failure() error { +func (e *localBus) Failure() error { return e.current } diff --git a/src/pkg/selectors/exchange.go b/src/pkg/selectors/exchange.go index 987165199..68f45263c 100644 --- a/src/pkg/selectors/exchange.go +++ b/src/pkg/selectors/exchange.go @@ -697,7 +697,7 @@ func (s ExchangeScope) IncludesCategory(cat exchangeCategory) bool { // returns true if the category is included in the scope's data type, // and the value is set to Any(). func (s ExchangeScope) IsAny(cat exchangeCategory) bool { - return IsAnyTarget(s, cat) + return isAnyTarget(s, cat) } // Get returns the data category in the scope. If the scope diff --git a/src/pkg/selectors/groups.go b/src/pkg/selectors/groups.go index e6399fbf1..584887bfb 100644 --- a/src/pkg/selectors/groups.go +++ b/src/pkg/selectors/groups.go @@ -699,7 +699,7 @@ func (s GroupsScope) IncludesCategory(cat groupsCategory) bool { // returns true if the category is included in the scope's data type, // and the value is set to Any(). func (s GroupsScope) IsAny(cat groupsCategory) bool { - return IsAnyTarget(s, cat) + return isAnyTarget(s, cat) } // Get returns the data category in the scope. If the scope diff --git a/src/pkg/selectors/onedrive.go b/src/pkg/selectors/onedrive.go index f97ceccaf..5d1538a89 100644 --- a/src/pkg/selectors/onedrive.go +++ b/src/pkg/selectors/onedrive.go @@ -484,7 +484,7 @@ func (s OneDriveScope) Matches(cat oneDriveCategory, target string) bool { // returns true if the category is included in the scope's data type, // and the value is set to Any(). func (s OneDriveScope) IsAny(cat oneDriveCategory) bool { - return IsAnyTarget(s, cat) + return isAnyTarget(s, cat) } // Get returns the data category in the scope. If the scope diff --git a/src/pkg/selectors/scopes.go b/src/pkg/selectors/scopes.go index 6e2eb86e9..aec624486 100644 --- a/src/pkg/selectors/scopes.go +++ b/src/pkg/selectors/scopes.go @@ -694,7 +694,7 @@ func matchesPathValues[T scopeT, C categoryT]( return false } - if IsAnyTarget(sc, cc) { + if isAnyTarget(sc, cc) { // continue, not return: all path keys must match the entry to succeed continue } @@ -795,7 +795,7 @@ func isNoneTarget[T scopeT, C categoryT](s T, cat C) bool { // returns true if the category is included in the scope's category type, // and the value is set to Any(). 
-func IsAnyTarget[T scopeT, C categoryT](s T, cat C) bool { +func isAnyTarget[T scopeT, C categoryT](s T, cat C) bool { if !typeAndCategoryMatches(cat, s.categorizer()) { return false } diff --git a/src/pkg/selectors/scopes_test.go b/src/pkg/selectors/scopes_test.go index 0a44df160..6bf1e3ad9 100644 --- a/src/pkg/selectors/scopes_test.go +++ b/src/pkg/selectors/scopes_test.go @@ -125,14 +125,14 @@ func (suite *SelectorScopesSuite) TestGetCatValue() { func (suite *SelectorScopesSuite) TestIsAnyTarget() { t := suite.T() stub := stubScope("") - assert.True(t, IsAnyTarget(stub, rootCatStub)) - assert.True(t, IsAnyTarget(stub, leafCatStub)) - assert.False(t, IsAnyTarget(stub, mockCategorizer("smarf"))) + assert.True(t, isAnyTarget(stub, rootCatStub)) + assert.True(t, isAnyTarget(stub, leafCatStub)) + assert.False(t, isAnyTarget(stub, mockCategorizer("smarf"))) stub = stubScope("none") - assert.False(t, IsAnyTarget(stub, rootCatStub)) - assert.False(t, IsAnyTarget(stub, leafCatStub)) - assert.False(t, IsAnyTarget(stub, mockCategorizer("smarf"))) + assert.False(t, isAnyTarget(stub, rootCatStub)) + assert.False(t, isAnyTarget(stub, leafCatStub)) + assert.False(t, isAnyTarget(stub, mockCategorizer("smarf"))) } var reduceTestTable = []struct { diff --git a/src/pkg/selectors/sharepoint.go b/src/pkg/selectors/sharepoint.go index 68f6655e5..f35aa10b5 100644 --- a/src/pkg/selectors/sharepoint.go +++ b/src/pkg/selectors/sharepoint.go @@ -625,7 +625,7 @@ func (s SharePointScope) IncludesCategory(cat sharePointCategory) bool { // returns true if the category is included in the scope's data type, // and the value is set to Any(). func (s SharePointScope) IsAny(cat sharePointCategory) bool { - return IsAnyTarget(s, cat) + return isAnyTarget(s, cat) } // Get returns the data category in the scope. If the scope diff --git a/src/pkg/services/m365/api/config.go b/src/pkg/services/m365/api/config.go index 8a5be9d23..0a0bb913d 100644 --- a/src/pkg/services/m365/api/config.go +++ b/src/pkg/services/m365/api/config.go @@ -101,7 +101,7 @@ func idAnd(ss ...string) []string { // exported // --------------------------------------------------------------------------- -func DefaultDriveItemProps() []string { +func DriveItemSelectDefault() []string { return idAnd( "content.downloadUrl", "createdBy", diff --git a/src/pkg/services/m365/api/delta.go b/src/pkg/services/m365/api/delta.go new file mode 100644 index 000000000..dc24961f0 --- /dev/null +++ b/src/pkg/services/m365/api/delta.go @@ -0,0 +1,11 @@ +package api + +// DeltaUpdate holds the results of a current delta token. It normally +// gets produced when aggregating the addition and removal of items in +// a delta-queryable folder. +type DeltaUpdate struct { + // the deltaLink itself + URL string + // true if the old delta was marked as invalid + Reset bool +} diff --git a/src/pkg/services/m365/api/drive.go b/src/pkg/services/m365/api/drive.go index 374fa545c..4c3b9b312 100644 --- a/src/pkg/services/m365/api/drive.go +++ b/src/pkg/services/m365/api/drive.go @@ -351,10 +351,6 @@ func (c Drives) PostItemLinkShareUpdate( return itm, nil } -// --------------------------------------------------------------------------- -// helper funcs -// --------------------------------------------------------------------------- - // DriveItemCollisionKeyy constructs a key from the item name. // collision keys are used to identify duplicate item conflicts for handling advanced restoration config. 
func DriveItemCollisionKey(item models.DriveItemable) string { @@ -364,17 +360,3 @@ func DriveItemCollisionKey(item models.DriveItemable) string { return ptr.Val(item.GetName()) } - -// NewDriveItem initializes a `models.DriveItemable` with either a folder or file entry. -func NewDriveItem(name string, folder bool) *models.DriveItem { - itemToCreate := models.NewDriveItem() - itemToCreate.SetName(&name) - - if folder { - itemToCreate.SetFolder(models.NewFolder()) - } else { - itemToCreate.SetFile(models.NewFile()) - } - - return itemToCreate -} diff --git a/src/pkg/services/m365/api/drive_pager.go b/src/pkg/services/m365/api/drive_pager.go index e5523d35f..c592fa656 100644 --- a/src/pkg/services/m365/api/drive_pager.go +++ b/src/pkg/services/m365/api/drive_pager.go @@ -15,11 +15,6 @@ import ( "github.com/alcionai/corso/src/pkg/logger" ) -type DriveItemIDType struct { - ItemID string - IsFolder bool -} - // --------------------------------------------------------------------------- // non-delta item pager // --------------------------------------------------------------------------- @@ -70,6 +65,11 @@ func (p *driveItemPageCtrl) ValidModTimes() bool { return true } +type DriveItemIDType struct { + ItemID string + IsFolder bool +} + func (c Drives) GetItemsInContainerByCollisionKey( ctx context.Context, driveID, containerID string, @@ -131,9 +131,9 @@ type DriveItemDeltaPageCtrl struct { options *drives.ItemItemsItemDeltaRequestBuilderGetRequestConfiguration } -func (c Drives) newDriveItemDeltaPager( - driveID, prevDeltaLink string, - selectProps ...string, +func (c Drives) NewDriveItemDeltaPager( + driveID, link string, + selectFields []string, ) *DriveItemDeltaPageCtrl { preferHeaderItems := []string{ "deltashowremovedasdeleted", @@ -142,32 +142,28 @@ func (c Drives) newDriveItemDeltaPager( "hierarchicalsharing", } - options := &drives.ItemItemsItemDeltaRequestBuilderGetRequestConfiguration{ - Headers: newPreferHeaders(preferHeaderItems...), - QueryParameters: &drives.ItemItemsItemDeltaRequestBuilderGetQueryParameters{}, - } - - if len(selectProps) > 0 { - options.QueryParameters.Select = selectProps - } - - builder := c.Stable. - Client(). - Drives(). - ByDriveId(driveID). - Items(). - ByDriveItemId(onedrive.RootID). - Delta() - - if len(prevDeltaLink) > 0 { - builder = drives.NewItemItemsItemDeltaRequestBuilder(prevDeltaLink, c.Stable.Adapter()) + requestConfig := &drives.ItemItemsItemDeltaRequestBuilderGetRequestConfiguration{ + Headers: newPreferHeaders(preferHeaderItems...), + QueryParameters: &drives.ItemItemsItemDeltaRequestBuilderGetQueryParameters{ + Select: selectFields, + }, } res := &DriveItemDeltaPageCtrl{ gs: c.Stable, driveID: driveID, - options: options, - builder: builder, + options: requestConfig, + builder: c.Stable. + Client(). + Drives(). + ByDriveId(driveID). + Items(). + ByDriveItemId(onedrive.RootID). + Delta(), + } + + if len(link) > 0 { + res.builder = drives.NewItemItemsItemDeltaRequestBuilder(link, c.Stable.Adapter()) } return res @@ -197,27 +193,6 @@ func (p *DriveItemDeltaPageCtrl) ValidModTimes() bool { return true } -// EnumerateDriveItems will enumerate all items in the specified drive and hand them to the -// provided `collector` method -func (c Drives) EnumerateDriveItemsDelta( - ctx context.Context, - driveID string, - prevDeltaLink string, -) ( - []models.DriveItemable, - DeltaUpdate, - error, -) { - pager := c.newDriveItemDeltaPager(driveID, prevDeltaLink, DefaultDriveItemProps()...) 
- - items, du, err := deltaEnumerateItems[models.DriveItemable](ctx, pager, prevDeltaLink) - if err != nil { - return nil, du, clues.Stack(err) - } - - return items, du, nil -} - // --------------------------------------------------------------------------- // user's drives pager // --------------------------------------------------------------------------- diff --git a/src/pkg/services/m365/api/drive_pager_test.go b/src/pkg/services/m365/api/drive_pager_test.go index b75c3d320..f28277eee 100644 --- a/src/pkg/services/m365/api/drive_pager_test.go +++ b/src/pkg/services/m365/api/drive_pager_test.go @@ -178,18 +178,3 @@ func (suite *DrivePagerIntgSuite) TestDrives_GetItemIDsInContainer() { }) } } - -func (suite *DrivePagerIntgSuite) TestEnumerateDriveItems() { - t := suite.T() - - ctx, flush := tester.NewContext(t) - defer flush() - - items, du, err := suite.its. - ac. - Drives(). - EnumerateDriveItemsDelta(ctx, suite.its.user.driveID, "") - require.NoError(t, err, clues.ToCore(err)) - require.NotEmpty(t, items, "no items found in user's drive") - assert.NotEmpty(t, du.URL, "should have a delta link") -} diff --git a/src/pkg/services/m365/api/drive_test.go b/src/pkg/services/m365/api/drive_test.go index 1f9ccadca..28173c27a 100644 --- a/src/pkg/services/m365/api/drive_test.go +++ b/src/pkg/services/m365/api/drive_test.go @@ -17,7 +17,6 @@ import ( "github.com/alcionai/corso/src/internal/tester/tconfig" "github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/control/testdata" - "github.com/alcionai/corso/src/pkg/services/m365/api" ) type DriveAPIIntgSuite struct { @@ -51,6 +50,20 @@ func (suite *DriveAPIIntgSuite) TestDrives_CreatePagerAndGetPage() { assert.NotNil(t, a) } +// newItem initializes a `models.DriveItemable` that can be used as input to `createItem` +func newItem(name string, folder bool) *models.DriveItem { + itemToCreate := models.NewDriveItem() + itemToCreate.SetName(&name) + + if folder { + itemToCreate.SetFolder(models.NewFolder()) + } else { + itemToCreate.SetFile(models.NewFile()) + } + + return itemToCreate +} + func (suite *DriveAPIIntgSuite) TestDrives_PostItemInContainer() { t := suite.T() @@ -65,12 +78,12 @@ func (suite *DriveAPIIntgSuite) TestDrives_PostItemInContainer() { ctx, suite.its.user.driveID, suite.its.user.driveRootFolderID, - api.NewDriveItem(rc.Location, true), + newItem(rc.Location, true), control.Replace) require.NoError(t, err, clues.ToCore(err)) // generate a folder to use for collision testing - folder := api.NewDriveItem("collision", true) + folder := newItem("collision", true) origFolder, err := acd.PostItemInContainer( ctx, suite.its.user.driveID, @@ -80,7 +93,7 @@ func (suite *DriveAPIIntgSuite) TestDrives_PostItemInContainer() { require.NoError(t, err, clues.ToCore(err)) // generate an item to use for collision testing - file := api.NewDriveItem("collision.txt", false) + file := newItem("collision.txt", false) origFile, err := acd.PostItemInContainer( ctx, suite.its.user.driveID, @@ -228,7 +241,7 @@ func (suite *DriveAPIIntgSuite) TestDrives_PostItemInContainer_replaceFolderRegr ctx, suite.its.user.driveID, suite.its.user.driveRootFolderID, - api.NewDriveItem(rc.Location, true), + newItem(rc.Location, true), // skip instead of replace here to get // an ErrItemAlreadyExistsConflict, just in case. 
control.Skip) @@ -236,7 +249,7 @@ func (suite *DriveAPIIntgSuite) TestDrives_PostItemInContainer_replaceFolderRegr // generate items within that folder for i := 0; i < 5; i++ { - file := api.NewDriveItem(fmt.Sprintf("collision_%d.txt", i), false) + file := newItem(fmt.Sprintf("collision_%d.txt", i), false) f, err := acd.PostItemInContainer( ctx, suite.its.user.driveID, @@ -252,7 +265,7 @@ func (suite *DriveAPIIntgSuite) TestDrives_PostItemInContainer_replaceFolderRegr ctx, suite.its.user.driveID, ptr.Val(folder.GetParentReference().GetId()), - api.NewDriveItem(rc.Location, true), + newItem(rc.Location, true), control.Replace) require.NoError(t, err, clues.ToCore(err)) require.NotEmpty(t, ptr.Val(resultFolder.GetId())) diff --git a/src/pkg/services/m365/api/item_pager.go b/src/pkg/services/m365/api/item_pager.go index f991f2345..5effcb7a6 100644 --- a/src/pkg/services/m365/api/item_pager.go +++ b/src/pkg/services/m365/api/item_pager.go @@ -13,20 +13,6 @@ import ( "github.com/alcionai/corso/src/pkg/logger" ) -// --------------------------------------------------------------------------- -// common structs -// --------------------------------------------------------------------------- - -// DeltaUpdate holds the results of a current delta token. It normally -// gets produced when aggregating the addition and removal of items in -// a delta-queryable folder. -type DeltaUpdate struct { - // the deltaLink itself - URL string - // true if the old delta was marked as invalid - Reset bool -} - // --------------------------------------------------------------------------- // common interfaces // --------------------------------------------------------------------------- diff --git a/src/pkg/services/m365/api/mock/pager.go b/src/pkg/services/m365/api/mock/pager.go index bccf5b428..b1818ac17 100644 --- a/src/pkg/services/m365/api/mock/pager.go +++ b/src/pkg/services/m365/api/mock/pager.go @@ -32,11 +32,10 @@ func (dnl *DeltaNextLinkValues[T]) GetOdataDeltaLink() *string { } type PagerResult[T any] struct { - Values []T - NextLink *string - DeltaLink *string - ResetDelta bool - Err error + Values []T + NextLink *string + DeltaLink *string + Err error } // --------------------------------------------------------------------------- From c4f6ad791e64bfc85629a06c52c45e2eb3588166 Mon Sep 17 00:00:00 2001 From: Keepers Date: Mon, 2 Oct 2023 14:44:33 -0600 Subject: [PATCH 21/26] provide expected repo cfg in e2e tests (#4420) nightly tests were missing expected repo config due to, breaking api expectations, causing unexpected failures due to test env setup. --- #### Does this PR need a docs update or release note? 
- [x] :no_entry: No #### Type of change - [x] :bug: Bugfix - [x] :robot: Supportability/Tests #### Test Plan - [x] :green_heart: E2E --- src/cli/backup/exchange_e2e_test.go | 8 ++++---- src/cli/backup/groups_e2e_test.go | 8 ++++---- src/cli/backup/helpers_test.go | 6 +++++- src/cli/backup/onedrive_e2e_test.go | 5 +++-- src/cli/backup/sharepoint_e2e_test.go | 5 +++-- src/cli/repo/filesystem_e2e_test.go | 2 +- src/cli/repo/s3_e2e_test.go | 2 +- 7 files changed, 21 insertions(+), 15 deletions(-) diff --git a/src/cli/backup/exchange_e2e_test.go b/src/cli/backup/exchange_e2e_test.go index 2175a50e6..0807addc6 100644 --- a/src/cli/backup/exchange_e2e_test.go +++ b/src/cli/backup/exchange_e2e_test.go @@ -55,7 +55,7 @@ func (suite *NoBackupExchangeE2ESuite) SetupSuite() { defer flush() suite.its = newIntegrationTesterSetup(t) - suite.dpnd = prepM365Test(t, ctx) + suite.dpnd = prepM365Test(t, ctx, path.ExchangeService) } func (suite *NoBackupExchangeE2ESuite) TestExchangeBackupListCmd_noBackups() { @@ -109,7 +109,7 @@ func (suite *BackupExchangeE2ESuite) SetupSuite() { defer flush() suite.its = newIntegrationTesterSetup(t) - suite.dpnd = prepM365Test(t, ctx) + suite.dpnd = prepM365Test(t, ctx, path.ExchangeService) } func (suite *BackupExchangeE2ESuite) TestExchangeBackupCmd_email() { @@ -336,7 +336,7 @@ func (suite *PreparedBackupExchangeE2ESuite) SetupSuite() { defer flush() suite.its = newIntegrationTesterSetup(t) - suite.dpnd = prepM365Test(t, ctx) + suite.dpnd = prepM365Test(t, ctx, path.ExchangeService) suite.backupOps = make(map[path.CategoryType]string) var ( @@ -579,7 +579,7 @@ func (suite *BackupDeleteExchangeE2ESuite) SetupSuite() { ctx, flush := tester.NewContext(t) defer flush() - suite.dpnd = prepM365Test(t, ctx) + suite.dpnd = prepM365Test(t, ctx, path.ExchangeService) m365UserID := tconfig.M365UserID(t) users := []string{m365UserID} diff --git a/src/cli/backup/groups_e2e_test.go b/src/cli/backup/groups_e2e_test.go index 986979a4f..87ef93d2b 100644 --- a/src/cli/backup/groups_e2e_test.go +++ b/src/cli/backup/groups_e2e_test.go @@ -56,7 +56,7 @@ func (suite *NoBackupGroupsE2ESuite) SetupSuite() { defer flush() suite.its = newIntegrationTesterSetup(t) - suite.dpnd = prepM365Test(t, ctx) + suite.dpnd = prepM365Test(t, ctx, path.GroupsService) } func (suite *NoBackupGroupsE2ESuite) TestGroupsBackupListCmd_noBackups() { @@ -110,7 +110,7 @@ func (suite *BackupGroupsE2ESuite) SetupSuite() { defer flush() suite.its = newIntegrationTesterSetup(t) - suite.dpnd = prepM365Test(t, ctx) + suite.dpnd = prepM365Test(t, ctx, path.GroupsService) } func (suite *BackupGroupsE2ESuite) TestGroupsBackupCmd_channelMessages() { @@ -287,7 +287,7 @@ func (suite *PreparedBackupGroupsE2ESuite) SetupSuite() { defer flush() suite.its = newIntegrationTesterSetup(t) - suite.dpnd = prepM365Test(t, ctx) + suite.dpnd = prepM365Test(t, ctx, path.GroupsService) suite.backupOps = make(map[path.CategoryType]string) var ( @@ -515,7 +515,7 @@ func (suite *BackupDeleteGroupsE2ESuite) SetupSuite() { ctx, flush := tester.NewContext(t) defer flush() - suite.dpnd = prepM365Test(t, ctx) + suite.dpnd = prepM365Test(t, ctx, path.GroupsService) m365GroupID := tconfig.M365GroupID(t) groups := []string{m365GroupID} diff --git a/src/cli/backup/helpers_test.go b/src/cli/backup/helpers_test.go index 14486f703..e3023f834 100644 --- a/src/cli/backup/helpers_test.go +++ b/src/cli/backup/helpers_test.go @@ -21,6 +21,7 @@ import ( "github.com/alcionai/corso/src/internal/tester/tconfig" "github.com/alcionai/corso/src/pkg/account" 
"github.com/alcionai/corso/src/pkg/control" + "github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/repository" "github.com/alcionai/corso/src/pkg/services/m365/api" "github.com/alcionai/corso/src/pkg/services/m365/api/mock" @@ -132,6 +133,7 @@ type dependencies struct { func prepM365Test( t *testing.T, ctx context.Context, //revive:disable-line:context-as-argument + pst path.ServiceType, ) dependencies { var ( acct = tconfig.NewM365Account(t) @@ -159,7 +161,9 @@ func prepM365Test( repository.NewRepoID) require.NoError(t, err, clues.ToCore(err)) - err = repo.Initialize(ctx, repository.InitConfig{}) + err = repo.Initialize(ctx, repository.InitConfig{ + Service: pst, + }) require.NoError(t, err, clues.ToCore(err)) return dependencies{ diff --git a/src/cli/backup/onedrive_e2e_test.go b/src/cli/backup/onedrive_e2e_test.go index f4b2c0bdc..a2bac18b6 100644 --- a/src/cli/backup/onedrive_e2e_test.go +++ b/src/cli/backup/onedrive_e2e_test.go @@ -20,6 +20,7 @@ import ( "github.com/alcionai/corso/src/internal/operations" "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/internal/tester/tconfig" + "github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/selectors" selTD "github.com/alcionai/corso/src/pkg/selectors/testdata" storeTD "github.com/alcionai/corso/src/pkg/storage/testdata" @@ -48,7 +49,7 @@ func (suite *NoBackupOneDriveE2ESuite) SetupSuite() { ctx, flush := tester.NewContext(t) defer flush() - suite.dpnd = prepM365Test(t, ctx) + suite.dpnd = prepM365Test(t, ctx, path.OneDriveService) } func (suite *NoBackupOneDriveE2ESuite) TestOneDriveBackupListCmd_empty() { @@ -139,7 +140,7 @@ func (suite *BackupDeleteOneDriveE2ESuite) SetupSuite() { ctx, flush := tester.NewContext(t) defer flush() - suite.dpnd = prepM365Test(t, ctx) + suite.dpnd = prepM365Test(t, ctx, path.OneDriveService) var ( m365UserID = tconfig.M365UserID(t) diff --git a/src/cli/backup/sharepoint_e2e_test.go b/src/cli/backup/sharepoint_e2e_test.go index bfb67f85a..7d7728020 100644 --- a/src/cli/backup/sharepoint_e2e_test.go +++ b/src/cli/backup/sharepoint_e2e_test.go @@ -20,6 +20,7 @@ import ( "github.com/alcionai/corso/src/internal/operations" "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/internal/tester/tconfig" + "github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/selectors" "github.com/alcionai/corso/src/pkg/selectors/testdata" storeTD "github.com/alcionai/corso/src/pkg/storage/testdata" @@ -46,7 +47,7 @@ func (suite *NoBackupSharePointE2ESuite) SetupSuite() { ctx, flush := tester.NewContext(t) defer flush() - suite.dpnd = prepM365Test(t, ctx) + suite.dpnd = prepM365Test(t, ctx, path.SharePointService) } func (suite *NoBackupSharePointE2ESuite) TestSharePointBackupListCmd_empty() { @@ -103,7 +104,7 @@ func (suite *BackupDeleteSharePointE2ESuite) SetupSuite() { ctx, flush := tester.NewContext(t) defer flush() - suite.dpnd = prepM365Test(t, ctx) + suite.dpnd = prepM365Test(t, ctx, path.SharePointService) var ( m365SiteID = tconfig.M365SiteID(t) diff --git a/src/cli/repo/filesystem_e2e_test.go b/src/cli/repo/filesystem_e2e_test.go index 6a76e3fa8..faeb5a5b1 100644 --- a/src/cli/repo/filesystem_e2e_test.go +++ b/src/cli/repo/filesystem_e2e_test.go @@ -131,7 +131,7 @@ func (suite *FilesystemE2ESuite) TestConnectFilesystemCmd() { // init the repo first r, err := repository.New( ctx, - account.Account{}, + tconfig.NewM365Account(t), st, control.DefaultOptions(), repository.NewRepoID) diff --git 
a/src/cli/repo/s3_e2e_test.go b/src/cli/repo/s3_e2e_test.go index e1d65c4f3..2c19b48c8 100644 --- a/src/cli/repo/s3_e2e_test.go +++ b/src/cli/repo/s3_e2e_test.go @@ -207,7 +207,7 @@ func (suite *S3E2ESuite) TestConnectS3Cmd() { // init the repo first r, err := repository.New( ctx, - account.Account{}, + tconfig.NewM365Account(t), st, control.DefaultOptions(), repository.NewRepoID) From 19111fe1364f826cf610d47a5f6d9f8278f6e7f8 Mon Sep 17 00:00:00 2001 From: Keepers Date: Mon, 2 Oct 2023 17:52:45 -0600 Subject: [PATCH 22/26] export path string to service type (#4428) Also removes the unused teams service consts --- #### Does this PR need a docs update or release note? - [x] :no_entry: No #### Type of change - [x] :sunflower: Feature #### Test Plan - [x] :zap: Unit test - [x] :green_heart: E2E --- src/pkg/path/category_type.go | 6 +----- src/pkg/path/service_category_test.go | 2 +- src/pkg/path/service_type.go | 24 +++++++++--------------- src/pkg/path/servicetype_string.go | 6 ++---- 4 files changed, 13 insertions(+), 25 deletions(-) diff --git a/src/pkg/path/category_type.go b/src/pkg/path/category_type.go index b8c20020f..c403e3c19 100644 --- a/src/pkg/path/category_type.go +++ b/src/pkg/path/category_type.go @@ -96,14 +96,10 @@ var serviceCategories = map[ServiceType]map[CategoryType]struct{}{ ChannelMessagesCategory: {}, LibrariesCategory: {}, }, - TeamsService: { - ChannelMessagesCategory: {}, - LibrariesCategory: {}, - }, } func validateServiceAndCategoryStrings(s, c string) (ServiceType, CategoryType, error) { - service := toServiceType(s) + service := ToServiceType(s) if service == UnknownService { return UnknownService, UnknownCategory, clues.Stack(ErrorUnknownService).With("service", fmt.Sprintf("%q", s)) } diff --git a/src/pkg/path/service_category_test.go b/src/pkg/path/service_category_test.go index d2b19b244..2d98ed49c 100644 --- a/src/pkg/path/service_category_test.go +++ b/src/pkg/path/service_category_test.go @@ -157,7 +157,7 @@ func (suite *ServiceCategoryUnitSuite) TestToServiceType() { suite.Run(test.name, func() { t := suite.T() - assert.Equal(t, test.expected, toServiceType(test.service)) + assert.Equal(t, test.expected, ToServiceType(test.service)) }) } } diff --git a/src/pkg/path/service_type.go b/src/pkg/path/service_type.go index 14847ce35..9059615a2 100644 --- a/src/pkg/path/service_type.go +++ b/src/pkg/path/service_type.go @@ -23,19 +23,17 @@ type ServiceType int //go:generate stringer -type=ServiceType -linecomment const ( UnknownService ServiceType = 0 - ExchangeService ServiceType = 1 // exchange - OneDriveService ServiceType = 2 // onedrive - SharePointService ServiceType = 3 // sharepoint - ExchangeMetadataService ServiceType = 4 // exchangeMetadata - OneDriveMetadataService ServiceType = 5 // onedriveMetadata - SharePointMetadataService ServiceType = 6 // sharepointMetadata - GroupsService ServiceType = 7 // groups - GroupsMetadataService ServiceType = 8 // groupsMetadata - TeamsService ServiceType = 9 // teams - TeamsMetadataService ServiceType = 10 // teamsMetadata + ExchangeService ServiceType = 1 // exchange + OneDriveService ServiceType = 2 // onedrive + SharePointService ServiceType = 3 // sharepoint + ExchangeMetadataService ServiceType = 4 // exchangeMetadata + OneDriveMetadataService ServiceType = 5 // onedriveMetadata + SharePointMetadataService ServiceType = 6 // sharepointMetadata + GroupsService ServiceType = 7 // groups + GroupsMetadataService ServiceType = 8 // groupsMetadata ) -func toServiceType(service string) ServiceType { +func 
ToServiceType(service string) ServiceType { s := strings.ToLower(service) switch s { @@ -47,8 +45,6 @@ func toServiceType(service string) ServiceType { return SharePointService case strings.ToLower(GroupsService.String()): return GroupsService - case strings.ToLower(TeamsService.String()): - return TeamsService case strings.ToLower(ExchangeMetadataService.String()): return ExchangeMetadataService case strings.ToLower(OneDriveMetadataService.String()): @@ -57,8 +53,6 @@ func toServiceType(service string) ServiceType { return SharePointMetadataService case strings.ToLower(GroupsMetadataService.String()): return GroupsMetadataService - case strings.ToLower(TeamsMetadataService.String()): - return TeamsMetadataService default: return UnknownService } diff --git a/src/pkg/path/servicetype_string.go b/src/pkg/path/servicetype_string.go index 4b9ab16ec..6fa499364 100644 --- a/src/pkg/path/servicetype_string.go +++ b/src/pkg/path/servicetype_string.go @@ -17,13 +17,11 @@ func _() { _ = x[SharePointMetadataService-6] _ = x[GroupsService-7] _ = x[GroupsMetadataService-8] - _ = x[TeamsService-9] - _ = x[TeamsMetadataService-10] } -const _ServiceType_name = "UnknownServiceexchangeonedrivesharepointexchangeMetadataonedriveMetadatasharepointMetadatagroupsgroupsMetadatateamsteamsMetadata" +const _ServiceType_name = "UnknownServiceexchangeonedrivesharepointexchangeMetadataonedriveMetadatasharepointMetadatagroupsgroupsMetadata" -var _ServiceType_index = [...]uint8{0, 14, 22, 30, 40, 56, 72, 90, 96, 110, 115, 128} +var _ServiceType_index = [...]uint8{0, 14, 22, 30, 40, 56, 72, 90, 96, 110} func (i ServiceType) String() string { if i < 0 || i >= ServiceType(len(_ServiceType_index)-1) { From 13c5b9fe5adcaf6dd95cab0455900cbbe8b86138 Mon Sep 17 00:00:00 2001 From: ashmrtn <3891298+ashmrtn@users.noreply.github.com> Date: Tue, 3 Oct 2023 08:34:11 -0700 Subject: [PATCH 23/26] Use separate config dirs for kopia in file system mode (#4423) Fixes possible issues of opening the incorrect repo if tests are run in parallel. Integration test for this in [model_store_test.go](https://github.com/alcionai/corso/blob/3d78183651289e2051b8690850069c9b41df6bd0/src/internal/kopia/model_store_test.go#L897) --- #### Does this PR need a docs update or release note? - [ ] :white_check_mark: Yes, it's included - [ ] :clock1: Yes, but in a later PR - [x] :no_entry: No #### Type of change - [ ] :sunflower: Feature - [x] :bug: Bugfix - [ ] :world_map: Documentation - [ ] :robot: Supportability/Tests - [ ] :computer: CI/Deployment - [ ] :broom: Tech Debt/Cleanup #### Issue(s) * #4422 #### Test Plan - [ ] :muscle: Manual - [x] :zap: Unit test - [ ] :green_heart: E2E --- src/pkg/storage/testdata/storage.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/pkg/storage/testdata/storage.go b/src/pkg/storage/testdata/storage.go index 227a959bb..0653ee0fd 100644 --- a/src/pkg/storage/testdata/storage.go +++ b/src/pkg/storage/testdata/storage.go @@ -68,6 +68,9 @@ func NewFilesystemStorage(t tester.TestT) storage.Storage { }, storage.CommonConfig{ Corso: GetAndInsertCorso(""), + // Use separate kopia configs for each instance. Place in a new folder to + // avoid mixing data. 
+ KopiaCfgDir: t.TempDir(), }) require.NoError(t, err, "creating storage", clues.ToCore(err)) From fc28ca32832efb9691ae5291058ab5fb13301599 Mon Sep 17 00:00:00 2001 From: ashmrtn <3891298+ashmrtn@users.noreply.github.com> Date: Tue, 3 Oct 2023 09:39:03 -0700 Subject: [PATCH 24/26] Use local fs for non-retention tests (#4424) Switch kopia package tests that don't require retention to use the local file system for speed. Tests that do check retention settings require S3. Brings kopia package test runtime down from ~430s to ~130s on local machine --- #### Does this PR need a docs update or release note? - [ ] :white_check_mark: Yes, it's included - [ ] :clock1: Yes, but in a later PR - [x] :no_entry: No #### Type of change - [ ] :sunflower: Feature - [ ] :bug: Bugfix - [ ] :world_map: Documentation - [x] :robot: Supportability/Tests - [ ] :computer: CI/Deployment - [ ] :broom: Tech Debt/Cleanup #### Issue(s) * closes #4422 #### Test Plan - [x] :muscle: Manual - [ ] :zap: Unit test - [ ] :green_heart: E2E --- src/internal/kopia/conn_test.go | 22 ++++++++++++++++++---- src/internal/kopia/model_store_test.go | 4 ++-- src/internal/kopia/wrapper_test.go | 12 ++++++------ 3 files changed, 26 insertions(+), 12 deletions(-) diff --git a/src/internal/kopia/conn_test.go b/src/internal/kopia/conn_test.go index bbd824c3d..e5c2dbdec 100644 --- a/src/internal/kopia/conn_test.go +++ b/src/internal/kopia/conn_test.go @@ -22,6 +22,20 @@ import ( storeTD "github.com/alcionai/corso/src/pkg/storage/testdata" ) +func openLocalKopiaRepo( + t tester.TestT, + ctx context.Context, //revive:disable-line:context-as-argument +) (*conn, error) { + st := storeTD.NewFilesystemStorage(t) + + k := NewConn(st) + if err := k.Initialize(ctx, repository.Options{}, repository.Retention{}); err != nil { + return nil, err + } + + return k, nil +} + func openKopiaRepo( t tester.TestT, ctx context.Context, //revive:disable-line:context-as-argument @@ -81,7 +95,7 @@ func (suite *WrapperIntegrationSuite) TestRepoExistsError() { ctx, flush := tester.NewContext(t) defer flush() - st := storeTD.NewPrefixedS3Storage(t) + st := storeTD.NewFilesystemStorage(t) k := NewConn(st) err := k.Initialize(ctx, repository.Options{}, repository.Retention{}) @@ -101,7 +115,7 @@ func (suite *WrapperIntegrationSuite) TestBadProviderErrors() { ctx, flush := tester.NewContext(t) defer flush() - st := storeTD.NewPrefixedS3Storage(t) + st := storeTD.NewFilesystemStorage(t) st.Provider = storage.ProviderUnknown k := NewConn(st) @@ -115,7 +129,7 @@ func (suite *WrapperIntegrationSuite) TestConnectWithoutInitErrors() { ctx, flush := tester.NewContext(t) defer flush() - st := storeTD.NewPrefixedS3Storage(t) + st := storeTD.NewFilesystemStorage(t) k := NewConn(st) err := k.Connect(ctx, repository.Options{}) @@ -408,7 +422,7 @@ func (suite *WrapperIntegrationSuite) TestSetUserAndHost() { Host: "bar", } - st := storeTD.NewPrefixedS3Storage(t) + st := storeTD.NewFilesystemStorage(t) k := NewConn(st) err := k.Initialize(ctx, opts, repository.Retention{}) diff --git a/src/internal/kopia/model_store_test.go b/src/internal/kopia/model_store_test.go index 6226a14ad..db25eee57 100644 --- a/src/internal/kopia/model_store_test.go +++ b/src/internal/kopia/model_store_test.go @@ -29,7 +29,7 @@ type fooModel struct { //revive:disable-next-line:context-as-argument func getModelStore(t *testing.T, ctx context.Context) *ModelStore { - c, err := openKopiaRepo(t, ctx) + c, err := openLocalKopiaRepo(t, ctx) require.NoError(t, err, clues.ToCore(err)) return &ModelStore{c: c, 
modelVersion: globalModelVersion} @@ -856,7 +856,7 @@ func openConnAndModelStore( t *testing.T, ctx context.Context, //revive:disable-line:context-as-argument ) (*conn, *ModelStore) { - st := storeTD.NewPrefixedS3Storage(t) + st := storeTD.NewFilesystemStorage(t) c := NewConn(st) err := c.Initialize(ctx, repository.Options{}, repository.Retention{}) diff --git a/src/internal/kopia/wrapper_test.go b/src/internal/kopia/wrapper_test.go index 77721fc7b..7b4508465 100644 --- a/src/internal/kopia/wrapper_test.go +++ b/src/internal/kopia/wrapper_test.go @@ -184,7 +184,7 @@ func (suite *BasicKopiaIntegrationSuite) TestMaintenance_FirstRun_NoChanges() { ctx, flush := tester.NewContext(t) defer flush() - k, err := openKopiaRepo(t, ctx) + k, err := openLocalKopiaRepo(t, ctx) require.NoError(t, err, clues.ToCore(err)) w := &Wrapper{k} @@ -204,7 +204,7 @@ func (suite *BasicKopiaIntegrationSuite) TestMaintenance_WrongUser_NoForce_Fails ctx, flush := tester.NewContext(t) defer flush() - k, err := openKopiaRepo(t, ctx) + k, err := openLocalKopiaRepo(t, ctx) require.NoError(t, err, clues.ToCore(err)) w := &Wrapper{k} @@ -241,7 +241,7 @@ func (suite *BasicKopiaIntegrationSuite) TestMaintenance_WrongUser_Force_Succeed ctx, flush := tester.NewContext(t) defer flush() - k, err := openKopiaRepo(t, ctx) + k, err := openLocalKopiaRepo(t, ctx) require.NoError(t, err, clues.ToCore(err)) w := &Wrapper{k} @@ -754,7 +754,7 @@ func (suite *KopiaIntegrationSuite) SetupTest() { t := suite.T() suite.ctx, suite.flush = tester.NewContext(t) - c, err := openKopiaRepo(t, suite.ctx) + c, err := openLocalKopiaRepo(t, suite.ctx) require.NoError(t, err, clues.ToCore(err)) suite.w = &Wrapper{c} @@ -1245,7 +1245,7 @@ func (suite *KopiaIntegrationSuite) TestRestoreAfterCompressionChange() { ctx, flush := tester.NewContext(t) defer flush() - k, err := openKopiaRepo(t, ctx) + k, err := openLocalKopiaRepo(t, ctx) require.NoError(t, err, clues.ToCore(err)) err = k.Compression(ctx, "s2-default") @@ -1559,7 +1559,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) SetupTest() { //nolint:forbidigo suite.ctx, _ = logger.CtxOrSeed(context.Background(), ls) - c, err := openKopiaRepo(t, suite.ctx) + c, err := openLocalKopiaRepo(t, suite.ctx) require.NoError(t, err, clues.ToCore(err)) suite.w = &Wrapper{c} From 83dec480e67cf2b099cd4d2eb762eaeeef5392a4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Oct 2023 20:23:16 +0000 Subject: [PATCH 25/26] =?UTF-8?q?=E2=AC=86=EF=B8=8F=20Bump=20github.com/aw?= =?UTF-8?q?s/aws-xray-sdk-go=20from=201.8.1=20to=201.8.2=20in=20/src=20(#4?= =?UTF-8?q?429)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [github.com/aws/aws-xray-sdk-go](https://github.com/aws/aws-xray-sdk-go) from 1.8.1 to 1.8.2.
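For anyone applying the same bump by hand instead of through Dependabot, a rough local equivalent (assuming the module is managed from the src/ directory, which is where this patch touches go.mod and go.sum) would be:

  cd src && go get github.com/aws/aws-xray-sdk-go@v1.8.2 && go mod tidy

This is only a sketch; the authoritative change remains the go.mod/go.sum diff at the end of this patch.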
Release notes (sourced from github.com/aws/aws-xray-sdk-go's releases):

v1.8.2

Please refer change-log for more details

Changelog (sourced from github.com/aws/aws-xray-sdk-go's changelog):

Release v1.8.2 (2023-09-28)

SDK Enhancements
  • Change how SDK sets the context for AWS SDK calls #PR 418

SDK Bugs
  • Suppress Panic in Emitter #PR 419

Commits
  • 2f767e4 Merge pull request #420 from wangzlei/master
  • 0eac27d Update changelog for 1.8.2
  • 065bcb0 Merge pull request #419 from wangzlei/master
  • 16febea suppress Panic in Emitter
  • 4cdaf99 Merge pull request #418 from jj22ee/set-context-alternative
  • 3d484ac set context via SetContext() instead of HTTPRequest.WithContext()
  • c59927e Merge pull request #415 from aws/dependabot/go_modules/integration-tests/dist...
  • 33c582d Bump google.golang.org/grpc in /integration-tests/distributioncheck
  • cfcff07 Disable IMDSv1 from Elastic Beanstalk
  • d94e4d3 Update IntegrationTesting.yml
  • See full diff in compare view
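After applying the patch, one way to sanity-check that the new version is selected is to query the module list from the src/ directory; this is a suggested verification step, not part of the original PR:

  cd src && go list -m github.com/aws/aws-xray-sdk-go

which should report v1.8.2.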

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/aws/aws-xray-sdk-go&package-manager=go_modules&previous-version=1.8.1&new-version=1.8.2)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) You can trigger a rebase of this PR by commenting `@dependabot rebase`. ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- src/go.mod | 2 +- src/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/go.mod b/src/go.mod index 96d163731..146e144c6 100644 --- a/src/go.mod +++ b/src/go.mod @@ -8,7 +8,7 @@ require ( github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.1 github.com/alcionai/clues v0.0.0-20230920212840-728ac1a1d8b8 github.com/armon/go-metrics v0.4.1 - github.com/aws/aws-xray-sdk-go v1.8.1 + github.com/aws/aws-xray-sdk-go v1.8.2 github.com/cenkalti/backoff/v4 v4.2.1 github.com/google/uuid v1.3.1 github.com/h2non/gock v1.2.0 diff --git a/src/go.sum b/src/go.sum index 054c65071..d381c9e69 100644 --- a/src/go.sum +++ b/src/go.sum @@ -71,8 +71,8 @@ github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJ github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= github.com/aws/aws-sdk-go v1.45.0 h1:qoVOQHuLacxJMO71T49KeE70zm+Tk3vtrl7XO4VUPZc= github.com/aws/aws-sdk-go v1.45.0/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= -github.com/aws/aws-xray-sdk-go v1.8.1 h1:O4pXV+hnCskaamGsZnFpzHyAmgPGusBMN6i7nnsy0Fo= -github.com/aws/aws-xray-sdk-go v1.8.1/go.mod h1:wMmVYzej3sykAttNBkXQHK/+clAPWTOrPiajEk7Cp3A= +github.com/aws/aws-xray-sdk-go v1.8.2 h1:PVxNWnQG+rAYjxsmhEN97DTO57Dipg6VS0wsu6bXUB0= +github.com/aws/aws-xray-sdk-go v1.8.2/go.mod h1:wMmVYzej3sykAttNBkXQHK/+clAPWTOrPiajEk7Cp3A= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= From fd3a4eb6ff44e7092d149c7aa0501ec2bccd0423 Mon Sep 17 00:00:00 2001 From: neha_gupta Date: Wed, 4 Oct 2023 12:37:57 +0530 Subject: [PATCH 26/26] update Kopia password (#4406) --- This PR is raised as part of splitting https://github.com/alcionai/corso/pull/4397 into smaller parts It cover connecting with Kopia and updating repos password #### Does this PR need a docs update or release note? 
- [ ] :no_entry: No #### Type of change - [ ] :sunflower: Feature #### Issue(s) * https://github.com/alcionai/corso/issues/4061 #### Test Plan - [ ] :muscle: Manual - [ ] :zap: Unit test --- src/internal/kopia/conn.go | 20 ++++++++++++++ src/pkg/repository/repository.go | 36 ++++++++++++++++++++++++- src/pkg/repository/repository_test.go | 38 +++++++++++++++++++++++++++ 3 files changed, 93 insertions(+), 1 deletion(-) diff --git a/src/internal/kopia/conn.go b/src/internal/kopia/conn.go index ee8a9132e..7a4948787 100644 --- a/src/internal/kopia/conn.go +++ b/src/internal/kopia/conn.go @@ -578,3 +578,23 @@ func (w *conn) LoadSnapshot( func (w *conn) SnapshotRoot(man *snapshot.Manifest) (fs.Entry, error) { return snapshotfs.SnapshotRoot(w.Repository, man) } + +func (w *conn) UpdatePassword(ctx context.Context, password string, opts repository.Options) error { + if len(password) <= 0 { + return clues.New("empty password provided") + } + + kopiaRef := NewConn(w.storage) + if err := kopiaRef.Connect(ctx, opts); err != nil { + return clues.Wrap(err, "connecting kopia client") + } + + defer kopiaRef.Close(ctx) + + kopiaRepo := kopiaRef.Repository.(repo.DirectRepository) + if err := kopiaRepo.FormatManager().ChangePassword(ctx, password); err != nil { + return clues.Wrap(err, "unable to update password") + } + + return nil +} diff --git a/src/pkg/repository/repository.go b/src/pkg/repository/repository.go index 539c3c3b1..283af8e56 100644 --- a/src/pkg/repository/repository.go +++ b/src/pkg/repository/repository.go @@ -86,7 +86,7 @@ func New( st storage.Storage, opts control.Options, configFileRepoID string, -) (repo *repository, err error) { +) (singleRepo *repository, err error) { ctx = clues.Add( ctx, "acct_provider", acct.Provider.String(), @@ -253,6 +253,40 @@ func (r *repository) Connect( return nil } +// UpdatePassword will- +// - connect to the provider storage using existing password +// - update the repo with new password +func (r *repository) UpdatePassword(ctx context.Context, password string) (err error) { + ctx = clues.Add( + ctx, + "acct_provider", r.Account.Provider.String(), + "acct_id", clues.Hide(r.Account.ID()), + "storage_provider", r.Storage.Provider.String()) + + defer func() { + if crErr := crash.Recovery(ctx, recover(), "repo connect"); crErr != nil { + err = crErr + } + }() + + progressBar := observe.MessageWithCompletion(ctx, "Connecting to repository") + defer close(progressBar) + + kopiaRef := kopia.NewConn(r.Storage) + if err := kopiaRef.Connect(ctx, r.Opts.Repo); err != nil { + return clues.Wrap(err, "connecting kopia client") + } + + err = kopiaRef.UpdatePassword(ctx, password, r.Opts.Repo) + if err != nil { + return clues.Wrap(err, "updating on kopia") + } + + defer kopiaRef.Close(ctx) + + return nil +} + func (r *repository) Close(ctx context.Context) error { if err := r.Bus.Close(); err != nil { logger.Ctx(ctx).With("err", err).Debugw("closing the event bus", clues.In(ctx).Slice()...) 
diff --git a/src/pkg/repository/repository_test.go b/src/pkg/repository/repository_test.go index 97456fe70..b76f2bb2c 100644 --- a/src/pkg/repository/repository_test.go +++ b/src/pkg/repository/repository_test.go @@ -240,6 +240,44 @@ func (suite *RepositoryIntegrationSuite) TestConnect() { assert.NoError(t, err, clues.ToCore(err)) } +func (suite *RepositoryIntegrationSuite) TestRepository_UpdatePassword() { + t := suite.T() + + ctx, flush := tester.NewContext(t) + defer flush() + + acct := tconfig.NewM365Account(t) + + // need to initialize the repository before we can test connecting to it. + st := storeTD.NewPrefixedS3Storage(t) + r, err := New( + ctx, + acct, + st, + control.DefaultOptions(), + NewRepoID) + require.NoError(t, err, clues.ToCore(err)) + + err = r.Initialize(ctx, InitConfig{}) + require.NoError(t, err, clues.ToCore(err)) + + // now re-connect + err = r.Connect(ctx, ConnConfig{}) + assert.NoError(t, err, clues.ToCore(err)) + + err = r.UpdatePassword(ctx, "newpass") + require.NoError(t, err, clues.ToCore(err)) + + tmp := st.Config["common_corsoPassphrase"] + st.Config["common_corsoPassphrase"] = "newpass" + + // now reconnect with new pass + err = r.Connect(ctx, ConnConfig{}) + assert.NoError(t, err, clues.ToCore(err)) + + st.Config["common_corsoPassphrase"] = tmp +} + func (suite *RepositoryIntegrationSuite) TestConnect_sameID() { t := suite.T()