From c4b2de5c6cee28350d105f27768ac28b08cd994e Mon Sep 17 00:00:00 2001 From: ashmrtn Date: Sat, 4 Feb 2023 14:17:21 -0800 Subject: [PATCH 01/45] Fix test failures when run without env vars (#2356) ## Description Either skip tests that require env vars or have them use mock credentials if they don't need access to external services. ## Does this PR need a docs update or release note? - [ ] :white_check_mark: Yes, it's included - [ ] :clock1: Yes, but in a later PR - [x] :no_entry: No ## Type of change - [ ] :sunflower: Feature - [ ] :bug: Bugfix - [ ] :world_map: Documentation - [x] :robot: Test - [ ] :computer: CI/Deployment - [x] :broom: Tech Debt/Cleanup ## Issue(s) * closes #2354 ## Test Plan - [x] :muscle: Manual - [ ] :zap: Unit test - [ ] :green_heart: E2E --- src/internal/connector/discovery/api/beta_service_test.go | 2 +- src/internal/connector/graph/betasdk/beta_client_test.go | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/src/internal/connector/discovery/api/beta_service_test.go b/src/internal/connector/discovery/api/beta_service_test.go index ad67b3877..1a0925e96 100644 --- a/src/internal/connector/discovery/api/beta_service_test.go +++ b/src/internal/connector/discovery/api/beta_service_test.go @@ -22,7 +22,7 @@ func TestBetaUnitSuite(t *testing.T) { func (suite *BetaUnitSuite) TestBetaService_Adapter() { t := suite.T() - a := tester.NewM365Account(t) + a := tester.NewMockM365Account(t) m365, err := a.M365Config() require.NoError(t, err) diff --git a/src/internal/connector/graph/betasdk/beta_client_test.go b/src/internal/connector/graph/betasdk/beta_client_test.go index 84f2db6c5..b52d000a4 100644 --- a/src/internal/connector/graph/betasdk/beta_client_test.go +++ b/src/internal/connector/graph/betasdk/beta_client_test.go @@ -18,6 +18,12 @@ type BetaClientSuite struct { } func TestBetaClientSuite(t *testing.T) { + tester.RunOnAny( + t, + tester.CorsoCITests, + tester.CorsoGraphConnectorTests, + ) + suite.Run(t, new(BetaClientSuite)) } From ac8fe1e9c142538856e18e1ba03bfab97717bff6 Mon Sep 17 00:00:00 2001 From: Abin Simon Date: Sun, 5 Feb 2023 17:12:48 +0530 Subject: [PATCH 02/45] Pass in prev delta to collectItems (#2371) ## Description Pass the previous delta url fetched from metadata to collectItems and make sure we are using that when fetching the pages. ## Does this PR need a docs update or release note? 
- [ ] :white_check_mark: Yes, it's included - [ ] :clock1: Yes, but in a later PR - [x] :no_entry: No ## Type of change - [x] :sunflower: Feature - [ ] :bug: Bugfix - [ ] :world_map: Documentation - [ ] :robot: Test - [ ] :computer: CI/Deployment - [ ] :broom: Tech Debt/Cleanup ## Issue(s) * https://github.com/alcionai/corso/issues/2123 ## Test Plan - [ ] :muscle: Manual - [x] :zap: Unit test - [ ] :green_heart: E2E --- .../connector/onedrive/collections.go | 9 +- .../connector/onedrive/collections_test.go | 92 +++++++++++++++++++ src/internal/connector/onedrive/drive.go | 47 ++++++++-- src/internal/connector/onedrive/item_test.go | 1 + 4 files changed, 138 insertions(+), 11 deletions(-) diff --git a/src/internal/connector/onedrive/collections.go b/src/internal/connector/onedrive/collections.go index 50c5323d9..a5adb9d34 100644 --- a/src/internal/connector/onedrive/collections.go +++ b/src/internal/connector/onedrive/collections.go @@ -251,7 +251,7 @@ func (c *Collections) Get( ctx context.Context, prevMetadata []data.Collection, ) ([]data.Collection, map[string]struct{}, error) { - _, _, err := deserializeMetadata(ctx, prevMetadata) + prevDeltas, _, err := deserializeMetadata(ctx, prevMetadata) if err != nil { return nil, nil, err } @@ -287,6 +287,8 @@ func (c *Collections) Get( driveID := *d.GetId() driveName := *d.GetName() + prevDelta := prevDeltas[driveID] + delta, paths, excluded, err := collectItems( ctx, c.itemPagerFunc( @@ -297,6 +299,7 @@ func (c *Collections) Get( driveID, driveName, c.UpdateCollections, + prevDelta, ) if err != nil { return nil, nil, err @@ -307,8 +310,8 @@ func (c *Collections) Get( // remove entries for which there is no corresponding delta token/folder. If // we leave empty delta tokens then we may end up setting the State field // for collections when not actually getting delta results. 
- if len(delta) > 0 { - deltaURLs[driveID] = delta + if len(delta.URL) > 0 { + deltaURLs[driveID] = delta.URL } // Avoid the edge case where there's no paths but we do have a valid delta diff --git a/src/internal/connector/onedrive/collections_test.go b/src/internal/connector/onedrive/collections_test.go index f784bad62..7b81a5b74 100644 --- a/src/internal/connector/onedrive/collections_test.go +++ b/src/internal/connector/onedrive/collections_test.go @@ -7,6 +7,7 @@ import ( "github.com/google/uuid" "github.com/microsoftgraph/msgraph-sdk-go/models" + "github.com/microsoftgraph/msgraph-sdk-go/models/odataerrors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" @@ -1482,3 +1483,94 @@ func delItem( return item } + +func (suite *OneDriveCollectionsSuite) TestCollectItems() { + next := "next" + delta := "delta" + + syncStateNotFound := "SyncStateNotFound" // TODO(meain): export graph.errCodeSyncStateNotFound + me := odataerrors.NewMainError() + me.SetCode(&syncStateNotFound) + + deltaError := odataerrors.NewODataError() + deltaError.SetError(me) + + table := []struct { + name string + items []deltaPagerResult + deltaURL string + prevDelta string + prevDeltaSuccess bool + err error + }{ + { + name: "delta on first run", + deltaURL: delta, + items: []deltaPagerResult{ + {deltaLink: &delta}, + }, + prevDeltaSuccess: true, + }, + { + name: "next then delta", + deltaURL: delta, + items: []deltaPagerResult{ + {nextLink: &next}, + {deltaLink: &delta}, + }, + prevDeltaSuccess: true, + }, + { + name: "invalid prev delta", + deltaURL: delta, + items: []deltaPagerResult{ + {nextLink: &next, err: deltaError}, + {deltaLink: &delta}, // works on retry + }, + prevDeltaSuccess: false, + }, + { + name: "fail a normal delta query", + items: []deltaPagerResult{ + {nextLink: &next}, + {nextLink: &next, err: assert.AnError}, + }, + prevDeltaSuccess: true, + err: assert.AnError, + }, + } + for _, test := range table { + suite.T().Run(test.name, func(t *testing.T) { + ctx, flush := tester.NewContext() + defer flush() + + itemPager := &mockItemPager{ + toReturn: test.items, + } + + collectorFunc := func( + ctx context.Context, + driveID, driveName string, + driveItems []models.DriveItemable, + oldPaths map[string]string, + newPaths map[string]string, + excluded map[string]struct{}, + ) error { + return nil + } + + delta, _, _, err := collectItems( + ctx, + itemPager, + "", + "General", + collectorFunc, + "", + ) + + require.ErrorIs(suite.T(), err, test.err) + require.Equal(suite.T(), test.deltaURL, delta.URL) + require.Equal(suite.T(), !test.prevDeltaSuccess, delta.Reset) + }) + } +} diff --git a/src/internal/connector/onedrive/drive.go b/src/internal/connector/onedrive/drive.go index ebcbe8b6f..b06184884 100644 --- a/src/internal/connector/onedrive/drive.go +++ b/src/internal/connector/onedrive/drive.go @@ -35,6 +35,17 @@ const ( contextDeadlineExceeded = "context deadline exceeded" ) +// DeltaUpdate holds the results of a current delta token. It normally +// gets produced when aggregating the addition and removal of items in +// a delta-queriable folder. 
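+// When Reset is true, the previous delta token was rejected and the pager
+// restarted from scratch, so URL reflects a fresh full enumeration rather
+// than a continuation of the old token.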
+// FIXME: This is same as exchange.api.DeltaUpdate +type DeltaUpdate struct { + // the deltaLink itself + URL string + // true if the old delta was marked as invalid + Reset bool +} + type drivePager interface { GetPage(context.Context) (gapi.PageLinker, error) SetNext(nextLink string) @@ -172,22 +183,41 @@ func collectItems( pager itemPager, driveID, driveName string, collector itemCollector, -) (string, map[string]string, map[string]struct{}, error) { + prevDelta string, +) (DeltaUpdate, map[string]string, map[string]struct{}, error) { var ( newDeltaURL = "" // TODO(ashmrtn): Eventually this should probably be a parameter so we can // take in previous paths. - oldPaths = map[string]string{} - newPaths = map[string]string{} - excluded = map[string]struct{}{} + oldPaths = map[string]string{} + newPaths = map[string]string{} + excluded = map[string]struct{}{} + invalidPrevDelta = false + triedPrevDelta = false ) maps.Copy(newPaths, oldPaths) + if len(prevDelta) != 0 { + pager.SetNext(prevDelta) + } + for { page, err := pager.GetPage(ctx) + + if !triedPrevDelta && graph.IsErrInvalidDelta(err) { + logger.Ctx(ctx).Infow("Invalid previous delta link", "link", prevDelta) + + triedPrevDelta = true // TODO(meain): Do we need this check? + invalidPrevDelta = true + + pager.SetNext("") + + continue + } + if err != nil { - return "", nil, nil, errors.Wrapf( + return DeltaUpdate{}, nil, nil, errors.Wrapf( err, "failed to query drive items. details: %s", support.ConnectorStackErrorTrace(err), @@ -196,12 +226,12 @@ func collectItems( vals, err := pager.ValuesIn(page) if err != nil { - return "", nil, nil, errors.Wrap(err, "extracting items from response") + return DeltaUpdate{}, nil, nil, errors.Wrap(err, "extracting items from response") } err = collector(ctx, driveID, driveName, vals, oldPaths, newPaths, excluded) if err != nil { - return "", nil, nil, err + return DeltaUpdate{}, nil, nil, err } nextLink, deltaLink := gapi.NextAndDeltaLink(page) @@ -219,7 +249,7 @@ func collectItems( pager.SetNext(nextLink) } - return newDeltaURL, newPaths, excluded, nil + return DeltaUpdate{URL: newDeltaURL, Reset: invalidPrevDelta}, newPaths, excluded, nil } // getFolder will lookup the specified folder name under `parentFolderID` @@ -379,6 +409,7 @@ func GetAllFolders( return nil }, + "", ) if err != nil { return nil, errors.Wrapf(err, "getting items for drive %s", *d.GetName()) diff --git a/src/internal/connector/onedrive/item_test.go b/src/internal/connector/onedrive/item_test.go index a2e008ec5..aec2f2474 100644 --- a/src/internal/connector/onedrive/item_test.go +++ b/src/internal/connector/onedrive/item_test.go @@ -126,6 +126,7 @@ func (suite *ItemIntegrationSuite) TestItemReader_oneDrive() { suite.userDriveID, "General", itemCollector, + "", ) require.NoError(suite.T(), err) From d82b5cacdf55b59f3bfc1fbd6663ce44d630d180 Mon Sep 17 00:00:00 2001 From: Danny Date: Mon, 6 Feb 2023 09:42:50 -0500 Subject: [PATCH 03/45] GC: Restore: SharePoint: Page Logic (#2225) ## Description Restore logic for restoring a SharePoint Page to M365 given a valid `[]byte`. Delete API also included Tests included ## Does this PR need a docs update or release note? - [x] :no_entry: No ## Type of change - [x] :sunflower: Feature ## Issue(s) * related to #2169 ## Test Plan - [x] :zap: Unit test Must be tested locally due to CI Library issues, See #2086. Clean-up is handled within the tests. 
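A minimal sketch of the create-then-publish flow added here, using only calls introduced in this patch (a configured *discover.BetaService is assumed; restoreOnePage is a hypothetical wrapper, not part of the change):

import (
	"context"

	discover "github.com/alcionai/corso/src/internal/connector/discovery/api"
	"github.com/alcionai/corso/src/internal/connector/support"
)

// restoreOnePage restores a single backed-up page payload to the given site.
func restoreOnePage(
	ctx context.Context,
	service *discover.BetaService,
	siteID string,
	pageBytes []byte,
) error {
	// hydrate a models.SitePageable from the backed-up bytes
	page, err := support.CreatePageFromBytes(pageBytes)
	if err != nil {
		return err
	}

	// step 1: create the page on the target site
	restored, err := service.Client().SitesById(siteID).Pages().Post(ctx, page, nil)
	if err != nil {
		return err
	}

	// step 2: publish it; an unpublished page is not visible on the site
	return service.Client().
		SitesById(siteID).
		PagesById(*restored.GetId()).
		Publish().
		Post(ctx, nil)
}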
--- .../connector/sharepoint/api/helper_test.go | 8 +- .../connector/sharepoint/api/pages.go | 139 +++++++++++++++++- .../connector/sharepoint/api/pages_test.go | 59 ++++++-- .../connector/sharepoint/collection.go | 9 ++ 4 files changed, 199 insertions(+), 16 deletions(-) diff --git a/src/internal/connector/sharepoint/api/helper_test.go b/src/internal/connector/sharepoint/api/helper_test.go index 631dd7b3b..33dee1561 100644 --- a/src/internal/connector/sharepoint/api/helper_test.go +++ b/src/internal/connector/sharepoint/api/helper_test.go @@ -1,15 +1,15 @@ -package api +package api_test import ( "testing" - "github.com/alcionai/corso/src/internal/connector/discovery/api" + discover "github.com/alcionai/corso/src/internal/connector/discovery/api" "github.com/alcionai/corso/src/internal/connector/graph" "github.com/alcionai/corso/src/pkg/account" "github.com/stretchr/testify/require" ) -func createTestBetaService(t *testing.T, credentials account.M365Config) *api.BetaService { +func createTestBetaService(t *testing.T, credentials account.M365Config) *discover.BetaService { adapter, err := graph.CreateAdapter( credentials.AzureTenantID, credentials.AzureClientID, @@ -17,5 +17,5 @@ func createTestBetaService(t *testing.T, credentials account.M365Config) *api.Be ) require.NoError(t, err) - return api.NewBetaService(adapter) + return discover.NewBetaService(adapter) } diff --git a/src/internal/connector/sharepoint/api/pages.go b/src/internal/connector/sharepoint/api/pages.go index a2232140c..16eb3f0ae 100644 --- a/src/internal/connector/sharepoint/api/pages.go +++ b/src/internal/connector/sharepoint/api/pages.go @@ -2,18 +2,26 @@ package api import ( "context" + "fmt" + "io" + "time" - "github.com/alcionai/corso/src/internal/connector/discovery/api" + "github.com/pkg/errors" + + discover "github.com/alcionai/corso/src/internal/connector/discovery/api" "github.com/alcionai/corso/src/internal/connector/graph/betasdk/models" "github.com/alcionai/corso/src/internal/connector/graph/betasdk/sites" "github.com/alcionai/corso/src/internal/connector/support" + "github.com/alcionai/corso/src/internal/data" + D "github.com/alcionai/corso/src/internal/diagnostics" + "github.com/alcionai/corso/src/pkg/backup/details" ) // GetSitePages retrieves a collection of Pages related to the give Site. 
// Returns error if error experienced during the call func GetSitePage( ctx context.Context, - serv *api.BetaService, + serv *discover.BetaService, siteID string, pages []string, ) ([]models.SitePageable, error) { @@ -33,7 +41,7 @@ func GetSitePage( } // fetchPages utility function to return the tuple of item -func FetchPages(ctx context.Context, bs *api.BetaService, siteID string) ([]Tuple, error) { +func FetchPages(ctx context.Context, bs *discover.BetaService, siteID string) ([]Tuple, error) { var ( builder = bs.Client().SitesById(siteID).Pages() opts = fetchPageOptions() @@ -80,6 +88,21 @@ func fetchPageOptions() *sites.ItemPagesRequestBuilderGetRequestConfiguration { return options } +// DeleteSitePage removes the selected page from the SharePoint Site +// https://learn.microsoft.com/en-us/graph/api/sitepage-delete?view=graph-rest-beta +func DeleteSitePage( + ctx context.Context, + serv *discover.BetaService, + siteID, pageID string, +) error { + err := serv.Client().SitesById(siteID).PagesById(pageID).Delete(ctx, nil) + if err != nil { + return support.ConnectorStackErrorTraceWrap(err, "deleting page: "+pageID) + } + + return nil +} + // retrievePageOptions returns options to expand func retrieveSitePageOptions() *sites.ItemPagesSitePageItemRequestBuilderGetRequestConfiguration { fields := []string{"canvasLayout"} @@ -91,3 +114,113 @@ func retrieveSitePageOptions() *sites.ItemPagesSitePageItemRequestBuilderGetRequ return options } + +func RestoreSitePage( + ctx context.Context, + service *discover.BetaService, + itemData data.Stream, + siteID, destName string, +) (details.ItemInfo, error) { + ctx, end := D.Span(ctx, "gc:sharepoint:restorePage", D.Label("item_uuid", itemData.UUID())) + defer end() + + var ( + dii = details.ItemInfo{} + pageID = itemData.UUID() + pageName = pageID + ) + + byteArray, err := io.ReadAll(itemData.ToReader()) + if err != nil { + return dii, errors.Wrap(err, "reading sharepoint page bytes from stream") + } + + // Hydrate Page + page, err := support.CreatePageFromBytes(byteArray) + if err != nil { + return dii, errors.Wrapf(err, "creating Page object %s", pageID) + } + + pageNamePtr := page.GetName() + if pageNamePtr != nil { + pageName = *pageNamePtr + } + + newName := fmt.Sprintf("%s_%s", destName, pageName) + page.SetName(&newName) + + // Restore is a 2-Step Process in Graph API + // 1. Create the Page on the site + // 2. Publish the site + // See: https://learn.microsoft.com/en-us/graph/api/sitepage-create?view=graph-rest-beta + restoredPage, err := service.Client().SitesById(siteID).Pages().Post(ctx, page, nil) + if err != nil { + sendErr := support.ConnectorStackErrorTraceWrap( + err, + "creating page from ID: %s"+pageName+" API Error Details", + ) + + return dii, sendErr + } + + pageID = *restoredPage.GetId() + // Publish page to make visible + // See https://learn.microsoft.com/en-us/graph/api/sitepage-publish?view=graph-rest-beta + if restoredPage.GetWebUrl() == nil { + return dii, fmt.Errorf("creating page %s incomplete. Field `webURL` not populated", pageID) + } + + err = service.Client(). + SitesById(siteID). + PagesById(pageID). + Publish(). + Post(ctx, nil) + if err != nil { + return dii, support.ConnectorStackErrorTraceWrap( + err, + "publishing page ID: "+*restoredPage.GetId()+" API Error Details", + ) + } + + dii.SharePoint = PageInfo(restoredPage, int64(len(byteArray))) + // Storing new pageID in unused field. 
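+ // ParentPath has no meaning for a restored page, so callers (e.g. the
+ // restore test's cleanup) can read the new page's ID back out of it.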
+ dii.SharePoint.ParentPath = pageID + + return dii, nil +} + +// ============================== +// Helpers +// ============================== +// PageInfo extracts useful metadata into struct for book keeping +func PageInfo(page models.SitePageable, size int64) *details.SharePointInfo { + var ( + name, webURL string + created, modified time.Time + ) + + if page.GetTitle() != nil { + name = *page.GetTitle() + } + + if page.GetWebUrl() != nil { + webURL = *page.GetWebUrl() + } + + if page.GetCreatedDateTime() != nil { + created = *page.GetCreatedDateTime() + } + + if page.GetLastModifiedDateTime() != nil { + modified = *page.GetLastModifiedDateTime() + } + + return &details.SharePointInfo{ + ItemType: details.SharePointItem, + ItemName: name, + Created: created, + Modified: modified, + WebURL: webURL, + Size: size, + } +} diff --git a/src/internal/connector/sharepoint/api/pages_test.go b/src/internal/connector/sharepoint/api/pages_test.go index ecc2cf18d..c6295748f 100644 --- a/src/internal/connector/sharepoint/api/pages_test.go +++ b/src/internal/connector/sharepoint/api/pages_test.go @@ -1,20 +1,28 @@ -package api +package api_test import ( + "bytes" + "io" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + "github.com/alcionai/corso/src/internal/common" + discover "github.com/alcionai/corso/src/internal/connector/discovery/api" + "github.com/alcionai/corso/src/internal/connector/mockconnector" + "github.com/alcionai/corso/src/internal/connector/sharepoint" + "github.com/alcionai/corso/src/internal/connector/sharepoint/api" "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/pkg/account" ) type SharePointPageSuite struct { suite.Suite - siteID string - creds account.M365Config + siteID string + creds account.M365Config + service *discover.BetaService } func (suite *SharePointPageSuite) SetupSuite() { @@ -27,6 +35,7 @@ func (suite *SharePointPageSuite) SetupSuite() { require.NoError(t, err) suite.creds = m365 + suite.service = createTestBetaService(t, suite.creds) } func TestSharePointPageSuite(t *testing.T) { @@ -42,9 +51,7 @@ func (suite *SharePointPageSuite) TestFetchPages() { defer flush() t := suite.T() - service := createTestBetaService(t, suite.creds) - - pgs, err := FetchPages(ctx, service, suite.siteID) + pgs, err := api.FetchPages(ctx, suite.service, suite.siteID) assert.NoError(t, err) require.NotNil(t, pgs) assert.NotZero(t, len(pgs)) @@ -59,13 +66,47 @@ func (suite *SharePointPageSuite) TestGetSitePage() { defer flush() t := suite.T() - service := createTestBetaService(t, suite.creds) - tuples, err := FetchPages(ctx, service, suite.siteID) + tuples, err := api.FetchPages(ctx, suite.service, suite.siteID) require.NoError(t, err) require.NotNil(t, tuples) jobs := []string{tuples[0].ID} - pages, err := GetSitePage(ctx, service, suite.siteID, jobs) + pages, err := api.GetSitePage(ctx, suite.service, suite.siteID, jobs) assert.NoError(t, err) assert.NotEmpty(t, pages) } + +func (suite *SharePointPageSuite) TestRestoreSinglePage() { + ctx, flush := tester.NewContext() + defer flush() + + t := suite.T() + + destName := "Corso_Restore_" + common.FormatNow(common.SimpleTimeTesting) + testName := "MockPage" + + // Create Test Page + //nolint:lll + byteArray := mockconnector.GetMockPage("Byte Test") + + pageData := sharepoint.NewItem( + testName, + io.NopCloser(bytes.NewReader(byteArray)), + ) + + info, err := api.RestoreSitePage( + ctx, + suite.service, + pageData, + suite.siteID, + 
destName, + ) + + require.NoError(t, err) + require.NotNil(t, info) + + // Clean Up + pageID := info.SharePoint.ParentPath + err = api.DeleteSitePage(ctx, suite.service, suite.siteID, pageID) + assert.NoError(t, err) +} diff --git a/src/internal/connector/sharepoint/collection.go b/src/internal/connector/sharepoint/collection.go index c540af4e6..603edd685 100644 --- a/src/internal/connector/sharepoint/collection.go +++ b/src/internal/connector/sharepoint/collection.go @@ -106,6 +106,15 @@ type Item struct { deleted bool } +func NewItem(name string, d io.ReadCloser) *Item { + item := &Item{ + id: name, + data: d, + } + + return item +} + func (sd *Item) UUID() string { return sd.id } From b3b5189e19a256e9d3bb675c4b19cf2a470951f7 Mon Sep 17 00:00:00 2001 From: Danny Date: Mon, 6 Feb 2023 10:27:39 -0500 Subject: [PATCH 04/45] GC: Restore: SharePoint: Collection logic (#2227) ## Description Updates SharePoint Restore Collection Logic. Test Suite included. Restore Pipeline is not connected in this PR for ease of parsing. It is noted that there is a large amount of code duplication between Lists and Pages. Code Clean-Up will address these issues once issue #2174 has been handled. As this will require the use of an HTTP client that is not necessary for other services. ## Does this PR need a docs update or release note? - [x] :no_entry: No ## Type of change - [x] :sunflower: Feature ## Issue(s) * related to #2169 ## Test Plan - [x] :zap: Unit test --- src/internal/connector/graph_connector.go | 2 +- src/internal/connector/sharepoint/restore.go | 105 +++++++++++++++++-- 2 files changed, 97 insertions(+), 10 deletions(-) diff --git a/src/internal/connector/graph_connector.go b/src/internal/connector/graph_connector.go index def430f14..5ef6ef6be 100644 --- a/src/internal/connector/graph_connector.go +++ b/src/internal/connector/graph_connector.go @@ -293,7 +293,7 @@ func (gc *GraphConnector) RestoreDataCollections( case selectors.ServiceOneDrive: status, err = onedrive.RestoreCollections(ctx, backupVersion, gc.Service, dest, opts, dcs, deets) case selectors.ServiceSharePoint: - status, err = sharepoint.RestoreCollections(ctx, backupVersion, gc.Service, dest, dcs, deets) + status, err = sharepoint.RestoreCollections(ctx, backupVersion, creds, gc.Service, dest, dcs, deets) default: err = errors.Errorf("restore data from service %s not supported", selector.Service.String()) } diff --git a/src/internal/connector/sharepoint/restore.go b/src/internal/connector/sharepoint/restore.go index 3cf35d287..10cf125e7 100644 --- a/src/internal/connector/sharepoint/restore.go +++ b/src/internal/connector/sharepoint/restore.go @@ -9,11 +9,14 @@ import ( "github.com/microsoftgraph/msgraph-sdk-go/models" "github.com/pkg/errors" + discover "github.com/alcionai/corso/src/internal/connector/discovery/api" "github.com/alcionai/corso/src/internal/connector/graph" "github.com/alcionai/corso/src/internal/connector/onedrive" + "github.com/alcionai/corso/src/internal/connector/sharepoint/api" "github.com/alcionai/corso/src/internal/connector/support" "github.com/alcionai/corso/src/internal/data" D "github.com/alcionai/corso/src/internal/diagnostics" + "github.com/alcionai/corso/src/pkg/account" "github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/logger" @@ -37,6 +40,7 @@ import ( func RestoreCollections( ctx context.Context, backupVersion int, + creds account.M365Config, service graph.Servicer, dest control.RestoreDestination, dcs []data.Collection, @@ 
-74,7 +78,7 @@ func RestoreCollections( false, ) case path.ListsCategory: - metrics, canceled = RestoreCollection( + metrics, canceled = RestoreListCollection( ctx, service, dc, @@ -83,11 +87,14 @@ func RestoreCollections( errUpdater, ) case path.PagesCategory: - errorMessage := fmt.Sprintf("restore of %s not supported", dc.FullPath().Category()) - logger.Ctx(ctx).Error(errorMessage) - - return nil, errors.New(errorMessage) - + metrics, canceled = RestorePageCollection( + ctx, + creds, + dc, + dest.ContainerName, + deets, + errUpdater, + ) default: return nil, errors.Errorf("category %s not supported", dc.FullPath().Category()) } @@ -209,7 +216,7 @@ func restoreListItem( return dii, nil } -func RestoreCollection( +func RestoreListCollection( ctx context.Context, service graph.Servicer, dc data.Collection, @@ -217,7 +224,7 @@ func RestoreCollection( deets *details.Builder, errUpdater func(string, error), ) (support.CollectionMetrics, bool) { - ctx, end := D.Span(ctx, "gc:sharepoint:restoreCollection", D.Label("path", dc.FullPath())) + ctx, end := D.Span(ctx, "gc:sharepoint:restoreListCollection", D.Label("path", dc.FullPath())) defer end() var ( @@ -225,7 +232,7 @@ func RestoreCollection( directory = dc.FullPath() ) - trace.Log(ctx, "gc:sharepoint:restoreCollection", directory.String()) + trace.Log(ctx, "gc:sharepoint:restoreListCollection", directory.String()) siteID := directory.ResourceOwner() // Restore items from the collection @@ -276,3 +283,83 @@ func RestoreCollection( } } } + +// RestorePageCollection handles restoration of an individual site page collection. +// returns: +// - the collection's item and byte count metrics +// - the context cancellation station. True iff context is canceled. +func RestorePageCollection( + ctx context.Context, + creds account.M365Config, + dc data.Collection, + restoreContainerName string, + deets *details.Builder, + errUpdater func(string, error), +) (support.CollectionMetrics, bool) { + ctx, end := D.Span(ctx, "gc:sharepoint:restorePageCollection", D.Label("path", dc.FullPath())) + defer end() + + var ( + metrics = support.CollectionMetrics{} + directory = dc.FullPath() + ) + + adpt, err := graph.CreateAdapter(creds.AzureTenantID, creds.AzureClientID, creds.AzureClientSecret) + if err != nil { + return metrics, false + } + + service := discover.NewBetaService(adpt) + + trace.Log(ctx, "gc:sharepoint:restorePageCollection", directory.String()) + siteID := directory.ResourceOwner() + + // Restore items from collection + items := dc.Items() + + for { + select { + case <-ctx.Done(): + errUpdater("context canceled", ctx.Err()) + return metrics, true + + case itemData, ok := <-items: + if !ok { + return metrics, false + } + metrics.Objects++ + + itemInfo, err := api.RestoreSitePage( + ctx, + service, + itemData, + siteID, + restoreContainerName, + ) + if err != nil { + errUpdater(itemData.UUID(), err) + continue + } + + metrics.TotalBytes += itemInfo.SharePoint.Size + + itemPath, err := dc.FullPath().Append(itemData.UUID(), true) + if err != nil { + logger.Ctx(ctx).Errorw("transforming item to full path", "error", err) + errUpdater(itemData.UUID(), err) + + continue + } + + deets.Add( + itemPath.String(), + itemPath.ShortRef(), + "", + true, + itemInfo, + ) + + metrics.Successes++ + } + } +} From 63ffd8004ab596af8a484eb1d98d0efc3a7f6b4f Mon Sep 17 00:00:00 2001 From: Keepers Date: Mon, 6 Feb 2023 12:37:51 -0700 Subject: [PATCH 05/45] add clues, fault to op/restore.go (#2320) ## Does this PR need a docs update or release note? 
- [x] :no_entry: No ## Type of change - [x] :broom: Tech Debt/Cleanup ## Issue(s) * #1970 ## Test Plan - [x] :zap: Unit test --- src/internal/operations/restore.go | 24 +++++++++++++----------- src/internal/operations/restore_test.go | 1 + src/pkg/fault/mock/mock.go | 2 +- 3 files changed, 15 insertions(+), 12 deletions(-) diff --git a/src/internal/operations/restore.go b/src/internal/operations/restore.go index dafb8670e..5fd8c9ffb 100644 --- a/src/internal/operations/restore.go +++ b/src/internal/operations/restore.go @@ -25,6 +25,7 @@ import ( "github.com/alcionai/corso/src/pkg/account" "github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/control" + "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/logger" "github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/selectors" @@ -201,7 +202,7 @@ func (op *RestoreOperation) do( return nil, errors.Wrap(err, "getting backup and details") } - paths, err := formatDetailsForRestoration(ctx, op.Selectors, deets) + paths, err := formatDetailsForRestoration(ctx, op.Selectors, deets, op.Errors) if err != nil { return nil, errors.Wrap(err, "formatting paths from details") } @@ -290,6 +291,7 @@ func (op *RestoreOperation) persistResults( if opStats.readErr != nil || opStats.writeErr != nil { op.Status = Failed + // TODO(keepers): replace with fault.Errors handling. return multierror.Append( errors.New("errors prevented the operation from processing"), opStats.readErr, @@ -340,6 +342,7 @@ func formatDetailsForRestoration( ctx context.Context, sel selectors.Selector, deets *details.Details, + errs *fault.Errors, ) ([]path.Path, error) { fds, err := sel.Reduce(ctx, deets) if err != nil { @@ -347,18 +350,21 @@ func formatDetailsForRestoration( } var ( - errs *multierror.Error fdsPaths = fds.Paths() paths = make([]path.Path, len(fdsPaths)) ) for i := range fdsPaths { + if errs.Err() != nil { + return nil, errs.Err() + } + p, err := path.FromDataLayerPath(fdsPaths[i], true) if err != nil { - errs = multierror.Append( - errs, - errors.Wrap(err, "parsing details entry path"), - ) + errs.Add(clues. + Wrap(err, "parsing details path after reduction"). + WithMap(clues.In(ctx)). 
+ With("path", fdsPaths[i])) continue } @@ -377,9 +383,5 @@ func formatDetailsForRestoration( return paths[i].String() < paths[j].String() }) - if errs != nil { - return nil, errs - } - - return paths, nil + return paths, errs.Err() } diff --git a/src/internal/operations/restore_test.go b/src/internal/operations/restore_test.go index 9bf21b806..7b0e0d211 100644 --- a/src/internal/operations/restore_test.go +++ b/src/internal/operations/restore_test.go @@ -285,6 +285,7 @@ func (suite *RestoreOpIntegrationSuite) TestRestore_Run() { ds, err := ro.Run(ctx) require.NoError(t, err, "restoreOp.Run()") + require.Empty(t, ro.Errors.Errs(), "restoreOp.Run() recoverable errors") require.NotEmpty(t, ro.Results, "restoreOp results") require.NotNil(t, ds, "restored details") assert.Equal(t, ro.Status, Completed, "restoreOp status") diff --git a/src/pkg/fault/mock/mock.go b/src/pkg/fault/mock/mock.go index ba560996d..4d3fd06cd 100644 --- a/src/pkg/fault/mock/mock.go +++ b/src/pkg/fault/mock/mock.go @@ -13,5 +13,5 @@ func NewAdder() *Adder { func (ma *Adder) Add(err error) *fault.Errors { ma.Errs = append(ma.Errs, err) - return fault.New(false) + return fault.New(true) } From c4cc3b1a2380775a3257a2ba6d723a6ff3b4c90c Mon Sep 17 00:00:00 2001 From: ashmrtn Date: Mon, 6 Feb 2023 11:58:13 -0800 Subject: [PATCH 06/45] Wire up most of exclude list for OneDrive (#2379) ## Description Push exclude list through the whole stack. It's not wired to kopia yet, but only one location (marked with a TODO) needs to be changed to have that happen. ## Does this PR need a docs update or release note? - [ ] :white_check_mark: Yes, it's included - [ ] :clock1: Yes, but in a later PR - [x] :no_entry: No ## Type of change - [ ] :sunflower: Feature - [ ] :bug: Bugfix - [ ] :world_map: Documentation - [ ] :robot: Test - [ ] :computer: CI/Deployment - [x] :broom: Tech Debt/Cleanup ## Issue(s) * #2243 ## Test Plan - [ ] :muscle: Manual - [x] :zap: Unit test - [ ] :green_heart: E2E --- src/internal/connector/onedrive/collections.go | 2 +- .../connector/onedrive/collections_test.go | 6 ++---- src/internal/operations/backup.go | 14 ++++++++------ src/internal/operations/backup_test.go | 1 + 4 files changed, 12 insertions(+), 11 deletions(-) diff --git a/src/internal/connector/onedrive/collections.go b/src/internal/connector/onedrive/collections.go index a5adb9d34..b8c8b9c48 100644 --- a/src/internal/connector/onedrive/collections.go +++ b/src/internal/connector/onedrive/collections.go @@ -359,7 +359,7 @@ func (c *Collections) Get( } // TODO(ashmrtn): Track and return the set of items to exclude. - return collections, nil, nil + return collections, excludedItems, nil } // UpdateCollections initializes and adds the provided drive items to Collections diff --git a/src/internal/connector/onedrive/collections_test.go b/src/internal/connector/onedrive/collections_test.go index 7b81a5b74..0b10d02bd 100644 --- a/src/internal/connector/onedrive/collections_test.go +++ b/src/internal/connector/onedrive/collections_test.go @@ -1387,7 +1387,7 @@ func (suite *OneDriveCollectionsSuite) TestGet() { c.itemPagerFunc = itemPagerFunc // TODO(ashmrtn): Allow passing previous metadata. - cols, _, err := c.Get(ctx, nil) + cols, delList, err := c.Get(ctx, nil) test.errCheck(t, err) if err != nil { @@ -1424,9 +1424,7 @@ func (suite *OneDriveCollectionsSuite) TestGet() { assert.ElementsMatch(t, test.expectedCollections[folderPath], itemIDs) } - // TODO(ashmrtn): Uncomment this when we begin return the set of items to - // remove from the upcoming backup. 
- // assert.Equal(t, test.expectedDelList, delList) + assert.Equal(t, test.expectedDelList, delList) }) } } diff --git a/src/internal/operations/backup.go b/src/internal/operations/backup.go index ec47bae1c..31912585f 100644 --- a/src/internal/operations/backup.go +++ b/src/internal/operations/backup.go @@ -243,7 +243,7 @@ func (op *BackupOperation) do( return nil, errors.Wrap(err, "connectng to m365") } - cs, err := produceBackupDataCollections(ctx, gc, op.Selectors, mdColls, op.Options) + cs, excludes, err := produceBackupDataCollections(ctx, gc, op.Selectors, mdColls, op.Options) if err != nil { return nil, errors.Wrap(err, "producing backup data collections") } @@ -257,6 +257,7 @@ func (op *BackupOperation) do( reasons, mans, cs, + excludes, backupID, op.incremental && canUseMetaData) if err != nil { @@ -309,7 +310,7 @@ func produceBackupDataCollections( sel selectors.Selector, metadata []data.Collection, ctrlOpts control.Options, -) ([]data.Collection, error) { +) ([]data.Collection, map[string]struct{}, error) { complete, closer := observe.MessageWithCompletion(ctx, observe.Safe("Discovering items to backup")) defer func() { complete <- struct{}{} @@ -317,11 +318,9 @@ func produceBackupDataCollections( closer() }() - // TODO(ashmrtn): When we're ready to wire up the global exclude list return - // all values. - cols, _, errs := gc.DataCollections(ctx, sel, metadata, ctrlOpts) + cols, excludes, errs := gc.DataCollections(ctx, sel, metadata, ctrlOpts) - return cols, errs + return cols, excludes, errs } // --------------------------------------------------------------------------- @@ -391,6 +390,7 @@ func consumeBackupDataCollections( reasons []kopia.Reason, mans []*kopia.ManifestEntry, cs []data.Collection, + excludes map[string]struct{}, backupID model.StableID, isIncremental bool, ) (*kopia.BackupStats, *details.Builder, map[string]path.Path, error) { @@ -456,6 +456,8 @@ func consumeBackupDataCollections( ctx, bases, cs, + // TODO(ashmrtn): When we're ready to enable incremental backups for + // OneDrive replace this with `excludes`. nil, tags, isIncremental) diff --git a/src/internal/operations/backup_test.go b/src/internal/operations/backup_test.go index 149448e66..4adc70b30 100644 --- a/src/internal/operations/backup_test.go +++ b/src/internal/operations/backup_test.go @@ -575,6 +575,7 @@ func (suite *BackupOpSuite) TestBackupOperation_ConsumeBackupDataCollections_Pat nil, test.inputMan, nil, + nil, model.StableID(""), true, ) From faad5d35a4bc54c195a3fe136829f1e899a799a5 Mon Sep 17 00:00:00 2001 From: Keepers Date: Mon, 6 Feb 2023 13:45:09 -0700 Subject: [PATCH 07/45] refactor structured errors/logs in pkg (#2323) ## Description Refactors errors and logs to support structured data throughout the smaller packages within pkg/... Larger packages will come later. ## Does this PR need a docs update or release note? 
- [x] :no_entry: No ## Type of change - [x] :broom: Tech Debt/Cleanup ## Issue(s) * #1970 ## Test Plan - [x] :zap: Unit test --- src/pkg/account/m365.go | 3 ++- src/pkg/credentials/aws.go | 3 ++- src/pkg/credentials/corso.go | 3 ++- src/pkg/credentials/m365.go | 3 ++- src/pkg/path/onedrive.go | 11 ++++------- src/pkg/path/path.go | 26 ++++++++++++++------------ src/pkg/path/resource_path.go | 17 ++++++++--------- src/pkg/repository/repository.go | 20 ++++++++++++++------ src/pkg/services/m365/m365.go | 32 +++++++++++++++++--------------- src/pkg/storage/common.go | 3 ++- src/pkg/storage/s3.go | 3 ++- 11 files changed, 69 insertions(+), 55 deletions(-) diff --git a/src/pkg/account/m365.go b/src/pkg/account/m365.go index a9c84fb37..b9f91b8aa 100644 --- a/src/pkg/account/m365.go +++ b/src/pkg/account/m365.go @@ -1,6 +1,7 @@ package account import ( + "github.com/alcionai/clues" "github.com/pkg/errors" "github.com/alcionai/corso/src/pkg/credentials" @@ -66,7 +67,7 @@ func (c M365Config) validate() error { for k, v := range check { if len(v) == 0 { - return errors.Wrap(errMissingRequired, k) + return clues.Stack(errMissingRequired, errors.New(k)) } } diff --git a/src/pkg/credentials/aws.go b/src/pkg/credentials/aws.go index 61c0673aa..9ab9d7c3e 100644 --- a/src/pkg/credentials/aws.go +++ b/src/pkg/credentials/aws.go @@ -3,6 +3,7 @@ package credentials import ( "os" + "github.com/alcionai/clues" "github.com/pkg/errors" ) @@ -48,7 +49,7 @@ func (c AWS) Validate() error { for k, v := range check { if len(v) == 0 { - return errors.Wrap(errMissingRequired, k) + return clues.Stack(errMissingRequired, errors.New(k)) } } diff --git a/src/pkg/credentials/corso.go b/src/pkg/credentials/corso.go index 297f2030a..fc22dc957 100644 --- a/src/pkg/credentials/corso.go +++ b/src/pkg/credentials/corso.go @@ -3,6 +3,7 @@ package credentials import ( "os" + "github.com/alcionai/clues" "github.com/pkg/errors" ) @@ -34,7 +35,7 @@ func (c Corso) Validate() error { for k, v := range check { if len(v) == 0 { - return errors.Wrap(errMissingRequired, k) + return clues.Stack(errMissingRequired, errors.New(k)) } } diff --git a/src/pkg/credentials/m365.go b/src/pkg/credentials/m365.go index 2d0943aa7..c2d0c5906 100644 --- a/src/pkg/credentials/m365.go +++ b/src/pkg/credentials/m365.go @@ -3,6 +3,7 @@ package credentials import ( "os" + "github.com/alcionai/clues" "github.com/pkg/errors" ) @@ -36,7 +37,7 @@ func (c M365) Validate() error { for k, v := range check { if len(v) == 0 { - return errors.Wrap(errMissingRequired, k) + return clues.Stack(errMissingRequired, errors.New(k)) } } diff --git a/src/pkg/path/onedrive.go b/src/pkg/path/onedrive.go index cf960933f..86d40c887 100644 --- a/src/pkg/path/onedrive.go +++ b/src/pkg/path/onedrive.go @@ -1,8 +1,6 @@ package path -import ( - "github.com/pkg/errors" -) +import "github.com/alcionai/clues" // drivePath is used to represent path components // of an item within the drive i.e. @@ -20,10 +18,9 @@ func ToOneDrivePath(p Path) (*DrivePath, error) { // Must be at least `drives//root:` if len(folders) < 3 { - return nil, errors.Errorf( - "folder path doesn't match expected format for OneDrive items: %s", - p.Folder(), - ) + return nil, clues. + New("folder path doesn't match expected format for OneDrive items"). 
+ With("folders", p.Folder()) } return &DrivePath{DriveID: folders[1], Folders: folders[3:]}, nil diff --git a/src/pkg/path/path.go b/src/pkg/path/path.go index e2e6d273e..b0c4456c4 100644 --- a/src/pkg/path/path.go +++ b/src/pkg/path/path.go @@ -56,11 +56,10 @@ import ( "fmt" "strings" + "github.com/alcionai/clues" "github.com/pkg/errors" ) -const templateErrPathParsing = "parsing resource path from %s" - const ( escapeCharacter = '\\' PathSeparator = '/' @@ -73,7 +72,10 @@ var charactersToEscape = map[rune]struct{}{ escapeCharacter: {}, } -var errMissingSegment = errors.New("missing required path element") +var ( + errMissingSegment = errors.New("missing required path element") + errParsingPath = errors.New("parsing resource path") +) // For now, adding generic functions to pull information from segments. // Resources that don't have the requested information should return an empty @@ -252,11 +254,11 @@ func (pb Builder) join(start, end int) string { func verifyInputValues(tenant, resourceOwner string) error { if len(tenant) == 0 { - return errors.Wrap(errMissingSegment, "tenant") + return clues.Stack(errMissingSegment, errors.New("tenant")) } if len(resourceOwner) == 0 { - return errors.Wrap(errMissingSegment, "resourceOwner") + return clues.Stack(errMissingSegment, errors.New("resourceOwner")) } return nil @@ -418,17 +420,17 @@ func FromDataLayerPath(p string, isItem bool) (Path, error) { p = TrimTrailingSlash(p) // If p was just the path separator then it will be empty now. if len(p) == 0 { - return nil, errors.Errorf("logically empty path given: %s", p) + return nil, clues.New("logically empty path given").With("path_string", p) } // Turn into a Builder to reuse code that ignores empty elements. pb, err := Builder{}.UnescapeAndAppend(Split(p)...) if err != nil { - return nil, errors.Wrapf(err, templateErrPathParsing, p) + return nil, clues.Stack(errParsingPath, err).With("path_string", p) } if len(pb.elements) < 5 { - return nil, errors.Errorf("path has too few segments: %s", p) + return nil, clues.New("path has too few segments").With("path_string", p) } service, category, err := validateServiceAndCategoryStrings( @@ -436,7 +438,7 @@ func FromDataLayerPath(p string, isItem bool) (Path, error) { pb.elements[3], ) if err != nil { - return nil, errors.Wrapf(err, templateErrPathParsing, p) + return nil, clues.Stack(errParsingPath, err).With("path_string", p) } return &dataLayerResourcePath{ @@ -519,8 +521,8 @@ func validateEscapedElement(element string) error { prevWasEscape = false if _, ok := charactersToEscape[c]; !ok { - return errors.Errorf( - "bad escape sequence in path: '%c%c'", escapeCharacter, c) + return clues.New("bad escape sequence in path"). 
+ With("escape_sequence", fmt.Sprintf("'%c%c'", escapeCharacter, c)) } case false: @@ -530,7 +532,7 @@ func validateEscapedElement(element string) error { } if _, ok := charactersToEscape[c]; ok { - return errors.Errorf("unescaped '%c' in path", c) + return clues.New("unescaped character in path").With("character", c) } } } diff --git a/src/pkg/path/resource_path.go b/src/pkg/path/resource_path.go index 07f9f429c..57f41c6ff 100644 --- a/src/pkg/path/resource_path.go +++ b/src/pkg/path/resource_path.go @@ -1,8 +1,10 @@ package path import ( + "fmt" "strings" + "github.com/alcionai/clues" "github.com/pkg/errors" ) @@ -119,12 +121,12 @@ var serviceCategories = map[ServiceType]map[CategoryType]struct{}{ func validateServiceAndCategoryStrings(s, c string) (ServiceType, CategoryType, error) { service := toServiceType(s) if service == UnknownService { - return UnknownService, UnknownCategory, errors.Wrapf(ErrorUnknownService, "%q", s) + return UnknownService, UnknownCategory, clues.Stack(ErrorUnknownService).With("service", fmt.Sprintf("%q", s)) } category := ToCategoryType(c) if category == UnknownCategory { - return UnknownService, UnknownCategory, errors.Wrapf(ErrorUnknownCategory, "%q", c) + return UnknownService, UnknownCategory, clues.Stack(ErrorUnknownService).With("category", fmt.Sprintf("%q", c)) } if err := validateServiceAndCategory(service, category); err != nil { @@ -137,15 +139,12 @@ func validateServiceAndCategoryStrings(s, c string) (ServiceType, CategoryType, func validateServiceAndCategory(service ServiceType, category CategoryType) error { cats, ok := serviceCategories[service] if !ok { - return errors.New("unsupported service") + return clues.New("unsupported service").With("service", fmt.Sprintf("%q", service)) } if _, ok := cats[category]; !ok { - return errors.Errorf( - "unknown service/category combination %q/%q", - service, - category, - ) + return clues.New("unknown service/category combination"). 
+ WithAll("service", fmt.Sprintf("%q", service), "category", fmt.Sprintf("%q", category)) } return nil @@ -234,7 +233,7 @@ func (rp dataLayerResourcePath) Item() string { func (rp dataLayerResourcePath) Dir() (Path, error) { if len(rp.elements) <= 4 { - return nil, errors.Errorf("unable to shorten path %q", rp) + return nil, clues.New("unable to shorten path").With("path", fmt.Sprintf("%q", rp)) } return &dataLayerResourcePath{ diff --git a/src/pkg/repository/repository.go b/src/pkg/repository/repository.go index 087b193bc..a8f1e2827 100644 --- a/src/pkg/repository/repository.go +++ b/src/pkg/repository/repository.go @@ -89,13 +89,17 @@ func Initialize( s storage.Storage, opts control.Options, ) (Repository, error) { - ctx = clues.AddAll(ctx, "acct_provider", acct.Provider, "storage_provider", s.Provider) + ctx = clues.AddAll( + ctx, + "acct_provider", acct.Provider.String(), + "acct_id", acct.ID(), // TODO: pii + "storage_provider", s.Provider.String()) kopiaRef := kopia.NewConn(s) if err := kopiaRef.Initialize(ctx); err != nil { // replace common internal errors so that sdk users can check results with errors.Is() if kopia.IsRepoAlreadyExistsError(err) { - return nil, ErrorRepoAlreadyExists + return nil, clues.Stack(ErrorRepoAlreadyExists).WithClues(ctx) } return nil, errors.Wrap(err, "initializing kopia") @@ -153,7 +157,11 @@ func Connect( s storage.Storage, opts control.Options, ) (Repository, error) { - ctx = clues.AddAll(ctx, "acct_provider", acct.Provider, "storage_provider", s.Provider) + ctx = clues.AddAll( + ctx, + "acct_provider", acct.Provider.String(), + "acct_id", acct.ID(), // TODO: pii + "storage_provider", s.Provider.String()) // Close/Reset the progress bar. This ensures callers don't have to worry about // their output getting clobbered (#1720) @@ -210,12 +218,12 @@ func Connect( func (r *repository) Close(ctx context.Context) error { if err := r.Bus.Close(); err != nil { - logger.Ctx(ctx).Debugw("closing the event bus", "err", err) + logger.Ctx(ctx).With("err", err).Debugw("closing the event bus", clues.In(ctx).Slice()...) } if r.dataLayer != nil { if err := r.dataLayer.Close(ctx); err != nil { - logger.Ctx(ctx).Debugw("closing Datalayer", "err", err) + logger.Ctx(ctx).With("err", err).Debugw("closing Datalayer", clues.In(ctx).Slice()...) } r.dataLayer = nil @@ -223,7 +231,7 @@ func (r *repository) Close(ctx context.Context) error { if r.modelStore != nil { if err := r.modelStore.Close(ctx); err != nil { - logger.Ctx(ctx).Debugw("closing modelStore", "err", err) + logger.Ctx(ctx).With("err", err).Debugw("closing modelStore", clues.In(ctx).Slice()...) 
} r.modelStore = nil diff --git a/src/pkg/services/m365/m365.go b/src/pkg/services/m365/m365.go index 984326b6d..19d37f0d5 100644 --- a/src/pkg/services/m365/m365.go +++ b/src/pkg/services/m365/m365.go @@ -3,6 +3,7 @@ package m365 import ( "context" + "github.com/alcionai/clues" "github.com/microsoftgraph/msgraph-sdk-go/models" "github.com/pkg/errors" @@ -20,10 +21,10 @@ type User struct { // Users returns a list of users in the specified M365 tenant // TODO: Implement paging support -func Users(ctx context.Context, m365Account account.Account) ([]*User, error) { - gc, err := connector.NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), m365Account, connector.Users) +func Users(ctx context.Context, acct account.Account) ([]*User, error) { + gc, err := connector.NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), acct, connector.Users) if err != nil { - return nil, errors.Wrap(err, "could not initialize M365 graph connection") + return nil, errors.Wrap(err, "initializing M365 graph connection") } users, err := discovery.Users(ctx, gc.Owners.Users()) @@ -36,7 +37,7 @@ func Users(ctx context.Context, m365Account account.Account) ([]*User, error) { for _, u := range users { pu, err := parseUser(u) if err != nil { - return nil, err + return nil, errors.Wrap(err, "parsing userable") } ret = append(ret, pu) @@ -45,8 +46,8 @@ func Users(ctx context.Context, m365Account account.Account) ([]*User, error) { return ret, nil } -func UserIDs(ctx context.Context, m365Account account.Account) ([]string, error) { - users, err := Users(ctx, m365Account) +func UserIDs(ctx context.Context, acct account.Account) ([]string, error) { + users, err := Users(ctx, acct) if err != nil { return nil, err } @@ -61,8 +62,8 @@ func UserIDs(ctx context.Context, m365Account account.Account) ([]string, error) // UserPNs retrieves all user principleNames in the tenant. Principle Names // can be used analogous userIDs in graph API queries. 
-func UserPNs(ctx context.Context, m365Account account.Account) ([]string, error) { - users, err := Users(ctx, m365Account) +func UserPNs(ctx context.Context, acct account.Account) ([]string, error) { + users, err := Users(ctx, acct) if err != nil { return nil, err } @@ -76,20 +77,20 @@ func UserPNs(ctx context.Context, m365Account account.Account) ([]string, error) } // SiteURLs returns a list of SharePoint site WebURLs in the specified M365 tenant -func SiteURLs(ctx context.Context, m365Account account.Account) ([]string, error) { - gc, err := connector.NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), m365Account, connector.Sites) +func SiteURLs(ctx context.Context, acct account.Account) ([]string, error) { + gc, err := connector.NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), acct, connector.Sites) if err != nil { - return nil, errors.Wrap(err, "could not initialize M365 graph connection") + return nil, errors.Wrap(err, "initializing M365 graph connection") } return gc.GetSiteWebURLs(), nil } // SiteURLs returns a list of SharePoint sites IDs in the specified M365 tenant -func SiteIDs(ctx context.Context, m365Account account.Account) ([]string, error) { - gc, err := connector.NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), m365Account, connector.Sites) +func SiteIDs(ctx context.Context, acct account.Account) ([]string, error) { + gc, err := connector.NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), acct, connector.Sites) if err != nil { - return nil, errors.Wrap(err, "could not initialize M365 graph connection") + return nil, errors.Wrap(err, "initializing graph connection") } return gc.GetSiteIDs(), nil @@ -98,7 +99,8 @@ func SiteIDs(ctx context.Context, m365Account account.Account) ([]string, error) // parseUser extracts information from `models.Userable` we care about func parseUser(item models.Userable) (*User, error) { if item.GetUserPrincipalName() == nil { - return nil, errors.Errorf("no principal name for User: %s", *item.GetId()) + return nil, clues.New("user missing principal name"). 
+ With("user_id", *item.GetId()) // TODO: pii } u := &User{PrincipalName: *item.GetUserPrincipalName(), ID: *item.GetId()} diff --git a/src/pkg/storage/common.go b/src/pkg/storage/common.go index fd5bb24dd..e230e50fb 100644 --- a/src/pkg/storage/common.go +++ b/src/pkg/storage/common.go @@ -1,6 +1,7 @@ package storage import ( + "github.com/alcionai/clues" "github.com/pkg/errors" "github.com/alcionai/corso/src/pkg/credentials" @@ -45,7 +46,7 @@ func (s Storage) CommonConfig() (CommonConfig, error) { // ensures all required properties are present func (c CommonConfig) validate() error { if len(c.CorsoPassphrase) == 0 { - return errors.Wrap(errMissingRequired, credentials.CorsoPassphrase) + return clues.Stack(errMissingRequired, errors.New(credentials.CorsoPassphrase)) } // kopiaCfgFilePath is not required diff --git a/src/pkg/storage/s3.go b/src/pkg/storage/s3.go index 67711f421..78de67ce3 100644 --- a/src/pkg/storage/s3.go +++ b/src/pkg/storage/s3.go @@ -3,6 +3,7 @@ package storage import ( "strconv" + "github.com/alcionai/clues" "github.com/pkg/errors" "github.com/alcionai/corso/src/internal/common" @@ -81,7 +82,7 @@ func (c S3Config) validate() error { } for k, v := range check { if len(v) == 0 { - return errors.Wrap(errMissingRequired, k) + return clues.Stack(errMissingRequired, errors.New(k)) } } From 5537a119482763d0fa32a96cd0f19fba0b01b477 Mon Sep 17 00:00:00 2001 From: Keepers Date: Mon, 6 Feb 2023 15:06:50 -0700 Subject: [PATCH 08/45] add clues, fault to selectors (#2335) ## Description Adds clues and fault handling to selectors pkg. Some bleed upward into the CLI occured from where the cli directly calls selectors.Reduce. ## Does this PR need a docs update or release note? - [x] :no_entry: No ## Type of change - [x] :broom: Tech Debt/Cleanup ## Issue(s) * #1970 ## Test Plan - [x] :zap: Unit test - [x] :green_heart: E2E --- src/cli/backup/exchange.go | 22 ++++++++------ src/cli/backup/exchange_test.go | 32 ++++----------------- src/cli/backup/onedrive.go | 22 ++++++++------ src/cli/backup/onedrive_test.go | 14 ++++----- src/cli/backup/sharepoint.go | 22 ++++++++------ src/cli/backup/sharepoint_test.go | 14 ++++----- src/go.mod | 2 +- src/go.sum | 2 ++ src/internal/operations/restore.go | 2 +- src/pkg/selectors/example_selectors_test.go | 8 ++++-- src/pkg/selectors/exchange.go | 9 ++++-- src/pkg/selectors/exchange_test.go | 6 +++- src/pkg/selectors/onedrive.go | 9 ++++-- src/pkg/selectors/onedrive_test.go | 6 +++- src/pkg/selectors/scopes.go | 6 ++-- src/pkg/selectors/scopes_test.go | 7 ++++- src/pkg/selectors/selectors.go | 21 +++++++++----- src/pkg/selectors/selectors_reduce_test.go | 6 +++- src/pkg/selectors/sharepoint.go | 9 ++++-- src/pkg/selectors/sharepoint_test.go | 6 +++- 20 files changed, 134 insertions(+), 91 deletions(-) diff --git a/src/cli/backup/exchange.go b/src/cli/backup/exchange.go index ac6455522..1f6f6ae81 100644 --- a/src/cli/backup/exchange.go +++ b/src/cli/backup/exchange.go @@ -16,6 +16,7 @@ import ( "github.com/alcionai/corso/src/internal/model" "github.com/alcionai/corso/src/pkg/backup" "github.com/alcionai/corso/src/pkg/backup/details" + "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/repository" "github.com/alcionai/corso/src/pkg/selectors" @@ -470,9 +471,10 @@ func detailsExchangeCmd(cmd *cobra.Command, args []string) error { defer utils.CloseRepo(ctx, r) - ds, err := runDetailsExchangeCmd(ctx, r, backupID, opts) - if err != nil { - return Only(ctx, err) + ds, errs := 
runDetailsExchangeCmd(ctx, r, backupID, opts) + if errs.Err() != nil { + // TODO: log/display iterated errors + return Only(ctx, errs.Err()) } if len(ds.Entries) == 0 { @@ -486,29 +488,33 @@ func detailsExchangeCmd(cmd *cobra.Command, args []string) error { } // runDetailsExchangeCmd actually performs the lookup in backup details. +// the fault.Errors return is always non-nil. Callers should check if +// errs.Err() == nil. func runDetailsExchangeCmd( ctx context.Context, r repository.BackupGetter, backupID string, opts utils.ExchangeOpts, -) (*details.Details, error) { +) (*details.Details, *fault.Errors) { + errs := fault.New(false) + if err := utils.ValidateExchangeRestoreFlags(backupID, opts); err != nil { - return nil, err + return nil, errs.Fail(err) } d, _, err := r.BackupDetails(ctx, backupID) if err != nil { if errors.Is(err, kopia.ErrNotFound) { - return nil, errors.Errorf("No backup exists with the id %s", backupID) + return nil, errs.Fail(errors.Errorf("No backup exists with the id %s", backupID)) } - return nil, errors.Wrap(err, "Failed to get backup details in the repository") + return nil, errs.Fail(errors.Wrap(err, "Failed to get backup details in the repository")) } sel := utils.IncludeExchangeRestoreDataSelectors(opts) utils.FilterExchangeRestoreInfoSelectors(sel, opts) - return sel.Reduce(ctx, d), nil + return sel.Reduce(ctx, d, errs), errs } // ------------------------------------------------------------------------------------------------ diff --git a/src/cli/backup/exchange_test.go b/src/cli/backup/exchange_test.go index ecb7bec29..c67a5c15b 100644 --- a/src/cli/backup/exchange_test.go +++ b/src/cli/backup/exchange_test.go @@ -223,33 +223,14 @@ func (suite *ExchangeSuite) TestExchangeBackupDetailsSelectors() { ctx, test.BackupGetter, "backup-ID", - test.Opts, - ) - assert.NoError(t, err) - + test.Opts) + assert.NoError(t, err.Err(), "failure") + assert.Empty(t, err.Errs(), "recovered errors") assert.ElementsMatch(t, test.Expected, output.Entries) }) } } -func (suite *ExchangeSuite) TestExchangeBackupDetailsSelectorsBadBackupID() { - t := suite.T() - ctx, flush := tester.NewContext() - backupGetter := &testdata.MockBackupGetter{} - - defer flush() - - output, err := runDetailsExchangeCmd( - ctx, - backupGetter, - "backup-ID", - utils.ExchangeOpts{}, - ) - assert.Error(t, err) - - assert.Empty(t, output) -} - func (suite *ExchangeSuite) TestExchangeBackupDetailsSelectorsBadFormats() { ctx, flush := tester.NewContext() defer flush() @@ -260,10 +241,9 @@ func (suite *ExchangeSuite) TestExchangeBackupDetailsSelectorsBadFormats() { ctx, test.BackupGetter, "backup-ID", - test.Opts, - ) - - assert.Error(t, err) + test.Opts) + assert.Error(t, err.Err(), "failure") + assert.Empty(t, err.Errs(), "recovered errors") assert.Empty(t, output) }) } diff --git a/src/cli/backup/onedrive.go b/src/cli/backup/onedrive.go index 9dfb20b79..517477661 100644 --- a/src/cli/backup/onedrive.go +++ b/src/cli/backup/onedrive.go @@ -16,6 +16,7 @@ import ( "github.com/alcionai/corso/src/internal/model" "github.com/alcionai/corso/src/pkg/backup" "github.com/alcionai/corso/src/pkg/backup/details" + "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/repository" "github.com/alcionai/corso/src/pkg/selectors" @@ -362,9 +363,10 @@ func detailsOneDriveCmd(cmd *cobra.Command, args []string) error { Populated: utils.GetPopulatedFlags(cmd), } - ds, err := runDetailsOneDriveCmd(ctx, r, backupID, opts) - if err != nil { - return Only(ctx, err) + 
ds, errs := runDetailsOneDriveCmd(ctx, r, backupID, opts) + if errs.Err() != nil { + // TODO: log/display iterated errors + return Only(ctx, errs.Err()) } if len(ds.Entries) == 0 { @@ -378,29 +380,33 @@ func detailsOneDriveCmd(cmd *cobra.Command, args []string) error { } // runDetailsOneDriveCmd actually performs the lookup in backup details. +// the fault.Errors return is always non-nil. Callers should check if +// errs.Err() == nil. func runDetailsOneDriveCmd( ctx context.Context, r repository.BackupGetter, backupID string, opts utils.OneDriveOpts, -) (*details.Details, error) { +) (*details.Details, *fault.Errors) { + errs := fault.New(false) + if err := utils.ValidateOneDriveRestoreFlags(backupID, opts); err != nil { - return nil, err + return nil, errs.Fail(err) } d, _, err := r.BackupDetails(ctx, backupID) if err != nil { if errors.Is(err, kopia.ErrNotFound) { - return nil, errors.Errorf("no backup exists with the id %s", backupID) + return nil, errs.Fail(errors.Errorf("no backup exists with the id %s", backupID)) } - return nil, errors.Wrap(err, "Failed to get backup details in the repository") + return nil, errs.Fail(errors.Wrap(err, "Failed to get backup details in the repository")) } sel := utils.IncludeOneDriveRestoreDataSelectors(opts) utils.FilterOneDriveRestoreInfoSelectors(sel, opts) - return sel.Reduce(ctx, d), nil + return sel.Reduce(ctx, d, errs), errs } // `corso backup delete onedrive [...]` diff --git a/src/cli/backup/onedrive_test.go b/src/cli/backup/onedrive_test.go index 2f7654969..7fb2a38e3 100644 --- a/src/cli/backup/onedrive_test.go +++ b/src/cli/backup/onedrive_test.go @@ -98,10 +98,9 @@ func (suite *OneDriveSuite) TestOneDriveBackupDetailsSelectors() { ctx, test.BackupGetter, "backup-ID", - test.Opts, - ) - assert.NoError(t, err) - + test.Opts) + assert.NoError(t, err.Err()) + assert.Empty(t, err.Errs()) assert.ElementsMatch(t, test.Expected, output.Entries) }) } @@ -117,10 +116,9 @@ func (suite *OneDriveSuite) TestOneDriveBackupDetailsSelectorsBadFormats() { ctx, test.BackupGetter, "backup-ID", - test.Opts, - ) - - assert.Error(t, err) + test.Opts) + assert.Error(t, err.Err()) + assert.Empty(t, err.Errs()) assert.Empty(t, output) }) } diff --git a/src/cli/backup/sharepoint.go b/src/cli/backup/sharepoint.go index 45d885b80..e8d65752a 100644 --- a/src/cli/backup/sharepoint.go +++ b/src/cli/backup/sharepoint.go @@ -18,6 +18,7 @@ import ( "github.com/alcionai/corso/src/internal/model" "github.com/alcionai/corso/src/pkg/backup" "github.com/alcionai/corso/src/pkg/backup/details" + "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/repository" "github.com/alcionai/corso/src/pkg/selectors" @@ -481,9 +482,10 @@ func detailsSharePointCmd(cmd *cobra.Command, args []string) error { Populated: utils.GetPopulatedFlags(cmd), } - ds, err := runDetailsSharePointCmd(ctx, r, backupID, opts) - if err != nil { - return Only(ctx, err) + ds, errs := runDetailsSharePointCmd(ctx, r, backupID, opts) + if errs.Err() != nil { + // TODO: log/display iterated errors + return Only(ctx, errs.Err()) } if len(ds.Entries) == 0 { @@ -497,27 +499,31 @@ func detailsSharePointCmd(cmd *cobra.Command, args []string) error { } // runDetailsSharePointCmd actually performs the lookup in backup details. +// the fault.Errors return is always non-nil. Callers should check if +// errs.Err() == nil. 
func runDetailsSharePointCmd( ctx context.Context, r repository.BackupGetter, backupID string, opts utils.SharePointOpts, -) (*details.Details, error) { +) (*details.Details, *fault.Errors) { + errs := fault.New(false) + if err := utils.ValidateSharePointRestoreFlags(backupID, opts); err != nil { - return nil, err + return nil, errs.Fail(err) } d, _, err := r.BackupDetails(ctx, backupID) if err != nil { if errors.Is(err, kopia.ErrNotFound) { - return nil, errors.Errorf("no backup exists with the id %s", backupID) + return nil, errs.Fail(errors.Errorf("no backup exists with the id %s", backupID)) } - return nil, errors.Wrap(err, "Failed to get backup details in the repository") + return nil, errs.Fail(errors.Wrap(err, "Failed to get backup details in the repository")) } sel := utils.IncludeSharePointRestoreDataSelectors(opts) utils.FilterSharePointRestoreInfoSelectors(sel, opts) - return sel.Reduce(ctx, d), nil + return sel.Reduce(ctx, d, errs), errs } diff --git a/src/cli/backup/sharepoint_test.go b/src/cli/backup/sharepoint_test.go index 89e40a9f3..a46deeeff 100644 --- a/src/cli/backup/sharepoint_test.go +++ b/src/cli/backup/sharepoint_test.go @@ -213,10 +213,9 @@ func (suite *SharePointSuite) TestSharePointBackupDetailsSelectors() { ctx, test.BackupGetter, "backup-ID", - test.Opts, - ) - assert.NoError(t, err) - + test.Opts) + assert.NoError(t, err.Err()) + assert.Empty(t, err.Errs()) assert.ElementsMatch(t, test.Expected, output.Entries) }) } @@ -232,10 +231,9 @@ func (suite *SharePointSuite) TestSharePointBackupDetailsSelectorsBadFormats() { ctx, test.BackupGetter, "backup-ID", - test.Opts, - ) - - assert.Error(t, err) + test.Opts) + assert.Error(t, err.Err()) + assert.Empty(t, err.Errs()) assert.Empty(t, output) }) } diff --git a/src/go.mod b/src/go.mod index bf43f6494..0d9792c5d 100644 --- a/src/go.mod +++ b/src/go.mod @@ -4,7 +4,7 @@ go 1.19 require ( github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0 - github.com/alcionai/clues v0.0.0-20230131232239-cee86233b005 + github.com/alcionai/clues v0.0.0-20230202001016-cbda58c9de9e github.com/aws/aws-sdk-go v1.44.192 github.com/aws/aws-xray-sdk-go v1.8.0 github.com/google/uuid v1.3.0 diff --git a/src/go.sum b/src/go.sum index 17d6f25bd..566adbca0 100644 --- a/src/go.sum +++ b/src/go.sum @@ -54,6 +54,8 @@ github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpH github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo= github.com/alcionai/clues v0.0.0-20230131232239-cee86233b005 h1:eTgICcmcydEWG8J+hgnidf0pzujV3Gd2XqmknykZkzA= github.com/alcionai/clues v0.0.0-20230131232239-cee86233b005/go.mod h1:UlAs8jkWIpsOMakiC8NxPgQQVQRdvyf1hYMszlYYLb4= +github.com/alcionai/clues v0.0.0-20230202001016-cbda58c9de9e h1:KMRGDB9lh0wC/WYVmQ28MJ07qiHszCSH2PRwkw2YElM= +github.com/alcionai/clues v0.0.0-20230202001016-cbda58c9de9e/go.mod h1:UlAs8jkWIpsOMakiC8NxPgQQVQRdvyf1hYMszlYYLb4= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= diff --git a/src/internal/operations/restore.go b/src/internal/operations/restore.go index 5fd8c9ffb..a87243a9e 100644 --- a/src/internal/operations/restore.go +++ b/src/internal/operations/restore.go @@ -344,7 +344,7 @@ func 
formatDetailsForRestoration( deets *details.Details, errs *fault.Errors, ) ([]path.Path, error) { - fds, err := sel.Reduce(ctx, deets) + fds, err := sel.Reduce(ctx, deets, errs) if err != nil { return nil, err } diff --git a/src/pkg/selectors/example_selectors_test.go b/src/pkg/selectors/example_selectors_test.go index d802585cc..ff556a306 100644 --- a/src/pkg/selectors/example_selectors_test.go +++ b/src/pkg/selectors/example_selectors_test.go @@ -5,6 +5,7 @@ import ( "fmt" "github.com/alcionai/corso/src/pkg/backup/details" + "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/selectors" ) @@ -52,7 +53,7 @@ func Example_newSelector() { bSel.ToOneDriveBackup() } - // Output: OneDrive service is not Exchange: wrong selector service type + // Output: wrong selector service type: OneDrive is not Exchange } // ExampleIncludeFoldersAndItems demonstrates how to select for granular data. @@ -141,10 +142,11 @@ func Example_reduceDetails() { ser := selectors.NewExchangeRestore( []string{"your-user-id", "foo-user-id", "bar-user-id"}, ) + errAgg := fault.New(false) // The Reduce() call is where our constructed selectors are applied to the data // from a previous backup record. - filteredDetails := ser.Reduce(ctxBG, exampleDetails) + filteredDetails := ser.Reduce(ctxBG, exampleDetails, errAgg) // We haven't added any scopes to our selector yet, so none of the data is retained. fmt.Println("Before adding scopes:", len(filteredDetails.Entries)) @@ -153,7 +155,7 @@ func Example_reduceDetails() { ser.Filter(ser.MailSubject("the answer to life")) // Now that we've selected our data, we should find a result. - filteredDetails = ser.Reduce(ctxBG, exampleDetails) + filteredDetails = ser.Reduce(ctxBG, exampleDetails, errAgg) fmt.Println("After adding scopes:", len(filteredDetails.Entries)) // Output: Before adding scopes: 0 diff --git a/src/pkg/selectors/exchange.go b/src/pkg/selectors/exchange.go index fd3a29e65..a5547749f 100644 --- a/src/pkg/selectors/exchange.go +++ b/src/pkg/selectors/exchange.go @@ -6,6 +6,7 @@ import ( "github.com/alcionai/corso/src/internal/common" "github.com/alcionai/corso/src/pkg/backup/details" + "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/filters" "github.com/alcionai/corso/src/pkg/path" ) @@ -704,7 +705,11 @@ func (s ExchangeScope) setDefaults() { // Reduce filters the entries in a details struct to only those that match the // inclusions, filters, and exclusions in the selector. 
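+// Recoverable per-entry failures (e.g. repo refs that fail to parse)
+// are aggregated into errs rather than aborting the reduction.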
-func (s exchange) Reduce(ctx context.Context, deets *details.Details) *details.Details { +func (s exchange) Reduce( + ctx context.Context, + deets *details.Details, + errs fault.Adder, +) *details.Details { return reduce[ExchangeScope]( ctx, deets, @@ -714,7 +719,7 @@ func (s exchange) Reduce(ctx context.Context, deets *details.Details) *details.D path.EventsCategory: ExchangeEvent, path.EmailCategory: ExchangeMail, }, - ) + errs) } // matchesInfo handles the standard behavior when comparing a scope and an ExchangeFilter diff --git a/src/pkg/selectors/exchange_test.go b/src/pkg/selectors/exchange_test.go index df556895f..830cde0c0 100644 --- a/src/pkg/selectors/exchange_test.go +++ b/src/pkg/selectors/exchange_test.go @@ -11,6 +11,7 @@ import ( "github.com/alcionai/corso/src/internal/common" "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/pkg/backup/details" + "github.com/alcionai/corso/src/pkg/fault/mock" "github.com/alcionai/corso/src/pkg/filters" "github.com/alcionai/corso/src/pkg/path" ) @@ -1029,10 +1030,13 @@ func (suite *ExchangeSelectorSuite) TestExchangeRestore_Reduce() { ctx, flush := tester.NewContext() defer flush() + errs := mock.NewAdder() + sel := test.makeSelector() - results := sel.Reduce(ctx, test.deets) + results := sel.Reduce(ctx, test.deets, errs) paths := results.Paths() assert.Equal(t, test.expect, paths) + assert.Empty(t, errs.Errs) }) } } diff --git a/src/pkg/selectors/onedrive.go b/src/pkg/selectors/onedrive.go index 14ece70fb..f4d924a3b 100644 --- a/src/pkg/selectors/onedrive.go +++ b/src/pkg/selectors/onedrive.go @@ -5,6 +5,7 @@ import ( "github.com/alcionai/corso/src/internal/common" "github.com/alcionai/corso/src/pkg/backup/details" + "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/filters" "github.com/alcionai/corso/src/pkg/path" ) @@ -483,7 +484,11 @@ func (s OneDriveScope) DiscreteCopy(user string) OneDriveScope { // Reduce filters the entries in a details struct to only those that match the // inclusions, filters, and exclusions in the selector. 
-func (s oneDrive) Reduce(ctx context.Context, deets *details.Details) *details.Details { +func (s oneDrive) Reduce( + ctx context.Context, + deets *details.Details, + errs fault.Adder, +) *details.Details { return reduce[OneDriveScope]( ctx, deets, @@ -491,7 +496,7 @@ func (s oneDrive) Reduce(ctx context.Context, deets *details.Details) *details.D map[path.CategoryType]oneDriveCategory{ path.FilesCategory: OneDriveItem, }, - ) + errs) } // matchesInfo handles the standard behavior when comparing a scope and an oneDriveInfo diff --git a/src/pkg/selectors/onedrive_test.go b/src/pkg/selectors/onedrive_test.go index 1efcb1f3b..273019519 100644 --- a/src/pkg/selectors/onedrive_test.go +++ b/src/pkg/selectors/onedrive_test.go @@ -11,6 +11,7 @@ import ( "github.com/alcionai/corso/src/internal/common" "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/pkg/backup/details" + "github.com/alcionai/corso/src/pkg/fault/mock" "github.com/alcionai/corso/src/pkg/path" ) @@ -241,10 +242,13 @@ func (suite *OneDriveSelectorSuite) TestOneDriveRestore_Reduce() { ctx, flush := tester.NewContext() defer flush() + errs := mock.NewAdder() + sel := test.makeSelector() - results := sel.Reduce(ctx, test.deets) + results := sel.Reduce(ctx, test.deets, errs) paths := results.Paths() assert.Equal(t, test.expect, paths) + assert.Empty(t, errs.Errs) }) } } diff --git a/src/pkg/selectors/scopes.go b/src/pkg/selectors/scopes.go index 5dc5c4513..9ea595897 100644 --- a/src/pkg/selectors/scopes.go +++ b/src/pkg/selectors/scopes.go @@ -5,10 +5,11 @@ import ( "golang.org/x/exp/maps" + "github.com/alcionai/clues" D "github.com/alcionai/corso/src/internal/diagnostics" "github.com/alcionai/corso/src/pkg/backup/details" + "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/filters" - "github.com/alcionai/corso/src/pkg/logger" "github.com/alcionai/corso/src/pkg/path" ) @@ -286,6 +287,7 @@ func reduce[T scopeT, C categoryT]( deets *details.Details, s Selector, dataCategories map[path.CategoryType]C, + errs fault.Adder, ) *details.Details { ctx, end := D.Span(ctx, "selectors:reduce") defer end() @@ -311,7 +313,7 @@ func reduce[T scopeT, C categoryT]( for _, ent := range deets.Items() { repoPath, err := path.FromDataLayerPath(ent.RepoRef, true) if err != nil { - logger.Ctx(ctx).Debugw("transforming repoRef to path", "err", err) + errs.Add(clues.Wrap(err, "transforming repoRef to path").WithClues(ctx)) continue } diff --git a/src/pkg/selectors/scopes_test.go b/src/pkg/selectors/scopes_test.go index 9758b1109..848d55767 100644 --- a/src/pkg/selectors/scopes_test.go +++ b/src/pkg/selectors/scopes_test.go @@ -9,6 +9,7 @@ import ( "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/pkg/backup/details" + "github.com/alcionai/corso/src/pkg/fault/mock" "github.com/alcionai/corso/src/pkg/filters" "github.com/alcionai/corso/src/pkg/path" ) @@ -273,13 +274,17 @@ func (suite *SelectorScopesSuite) TestReduce() { ctx, flush := tester.NewContext() defer flush() + errs := mock.NewAdder() + ds := deets() result := reduce[mockScope]( ctx, &ds, test.sel().Selector, - dataCats) + dataCats, + errs) require.NotNil(t, result) + require.Empty(t, errs.Errs, "iteration errors") assert.Len(t, result.Entries, test.expectLen) }) } diff --git a/src/pkg/selectors/selectors.go b/src/pkg/selectors/selectors.go index 505ff1a7b..8a76b8f38 100644 --- a/src/pkg/selectors/selectors.go +++ b/src/pkg/selectors/selectors.go @@ -5,9 +5,11 @@ import ( "encoding/json" "strings" + 
"github.com/alcionai/clues" "github.com/pkg/errors" "github.com/alcionai/corso/src/pkg/backup/details" + "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/filters" "github.com/alcionai/corso/src/pkg/path" ) @@ -30,8 +32,9 @@ var serviceToPathType = map[service]path.ServiceType{ } var ( - ErrorBadSelectorCast = errors.New("wrong selector service type") - ErrorNoMatchingItems = errors.New("no items match the specified selectors") + ErrorBadSelectorCast = errors.New("wrong selector service type") + ErrorNoMatchingItems = errors.New("no items match the specified selectors") + ErrorUnrecognizedService = errors.New("unrecognized service") ) const ( @@ -67,7 +70,7 @@ var ( const All = "All" type Reducer interface { - Reduce(context.Context, *details.Details) *details.Details + Reduce(context.Context, *details.Details, fault.Adder) *details.Details } // selectorResourceOwners aggregates all discrete path category types described @@ -234,13 +237,17 @@ func (s Selector) PathService() path.ServiceType { // from the generic selector by interpreting the selector service type rather // than have the caller make that interpretation. Returns an error if the // service is unsupported. -func (s Selector) Reduce(ctx context.Context, deets *details.Details) (*details.Details, error) { +func (s Selector) Reduce( + ctx context.Context, + deets *details.Details, + errs fault.Adder, +) (*details.Details, error) { r, err := selectorAsIface[Reducer](s) if err != nil { return nil, err } - return r.Reduce(ctx, deets), nil + return r.Reduce(ctx, deets, errs), nil } // returns the sets of path categories identified in each scope set. @@ -272,7 +279,7 @@ func selectorAsIface[T any](s Selector) (T, error) { a, err = func() (any, error) { return s.ToSharePointRestore() }() t = a.(T) default: - err = errors.New("service not supported: " + s.Service.String()) + err = clues.Stack(ErrorUnrecognizedService, errors.New(s.Service.String())) } return t, err @@ -374,7 +381,7 @@ func pathComparator() option { } func badCastErr(cast, is service) error { - return errors.Wrapf(ErrorBadSelectorCast, "%s service is not %s", cast, is) + return clues.Stack(ErrorBadSelectorCast, errors.Errorf("%s is not %s", cast, is)) } func join(s ...string) string { diff --git a/src/pkg/selectors/selectors_reduce_test.go b/src/pkg/selectors/selectors_reduce_test.go index 67b6bc9e2..3748f793a 100644 --- a/src/pkg/selectors/selectors_reduce_test.go +++ b/src/pkg/selectors/selectors_reduce_test.go @@ -10,6 +10,7 @@ import ( "github.com/alcionai/corso/src/internal/common" "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/pkg/backup/details" + "github.com/alcionai/corso/src/pkg/fault/mock" "github.com/alcionai/corso/src/pkg/selectors" "github.com/alcionai/corso/src/pkg/selectors/testdata" ) @@ -264,8 +265,11 @@ func (suite *SelectorReduceSuite) TestReduce() { for _, test := range table { suite.T().Run(test.name, func(t *testing.T) { - output := test.selFunc().Reduce(ctx, allDetails) + errs := mock.NewAdder() + + output := test.selFunc().Reduce(ctx, allDetails, errs) assert.ElementsMatch(t, test.expected, output.Entries) + assert.Empty(t, errs.Errs) }) } } diff --git a/src/pkg/selectors/sharepoint.go b/src/pkg/selectors/sharepoint.go index 7138acd6c..1df132e93 100644 --- a/src/pkg/selectors/sharepoint.go +++ b/src/pkg/selectors/sharepoint.go @@ -4,6 +4,7 @@ import ( "context" "github.com/alcionai/corso/src/pkg/backup/details" + "github.com/alcionai/corso/src/pkg/fault" 
"github.com/alcionai/corso/src/pkg/path" ) @@ -555,7 +556,11 @@ func (s SharePointScope) DiscreteCopy(site string) SharePointScope { // Reduce filters the entries in a details struct to only those that match the // inclusions, filters, and exclusions in the selector. -func (s sharePoint) Reduce(ctx context.Context, deets *details.Details) *details.Details { +func (s sharePoint) Reduce( + ctx context.Context, + deets *details.Details, + errs fault.Adder, +) *details.Details { return reduce[SharePointScope]( ctx, deets, @@ -565,7 +570,7 @@ func (s sharePoint) Reduce(ctx context.Context, deets *details.Details) *details path.ListsCategory: SharePointListItem, path.PagesCategory: SharePointPage, }, - ) + errs) } // matchesInfo handles the standard behavior when comparing a scope and an sharePointInfo diff --git a/src/pkg/selectors/sharepoint_test.go b/src/pkg/selectors/sharepoint_test.go index 4ce3859cd..2bf3f585c 100644 --- a/src/pkg/selectors/sharepoint_test.go +++ b/src/pkg/selectors/sharepoint_test.go @@ -9,6 +9,7 @@ import ( "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/pkg/backup/details" + "github.com/alcionai/corso/src/pkg/fault/mock" "github.com/alcionai/corso/src/pkg/path" ) @@ -305,10 +306,13 @@ func (suite *SharePointSelectorSuite) TestSharePointRestore_Reduce() { ctx, flush := tester.NewContext() defer flush() + errs := mock.NewAdder() + sel := test.makeSelector() - results := sel.Reduce(ctx, test.deets) + results := sel.Reduce(ctx, test.deets, errs) paths := results.Paths() assert.Equal(t, test.expect, paths) + assert.Empty(t, errs.Errs) }) } } From 313f05fcb6f832006f7fe26720f2cecd1d6110a9 Mon Sep 17 00:00:00 2001 From: Keepers Date: Mon, 6 Feb 2023 17:44:35 -0700 Subject: [PATCH 09/45] add clues, fault to small /internal pkgs (#2336) --- src/internal/common/errors.go | 2 ++ src/internal/common/slices.go | 1 + src/internal/common/time.go | 14 ++++++++----- src/internal/events/events.go | 4 ++-- src/internal/streamstore/streamstore.go | 27 +++++++++++++------------ src/pkg/backup/details/details.go | 6 +++--- src/pkg/path/onedrive.go | 2 +- 7 files changed, 32 insertions(+), 24 deletions(-) diff --git a/src/internal/common/errors.go b/src/internal/common/errors.go index 2eb3ba891..82dea7b38 100644 --- a/src/internal/common/errors.go +++ b/src/internal/common/errors.go @@ -5,6 +5,8 @@ import ( "io" ) +// TODO: Remove in favor of clues.Stack() + // Err provides boiler-plate functions that other types of errors can use // if they wish to be compared with `errors.As()`. 
This struct ensures that // stack traces are printed when requested (if present) and that Err diff --git a/src/internal/common/slices.go b/src/internal/common/slices.go index 7600400e9..73c7c951e 100644 --- a/src/internal/common/slices.go +++ b/src/internal/common/slices.go @@ -1,5 +1,6 @@ package common +// TODO: can be replaced with slices.Contains() func ContainsString(super []string, sub string) bool { for _, s := range super { if s == sub { diff --git a/src/internal/common/time.go b/src/internal/common/time.go index de430cf8b..f747bef4c 100644 --- a/src/internal/common/time.go +++ b/src/internal/common/time.go @@ -4,6 +4,7 @@ import ( "regexp" "time" + "github.com/alcionai/clues" "github.com/pkg/errors" ) @@ -85,7 +86,10 @@ var ( } ) -var ErrNoTimeString = errors.New("no substring contains a known time format") +var ( + ErrNoTimeString = errors.New("no substring contains a known time format") + errParsingStringToTime = errors.New("parsing string as time.Time") +) // Now produces the current time as a string in the standard format. func Now() string { @@ -132,7 +136,7 @@ func FormatLegacyTime(t time.Time) string { // the provided string. Always returns a UTC timezone value. func ParseTime(s string) (time.Time, error) { if len(s) == 0 { - return time.Time{}, errors.New("cannot interpret an empty string as time.Time") + return time.Time{}, clues.Stack(errParsingStringToTime, errors.New("empty string")) } for _, form := range formats { @@ -142,14 +146,14 @@ func ParseTime(s string) (time.Time, error) { } } - return time.Time{}, errors.New("unable to parse time string: " + s) + return time.Time{}, clues.Stack(errParsingStringToTime, errors.New(s)) } // ExtractTime greedily retrieves a timestamp substring from the provided string. // returns ErrNoTimeString if no match is found. 
func ExtractTime(s string) (time.Time, error) { if len(s) == 0 { - return time.Time{}, errors.New("cannot extract time.Time from an empty string") + return time.Time{}, clues.Stack(errParsingStringToTime, errors.New("empty string")) } for _, re := range regexes { @@ -159,5 +163,5 @@ func ExtractTime(s string) (time.Time, error) { } } - return time.Time{}, errors.Wrap(ErrNoTimeString, s) + return time.Time{}, clues.Stack(ErrNoTimeString, errors.New(s)) } diff --git a/src/internal/events/events.go b/src/internal/events/events.go index 58b57f63f..d91c734c9 100644 --- a/src/internal/events/events.go +++ b/src/internal/events/events.go @@ -7,7 +7,7 @@ import ( "os" "time" - "github.com/pkg/errors" + "github.com/alcionai/clues" analytics "github.com/rudderlabs/analytics-go" "github.com/alcionai/corso/src/internal/version" @@ -93,7 +93,7 @@ func NewBus(ctx context.Context, s storage.Storage, tenID string, opts control.O }) if err != nil { - return Bus{}, errors.Wrap(err, "configuring event bus") + return Bus{}, clues.Wrap(err, "configuring event bus").WithClues(ctx) } } diff --git a/src/internal/streamstore/streamstore.go b/src/internal/streamstore/streamstore.go index fb5dfccd9..d2fd4b654 100644 --- a/src/internal/streamstore/streamstore.go +++ b/src/internal/streamstore/streamstore.go @@ -8,6 +8,7 @@ import ( "encoding/json" "io" + "github.com/alcionai/clues" "github.com/pkg/errors" "github.com/alcionai/corso/src/internal/data" @@ -52,17 +53,16 @@ func (ss *streamStore) WriteBackupDetails( ss.tenant, collectionPurposeDetails, ss.service, - false, - ) + false) if err != nil { - return "", err + return "", clues.Stack(err).WithClues(ctx) } // TODO: We could use an io.Pipe here to avoid a double copy but that // makes error handling a bit complicated dbytes, err := json.Marshal(backupDetails) if err != nil { - return "", errors.Wrap(err, "marshalling backup details") + return "", clues.Wrap(err, "marshalling backup details").WithClues(ctx) } dc := &streamCollection{ @@ -79,10 +79,9 @@ func (ss *streamStore) WriteBackupDetails( []data.Collection{dc}, nil, nil, - false, - ) + false) if err != nil { - return "", nil + return "", errors.Wrap(err, "storing details in repository") } return backupStats.SnapshotID, nil @@ -104,7 +103,7 @@ func (ss *streamStore) ReadBackupDetails( true, ) if err != nil { - return nil, err + return nil, clues.Stack(err).WithClues(ctx) } var bc stats.ByteCounter @@ -116,7 +115,9 @@ func (ss *streamStore) ReadBackupDetails( // Expect only 1 data collection if len(dcs) != 1 { - return nil, errors.Errorf("expected 1 details data collection: %d", len(dcs)) + return nil, clues.New("greater than 1 details data collection found"). + WithClues(ctx). 
+ With("collection_count", len(dcs)) } dc := dcs[0] @@ -129,12 +130,12 @@ func (ss *streamStore) ReadBackupDetails( for { select { case <-ctx.Done(): - return nil, errors.New("context cancelled waiting for backup details data") + return nil, clues.New("context cancelled waiting for backup details data").WithClues(ctx) case itemData, ok := <-items: if !ok { if !found { - return nil, errors.New("no backup details found") + return nil, clues.New("no backup details found").WithClues(ctx) } return &d, nil @@ -142,7 +143,7 @@ func (ss *streamStore) ReadBackupDetails( err := json.NewDecoder(itemData.ToReader()).Decode(&d) if err != nil { - return nil, errors.Wrap(err, "failed to decode details data from repository") + return nil, clues.Wrap(err, "decoding details data").WithClues(ctx) } found = true @@ -157,7 +158,7 @@ func (ss *streamStore) DeleteBackupDetails( ) error { err := ss.kw.DeleteSnapshot(ctx, detailsID) if err != nil { - return errors.Wrap(err, "deleting backup details failed") + return errors.Wrap(err, "deleting backup details") } return nil diff --git a/src/pkg/backup/details/details.go b/src/pkg/backup/details/details.go index f244c72c9..bb392c223 100644 --- a/src/pkg/backup/details/details.go +++ b/src/pkg/backup/details/details.go @@ -6,8 +6,8 @@ import ( "sync" "time" + "github.com/alcionai/clues" "github.com/dustin/go-humanize" - "github.com/pkg/errors" "github.com/alcionai/corso/src/cli/print" "github.com/alcionai/corso/src/internal/common" @@ -509,7 +509,7 @@ func (i SharePointInfo) Values() []string { func (i *SharePointInfo) UpdateParentPath(newPath path.Path) error { newParent, err := path.GetDriveFolderPath(newPath) if err != nil { - return errors.Wrapf(err, "making sharepoint path from %s", newPath) + return clues.Wrap(err, "making sharePoint path").With("path", newPath) } i.ParentPath = newParent @@ -551,7 +551,7 @@ func (i OneDriveInfo) Values() []string { func (i *OneDriveInfo) UpdateParentPath(newPath path.Path) error { newParent, err := path.GetDriveFolderPath(newPath) if err != nil { - return errors.Wrapf(err, "making drive path from %s", newPath) + return clues.Wrap(err, "making oneDrive path").With("path", newPath) } i.ParentPath = newParent diff --git a/src/pkg/path/onedrive.go b/src/pkg/path/onedrive.go index 86d40c887..35738289c 100644 --- a/src/pkg/path/onedrive.go +++ b/src/pkg/path/onedrive.go @@ -20,7 +20,7 @@ func ToOneDrivePath(p Path) (*DrivePath, error) { if len(folders) < 3 { return nil, clues. New("folder path doesn't match expected format for OneDrive items"). - With("folders", p.Folder()) + With("path_folders", p.Folder()) } return &DrivePath{DriveID: folders[1], Folders: folders[3:]}, nil From 17b698b854b9fa3c770d6ff29ee00fe4011f46ca Mon Sep 17 00:00:00 2001 From: Keepers Date: Mon, 6 Feb 2023 18:32:09 -0700 Subject: [PATCH 10/45] logging standards examples (#2411) ## Description Adds an example set to the logging for logging code standards guidance. ## Does this PR need a docs update or release note? 
- [x] :no_entry: No

## Type of change

- [x] :world_map: Documentation

## Issue(s)

* #1970

## Test Plan

- [x] :zap: Unit test

---
 src/go.sum                            |   2 -
 src/pkg/fault/example_fault_test.go   |  48 ++++++++++
 src/pkg/logger/example_logger_test.go | 125 ++++++++++++++++++++++++++
 website/docs/developers/logging.md    |   3 +
 4 files changed, 176 insertions(+), 2 deletions(-)
 create mode 100644 src/pkg/logger/example_logger_test.go
 create mode 100644 website/docs/developers/logging.md

diff --git a/src/go.sum b/src/go.sum
index 566adbca0..d3c038ca4 100644
--- a/src/go.sum
+++ b/src/go.sum
@@ -52,8 +52,6 @@ github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1o
 github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
 github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8=
 github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo=
-github.com/alcionai/clues v0.0.0-20230131232239-cee86233b005 h1:eTgICcmcydEWG8J+hgnidf0pzujV3Gd2XqmknykZkzA=
-github.com/alcionai/clues v0.0.0-20230131232239-cee86233b005/go.mod h1:UlAs8jkWIpsOMakiC8NxPgQQVQRdvyf1hYMszlYYLb4=
 github.com/alcionai/clues v0.0.0-20230202001016-cbda58c9de9e h1:KMRGDB9lh0wC/WYVmQ28MJ07qiHszCSH2PRwkw2YElM=
 github.com/alcionai/clues v0.0.0-20230202001016-cbda58c9de9e/go.mod h1:UlAs8jkWIpsOMakiC8NxPgQQVQRdvyf1hYMszlYYLb4=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
diff --git a/src/pkg/fault/example_fault_test.go b/src/pkg/fault/example_fault_test.go
index 4ad5945d2..f1e614996 100644
--- a/src/pkg/fault/example_fault_test.go
+++ b/src/pkg/fault/example_fault_test.go
@@ -34,6 +34,14 @@ type mockOper struct {
 func newOperation() mockOper          { return mockOper{fault.New(true)} }
 func (m mockOper) Run() *fault.Errors { return m.Errors }

+type mockDependency struct{}
+
+func (md mockDependency) do() error {
+	return errors.New("caught one")
+}
+
+var dependency = mockDependency{}
+
 // ---------------------------------------------------------------------------
 // examples
 // ---------------------------------------------------------------------------
@@ -289,3 +297,43 @@ func Example_errors_e2e() {
 		fmt.Println("recoverable err occurred", err)
 	}
 }
+
+// Example_errors_err showcases when to return err or nil vs errs.Err()
+func Example_errors_err() {
+	// The general rule of thumb is to always handle the error directly
+	// by returning err, or nil, or any variety of extension (wrap,
+	// stack, clues, etc).
+	fn := func() error {
+		if err := dependency.do(); err != nil {
+			return errors.Wrap(err, "direct")
+		}
+
+		return nil
+	}
+	if err := fn(); err != nil {
+		fmt.Println(err)
+	}
+
+	// The exception is if you're handling recoverable errors. Those
+	// funcs should always return errs.Err(), in case a recoverable
+	// error happened on the last round of iteration.
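+	// (fn2 below records each recoverable failure with errs.Add and
+	// reports the aggregate through errs.Err().)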
+	fn2 := func(todo []string, errs *fault.Errors) error {
+		for range todo {
+			if errs.Err() != nil {
+				return errs.Err()
+			}
+
+			if err := dependency.do(); err != nil {
+				errs.Add(errors.Wrap(err, "recoverable"))
+			}
+		}
+
+		return errs.Err()
+	}
+	if err := fn2([]string{"a"}, fault.New(true)); err != nil {
+		fmt.Println(err)
+	}
+
+	// Output: direct: caught one
+	// recoverable: caught one
+}
diff --git a/src/pkg/logger/example_logger_test.go b/src/pkg/logger/example_logger_test.go
new file mode 100644
index 000000000..d04cc02ff
--- /dev/null
+++ b/src/pkg/logger/example_logger_test.go
@@ -0,0 +1,125 @@
+package logger_test
+
+import (
+	"context"
+
+	"github.com/alcionai/clues"
+	"github.com/alcionai/corso/src/pkg/logger"
+)
+
+// ---------------------------------------------------------------------------
+// mock helpers
+// ---------------------------------------------------------------------------
+
+const (
+	loglevel = "info"
+	logfile  = "stderr"
+	itemID   = "item_id"
+)
+
+var err error
+
+// ---------------------------------------------------------------------------
+// examples
+// ---------------------------------------------------------------------------
+
+// Example_seed showcases seeding a logger into the context.
+func Example_seed() {
+	// Before logging, a logger instance first needs to get seeded into
+	// the context. Seeding only needs to be done once. For example,
+	// Corso's CLI layer seeds the logger in the CLI initialization.
+	ctx := context.Background()
+	ctx, log := logger.Seed(ctx, loglevel, logfile)
+
+	// SDK consumers who configure their own zap logger can Set their logger
+	// into the context directly, instead of Seeding a new one.
+	ctx = logger.Set(ctx, log)
+
+	// logs should always be flushed before exiting whichever func
+	// seeded the logger.
+	defer func() {
+		_ = log.Sync() // flush all logs in the buffer
+	}()
+
+	// downstream, the logger will retrieve its configuration from
+	// the context.
+	func(ctx context.Context) {
+		log := logger.Ctx(ctx)
+		log.Info("hello, world!")
+	}(ctx)
+}
+
+// Example_logger_standards reviews code standards around logging in Corso.
+func Example_logger_standards() {
+	log := logger.Ctx(context.Background())
+
+	// 1. Keep messages short. Lowercase text, no ending punctuation.
+	// This ensures logs are easy to scan, and simple to grok.
+	//
+	// preferred
+	log.Info("getting item")
+	// avoid
+	log.Info("Getting one item from the service so that we can send it through the item feed.")
+
+	// 2. Do not fmt values into the message. Use With() or -w() to add structured data.
+	// By keeping dynamic data in a structured format, we maximize log readability,
+	// and make logs very easy to search or filter in bulk, and very easy to control pii.
+	//
+	// preferred
+	log.With("err", err).Error("getting item")
+	log.Errorw("getting item", "err", err)
+	// avoid
+	log.Errorf("getting item %s: %v", itemID, err)
+
+	// 3. Give data keys reasonable namespaces. Use snake_case.
+	// Overly generic keys can collide unexpectedly.
+	//
+	// preferred
+	log.With("item_id", itemID).Info("getting item")
+	// avoid
+	log.With("id", itemID).Error("getting item")
+
+	// 4. Avoid Warn-level logging. Prefer Info or Error.
+	// Minimize confusion/contention about what level a log
+	// "should be". Error during a failure, Info (or Debug)
+	// otherwise.
+	//
+	// preferred
+	log.With("err", err).Error("getting item")
+	// avoid
+	log.With("err", err).Warn("getting item")
+
+	// 5. Avoid Panic/Fatal-level logging. Prefer Error.
+	// Panic and Fatal logging can crash the application without
+	// flushing buffered logs and finishing out other telemetry.
+	//
+	// preferred
+	log.With("err", err).Error("unable to connect")
+	// avoid
+	log.With("err", err).Panic("unable to connect")
+}
+
+// Example_logger_clues_standards reviews code standards around using the Clues package while logging.
+func Example_logger_clues_standards() {
+	log := logger.Ctx(context.Background())
+
+	// 1. Clues Ctx values are always added in .Ctx(); you don't
+	// need to add them directly.
+	//
+	// preferred
+	ctx := clues.Add(context.Background(), "item_id", itemID)
+	logger.Ctx(ctx).Info("getting item")
+	// avoid
+	ctx = clues.Add(context.Background(), "item_id", itemID)
+	logger.Ctx(ctx).With(clues.In(ctx).Slice()...).Info("getting item")
+
+	// 2. Always extract structured data from errors.
+	//
+	// preferred
+	log.With("err", err).Errorw("getting item", clues.InErr(err).Slice()...)
+	// avoid
+	log.Errorw("getting item", "err", err)
+
+	// TODO(keepers): PII
+	// 3. Protect pii in logs.
+}
diff --git a/website/docs/developers/logging.md b/website/docs/developers/logging.md
new file mode 100644
index 000000000..c2fee4e95
--- /dev/null
+++ b/website/docs/developers/logging.md
@@ -0,0 +1,3 @@
+# Corso Logging Standards
+
+Logging standards and SDK consumer logging integration details can be found in the Corso repo [logging examples](https://github.com/alcionai/corso/blob/main/src/pkg/logger/example_logger_test.go).
From b327de5801c650925121a05bd9a303ad674620e7 Mon Sep 17 00:00:00 2001
From: Abin Simon
Date: Tue, 7 Feb 2023 10:55:22 +0530
Subject: [PATCH 11/45] Pass in prev delta to collectItems cont. (#2400)

## Description

Adds the review changes that were missed in
https://github.com/alcionai/corso/pull/2371.

## Does this PR need a docs update or release note?
- [ ] :white_check_mark: Yes, it's included - [ ] :clock1: Yes, but in a later PR - [x] :no_entry: No ## Type of change - [ ] :sunflower: Feature - [x] :bug: Bugfix - [ ] :world_map: Documentation - [ ] :robot: Test - [ ] :computer: CI/Deployment - [ ] :broom: Tech Debt/Cleanup ## Issue(s) * # ## Test Plan - [ ] :muscle: Manual - [x] :zap: Unit test - [ ] :green_heart: E2E --- src/internal/connector/onedrive/api/drive.go | 6 ++++++ src/internal/connector/onedrive/collections_test.go | 12 ++++++------ src/internal/connector/onedrive/drive.go | 7 +++---- 3 files changed, 15 insertions(+), 10 deletions(-) diff --git a/src/internal/connector/onedrive/api/drive.go b/src/internal/connector/onedrive/api/drive.go index ce246da85..391ea2632 100644 --- a/src/internal/connector/onedrive/api/drive.go +++ b/src/internal/connector/onedrive/api/drive.go @@ -30,6 +30,7 @@ const pageSize = int32(999) type driveItemPager struct { gs graph.Servicer + driveID string builder *msdrives.ItemRootDeltaRequestBuilder options *msdrives.ItemRootDeltaRequestBuilderGetRequestConfiguration } @@ -49,6 +50,7 @@ func NewItemPager( res := &driveItemPager{ gs: gs, + driveID: driveID, options: requestConfig, builder: gs.Client().DrivesById(driveID).Root().Delta(), } @@ -78,6 +80,10 @@ func (p *driveItemPager) SetNext(link string) { p.builder = msdrives.NewItemRootDeltaRequestBuilder(link, p.gs.Adapter()) } +func (p *driveItemPager) Reset() { + p.builder = p.gs.Client().DrivesById(p.driveID).Root().Delta() +} + func (p *driveItemPager) ValuesIn(l api.DeltaPageLinker) ([]models.DriveItemable, error) { return getValues[models.DriveItemable](l) } diff --git a/src/internal/connector/onedrive/collections_test.go b/src/internal/connector/onedrive/collections_test.go index 0b10d02bd..b172204c9 100644 --- a/src/internal/connector/onedrive/collections_test.go +++ b/src/internal/connector/onedrive/collections_test.go @@ -1048,6 +1048,7 @@ func (p *mockItemPager) GetPage(context.Context) (gapi.DeltaPageLinker, error) { } func (p *mockItemPager) SetNext(string) {} +func (p *mockItemPager) Reset() {} func (p *mockItemPager) ValuesIn(gapi.DeltaPageLinker) ([]models.DriveItemable, error) { idx := p.getIdx @@ -1497,7 +1498,6 @@ func (suite *OneDriveCollectionsSuite) TestCollectItems() { name string items []deltaPagerResult deltaURL string - prevDelta string prevDeltaSuccess bool err error }{ @@ -1522,7 +1522,7 @@ func (suite *OneDriveCollectionsSuite) TestCollectItems() { name: "invalid prev delta", deltaURL: delta, items: []deltaPagerResult{ - {nextLink: &next, err: deltaError}, + {err: deltaError}, {deltaLink: &delta}, // works on retry }, prevDeltaSuccess: false, @@ -1531,7 +1531,7 @@ func (suite *OneDriveCollectionsSuite) TestCollectItems() { name: "fail a normal delta query", items: []deltaPagerResult{ {nextLink: &next}, - {nextLink: &next, err: assert.AnError}, + {err: assert.AnError}, }, prevDeltaSuccess: true, err: assert.AnError, @@ -1566,9 +1566,9 @@ func (suite *OneDriveCollectionsSuite) TestCollectItems() { "", ) - require.ErrorIs(suite.T(), err, test.err) - require.Equal(suite.T(), test.deltaURL, delta.URL) - require.Equal(suite.T(), !test.prevDeltaSuccess, delta.Reset) + require.ErrorIs(suite.T(), err, test.err, "delta fetch err") + require.Equal(suite.T(), test.deltaURL, delta.URL, "delta url") + require.Equal(suite.T(), !test.prevDeltaSuccess, delta.Reset, "delta reset") }) } } diff --git a/src/internal/connector/onedrive/drive.go b/src/internal/connector/onedrive/drive.go index b06184884..a7585ffe3 100644 --- 
a/src/internal/connector/onedrive/drive.go +++ b/src/internal/connector/onedrive/drive.go @@ -148,6 +148,7 @@ type itemCollector func( type itemPager interface { GetPage(context.Context) (gapi.DeltaPageLinker, error) SetNext(nextLink string) + Reset() ValuesIn(gapi.DeltaPageLinker) ([]models.DriveItemable, error) } @@ -193,7 +194,6 @@ func collectItems( newPaths = map[string]string{} excluded = map[string]struct{}{} invalidPrevDelta = false - triedPrevDelta = false ) maps.Copy(newPaths, oldPaths) @@ -205,13 +205,12 @@ func collectItems( for { page, err := pager.GetPage(ctx) - if !triedPrevDelta && graph.IsErrInvalidDelta(err) { + if graph.IsErrInvalidDelta(err) { logger.Ctx(ctx).Infow("Invalid previous delta link", "link", prevDelta) - triedPrevDelta = true // TODO(meain): Do we need this check? invalidPrevDelta = true - pager.SetNext("") + pager.Reset() continue } From 2c6ccb70b2bc70de12b723c2c31b2a33a94092da Mon Sep 17 00:00:00 2001 From: Abin Simon Date: Tue, 7 Feb 2023 15:26:44 +0530 Subject: [PATCH 12/45] Update changelog for v0.3.0 (#2421) ## Description Updating CHANGELOG file prepping for new release. ## Does this PR need a docs update or release note? - [ ] :white_check_mark: Yes, it's included - [ ] :clock1: Yes, but in a later PR - [x] :no_entry: No ## Type of change - [ ] :sunflower: Feature - [ ] :bug: Bugfix - [x] :world_map: Documentation - [ ] :robot: Test - [ ] :computer: CI/Deployment - [ ] :broom: Tech Debt/Cleanup ## Issue(s) * # ## Test Plan - [ ] :muscle: Manual - [ ] :zap: Unit test - [ ] :green_heart: E2E --- CHANGELOG.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7c029b8f9..a8d75a352 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] (alpha) +## [v0.3.0] (alpha) - 2023-2-07 + ### Added - Document Corso's fault-tolerance and restartability features @@ -156,7 +158,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Miscellaneous - Optional usage statistics reporting ([RM-35](https://github.com/alcionai/corso-roadmap/issues/35)) -[Unreleased]: https://github.com/alcionai/corso/compare/v0.2.0...HEAD +[Unreleased]: https://github.com/alcionai/corso/compare/v0.3.0...HEAD +[v0.3.0]: https://github.com/alcionai/corso/compare/v0.2.0...v0.3.0 [v0.2.0]: https://github.com/alcionai/corso/compare/v0.1.0...v0.2.0 [v0.1.0]: https://github.com/alcionai/corso/compare/v0.0.4...v0.1.0 [v0.0.4]: https://github.com/alcionai/corso/compare/v0.0.3...v0.0.4 From 45291ebaea5453cee8efa19e5028f6e7e7151751 Mon Sep 17 00:00:00 2001 From: Abin Simon Date: Tue, 7 Feb 2023 20:17:36 +0530 Subject: [PATCH 13/45] Set DoNotMerge on OneDrive collections if delta token expired (#2401) ## Description Wire up configuring DoNotMerge for OneDrive collections. ## Does this PR need a docs update or release note? 
- [ ] :white_check_mark: Yes, it's included - [x] :clock1: Yes, but in a later PR - [ ] :no_entry: No ## Type of change - [x] :sunflower: Feature - [ ] :bug: Bugfix - [ ] :world_map: Documentation - [ ] :robot: Test - [ ] :computer: CI/Deployment - [ ] :broom: Tech Debt/Cleanup ## Issue(s) * https://github.com/alcionai/corso/issues/2123 * https://github.com/alcionai/corso/issues/2124 ## Test Plan - [ ] :muscle: Manual - [x] :zap: Unit test - [ ] :green_heart: E2E --- src/internal/connector/graph/service.go | 2 +- src/internal/connector/onedrive/collection.go | 20 +-- .../connector/onedrive/collection_test.go | 9 +- .../connector/onedrive/collections.go | 5 +- .../connector/onedrive/collections_test.go | 147 +++++++++++++++++- src/internal/connector/onedrive/drive.go | 4 +- src/internal/connector/onedrive/item_test.go | 1 + .../connector/sharepoint/api/helper_test.go | 3 +- .../sharepoint/data_collections_test.go | 2 +- src/internal/connector/support/m365Support.go | 3 +- 10 files changed, 173 insertions(+), 23 deletions(-) diff --git a/src/internal/connector/graph/service.go b/src/internal/connector/graph/service.go index fd6142028..aa5a19f5b 100644 --- a/src/internal/connector/graph/service.go +++ b/src/internal/connector/graph/service.go @@ -8,7 +8,6 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/sdk/azidentity" - "github.com/alcionai/corso/src/internal/connector/support" "github.com/microsoft/kiota-abstractions-go/serialization" ka "github.com/microsoft/kiota-authentication-azure-go" khttp "github.com/microsoft/kiota-http-go" @@ -16,6 +15,7 @@ import ( msgraphgocore "github.com/microsoftgraph/msgraph-sdk-go-core" "github.com/pkg/errors" + "github.com/alcionai/corso/src/internal/connector/support" "github.com/alcionai/corso/src/pkg/account" "github.com/alcionai/corso/src/pkg/logger" "github.com/alcionai/corso/src/pkg/path" diff --git a/src/internal/connector/onedrive/collection.go b/src/internal/connector/onedrive/collection.go index 343a8911e..d56114746 100644 --- a/src/internal/connector/onedrive/collection.go +++ b/src/internal/connector/onedrive/collection.go @@ -97,17 +97,19 @@ func NewCollection( statusUpdater support.StatusUpdater, source driveSource, ctrlOpts control.Options, + doNotMergeItems bool, ) *Collection { c := &Collection{ - itemClient: itemClient, - folderPath: folderPath, - driveItems: map[string]models.DriveItemable{}, - driveID: driveID, - source: source, - service: service, - data: make(chan data.Stream, collectionChannelBufferSize), - statusUpdater: statusUpdater, - ctrl: ctrlOpts, + itemClient: itemClient, + folderPath: folderPath, + driveItems: map[string]models.DriveItemable{}, + driveID: driveID, + source: source, + service: service, + data: make(chan data.Stream, collectionChannelBufferSize), + statusUpdater: statusUpdater, + ctrl: ctrlOpts, + doNotMergeItems: doNotMergeItems, } // Allows tests to set a mock populator diff --git a/src/internal/connector/onedrive/collection_test.go b/src/internal/connector/onedrive/collection_test.go index 734009d72..39c19c097 100644 --- a/src/internal/connector/onedrive/collection_test.go +++ b/src/internal/connector/onedrive/collection_test.go @@ -168,7 +168,8 @@ func (suite *CollectionUnitTestSuite) TestCollection() { suite, suite.testStatusUpdater(&wg, &collStatus), test.source, - control.Options{ToggleFeatures: control.Toggles{EnablePermissionsBackup: true}}) + control.Options{ToggleFeatures: control.Toggles{EnablePermissionsBackup: true}}, + true) require.NotNil(t, coll) assert.Equal(t, folderPath, 
coll.FullPath()) @@ -301,7 +302,8 @@ func (suite *CollectionUnitTestSuite) TestCollectionReadError() { suite, suite.testStatusUpdater(&wg, &collStatus), test.source, - control.Options{ToggleFeatures: control.Toggles{EnablePermissionsBackup: true}}) + control.Options{ToggleFeatures: control.Toggles{EnablePermissionsBackup: true}}, + true) mockItem := models.NewDriveItem() mockItem.SetId(&testItemID) @@ -372,7 +374,8 @@ func (suite *CollectionUnitTestSuite) TestCollectionDisablePermissionsBackup() { suite, suite.testStatusUpdater(&wg, &collStatus), test.source, - control.Options{ToggleFeatures: control.Toggles{}}) + control.Options{ToggleFeatures: control.Toggles{}}, + true) now := time.Now() mockItem := models.NewDriveItem() diff --git a/src/internal/connector/onedrive/collections.go b/src/internal/connector/onedrive/collections.go index b8c8b9c48..4388d7fd5 100644 --- a/src/internal/connector/onedrive/collections.go +++ b/src/internal/connector/onedrive/collections.go @@ -374,6 +374,7 @@ func (c *Collections) UpdateCollections( oldPaths map[string]string, newPaths map[string]string, excluded map[string]struct{}, + invalidPrevDelta bool, ) error { for _, item := range items { if item.GetRoot() != nil { @@ -465,7 +466,9 @@ func (c *Collections) UpdateCollections( c.service, c.statusUpdater, c.source, - c.ctrl) + c.ctrl, + invalidPrevDelta, + ) c.CollectionMap[collectionPath.String()] = col c.NumContainers++ diff --git a/src/internal/connector/onedrive/collections_test.go b/src/internal/connector/onedrive/collections_test.go index b172204c9..ec7b53442 100644 --- a/src/internal/connector/onedrive/collections_test.go +++ b/src/internal/connector/onedrive/collections_test.go @@ -646,6 +646,7 @@ func (suite *OneDriveCollectionsSuite) TestUpdateCollections() { tt.inputFolderMap, outputFolderMap, excludes, + false, ) tt.expect(t, err) assert.Equal(t, len(tt.expectedCollectionPaths), len(c.CollectionMap), "collection paths") @@ -1133,6 +1134,7 @@ func (suite *OneDriveCollectionsSuite) TestGet() { expectedDeltaURLs map[string]string expectedFolderPaths map[string]map[string]string expectedDelList map[string]struct{} + doNotMergeItems bool }{ { name: "OneDrive_OneItemPage_DelFileOnly_NoFolders_NoErrors", @@ -1344,6 +1346,135 @@ func (suite *OneDriveCollectionsSuite) TestGet() { expectedFolderPaths: nil, expectedDelList: nil, }, + { + name: "OneDrive_OneItemPage_DeltaError", + drives: []models.Driveable{drive1}, + items: map[string][]deltaPagerResult{ + driveID1: { + { + err: getDeltaError(), + }, + { + items: []models.DriveItemable{ + driveItem("file", "file", testBaseDrivePath, true, false, false), + }, + deltaLink: &delta, + }, + }, + }, + errCheck: assert.NoError, + expectedCollections: map[string][]string{ + expectedPathAsSlice( + suite.T(), + tenant, + user, + testBaseDrivePath, + )[0]: {"file"}, + }, + expectedDeltaURLs: map[string]string{ + driveID1: delta, + }, + expectedFolderPaths: map[string]map[string]string{ + // We need an empty map here so deserializing metadata knows the delta + // token for this drive is valid. 
+ driveID1: {}, + }, + expectedDelList: map[string]struct{}{}, + doNotMergeItems: true, + }, + { + name: "OneDrive_MultipleCollections_DeltaError", + drives: []models.Driveable{drive1}, + items: map[string][]deltaPagerResult{ + driveID1: { + { + err: getDeltaError(), + }, + { + items: []models.DriveItemable{ + driveItem("file", "file", testBaseDrivePath, true, false, false), + }, + nextLink: &next, + }, + { + items: []models.DriveItemable{ + driveItem("file", "file", testBaseDrivePath+"/folder", true, false, false), + }, + deltaLink: &delta, + }, + }, + }, + errCheck: assert.NoError, + expectedCollections: map[string][]string{ + expectedPathAsSlice( + suite.T(), + tenant, + user, + testBaseDrivePath, + )[0]: {"file"}, + expectedPathAsSlice( + suite.T(), + tenant, + user, + testBaseDrivePath+"/folder", + )[0]: {"file"}, + }, + expectedDeltaURLs: map[string]string{ + driveID1: delta, + }, + expectedFolderPaths: map[string]map[string]string{ + // We need an empty map here so deserializing metadata knows the delta + // token for this drive is valid. + driveID1: {}, + }, + expectedDelList: map[string]struct{}{}, + doNotMergeItems: true, + }, + { + name: "OneDrive_MultipleCollections_NoDeltaError", + drives: []models.Driveable{drive1}, + items: map[string][]deltaPagerResult{ + driveID1: { + { + items: []models.DriveItemable{ + driveItem("file", "file", testBaseDrivePath, true, false, false), + }, + nextLink: &next, + }, + { + items: []models.DriveItemable{ + driveItem("file", "file", testBaseDrivePath+"/folder", true, false, false), + }, + deltaLink: &delta, + }, + }, + }, + errCheck: assert.NoError, + expectedCollections: map[string][]string{ + expectedPathAsSlice( + suite.T(), + tenant, + user, + testBaseDrivePath, + )[0]: {"file"}, + expectedPathAsSlice( + suite.T(), + tenant, + user, + testBaseDrivePath+"/folder", + )[0]: {"file"}, + }, + expectedDeltaURLs: map[string]string{ + driveID1: delta, + }, + expectedFolderPaths: map[string]map[string]string{ + // We need an empty map here so deserializing metadata knows the delta + // token for this drive is valid. 
+ driveID1: {}, + }, + expectedDelList: map[string]struct{}{}, + doNotMergeItems: false, + }, } for _, test := range table { suite.T().Run(test.name, func(t *testing.T) { @@ -1423,6 +1554,7 @@ func (suite *OneDriveCollectionsSuite) TestGet() { } assert.ElementsMatch(t, test.expectedCollections[folderPath], itemIDs) + assert.Equal(t, test.doNotMergeItems, baseCol.DoNotMergeItems(), "DoNotMergeItems") } assert.Equal(t, test.expectedDelList, delList) @@ -1483,10 +1615,7 @@ func delItem( return item } -func (suite *OneDriveCollectionsSuite) TestCollectItems() { - next := "next" - delta := "delta" - +func getDeltaError() error { syncStateNotFound := "SyncStateNotFound" // TODO(meain): export graph.errCodeSyncStateNotFound me := odataerrors.NewMainError() me.SetCode(&syncStateNotFound) @@ -1494,6 +1623,13 @@ func (suite *OneDriveCollectionsSuite) TestCollectItems() { deltaError := odataerrors.NewODataError() deltaError.SetError(me) + return deltaError +} + +func (suite *OneDriveCollectionsSuite) TestCollectItems() { + next := "next" + delta := "delta" + table := []struct { name string items []deltaPagerResult @@ -1522,7 +1658,7 @@ func (suite *OneDriveCollectionsSuite) TestCollectItems() { name: "invalid prev delta", deltaURL: delta, items: []deltaPagerResult{ - {err: deltaError}, + {err: getDeltaError()}, {deltaLink: &delta}, // works on retry }, prevDeltaSuccess: false, @@ -1553,6 +1689,7 @@ func (suite *OneDriveCollectionsSuite) TestCollectItems() { oldPaths map[string]string, newPaths map[string]string, excluded map[string]struct{}, + doNotMergeItems bool, ) error { return nil } diff --git a/src/internal/connector/onedrive/drive.go b/src/internal/connector/onedrive/drive.go index a7585ffe3..471a42aad 100644 --- a/src/internal/connector/onedrive/drive.go +++ b/src/internal/connector/onedrive/drive.go @@ -143,6 +143,7 @@ type itemCollector func( oldPaths map[string]string, newPaths map[string]string, excluded map[string]struct{}, + validPrevDelta bool, ) error type itemPager interface { @@ -228,7 +229,7 @@ func collectItems( return DeltaUpdate{}, nil, nil, errors.Wrap(err, "extracting items from response") } - err = collector(ctx, driveID, driveName, vals, oldPaths, newPaths, excluded) + err = collector(ctx, driveID, driveName, vals, oldPaths, newPaths, excluded, invalidPrevDelta) if err != nil { return DeltaUpdate{}, nil, nil, err } @@ -380,6 +381,7 @@ func GetAllFolders( oldPaths map[string]string, newPaths map[string]string, excluded map[string]struct{}, + doNotMergeItems bool, ) error { for _, item := range items { // Skip the root item. 
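For orientation, here is a minimal sketch of a collector satisfying the updated itemCollector signature. The collector name and body are illustrative assumptions rather than code from this patch; only the parameter list mirrors the change above. When the trailing flag is true, the previous delta token was invalid, so previously cached state for the drive should not be trusted or merged.

package onedrive

import (
	"context"

	"github.com/microsoftgraph/msgraph-sdk-go/models"

	"github.com/alcionai/corso/src/pkg/logger"
)

// exampleCollector is a hypothetical itemCollector implementation, shown
// only to illustrate how the new trailing bool might be consumed.
func exampleCollector(
	ctx context.Context,
	driveID, driveName string,
	items []models.DriveItemable,
	oldPaths map[string]string,
	newPaths map[string]string,
	excluded map[string]struct{},
	doNotMergeItems bool,
) error {
	if doNotMergeItems {
		// the previous delta token was invalid, so a full enumeration is
		// underway; cached folder paths for this drive are stale.
		logger.Ctx(ctx).Infow("skipping merge of cached drive state", "drive_id", driveID)
	}

	for _, item := range items {
		if item.GetId() == nil {
			// defensive: ignore malformed entries
			continue
		}
		// collect or exclude each drive item here
	}

	return nil
}

Threading the flag through the collector is what lets each resulting collection report DoNotMergeItems, as exercised by the tests below.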
diff --git a/src/internal/connector/onedrive/item_test.go b/src/internal/connector/onedrive/item_test.go
index aec2f2474..5151e4466 100644
--- a/src/internal/connector/onedrive/item_test.go
+++ b/src/internal/connector/onedrive/item_test.go
@@ -106,6 +106,7 @@ func (suite *ItemIntegrationSuite) TestItemReader_oneDrive() {
 		oldPaths map[string]string,
 		newPaths map[string]string,
 		excluded map[string]struct{},
+		doNotMergeItems bool,
 	) error {
 		for _, item := range items {
 			if item.GetFile() != nil {
diff --git a/src/internal/connector/sharepoint/api/helper_test.go b/src/internal/connector/sharepoint/api/helper_test.go
index 33dee1561..1d50263ee 100644
--- a/src/internal/connector/sharepoint/api/helper_test.go
+++ b/src/internal/connector/sharepoint/api/helper_test.go
@@ -4,9 +4,10 @@ import (
 	"testing"

 	discover "github.com/alcionai/corso/src/internal/connector/discovery/api"
+	"github.com/stretchr/testify/require"
+
 	"github.com/alcionai/corso/src/internal/connector/graph"
 	"github.com/alcionai/corso/src/pkg/account"
-	"github.com/stretchr/testify/require"
 )

 func createTestBetaService(t *testing.T, credentials account.M365Config) *discover.BetaService {
diff --git a/src/internal/connector/sharepoint/data_collections_test.go b/src/internal/connector/sharepoint/data_collections_test.go
index 11d05156c..10a1e25b0 100644
--- a/src/internal/connector/sharepoint/data_collections_test.go
+++ b/src/internal/connector/sharepoint/data_collections_test.go
@@ -100,7 +100,7 @@ func (suite *SharePointLibrariesSuite) TestUpdateCollections() {
 				&MockGraphService{},
 				nil,
 				control.Options{})
-			err := c.UpdateCollections(ctx, "driveID", "General", test.items, paths, newPaths, excluded)
+			err := c.UpdateCollections(ctx, "driveID", "General", test.items, paths, newPaths, excluded, true)
 			test.expect(t, err)
 			assert.Equal(t, len(test.expectedCollectionPaths), len(c.CollectionMap), "collection paths")
 			assert.Equal(t, test.expectedItemCount, c.NumItems, "item count")
diff --git a/src/internal/connector/support/m365Support.go b/src/internal/connector/support/m365Support.go
index 0780a2b0e..c3aacef7f 100644
--- a/src/internal/connector/support/m365Support.go
+++ b/src/internal/connector/support/m365Support.go
@@ -3,11 +3,12 @@ package support
 import (
 	"strings"

-	bmodels "github.com/alcionai/corso/src/internal/connector/graph/betasdk/models"
 	absser "github.com/microsoft/kiota-abstractions-go/serialization"
 	js "github.com/microsoft/kiota-serialization-json-go"
 	"github.com/microsoftgraph/msgraph-sdk-go/models"
 	"github.com/pkg/errors"
+
+	bmodels "github.com/alcionai/corso/src/internal/connector/graph/betasdk/models"
 )

 // CreateFromBytes helper function to initialize m365 object from bytes.
From ca0358e24998d2aedff3ae4582f595219ae841f0 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 7 Feb 2023 16:51:50 +0000
Subject: [PATCH 14/45] =?UTF-8?q?=E2=AC=86=EF=B8=8F=20Bump=20github.com/aw?=
 =?UTF-8?q?s/aws-sdk-go=20from=201.44.192=20to=201.44.195=20in=20/src=20(#?=
 =?UTF-8?q?2418)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.44.192 to 1.44.195.
Release notes, sourced from github.com/aws/aws-sdk-go's releases:

Release v1.44.195 (2023-02-06)

Service Client Updates

  • service/compute-optimizer: Updates service API and documentation
  • service/customer-profiles: Updates service API and documentation
  • service/frauddetector: Updates service API and documentation
  • service/mediaconvert: Updates service API and documentation
    • The AWS Elemental MediaConvert SDK has added improved scene change detection capabilities and a bandwidth reduction filter, along with video quality enhancements, to the AVC encoder.
  • service/outposts: Updates service API and documentation

Release v1.44.194 (2023-02-03)

Service Client Updates

  • service/proton: Updates service API and documentation
  • service/redshift: Updates service documentation
    • Corrects descriptions of the parameters for the API operations RestoreFromClusterSnapshot, RestoreTableFromClusterSnapshot, and CreateCluster.

Release v1.44.193 (2023-02-02)

Service Client Updates

  • service/appconfig: Updates service API and documentation
  • service/connect: Adds new service
  • service/ec2: Updates service API and documentation
    • Documentation updates for EC2.
  • service/elasticloadbalancingv2: Updates service documentation
  • service/keyspaces: Adds new service
  • service/quicksight: Updates service API and documentation
    • QuickSight support for Radar Chart and Dashboard Publish Options
  • service/redshift: Adds new service
    • Enabled FIPS endpoints for GovCloud (US) regions in SDK.
  • service/sso-admin: Adds new service
--- src/go.mod | 2 +- src/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/go.mod b/src/go.mod index 0d9792c5d..157225347 100644 --- a/src/go.mod +++ b/src/go.mod @@ -5,7 +5,7 @@ go 1.19 require ( github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0 github.com/alcionai/clues v0.0.0-20230202001016-cbda58c9de9e - github.com/aws/aws-sdk-go v1.44.192 + github.com/aws/aws-sdk-go v1.44.195 github.com/aws/aws-xray-sdk-go v1.8.0 github.com/google/uuid v1.3.0 github.com/hashicorp/go-multierror v1.1.1 diff --git a/src/go.sum b/src/go.sum index d3c038ca4..d9be913a5 100644 --- a/src/go.sum +++ b/src/go.sum @@ -62,8 +62,8 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5 github.com/alessio/shellescape v1.4.1 h1:V7yhSDDn8LP4lc4jS8pFkt0zCnzVJlG5JXy9BVKJUX0= github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY3JY= github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= -github.com/aws/aws-sdk-go v1.44.192 h1:KL54vCxRd5v5XBGjnF3FelzXXwl+aWHDmDTihFmRNgM= -github.com/aws/aws-sdk-go v1.44.192/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go v1.44.195 h1:d5xFL0N83Fpsq2LFiHgtBUHknCRUPGHdOlCWt/jtOJs= +github.com/aws/aws-sdk-go v1.44.195/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/aws/aws-xray-sdk-go v1.8.0 h1:0xncHZ588wB/geLjbM/esoW3FOEThWy2TJyb4VXfLFY= github.com/aws/aws-xray-sdk-go v1.8.0/go.mod h1:7LKe47H+j3evfvS1+q0wzpoaGXGrF3mUsfM+thqVO+A= github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= From 503c60df06ecd211926cb0fe4720f6f10ab3cb17 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 7 Feb 2023 17:20:24 +0000 Subject: [PATCH 15/45] =?UTF-8?q?=E2=AC=86=EF=B8=8F=20Bump=20github.com/vb?= =?UTF-8?q?auerster/mpb/v8=20from=208.1.4=20to=208.1.6=20in=20/src=20(#241?= =?UTF-8?q?9)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [github.com/vbauerster/mpb/v8](https://github.com/vbauerster/mpb) from 8.1.4 to 8.1.6.
Release notes, sourced from github.com/vbauerster/mpb/v8's releases:

v8.1.6

Fixed #122: dropped Go 1.17 support because of a dependency update.

--- src/go.mod | 4 ++-- src/go.sum | 7 ++++--- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/src/go.mod b/src/go.mod index 157225347..59e27706b 100644 --- a/src/go.mod +++ b/src/go.mod @@ -26,7 +26,7 @@ require ( github.com/stretchr/testify v1.8.1 github.com/tidwall/pretty v1.2.1 github.com/tomlazar/table v0.1.2 - github.com/vbauerster/mpb/v8 v8.1.4 + github.com/vbauerster/mpb/v8 v8.1.6 go.uber.org/zap v1.24.0 golang.org/x/exp v0.0.0-20221217163422-3c43f8badb15 golang.org/x/tools v0.5.0 @@ -97,7 +97,7 @@ require ( github.com/prometheus/client_model v0.3.0 // indirect github.com/prometheus/common v0.37.0 // indirect github.com/prometheus/procfs v0.8.0 // indirect - github.com/rivo/uniseg v0.2.0 // indirect + github.com/rivo/uniseg v0.4.3 // indirect github.com/rs/xid v1.4.0 // indirect github.com/segmentio/backo-go v1.0.0 // indirect github.com/sirupsen/logrus v1.9.0 // indirect diff --git a/src/go.sum b/src/go.sum index d9be913a5..a2741aed3 100644 --- a/src/go.sum +++ b/src/go.sum @@ -342,8 +342,9 @@ github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1 github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= -github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rivo/uniseg v0.4.3 h1:utMvzDsuh3suAEnhH0RdHmoPbU648o6CvXxTx4SBMOw= +github.com/rivo/uniseg v0.4.3/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= github.com/rs/xid v1.4.0 h1:qd7wPTDkN6KQx2VmMBLrpHkiyQwgFXRnkOLacUiaSNY= @@ -402,8 +403,8 @@ github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyC github.com/valyala/fasthttp v1.34.0 h1:d3AAQJ2DRcxJYHm7OXNXtXt2as1vMDfxeIcFvhmGGm4= github.com/valyala/fasthttp v1.34.0/go.mod h1:epZA5N+7pY6ZaEKRmstzOuYJx9HI8DI1oaCGZpdH4h0= github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc= -github.com/vbauerster/mpb/v8 v8.1.4 h1:MOcLTIbbAA892wVjRiuFHa1nRlNvifQMDVh12Bq/xIs= -github.com/vbauerster/mpb/v8 v8.1.4/go.mod h1:2fRME8lCLU9gwJwghZb1bO9A3Plc8KPeQ/ayGj+Ek4I= +github.com/vbauerster/mpb/v8 v8.1.6 h1:EswHDkAsy4OQ7QBAmU1MUPz4vHzl6KlINjlh7vJoxvY= +github.com/vbauerster/mpb/v8 v8.1.6/go.mod h1:O9/Wl8X9dUbR63tZ41MLIAxrtNfwlpwUhGkeYugUPW8= github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c h1:3lbZUMbMiGUW/LMkfsEABsc5zNT9+b1CvsJx47JzJ8g= github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c/go.mod h1:UrdRz5enIKZ63MEE3IF9l2/ebyx59GyGgPi+tICQdmM= github.com/yosida95/uritemplate/v3 v3.0.2 h1:Ed3Oyj9yrmi9087+NczuL5BwkIc4wvTb5zIM+UJPGz4= From d4abc7f68ac3f970a8a038e84c1147402b513fee Mon Sep 17 00:00:00 2001 From: ashmrtn Date: Tue, 7 Feb 2023 10:07:39 -0800 Subject: [PATCH 16/45] Use case-insensitive comparison for emails in permission check (#2417) ## Description Tests were flaking due to inconsistent case in strings. Normalize case prior to comparing. ## Does this PR need a docs update or release note? 
- [ ] :white_check_mark: Yes, it's included - [ ] :clock1: Yes, but in a later PR - [x] :no_entry: No ## Type of change - [ ] :sunflower: Feature - [x] :bug: Bugfix - [ ] :world_map: Documentation - [ ] :robot: Test - [ ] :computer: CI/Deployment - [ ] :broom: Tech Debt/Cleanup ## Issue(s) * closes #2416 ## Test Plan - [ ] :muscle: Manual - [x] :zap: Unit test - [ ] :green_heart: E2E --- src/internal/connector/graph_connector_helper_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/internal/connector/graph_connector_helper_test.go b/src/internal/connector/graph_connector_helper_test.go index 539cbf501..138cd6439 100644 --- a/src/internal/connector/graph_connector_helper_test.go +++ b/src/internal/connector/graph_connector_helper_test.go @@ -698,7 +698,7 @@ func compareOneDriveItem( // FIXME(meain): The permissions before and after might not be in the same order. for i, p := range expectedMeta.Permissions { - assert.Equal(t, p.Email, itemMeta.Permissions[i].Email) + assert.Equal(t, strings.ToLower(p.Email), strings.ToLower(itemMeta.Permissions[i].Email)) assert.Equal(t, p.Roles, itemMeta.Permissions[i].Roles) assert.Equal(t, p.Expiration, itemMeta.Permissions[i].Expiration) } From 77e9c0fad2f2cb737dc54cba7eb39f5877901fd8 Mon Sep 17 00:00:00 2001 From: Abin Simon Date: Tue, 7 Feb 2023 23:59:44 +0530 Subject: [PATCH 17/45] Update retry handling for permissions (#2420) ## Description Previous retry check logic was incorrect and was never retrying. This switches it to using `graph.RunWithRetry`. Sample failures: - https://github.com/alcionai/corso/actions/runs/4109625295/jobs/7091735297 - https://github.com/alcionai/corso/actions/runs/4110739264/jobs/7093919589 ## Does this PR need a docs update or release note? - [ ] :white_check_mark: Yes, it's included - [ ] :clock1: Yes, but in a later PR - [x] :no_entry: No ## Type of change - [ ] :sunflower: Feature - [x] :bug: Bugfix - [ ] :world_map: Documentation - [ ] :robot: Test - [ ] :computer: CI/Deployment - [ ] :broom: Tech Debt/Cleanup ## Issue(s) * # ## Test Plan - [x] :muscle: Manual - [ ] :zap: Unit test - [ ] :green_heart: E2E --- src/internal/connector/onedrive/collection.go | 42 +++++++------------ 1 file changed, 15 insertions(+), 27 deletions(-) diff --git a/src/internal/connector/onedrive/collection.go b/src/internal/connector/onedrive/collection.go index d56114746..e0a328ef7 100644 --- a/src/internal/connector/onedrive/collection.go +++ b/src/internal/connector/onedrive/collection.go @@ -280,35 +280,23 @@ func (oc *Collection) populateItems(ctx context.Context) { if oc.source == OneDriveSource { // Fetch metadata for the file - for i := 1; i <= maxRetries; i++ { - if !oc.ctrl.ToggleFeatures.EnablePermissionsBackup { - // We are still writing the metadata file but with - // empty permissions as we don't have a way to - // signify that the permissions was explicitly - // not added. - itemMeta = io.NopCloser(strings.NewReader("{}")) - itemMetaSize = 2 + if !oc.ctrl.ToggleFeatures.EnablePermissionsBackup { + // We are still writing the metadata file but with + // empty permissions as we don't have a way to + // signify that the permissions was explicitly + // not added. 
+ itemMeta = io.NopCloser(strings.NewReader("{}")) + itemMetaSize = 2 + } else { + err = graph.RunWithRetry(func() error { + itemMeta, itemMetaSize, err = oc.itemMetaReader(ctx, oc.service, oc.driveID, item) + return err + }) - break + if err != nil { + errUpdater(*item.GetId(), errors.Wrap(err, "failed to get item permissions")) + return } - - itemMeta, itemMetaSize, err = oc.itemMetaReader(ctx, oc.service, oc.driveID, item) - - // retry on Timeout type errors, break otherwise. - if err == nil || - !graph.IsErrTimeout(err) || - !graph.IsInternalServerError(err) { - break - } - - if i < maxRetries { - time.Sleep(1 * time.Second) - } - } - - if err != nil { - errUpdater(*item.GetId(), errors.Wrap(err, "failed to get item permissions")) - return } } From 231038de9047354e592809253959a6ea60b4a700 Mon Sep 17 00:00:00 2001 From: Keepers Date: Tue, 7 Feb 2023 12:09:45 -0700 Subject: [PATCH 18/45] fix mod file (#2424) ## Type of change - [x] :bug: Bugfix --- src/go.mod | 2 +- src/go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/src/go.mod b/src/go.mod index 59e27706b..8b42d8352 100644 --- a/src/go.mod +++ b/src/go.mod @@ -114,7 +114,7 @@ require ( golang.org/x/mod v0.7.0 // indirect golang.org/x/net v0.5.0 // indirect golang.org/x/sync v0.1.0 // indirect - golang.org/x/sys v0.4.0 // indirect + golang.org/x/sys v0.5.0 // indirect golang.org/x/text v0.6.0 // indirect google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef // indirect google.golang.org/grpc v1.52.0 // indirect diff --git a/src/go.sum b/src/go.sum index a2741aed3..79638570c 100644 --- a/src/go.sum +++ b/src/go.sum @@ -613,6 +613,8 @@ golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18= golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= From 27c1e5c51140b238c6c3cbc3df79b290e37704fe Mon Sep 17 00:00:00 2001 From: ashmrtn Date: Tue, 7 Feb 2023 11:32:50 -0800 Subject: [PATCH 19/45] Remove old code to count number of resource owners backed up (#2414) ## Description Remove unused code that counted the number of resource owners that participated in the backup. This is no longer required as we've restricted each backup to act on a single resource owner ## Does this PR need a docs update or release note? 
- [ ] :white_check_mark: Yes, it's included - [ ] :clock1: Yes, but in a later PR - [x] :no_entry: No ## Type of change - [ ] :sunflower: Feature - [ ] :bug: Bugfix - [ ] :world_map: Documentation - [ ] :robot: Test - [ ] :computer: CI/Deployment - [x] :broom: Tech Debt/Cleanup ## Test Plan - [ ] :muscle: Manual - [x] :zap: Unit test - [ ] :green_heart: E2E --- src/internal/data/data_collection.go | 35 -------- src/internal/data/data_collection_test.go | 98 ----------------------- 2 files changed, 133 deletions(-) delete mode 100644 src/internal/data/data_collection_test.go diff --git a/src/internal/data/data_collection.go b/src/internal/data/data_collection.go index 0e3d492e0..8f5fb67cf 100644 --- a/src/internal/data/data_collection.go +++ b/src/internal/data/data_collection.go @@ -86,38 +86,3 @@ type StreamSize interface { type StreamModTime interface { ModTime() time.Time } - -// ------------------------------------------------------------------------------------------------ -// functionality -// ------------------------------------------------------------------------------------------------ - -// ResourceOwnerSet extracts the set of unique resource owners from the -// slice of Collections. -func ResourceOwnerSet(cs []Collection) []string { - rs := map[string]struct{}{} - - for _, c := range cs { - fp := c.FullPath() - if fp == nil { - // Deleted collections have their full path set to nil but the previous - // path will be populated. - fp = c.PreviousPath() - } - - if fp == nil { - // This should not happen, but keep us from hitting a nil pointer - // exception if it does somehow occur. Statistics will be off though. - continue - } - - rs[fp.ResourceOwner()] = struct{}{} - } - - rss := make([]string, 0, len(rs)) - - for k := range rs { - rss = append(rss, k) - } - - return rss -} diff --git a/src/internal/data/data_collection_test.go b/src/internal/data/data_collection_test.go deleted file mode 100644 index 82e34a79c..000000000 --- a/src/internal/data/data_collection_test.go +++ /dev/null @@ -1,98 +0,0 @@ -package data - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - - "github.com/alcionai/corso/src/pkg/path" -) - -type mockColl struct { - p path.Path - prevP path.Path -} - -func (mc mockColl) Items() <-chan Stream { - return nil -} - -func (mc mockColl) FullPath() path.Path { - return mc.p -} - -func (mc mockColl) PreviousPath() path.Path { - return mc.prevP -} - -func (mc mockColl) State() CollectionState { - return NewState -} - -func (mc mockColl) DoNotMergeItems() bool { - return false -} - -type CollectionSuite struct { - suite.Suite -} - -// ------------------------------------------------------------------------------------------------ -// tests -// ------------------------------------------------------------------------------------------------ - -func TestCollectionSuite(t *testing.T) { - suite.Run(t, new(CollectionSuite)) -} - -func (suite *CollectionSuite) TestResourceOwnerSet() { - t := suite.T() - toColl := func(t *testing.T, resource string) Collection { - p, err := path.Builder{}. - Append("foo"). 
- ToDataLayerExchangePathForCategory("tid", resource, path.EventsCategory, false) - require.NoError(t, err) - - return mockColl{p, nil} - } - - table := []struct { - name string - input []Collection - expect []string - }{ - { - name: "empty", - input: []Collection{}, - expect: []string{}, - }, - { - name: "nil", - input: nil, - expect: []string{}, - }, - { - name: "single resource", - input: []Collection{toColl(t, "fnords")}, - expect: []string{"fnords"}, - }, - { - name: "multiple resource", - input: []Collection{toColl(t, "fnords"), toColl(t, "smarfs")}, - expect: []string{"fnords", "smarfs"}, - }, - { - name: "duplciate resources", - input: []Collection{toColl(t, "fnords"), toColl(t, "smarfs"), toColl(t, "fnords")}, - expect: []string{"fnords", "smarfs"}, - }, - } - for _, test := range table { - suite.T().Run(test.name, func(t *testing.T) { - rs := ResourceOwnerSet(test.input) - assert.ElementsMatch(t, test.expect, rs) - }) - } -} From 7f2a8735efdae9a821b5f288af029cbb85efd69b Mon Sep 17 00:00:00 2001 From: Keepers Date: Tue, 7 Feb 2023 14:10:34 -0700 Subject: [PATCH 20/45] add fault to repository repository funcs (#2364) ## Description adds fault.Errors to backupDetails() and backups(). ## Does this PR need a docs update or release note? - [x] :no_entry: No ## Type of change - [x] :broom: Tech Debt/Cleanup ## Issue(s) * #1970 ## Test Plan - [x] :zap: Unit test - [x] :green_heart: E2E --- src/cli/backup/exchange.go | 34 +++++++++----------- src/cli/backup/exchange_integration_test.go | 10 +++--- src/cli/backup/exchange_test.go | 6 ++-- src/cli/backup/onedrive.go | 34 +++++++++----------- src/cli/backup/onedrive_test.go | 6 ++-- src/cli/backup/sharepoint.go | 34 +++++++++----------- src/cli/backup/sharepoint_test.go | 6 ++-- src/cli/restore/exchange_integration_test.go | 6 ++-- src/cli/utils/testdata/opts.go | 14 +++++--- src/pkg/repository/repository.go | 26 ++++++++------- src/pkg/repository/repository_load_test.go | 8 +++-- 11 files changed, 93 insertions(+), 91 deletions(-) diff --git a/src/cli/backup/exchange.go b/src/cli/backup/exchange.go index 1f6f6ae81..5a12dba87 100644 --- a/src/cli/backup/exchange.go +++ b/src/cli/backup/exchange.go @@ -16,7 +16,6 @@ import ( "github.com/alcionai/corso/src/internal/model" "github.com/alcionai/corso/src/pkg/backup" "github.com/alcionai/corso/src/pkg/backup/details" - "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/repository" "github.com/alcionai/corso/src/pkg/selectors" @@ -309,9 +308,10 @@ func createExchangeCmd(cmd *cobra.Command, args []string) error { bIDs = append(bIDs, bo.Results.BackupID) } - bups, err := r.Backups(ctx, bIDs) - if err != nil { - return Only(ctx, errors.Wrap(err, "Unable to retrieve backup results from storage")) + bups, ferrs := r.Backups(ctx, bIDs) + // TODO: print/log recoverable errors + if ferrs.Err() != nil { + return Only(ctx, errors.Wrap(ferrs.Err(), "Unable to retrieve backup results from storage")) } backup.PrintAll(ctx, bups) @@ -471,10 +471,9 @@ func detailsExchangeCmd(cmd *cobra.Command, args []string) error { defer utils.CloseRepo(ctx, r) - ds, errs := runDetailsExchangeCmd(ctx, r, backupID, opts) - if errs.Err() != nil { - // TODO: log/display iterated errors - return Only(ctx, errs.Err()) + ds, err := runDetailsExchangeCmd(ctx, r, backupID, opts) + if err != nil { + return Only(ctx, err) } if len(ds.Entries) == 0 { @@ -495,26 +494,25 @@ func runDetailsExchangeCmd( r repository.BackupGetter, backupID string, opts utils.ExchangeOpts, -) 
(*details.Details, *fault.Errors) { - errs := fault.New(false) - +) (*details.Details, error) { if err := utils.ValidateExchangeRestoreFlags(backupID, opts); err != nil { - return nil, errs.Fail(err) + return nil, err } - d, _, err := r.BackupDetails(ctx, backupID) - if err != nil { - if errors.Is(err, kopia.ErrNotFound) { - return nil, errs.Fail(errors.Errorf("No backup exists with the id %s", backupID)) + d, _, errs := r.BackupDetails(ctx, backupID) + // TODO: log/track recoverable errors + if errs.Err() != nil { + if errors.Is(errs.Err(), kopia.ErrNotFound) { + return nil, errors.Errorf("No backup exists with the id %s", backupID) } - return nil, errs.Fail(errors.Wrap(err, "Failed to get backup details in the repository")) + return nil, errors.Wrap(errs.Err(), "Failed to get backup details in the repository") } sel := utils.IncludeExchangeRestoreDataSelectors(opts) utils.FilterExchangeRestoreInfoSelectors(sel, opts) - return sel.Reduce(ctx, d, errs), errs + return sel.Reduce(ctx, d, errs), nil } // ------------------------------------------------------------------------------------------------ diff --git a/src/cli/backup/exchange_integration_test.go b/src/cli/backup/exchange_integration_test.go index 4e8f63ef4..1b2a36466 100644 --- a/src/cli/backup/exchange_integration_test.go +++ b/src/cli/backup/exchange_integration_test.go @@ -296,8 +296,9 @@ func (suite *PreparedBackupExchangeIntegrationSuite) SetupSuite() { b, err := suite.repo.Backup(ctx, bop.Results.BackupID) require.NoError(t, err, "retrieving recent backup by ID") require.Equal(t, bIDs, string(b.ID), "repo backup matches results id") - _, b, err = suite.repo.BackupDetails(ctx, bIDs) - require.NoError(t, err, "retrieving recent backup details by ID") + _, b, errs := suite.repo.BackupDetails(ctx, bIDs) + require.NoError(t, errs.Err(), "retrieving recent backup details by ID") + require.Empty(t, errs.Errs(), "retrieving recent backup details by ID") require.Equal(t, bIDs, string(b.ID), "repo details matches results id") suite.backupOps[set] = string(b.ID) @@ -396,8 +397,9 @@ func (suite *PreparedBackupExchangeIntegrationSuite) TestExchangeDetailsCmd() { bID := suite.backupOps[set] // fetch the details from the repo first - deets, _, err := suite.repo.BackupDetails(ctx, string(bID)) - require.NoError(t, err) + deets, _, errs := suite.repo.BackupDetails(ctx, string(bID)) + require.NoError(t, errs.Err()) + require.Empty(t, errs.Errs()) cmd := tester.StubRootCmd( "backup", "details", "exchange", diff --git a/src/cli/backup/exchange_test.go b/src/cli/backup/exchange_test.go index c67a5c15b..40a1f9b2c 100644 --- a/src/cli/backup/exchange_test.go +++ b/src/cli/backup/exchange_test.go @@ -224,8 +224,7 @@ func (suite *ExchangeSuite) TestExchangeBackupDetailsSelectors() { test.BackupGetter, "backup-ID", test.Opts) - assert.NoError(t, err.Err(), "failure") - assert.Empty(t, err.Errs(), "recovered errors") + assert.NoError(t, err, "failure") assert.ElementsMatch(t, test.Expected, output.Entries) }) } @@ -242,8 +241,7 @@ func (suite *ExchangeSuite) TestExchangeBackupDetailsSelectorsBadFormats() { test.BackupGetter, "backup-ID", test.Opts) - assert.Error(t, err.Err(), "failure") - assert.Empty(t, err.Errs(), "recovered errors") + assert.Error(t, err, "failure") assert.Empty(t, output) }) } diff --git a/src/cli/backup/onedrive.go b/src/cli/backup/onedrive.go index 517477661..2b60432ff 100644 --- a/src/cli/backup/onedrive.go +++ b/src/cli/backup/onedrive.go @@ -16,7 +16,6 @@ import ( "github.com/alcionai/corso/src/internal/model" 
"github.com/alcionai/corso/src/pkg/backup" "github.com/alcionai/corso/src/pkg/backup/details" - "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/repository" "github.com/alcionai/corso/src/pkg/selectors" @@ -232,9 +231,10 @@ func createOneDriveCmd(cmd *cobra.Command, args []string) error { bIDs = append(bIDs, bo.Results.BackupID) } - bups, err := r.Backups(ctx, bIDs) - if err != nil { - return Only(ctx, errors.Wrap(err, "Unable to retrieve backup results from storage")) + bups, ferrs := r.Backups(ctx, bIDs) + // TODO: print/log recoverable errors + if ferrs.Err() != nil { + return Only(ctx, errors.Wrap(ferrs.Err(), "Unable to retrieve backup results from storage")) } backup.PrintAll(ctx, bups) @@ -363,10 +363,9 @@ func detailsOneDriveCmd(cmd *cobra.Command, args []string) error { Populated: utils.GetPopulatedFlags(cmd), } - ds, errs := runDetailsOneDriveCmd(ctx, r, backupID, opts) - if errs.Err() != nil { - // TODO: log/display iterated errors - return Only(ctx, errs.Err()) + ds, err := runDetailsOneDriveCmd(ctx, r, backupID, opts) + if err != nil { + return Only(ctx, err) } if len(ds.Entries) == 0 { @@ -387,26 +386,25 @@ func runDetailsOneDriveCmd( r repository.BackupGetter, backupID string, opts utils.OneDriveOpts, -) (*details.Details, *fault.Errors) { - errs := fault.New(false) - +) (*details.Details, error) { if err := utils.ValidateOneDriveRestoreFlags(backupID, opts); err != nil { - return nil, errs.Fail(err) + return nil, err } - d, _, err := r.BackupDetails(ctx, backupID) - if err != nil { - if errors.Is(err, kopia.ErrNotFound) { - return nil, errs.Fail(errors.Errorf("no backup exists with the id %s", backupID)) + d, _, errs := r.BackupDetails(ctx, backupID) + // TODO: log/track recoverable errors + if errs.Err() != nil { + if errors.Is(errs.Err(), kopia.ErrNotFound) { + return nil, errors.Errorf("no backup exists with the id %s", backupID) } - return nil, errs.Fail(errors.Wrap(err, "Failed to get backup details in the repository")) + return nil, errors.Wrap(errs.Err(), "Failed to get backup details in the repository") } sel := utils.IncludeOneDriveRestoreDataSelectors(opts) utils.FilterOneDriveRestoreInfoSelectors(sel, opts) - return sel.Reduce(ctx, d, errs), errs + return sel.Reduce(ctx, d, errs), nil } // `corso backup delete onedrive [...]` diff --git a/src/cli/backup/onedrive_test.go b/src/cli/backup/onedrive_test.go index 7fb2a38e3..8dde53bd8 100644 --- a/src/cli/backup/onedrive_test.go +++ b/src/cli/backup/onedrive_test.go @@ -99,8 +99,7 @@ func (suite *OneDriveSuite) TestOneDriveBackupDetailsSelectors() { test.BackupGetter, "backup-ID", test.Opts) - assert.NoError(t, err.Err()) - assert.Empty(t, err.Errs()) + assert.NoError(t, err) assert.ElementsMatch(t, test.Expected, output.Entries) }) } @@ -117,8 +116,7 @@ func (suite *OneDriveSuite) TestOneDriveBackupDetailsSelectorsBadFormats() { test.BackupGetter, "backup-ID", test.Opts) - assert.Error(t, err.Err()) - assert.Empty(t, err.Errs()) + assert.Error(t, err) assert.Empty(t, output) }) } diff --git a/src/cli/backup/sharepoint.go b/src/cli/backup/sharepoint.go index e8d65752a..c2a155334 100644 --- a/src/cli/backup/sharepoint.go +++ b/src/cli/backup/sharepoint.go @@ -18,7 +18,6 @@ import ( "github.com/alcionai/corso/src/internal/model" "github.com/alcionai/corso/src/pkg/backup" "github.com/alcionai/corso/src/pkg/backup/details" - "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/repository" 
"github.com/alcionai/corso/src/pkg/selectors" @@ -252,9 +251,10 @@ func createSharePointCmd(cmd *cobra.Command, args []string) error { bIDs = append(bIDs, bo.Results.BackupID) } - bups, err := r.Backups(ctx, bIDs) - if err != nil { - return Only(ctx, errors.Wrap(err, "Unable to retrieve backup results from storage")) + bups, ferrs := r.Backups(ctx, bIDs) + // TODO: print/log recoverable errors + if ferrs.Err() != nil { + return Only(ctx, errors.Wrap(ferrs.Err(), "Unable to retrieve backup results from storage")) } backup.PrintAll(ctx, bups) @@ -482,10 +482,9 @@ func detailsSharePointCmd(cmd *cobra.Command, args []string) error { Populated: utils.GetPopulatedFlags(cmd), } - ds, errs := runDetailsSharePointCmd(ctx, r, backupID, opts) - if errs.Err() != nil { - // TODO: log/display iterated errors - return Only(ctx, errs.Err()) + ds, err := runDetailsSharePointCmd(ctx, r, backupID, opts) + if err != nil { + return Only(ctx, err) } if len(ds.Entries) == 0 { @@ -506,24 +505,23 @@ func runDetailsSharePointCmd( r repository.BackupGetter, backupID string, opts utils.SharePointOpts, -) (*details.Details, *fault.Errors) { - errs := fault.New(false) - +) (*details.Details, error) { if err := utils.ValidateSharePointRestoreFlags(backupID, opts); err != nil { - return nil, errs.Fail(err) + return nil, err } - d, _, err := r.BackupDetails(ctx, backupID) - if err != nil { - if errors.Is(err, kopia.ErrNotFound) { - return nil, errs.Fail(errors.Errorf("no backup exists with the id %s", backupID)) + d, _, errs := r.BackupDetails(ctx, backupID) + // TODO: log/track recoverable errors + if errs.Err() != nil { + if errors.Is(errs.Err(), kopia.ErrNotFound) { + return nil, errors.Errorf("no backup exists with the id %s", backupID) } - return nil, errs.Fail(errors.Wrap(err, "Failed to get backup details in the repository")) + return nil, errors.Wrap(errs.Err(), "Failed to get backup details in the repository") } sel := utils.IncludeSharePointRestoreDataSelectors(opts) utils.FilterSharePointRestoreInfoSelectors(sel, opts) - return sel.Reduce(ctx, d, errs), errs + return sel.Reduce(ctx, d, errs), nil } diff --git a/src/cli/backup/sharepoint_test.go b/src/cli/backup/sharepoint_test.go index a46deeeff..e5b206ff6 100644 --- a/src/cli/backup/sharepoint_test.go +++ b/src/cli/backup/sharepoint_test.go @@ -214,8 +214,7 @@ func (suite *SharePointSuite) TestSharePointBackupDetailsSelectors() { test.BackupGetter, "backup-ID", test.Opts) - assert.NoError(t, err.Err()) - assert.Empty(t, err.Errs()) + assert.NoError(t, err) assert.ElementsMatch(t, test.Expected, output.Entries) }) } @@ -232,8 +231,7 @@ func (suite *SharePointSuite) TestSharePointBackupDetailsSelectorsBadFormats() { test.BackupGetter, "backup-ID", test.Opts) - assert.Error(t, err.Err()) - assert.Empty(t, err.Errs()) + assert.Error(t, err) assert.Empty(t, output) }) } diff --git a/src/cli/restore/exchange_integration_test.go b/src/cli/restore/exchange_integration_test.go index 23649b23b..4ef62e58b 100644 --- a/src/cli/restore/exchange_integration_test.go +++ b/src/cli/restore/exchange_integration_test.go @@ -110,8 +110,10 @@ func (suite *RestoreExchangeIntegrationSuite) SetupSuite() { // sanity check, ensure we can find the backup and its details immediately _, err = suite.repo.Backup(ctx, bop.Results.BackupID) require.NoError(t, err, "retrieving recent backup by ID") - _, _, err = suite.repo.BackupDetails(ctx, string(bop.Results.BackupID)) - require.NoError(t, err, "retrieving recent backup details by ID") + + _, _, errs := suite.repo.BackupDetails(ctx, 
string(bop.Results.BackupID)) + require.NoError(t, errs.Err(), "retrieving recent backup details by ID") + require.Empty(t, errs.Errs(), "retrieving recent backup details by ID") } } diff --git a/src/cli/utils/testdata/opts.go b/src/cli/utils/testdata/opts.go index 68a33911a..e958fc5c5 100644 --- a/src/cli/utils/testdata/opts.go +++ b/src/cli/utils/testdata/opts.go @@ -10,6 +10,7 @@ import ( "github.com/alcionai/corso/src/internal/model" "github.com/alcionai/corso/src/pkg/backup" "github.com/alcionai/corso/src/pkg/backup/details" + "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/selectors" "github.com/alcionai/corso/src/pkg/selectors/testdata" "github.com/alcionai/corso/src/pkg/store" @@ -497,8 +498,11 @@ func (MockBackupGetter) Backup( return nil, errors.New("unexpected call to mock") } -func (MockBackupGetter) Backups(context.Context, []model.StableID) ([]*backup.Backup, error) { - return nil, errors.New("unexpected call to mock") +func (MockBackupGetter) Backups( + context.Context, + []model.StableID, +) ([]*backup.Backup, *fault.Errors) { + return nil, fault.New(false).Fail(errors.New("unexpected call to mock")) } func (MockBackupGetter) BackupsByTag( @@ -511,10 +515,10 @@ func (MockBackupGetter) BackupsByTag( func (bg *MockBackupGetter) BackupDetails( ctx context.Context, backupID string, -) (*details.Details, *backup.Backup, error) { +) (*details.Details, *backup.Backup, *fault.Errors) { if bg == nil { - return testdata.GetDetailsSet(), nil, nil + return testdata.GetDetailsSet(), nil, fault.New(true) } - return nil, nil, errors.New("unexpected call to mock") + return nil, nil, fault.New(false).Fail(errors.New("unexpected call to mock")) } diff --git a/src/pkg/repository/repository.go b/src/pkg/repository/repository.go index a8f1e2827..429b0e7a7 100644 --- a/src/pkg/repository/repository.go +++ b/src/pkg/repository/repository.go @@ -6,7 +6,6 @@ import ( "github.com/alcionai/clues" "github.com/google/uuid" - "github.com/hashicorp/go-multierror" "github.com/pkg/errors" "github.com/alcionai/corso/src/internal/events" @@ -19,6 +18,7 @@ import ( "github.com/alcionai/corso/src/pkg/backup" "github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/control" + "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/logger" "github.com/alcionai/corso/src/pkg/selectors" "github.com/alcionai/corso/src/pkg/storage" @@ -31,12 +31,12 @@ var ErrorRepoAlreadyExists = errors.New("a repository was already initialized wi // repository. type BackupGetter interface { Backup(ctx context.Context, id model.StableID) (*backup.Backup, error) - Backups(ctx context.Context, ids []model.StableID) ([]*backup.Backup, error) + Backups(ctx context.Context, ids []model.StableID) ([]*backup.Backup, *fault.Errors) BackupsByTag(ctx context.Context, fs ...store.FilterOption) ([]*backup.Backup, error) BackupDetails( ctx context.Context, backupID string, - ) (*details.Details, *backup.Backup, error) + ) (*details.Details, *backup.Backup, *fault.Errors) } type Repository interface { @@ -282,23 +282,23 @@ func (r repository) Backup(ctx context.Context, id model.StableID) (*backup.Back // BackupsByID lists backups by ID. Returns as many backups as possible with // errors for the backups it was unable to retrieve. 
-func (r repository) Backups(ctx context.Context, ids []model.StableID) ([]*backup.Backup, error) { +func (r repository) Backups(ctx context.Context, ids []model.StableID) ([]*backup.Backup, *fault.Errors) { var ( - errs *multierror.Error bups []*backup.Backup + errs = fault.New(false) sw = store.NewKopiaStore(r.modelStore) ) for _, id := range ids { b, err := sw.GetBackup(ctx, id) if err != nil { - errs = multierror.Append(errs, err) + errs.Add(clues.Stack(err).With("backup_id", id)) } bups = append(bups, b) } - return bups, errs.ErrorOrNil() + return bups, errs } // backups lists backups in a repository @@ -308,12 +308,16 @@ func (r repository) BackupsByTag(ctx context.Context, fs ...store.FilterOption) } // BackupDetails returns the specified backup details object -func (r repository) BackupDetails(ctx context.Context, backupID string) (*details.Details, *backup.Backup, error) { +func (r repository) BackupDetails( + ctx context.Context, + backupID string, +) (*details.Details, *backup.Backup, *fault.Errors) { sw := store.NewKopiaStore(r.modelStore) + errs := fault.New(false) dID, b, err := sw.GetDetailsIDFromBackupID(ctx, model.StableID(backupID)) if err != nil { - return nil, nil, err + return nil, nil, errs.Fail(err) } deets, err := streamstore.New( @@ -321,10 +325,10 @@ func (r repository) BackupDetails(ctx context.Context, backupID string) (*detail r.Account.ID(), b.Selector.PathService()).ReadBackupDetails(ctx, dID) if err != nil { - return nil, nil, err + return nil, nil, errs.Fail(err) } - return deets, b, nil + return deets, b, errs } // DeleteBackup removes the backup from both the model store and the backup storage. diff --git a/src/pkg/repository/repository_load_test.go b/src/pkg/repository/repository_load_test.go index 36714df71..a080d916c 100644 --- a/src/pkg/repository/repository_load_test.go +++ b/src/pkg/repository/repository_load_test.go @@ -19,6 +19,7 @@ import ( "github.com/alcionai/corso/src/pkg/backup" "github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/control" + "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/repository" "github.com/alcionai/corso/src/pkg/selectors" @@ -241,17 +242,18 @@ func runBackupDetailsLoadTest( t.Run("backup_details_"+name, func(t *testing.T) { var ( - err error + errs *fault.Errors b *backup.Backup ds *details.Details labels = pprof.Labels("details_load_test", name) ) pprof.Do(ctx, labels, func(ctx context.Context) { - ds, b, err = r.BackupDetails(ctx, backupID) + ds, b, errs = r.BackupDetails(ctx, backupID) }) - require.NoError(t, err, "retrieving details in backup "+backupID) + require.NoError(t, errs.Err(), "retrieving details in backup "+backupID) + require.Empty(t, errs.Errs(), "retrieving details in backup "+backupID) require.NotNil(t, ds, "backup details must exist") require.NotNil(t, b, "backup must exist") From 373f0458a769efacb46c724c16fd80b6ad9274f7 Mon Sep 17 00:00:00 2001 From: ashmrtn Date: Tue, 7 Feb 2023 14:15:48 -0800 Subject: [PATCH 21/45] Split collection interface (#2415) ## Description Split the collection interface into stuff used during backup and stuff used during restore. Does not change other code beyond fixing types ## Does this PR need a docs update or release note? 
- [ ] :white_check_mark: Yes, it's included - [ ] :clock1: Yes, but in a later PR - [x] :no_entry: No ## Type of change - [ ] :sunflower: Feature - [ ] :bug: Bugfix - [ ] :world_map: Documentation - [ ] :robot: Test - [ ] :computer: CI/Deployment - [x] :broom: Tech Debt/Cleanup ## Issue(s) * closes #1944 ## Test Plan - [ ] :muscle: Manual - [x] :zap: Unit test - [x] :green_heart: E2E --- src/cmd/factory/impl/common.go | 4 +- src/internal/connector/data_collections.go | 12 +-- .../connector/exchange/data_collections.go | 14 +-- .../exchange/data_collections_test.go | 6 +- .../exchange/exchange_data_collection.go | 8 +- .../connector/exchange/service_iterators.go | 4 +- .../exchange/service_iterators_test.go | 6 +- .../connector/exchange/service_restore.go | 6 +- .../connector/graph/metadata_collection.go | 6 +- src/internal/connector/graph_connector.go | 2 +- .../connector/graph_connector_helper_test.go | 14 +-- .../connector/graph_connector_test.go | 12 +-- .../mockconnector/mock_data_collection.go | 11 ++- .../connector/mockconnector/mock_data_list.go | 4 +- src/internal/connector/onedrive/collection.go | 8 +- .../connector/onedrive/collections.go | 14 +-- .../connector/onedrive/collections_test.go | 4 +- src/internal/connector/onedrive/restore.go | 4 +- .../connector/sharepoint/collection.go | 8 +- .../connector/sharepoint/data_collections.go | 18 ++-- src/internal/connector/sharepoint/restore.go | 8 +- src/internal/data/data_collection.go | 18 ++-- src/internal/kopia/data_collection.go | 16 +--- src/internal/kopia/upload.go | 14 +-- src/internal/kopia/upload_test.go | 86 +++++++++---------- src/internal/kopia/wrapper.go | 6 +- src/internal/kopia/wrapper_test.go | 65 ++++++++++---- src/internal/operations/backup.go | 8 +- .../operations/backup_integration_test.go | 4 +- src/internal/operations/backup_test.go | 16 ++-- src/internal/operations/manifests.go | 6 +- src/internal/operations/manifests_test.go | 33 +++---- src/internal/operations/restore.go | 4 +- src/internal/operations/restore_test.go | 5 +- src/internal/streamstore/streamstore.go | 4 +- 35 files changed, 240 insertions(+), 218 deletions(-) diff --git a/src/cmd/factory/impl/common.go b/src/cmd/factory/impl/common.go index 78a5dca0e..2279c71a3 100644 --- a/src/cmd/factory/impl/common.go +++ b/src/cmd/factory/impl/common.go @@ -152,8 +152,8 @@ func buildCollections( tenant, user string, dest control.RestoreDestination, colls []collection, -) ([]data.Collection, error) { - collections := make([]data.Collection, 0, len(colls)) +) ([]data.RestoreCollection, error) { + collections := make([]data.RestoreCollection, 0, len(colls)) for _, c := range colls { pth, err := toDataLayerPath( diff --git a/src/internal/connector/data_collections.go b/src/internal/connector/data_collections.go index 410a05462..51beb4eb2 100644 --- a/src/internal/connector/data_collections.go +++ b/src/internal/connector/data_collections.go @@ -34,9 +34,9 @@ import ( func (gc *GraphConnector) DataCollections( ctx context.Context, sels selectors.Selector, - metadata []data.Collection, + metadata []data.RestoreCollection, ctrlOpts control.Options, -) ([]data.Collection, map[string]struct{}, error) { +) ([]data.BackupCollection, map[string]struct{}, error) { ctx, end := D.Span(ctx, "gc:dataCollections", D.Index("service", sels.Service.String())) defer end() @@ -51,7 +51,7 @@ func (gc *GraphConnector) DataCollections( } if !serviceEnabled { - return []data.Collection{}, nil, nil + return []data.BackupCollection{}, nil, nil } switch sels.Service { @@ -182,9 +182,9 @@ 
func (fm odFolderMatcher) Matches(dir string) bool { func (gc *GraphConnector) OneDriveDataCollections( ctx context.Context, selector selectors.Selector, - metadata []data.Collection, + metadata []data.RestoreCollection, ctrlOpts control.Options, -) ([]data.Collection, map[string]struct{}, error) { +) ([]data.BackupCollection, map[string]struct{}, error) { odb, err := selector.ToOneDriveBackup() if err != nil { return nil, nil, errors.Wrap(err, "oneDriveDataCollection: parsing selector") @@ -192,7 +192,7 @@ func (gc *GraphConnector) OneDriveDataCollections( var ( user = selector.DiscreteOwner - collections = []data.Collection{} + collections = []data.BackupCollection{} allExcludes = map[string]struct{}{} errs error ) diff --git a/src/internal/connector/exchange/data_collections.go b/src/internal/connector/exchange/data_collections.go index 41bc16301..92e826f3d 100644 --- a/src/internal/connector/exchange/data_collections.go +++ b/src/internal/connector/exchange/data_collections.go @@ -63,7 +63,7 @@ type DeltaPath struct { // and path lookup maps. func parseMetadataCollections( ctx context.Context, - colls []data.Collection, + colls []data.RestoreCollection, ) (CatDeltaPaths, error) { // cdp stores metadata cdp := CatDeltaPaths{ @@ -163,11 +163,11 @@ func parseMetadataCollections( func DataCollections( ctx context.Context, selector selectors.Selector, - metadata []data.Collection, + metadata []data.RestoreCollection, acct account.M365Config, su support.StatusUpdater, ctrlOpts control.Options, -) ([]data.Collection, map[string]struct{}, error) { +) ([]data.BackupCollection, map[string]struct{}, error) { eb, err := selector.ToExchangeBackup() if err != nil { return nil, nil, errors.Wrap(err, "exchangeDataCollection: parsing selector") @@ -175,7 +175,7 @@ func DataCollections( var ( user = selector.DiscreteOwner - collections = []data.Collection{} + collections = []data.BackupCollection{} errs error ) @@ -231,10 +231,10 @@ func createCollections( dps DeltaPaths, ctrlOpts control.Options, su support.StatusUpdater, -) ([]data.Collection, error) { +) ([]data.BackupCollection, error) { var ( errs *multierror.Error - allCollections = make([]data.Collection, 0) + allCollections = make([]data.BackupCollection, 0) ac = api.Client{Credentials: creds} category = scope.Category().PathType() ) @@ -245,7 +245,7 @@ func createCollections( } // Create collection of ExchangeDataCollection - collections := make(map[string]data.Collection) + collections := make(map[string]data.BackupCollection) qp := graph.QueryParams{ Category: category, diff --git a/src/internal/connector/exchange/data_collections_test.go b/src/internal/connector/exchange/data_collections_test.go index 07eef5e7a..42b24c9f3 100644 --- a/src/internal/connector/exchange/data_collections_test.go +++ b/src/internal/connector/exchange/data_collections_test.go @@ -174,7 +174,7 @@ func (suite *DataCollectionsUnitSuite) TestParseMetadataCollections() { ) require.NoError(t, err) - cdps, err := parseMetadataCollections(ctx, []data.Collection{coll}) + cdps, err := parseMetadataCollections(ctx, []data.RestoreCollection{coll}) test.expectError(t, err) emails := cdps[path.EmailCategory] @@ -335,7 +335,7 @@ func (suite *DataCollectionsIntegrationSuite) TestDelta() { require.NoError(t, err) assert.Less(t, 1, len(collections), "retrieved metadata and data collections") - var metadata data.Collection + var metadata data.BackupCollection for _, coll := range collections { if coll.FullPath().Service() == path.ExchangeMetadataService { @@ -345,7 +345,7 @@ func 
(suite *DataCollectionsIntegrationSuite) TestDelta() { require.NotNil(t, metadata, "collections contains a metadata collection") - cdps, err := parseMetadataCollections(ctx, []data.Collection{metadata}) + cdps, err := parseMetadataCollections(ctx, []data.RestoreCollection{metadata}) require.NoError(t, err) dps := cdps[test.scope.Category().PathType()] diff --git a/src/internal/connector/exchange/exchange_data_collection.go b/src/internal/connector/exchange/exchange_data_collection.go index d53e3dbe9..ecb85521b 100644 --- a/src/internal/connector/exchange/exchange_data_collection.go +++ b/src/internal/connector/exchange/exchange_data_collection.go @@ -24,10 +24,10 @@ import ( ) var ( - _ data.Collection = &Collection{} - _ data.Stream = &Stream{} - _ data.StreamInfo = &Stream{} - _ data.StreamModTime = &Stream{} + _ data.BackupCollection = &Collection{} + _ data.Stream = &Stream{} + _ data.StreamInfo = &Stream{} + _ data.StreamModTime = &Stream{} ) const ( diff --git a/src/internal/connector/exchange/service_iterators.go b/src/internal/connector/exchange/service_iterators.go index b59f37877..d4b059664 100644 --- a/src/internal/connector/exchange/service_iterators.go +++ b/src/internal/connector/exchange/service_iterators.go @@ -25,14 +25,14 @@ type addedAndRemovedItemIDsGetter interface { // filterContainersAndFillCollections is a utility function // that places the M365 object ids belonging to specific directories -// into a Collection. Messages outside of those directories are omitted. +// into a BackupCollection. Messages outside of those directories are omitted. // @param collection is filled with during this function. // Supports all exchange applications: Contacts, Events, and Mail func filterContainersAndFillCollections( ctx context.Context, qp graph.QueryParams, getter addedAndRemovedItemIDsGetter, - collections map[string]data.Collection, + collections map[string]data.BackupCollection, statusUpdater support.StatusUpdater, resolver graph.ContainerResolver, scope selectors.ExchangeScope, diff --git a/src/internal/connector/exchange/service_iterators_test.go b/src/internal/connector/exchange/service_iterators_test.go index e1872b55c..1b54aa803 100644 --- a/src/internal/connector/exchange/service_iterators_test.go +++ b/src/internal/connector/exchange/service_iterators_test.go @@ -280,7 +280,7 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections() { ctx, flush := tester.NewContext() defer flush() - collections := map[string]data.Collection{} + collections := map[string]data.BackupCollection{} err := filterContainersAndFillCollections( ctx, @@ -433,7 +433,7 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections_repea resolver = newMockResolver(container1) ) - collections := map[string]data.Collection{} + collections := map[string]data.BackupCollection{} err := filterContainersAndFillCollections( ctx, @@ -785,7 +785,7 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections_incre ctx, flush := tester.NewContext() defer flush() - collections := map[string]data.Collection{} + collections := map[string]data.BackupCollection{} err := filterContainersAndFillCollections( ctx, diff --git a/src/internal/connector/exchange/service_restore.go b/src/internal/connector/exchange/service_restore.go index e6fa592f7..6da25450d 100644 --- a/src/internal/connector/exchange/service_restore.go +++ b/src/internal/connector/exchange/service_restore.go @@ -297,7 +297,7 @@ func SendMailToBackStore( return errs } -// 
RestoreExchangeDataCollections restores M365 objects in data.Collection to MSFT +// RestoreExchangeDataCollections restores M365 objects in data.RestoreCollection to MSFT // store through GraphAPI. // @param dest: container destination to M365 func RestoreExchangeDataCollections( @@ -305,7 +305,7 @@ func RestoreExchangeDataCollections( creds account.M365Config, gs graph.Servicer, dest control.RestoreDestination, - dcs []data.Collection, + dcs []data.RestoreCollection, deets *details.Builder, ) (*support.ConnectorOperationStatus, error) { var ( @@ -364,7 +364,7 @@ func RestoreExchangeDataCollections( func restoreCollection( ctx context.Context, gs graph.Servicer, - dc data.Collection, + dc data.RestoreCollection, folderID string, policy control.CollisionPolicy, deets *details.Builder, diff --git a/src/internal/connector/graph/metadata_collection.go b/src/internal/connector/graph/metadata_collection.go index 6036bfbe4..9506dce86 100644 --- a/src/internal/connector/graph/metadata_collection.go +++ b/src/internal/connector/graph/metadata_collection.go @@ -14,8 +14,8 @@ import ( ) var ( - _ data.Collection = &MetadataCollection{} - _ data.Stream = &MetadataItem{} + _ data.BackupCollection = &MetadataCollection{} + _ data.Stream = &MetadataItem{} ) // MetadataCollection in a simple collection that assumes all items to be @@ -67,7 +67,7 @@ func MakeMetadataCollection( cat path.CategoryType, metadata []MetadataCollectionEntry, statusUpdater support.StatusUpdater, -) (data.Collection, error) { +) (data.BackupCollection, error) { if len(metadata) == 0 { return nil, nil } diff --git a/src/internal/connector/graph_connector.go b/src/internal/connector/graph_connector.go index 5ef6ef6be..7b7b7a072 100644 --- a/src/internal/connector/graph_connector.go +++ b/src/internal/connector/graph_connector.go @@ -271,7 +271,7 @@ func (gc *GraphConnector) RestoreDataCollections( selector selectors.Selector, dest control.RestoreDestination, opts control.Options, - dcs []data.Collection, + dcs []data.RestoreCollection, ) (*details.Details, error) { ctx, end := D.Span(ctx, "connector:restore") defer end() diff --git a/src/internal/connector/graph_connector_helper_test.go b/src/internal/connector/graph_connector_helper_test.go index 138cd6439..299509f96 100644 --- a/src/internal/connector/graph_connector_helper_test.go +++ b/src/internal/connector/graph_connector_helper_test.go @@ -740,7 +740,7 @@ func compareItem( func checkHasCollections( t *testing.T, expected map[string]map[string][]byte, - got []data.Collection, + got []data.BackupCollection, ) { t.Helper() @@ -762,10 +762,10 @@ func checkCollections( t *testing.T, expectedItems int, expected map[string]map[string][]byte, - got []data.Collection, + got []data.BackupCollection, restorePermissions bool, ) int { - collectionsWithItems := []data.Collection{} + collectionsWithItems := []data.BackupCollection{} skipped := 0 gotItems := 0 @@ -950,8 +950,8 @@ func collectionsForInfo( tenant, user string, dest control.RestoreDestination, allInfo []colInfo, -) (int, int, []data.Collection, map[string]map[string][]byte) { - collections := make([]data.Collection, 0, len(allInfo)) +) (int, int, []data.RestoreCollection, map[string]map[string][]byte) { + collections := make([]data.RestoreCollection, 0, len(allInfo)) expectedData := make(map[string]map[string][]byte, len(allInfo)) totalItems := 0 kopiaEntries := 0 @@ -1002,8 +1002,8 @@ func collectionsForInfoVersion0( tenant, user string, dest control.RestoreDestination, allInfo []colInfo, -) (int, int, []data.Collection, 
map[string]map[string][]byte) { - collections := make([]data.Collection, 0, len(allInfo)) +) (int, int, []data.RestoreCollection, map[string]map[string][]byte) { + collections := make([]data.RestoreCollection, 0, len(allInfo)) expectedData := make(map[string]map[string][]byte, len(allInfo)) totalItems := 0 kopiaEntries := 0 diff --git a/src/internal/connector/graph_connector_test.go b/src/internal/connector/graph_connector_test.go index b3b55a15e..7a3d703a0 100644 --- a/src/internal/connector/graph_connector_test.go +++ b/src/internal/connector/graph_connector_test.go @@ -257,7 +257,7 @@ func (suite *GraphConnectorIntegrationSuite) TestEmptyCollections() { dest := tester.DefaultTestRestoreDestination() table := []struct { name string - col []data.Collection + col []data.RestoreCollection sel selectors.Selector }{ { @@ -269,7 +269,7 @@ func (suite *GraphConnectorIntegrationSuite) TestEmptyCollections() { }, { name: "ExchangeEmpty", - col: []data.Collection{}, + col: []data.RestoreCollection{}, sel: selectors.Selector{ Service: selectors.ServiceExchange, }, @@ -283,7 +283,7 @@ func (suite *GraphConnectorIntegrationSuite) TestEmptyCollections() { }, { name: "OneDriveEmpty", - col: []data.Collection{}, + col: []data.RestoreCollection{}, sel: selectors.Selector{ Service: selectors.ServiceOneDrive, }, @@ -297,7 +297,7 @@ func (suite *GraphConnectorIntegrationSuite) TestEmptyCollections() { }, { name: "SharePointEmpty", - col: []data.Collection{}, + col: []data.RestoreCollection{}, sel: selectors.Selector{ Service: selectors.ServiceSharePoint, }, @@ -370,7 +370,7 @@ func runRestoreBackupTest( opts control.Options, ) { var ( - collections []data.Collection + collections []data.RestoreCollection expectedData = map[string]map[string][]byte{} totalItems = 0 totalKopiaItems = 0 @@ -495,7 +495,7 @@ func runRestoreBackupTestVersion0( opts control.Options, ) { var ( - collections []data.Collection + collections []data.RestoreCollection expectedData = map[string]map[string][]byte{} totalItems = 0 totalKopiaItems = 0 diff --git a/src/internal/connector/mockconnector/mock_data_collection.go b/src/internal/connector/mockconnector/mock_data_collection.go index 6a6d806ff..8cd315d0d 100644 --- a/src/internal/connector/mockconnector/mock_data_collection.go +++ b/src/internal/connector/mockconnector/mock_data_collection.go @@ -27,10 +27,13 @@ type MockExchangeDataCollection struct { } var ( - _ data.Collection = &MockExchangeDataCollection{} - _ data.Stream = &MockExchangeData{} - _ data.StreamInfo = &MockExchangeData{} - _ data.StreamSize = &MockExchangeData{} + // Needs to implement both backup and restore interfaces so we can use it in + // integration tests. 
+ _ data.BackupCollection = &MockExchangeDataCollection{} + _ data.RestoreCollection = &MockExchangeDataCollection{} + _ data.Stream = &MockExchangeData{} + _ data.StreamInfo = &MockExchangeData{} + _ data.StreamSize = &MockExchangeData{} ) // NewMockExchangeDataCollection creates an data collection that will return the specified number of diff --git a/src/internal/connector/mockconnector/mock_data_list.go b/src/internal/connector/mockconnector/mock_data_list.go index 2994dd275..78d37105b 100644 --- a/src/internal/connector/mockconnector/mock_data_list.go +++ b/src/internal/connector/mockconnector/mock_data_list.go @@ -14,8 +14,8 @@ import ( ) var ( - _ data.Stream = &MockListData{} - _ data.Collection = &MockListCollection{} + _ data.Stream = &MockListData{} + _ data.BackupCollection = &MockListCollection{} ) type MockListCollection struct { diff --git a/src/internal/connector/onedrive/collection.go b/src/internal/connector/onedrive/collection.go index e0a328ef7..4aff95ac1 100644 --- a/src/internal/connector/onedrive/collection.go +++ b/src/internal/connector/onedrive/collection.go @@ -42,10 +42,10 @@ const ( ) var ( - _ data.Collection = &Collection{} - _ data.Stream = &Item{} - _ data.StreamInfo = &Item{} - _ data.StreamModTime = &Item{} + _ data.BackupCollection = &Collection{} + _ data.Stream = &Item{} + _ data.StreamInfo = &Item{} + _ data.StreamModTime = &Item{} ) // Collection represents a set of OneDrive objects retrieved from M365 diff --git a/src/internal/connector/onedrive/collections.go b/src/internal/connector/onedrive/collections.go index 4388d7fd5..a951d682e 100644 --- a/src/internal/connector/onedrive/collections.go +++ b/src/internal/connector/onedrive/collections.go @@ -61,9 +61,9 @@ type Collections struct { ctrl control.Options - // collectionMap allows lookup of the data.Collection + // collectionMap allows lookup of the data.BackupCollection // for a OneDrive folder - CollectionMap map[string]data.Collection + CollectionMap map[string]data.BackupCollection // Not the most ideal, but allows us to change the pager function for testing // as needed. This will allow us to mock out some scenarios during testing. @@ -100,7 +100,7 @@ func NewCollections( resourceOwner: resourceOwner, source: source, matcher: matcher, - CollectionMap: map[string]data.Collection{}, + CollectionMap: map[string]data.BackupCollection{}, drivePagerFunc: PagerForSource, itemPagerFunc: defaultItemPager, service: service, @@ -111,7 +111,7 @@ func NewCollections( func deserializeMetadata( ctx context.Context, - cols []data.Collection, + cols []data.RestoreCollection, ) (map[string]string, map[string]map[string]string, error) { logger.Ctx(ctx).Infow( "deserialzing previous backup metadata", @@ -249,8 +249,8 @@ func deserializeMap[T any](reader io.ReadCloser, alreadyFound map[string]T) erro // be excluded from the upcoming backup. func (c *Collections) Get( ctx context.Context, - prevMetadata []data.Collection, -) ([]data.Collection, map[string]struct{}, error) { + prevMetadata []data.RestoreCollection, +) ([]data.BackupCollection, map[string]struct{}, error) { prevDeltas, _, err := deserializeMetadata(ctx, prevMetadata) if err != nil { return nil, nil, err @@ -327,7 +327,7 @@ func (c *Collections) Get( observe.Message(ctx, observe.Safe(fmt.Sprintf("Discovered %d items to backup", c.NumItems))) // Add an extra for the metadata collection. 
- collections := make([]data.Collection, 0, len(c.CollectionMap)+1) + collections := make([]data.BackupCollection, 0, len(c.CollectionMap)+1) for _, coll := range c.CollectionMap { collections = append(collections, coll) } diff --git a/src/internal/connector/onedrive/collections_test.go b/src/internal/connector/onedrive/collections_test.go index ec7b53442..1fae8ee9a 100644 --- a/src/internal/connector/onedrive/collections_test.go +++ b/src/internal/connector/onedrive/collections_test.go @@ -983,7 +983,7 @@ func (suite *OneDriveCollectionsSuite) TestDeserializeMetadata() { ctx, flush := tester.NewContext() defer flush() - cols := []data.Collection{} + cols := []data.RestoreCollection{} for _, c := range test.cols { mc, err := graph.MakeMetadataCollection( @@ -1529,7 +1529,7 @@ func (suite *OneDriveCollectionsSuite) TestGet() { for _, baseCol := range cols { folderPath := baseCol.FullPath().String() if folderPath == metadataPath.String() { - deltas, paths, err := deserializeMetadata(ctx, []data.Collection{baseCol}) + deltas, paths, err := deserializeMetadata(ctx, []data.RestoreCollection{baseCol}) if !assert.NoError(t, err, "deserializing metadata") { continue } diff --git a/src/internal/connector/onedrive/restore.go b/src/internal/connector/onedrive/restore.go index 0014457c4..e2029f4cc 100644 --- a/src/internal/connector/onedrive/restore.go +++ b/src/internal/connector/onedrive/restore.go @@ -64,7 +64,7 @@ func RestoreCollections( service graph.Servicer, dest control.RestoreDestination, opts control.Options, - dcs []data.Collection, + dcs []data.RestoreCollection, deets *details.Builder, ) (*support.ConnectorOperationStatus, error) { var ( @@ -148,7 +148,7 @@ func RestoreCollection( ctx context.Context, backupVersion int, service graph.Servicer, - dc data.Collection, + dc data.RestoreCollection, parentPerms []UserPermission, source driveSource, restoreContainerName string, diff --git a/src/internal/connector/sharepoint/collection.go b/src/internal/connector/sharepoint/collection.go index 603edd685..b657c294e 100644 --- a/src/internal/connector/sharepoint/collection.go +++ b/src/internal/connector/sharepoint/collection.go @@ -30,10 +30,10 @@ const ( ) var ( - _ data.Collection = &Collection{} - _ data.Stream = &Item{} - _ data.StreamInfo = &Item{} - _ data.StreamModTime = &Item{} + _ data.BackupCollection = &Collection{} + _ data.Stream = &Item{} + _ data.StreamInfo = &Item{} + _ data.StreamModTime = &Item{} ) // Collection is the SharePoint.List implementation of data.Collection. 
SharePoint.Libraries collections are supported diff --git a/src/internal/connector/sharepoint/data_collections.go b/src/internal/connector/sharepoint/data_collections.go index 88e16882c..adb8a215e 100644 --- a/src/internal/connector/sharepoint/data_collections.go +++ b/src/internal/connector/sharepoint/data_collections.go @@ -34,7 +34,7 @@ func DataCollections( serv graph.Servicer, su statusUpdater, ctrlOpts control.Options, -) ([]data.Collection, map[string]struct{}, error) { +) ([]data.BackupCollection, map[string]struct{}, error) { b, err := selector.ToSharePointBackup() if err != nil { return nil, nil, errors.Wrap(err, "sharePointDataCollection: parsing selector") @@ -42,7 +42,7 @@ func DataCollections( var ( site = b.DiscreteOwner - collections = []data.Collection{} + collections = []data.BackupCollection{} errs error ) @@ -54,7 +54,7 @@ func DataCollections( defer closer() defer close(foldersComplete) - var spcs []data.Collection + var spcs []data.BackupCollection switch scope.Category().PathType() { case path.ListsCategory: @@ -97,10 +97,10 @@ func collectLists( tenantID, siteID string, updater statusUpdater, ctrlOpts control.Options, -) ([]data.Collection, error) { +) ([]data.BackupCollection, error) { logger.Ctx(ctx).With("site", siteID).Debug("Creating SharePoint List Collections") - spcs := make([]data.Collection, 0) + spcs := make([]data.BackupCollection, 0) tuples, err := preFetchLists(ctx, serv, siteID) if err != nil { @@ -137,9 +137,9 @@ func collectLibraries( scope selectors.SharePointScope, updater statusUpdater, ctrlOpts control.Options, -) ([]data.Collection, map[string]struct{}, error) { +) ([]data.BackupCollection, map[string]struct{}, error) { var ( - collections = []data.Collection{} + collections = []data.BackupCollection{} errs error ) @@ -175,10 +175,10 @@ func collectPages( scope selectors.SharePointScope, updater statusUpdater, ctrlOpts control.Options, -) ([]data.Collection, error) { +) ([]data.BackupCollection, error) { logger.Ctx(ctx).With("site", siteID).Debug("Creating SharePoint Pages collections") - spcs := make([]data.Collection, 0) + spcs := make([]data.BackupCollection, 0) // make the betaClient adpt, err := graph.CreateAdapter(creds.AzureTenantID, creds.AzureClientID, creds.AzureClientSecret) diff --git a/src/internal/connector/sharepoint/restore.go b/src/internal/connector/sharepoint/restore.go index 10cf125e7..c2c92249f 100644 --- a/src/internal/connector/sharepoint/restore.go +++ b/src/internal/connector/sharepoint/restore.go @@ -30,7 +30,7 @@ import ( // -- Switch: // ---- Libraries restored via the same workflow as oneDrive // ---- Lists call RestoreCollection() -// ----> for each data.Stream within Collection.Items() +// ----> for each data.Stream within RestoreCollection.Items() // ----> restoreListItems() is called // Restored List can be found in the Site's `Site content` page // Restored Libraries can be found within the Site's `Pages` page @@ -43,7 +43,7 @@ func RestoreCollections( creds account.M365Config, service graph.Servicer, dest control.RestoreDestination, - dcs []data.Collection, + dcs []data.RestoreCollection, deets *details.Builder, ) (*support.ConnectorOperationStatus, error) { var ( @@ -219,7 +219,7 @@ func restoreListItem( func RestoreListCollection( ctx context.Context, service graph.Servicer, - dc data.Collection, + dc data.RestoreCollection, restoreContainerName string, deets *details.Builder, errUpdater func(string, error), @@ -291,7 +291,7 @@ func RestoreListCollection( func RestorePageCollection( ctx context.Context, 
creds account.M365Config, - dc data.Collection, + dc data.RestoreCollection, restoreContainerName string, deets *details.Builder, errUpdater func(string, error), diff --git a/src/internal/data/data_collection.go b/src/internal/data/data_collection.go index 8f5fb67cf..840268169 100644 --- a/src/internal/data/data_collection.go +++ b/src/internal/data/data_collection.go @@ -21,8 +21,8 @@ const ( DeletedState ) -// A Collection represents a compilation of data from the -// same type application (e.g. mail) +// A Collection represents the set of data within a single logical location +// denoted by FullPath. type Collection interface { // Items returns a channel from which items in the collection can be read. // Each returned struct contains the next item in the collection @@ -30,10 +30,13 @@ type Collection interface { // an unrecoverable error caused an early termination in the sender. Items() <-chan Stream // FullPath returns a path struct that acts as a metadata tag for this - // DataCollection. Returned items should be ordered from most generic to least - // generic. For example, a DataCollection for emails from a specific user - // would be {"", "exchange", "", "emails"}. + // Collection. FullPath() path.Path +} + +// BackupCollection is an extension of Collection that is used during backups. +type BackupCollection interface { + Collection // PreviousPath returns the path.Path this collection used to reside at // (according to the M365 ID for the container) if the collection was moved or // renamed. Returns nil if the collection is new. @@ -58,6 +61,11 @@ type Collection interface { DoNotMergeItems() bool } +// RestoreCollection is an extension of Collection that is used during restores. +type RestoreCollection interface { + Collection +} + // Stream represents a single item within a Collection // that can be consumed as a stream (it embeds io.Reader) type Stream interface { diff --git a/src/internal/kopia/data_collection.go b/src/internal/kopia/data_collection.go index 4197f754c..262ebd849 100644 --- a/src/internal/kopia/data_collection.go +++ b/src/internal/kopia/data_collection.go @@ -8,8 +8,8 @@ import ( ) var ( - _ data.Collection = &kopiaDataCollection{} - _ data.Stream = &kopiaDataStream{} + _ data.RestoreCollection = &kopiaDataCollection{} + _ data.Stream = &kopiaDataStream{} ) type kopiaDataCollection struct { @@ -35,18 +35,6 @@ func (kdc kopiaDataCollection) FullPath() path.Path { return kdc.path } -func (kdc kopiaDataCollection) PreviousPath() path.Path { - return nil -} - -func (kdc kopiaDataCollection) State() data.CollectionState { - return data.NewState -} - -func (kdc kopiaDataCollection) DoNotMergeItems() bool { - return false -} - type kopiaDataStream struct { reader io.ReadCloser uuid string diff --git a/src/internal/kopia/upload.go b/src/internal/kopia/upload.go index 8ddb46978..7ada05013 100644 --- a/src/internal/kopia/upload.go +++ b/src/internal/kopia/upload.go @@ -254,7 +254,7 @@ func (cp *corsoProgress) get(k string) *itemDetails { func collectionEntries( ctx context.Context, cb func(context.Context, fs.Entry) error, - streamedEnts data.Collection, + streamedEnts data.BackupCollection, progress *corsoProgress, ) (map[string]struct{}, *multierror.Error) { if streamedEnts == nil { @@ -442,7 +442,7 @@ func getStreamItemFunc( curPath path.Path, prevPath path.Path, staticEnts []fs.Entry, - streamedEnts data.Collection, + streamedEnts data.BackupCollection, baseDir fs.Directory, globalExcludeSet map[string]struct{}, progress *corsoProgress, @@ -540,7 +540,7 @@ type 
treeMap struct { childDirs map[string]*treeMap // Reference to data pulled from the external service. Contains only items in // this directory. Does not contain references to subdirectories. - collection data.Collection + collection data.BackupCollection // Reference to directory in base snapshot. The referenced directory itself // may contain files and subdirectories, but the subdirectories should // eventually be added when walking the base snapshot to build the hierarchy, @@ -617,7 +617,7 @@ func getTreeNode(roots map[string]*treeMap, pathElements []string) *treeMap { func inflateCollectionTree( ctx context.Context, - collections []data.Collection, + collections []data.BackupCollection, ) (map[string]*treeMap, map[string]path.Path, error) { roots := make(map[string]*treeMap) // Contains the old path for collections that have been moved or renamed. @@ -911,13 +911,13 @@ func inflateBaseTree( // exclude from base directories when uploading the snapshot. As items in *all* // base directories will be checked for in every base directory, this assumes // that items in the bases are unique. Deletions of directories or subtrees -// should be represented as changes in the status of a Collection, not an entry -// in the globalExcludeSet. +// should be represented as changes in the status of a BackupCollection, not an +// entry in the globalExcludeSet. func inflateDirTree( ctx context.Context, loader snapshotLoader, baseSnaps []IncrementalBase, - collections []data.Collection, + collections []data.BackupCollection, globalExcludeSet map[string]struct{}, progress *corsoProgress, ) (fs.Directory, error) { diff --git a/src/internal/kopia/upload_test.go b/src/internal/kopia/upload_test.go index 1cc4daf47..55877db17 100644 --- a/src/internal/kopia/upload_test.go +++ b/src/internal/kopia/upload_test.go @@ -683,7 +683,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree() { progress := &corsoProgress{pending: map[string]*itemDetails{}} - collections := []data.Collection{ + collections := []data.BackupCollection{ mockconnector.NewMockExchangeCollection( suite.testPath, expectedFileCount[user1Encoded], @@ -759,11 +759,11 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_MixedDirectory() // - 42 separate files table := []struct { name string - layout []data.Collection + layout []data.BackupCollection }{ { name: "SubdirFirst", - layout: []data.Collection{ + layout: []data.BackupCollection{ mockconnector.NewMockExchangeCollection( p2, 5, @@ -776,7 +776,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_MixedDirectory() }, { name: "SubdirLast", - layout: []data.Collection{ + layout: []data.BackupCollection{ mockconnector.NewMockExchangeCollection( suite.testPath, 42, @@ -845,7 +845,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_Fails() { table := []struct { name string - layout []data.Collection + layout []data.BackupCollection }{ { "MultipleRoots", @@ -862,7 +862,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_Fails() { // - emails // - Inbox // - 42 separate files - []data.Collection{ + []data.BackupCollection{ mockconnector.NewMockExchangeCollection( suite.testPath, 5, @@ -875,7 +875,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_Fails() { }, { "NoCollectionPath", - []data.Collection{ + []data.BackupCollection{ mockconnector.NewMockExchangeCollection( nil, 5, @@ -973,7 +973,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeErrors() { progress := &corsoProgress{pending: 
map[string]*itemDetails{}} - cols := []data.Collection{} + cols := []data.BackupCollection{} for _, s := range test.states { prevPath := dirPath nowPath := dirPath @@ -1037,17 +1037,17 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSingleSubtree() { table := []struct { name string - inputCollections func() []data.Collection + inputCollections func() []data.BackupCollection expected *expectedNode }{ { name: "SkipsDeletedItems", - inputCollections: func() []data.Collection { + inputCollections: func() []data.BackupCollection { mc := mockconnector.NewMockExchangeCollection(dirPath, 1) mc.Names[0] = testFileName mc.DeletedItems[0] = true - return []data.Collection{mc} + return []data.BackupCollection{mc} }, expected: expectedTreeWithChildren( []string{ @@ -1066,13 +1066,13 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSingleSubtree() { }, { name: "AddsNewItems", - inputCollections: func() []data.Collection { + inputCollections: func() []data.BackupCollection { mc := mockconnector.NewMockExchangeCollection(dirPath, 1) mc.Names[0] = testFileName2 mc.Data[0] = testFileData2 mc.ColState = data.NotMovedState - return []data.Collection{mc} + return []data.BackupCollection{mc} }, expected: expectedTreeWithChildren( []string{ @@ -1101,13 +1101,13 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSingleSubtree() { }, { name: "SkipsUpdatedItems", - inputCollections: func() []data.Collection { + inputCollections: func() []data.BackupCollection { mc := mockconnector.NewMockExchangeCollection(dirPath, 1) mc.Names[0] = testFileName mc.Data[0] = testFileData2 mc.ColState = data.NotMovedState - return []data.Collection{mc} + return []data.BackupCollection{mc} }, expected: expectedTreeWithChildren( []string{ @@ -1132,7 +1132,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSingleSubtree() { }, { name: "DeleteAndNew", - inputCollections: func() []data.Collection { + inputCollections: func() []data.BackupCollection { mc1 := mockconnector.NewMockExchangeCollection(dirPath, 0) mc1.ColState = data.DeletedState mc1.PrevPath = dirPath @@ -1142,7 +1142,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSingleSubtree() { mc2.Names[0] = testFileName2 mc2.Data[0] = testFileData2 - return []data.Collection{mc1, mc2} + return []data.BackupCollection{mc1, mc2} }, expected: expectedTreeWithChildren( []string{ @@ -1167,7 +1167,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSingleSubtree() { }, { name: "MovedAndNew", - inputCollections: func() []data.Collection { + inputCollections: func() []data.BackupCollection { mc1 := mockconnector.NewMockExchangeCollection(dirPath2, 0) mc1.ColState = data.MovedState mc1.PrevPath = dirPath @@ -1177,7 +1177,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSingleSubtree() { mc2.Names[0] = testFileName2 mc2.Data[0] = testFileData2 - return []data.Collection{mc1, mc2} + return []data.BackupCollection{mc1, mc2} }, expected: expectedTreeWithChildren( []string{ @@ -1211,13 +1211,13 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSingleSubtree() { }, { name: "NewDoesntMerge", - inputCollections: func() []data.Collection { + inputCollections: func() []data.BackupCollection { mc1 := mockconnector.NewMockExchangeCollection(dirPath, 1) mc1.ColState = data.NewState mc1.Names[0] = testFileName2 mc1.Data[0] = testFileData2 - return []data.Collection{mc1} + return []data.BackupCollection{mc1} }, expected: expectedTreeWithChildren( []string{ @@ -1369,13 +1369,13 @@ 
func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto table := []struct { name string - inputCollections func(t *testing.T) []data.Collection + inputCollections func(t *testing.T) []data.BackupCollection inputExcludes map[string]struct{} expected *expectedNode }{ { name: "GlobalExcludeSet", - inputCollections: func(t *testing.T) []data.Collection { + inputCollections: func(t *testing.T) []data.BackupCollection { return nil }, inputExcludes: map[string]struct{}{ @@ -1417,7 +1417,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto }, { name: "MovesSubtree", - inputCollections: func(t *testing.T) []data.Collection { + inputCollections: func(t *testing.T) []data.BackupCollection { newPath := makePath( t, []string{testTenant, service, testUser, category, testInboxDir + "2"}, @@ -1428,7 +1428,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto mc.PrevPath = inboxPath mc.ColState = data.MovedState - return []data.Collection{mc} + return []data.BackupCollection{mc} }, expected: expectedTreeWithChildren( []string{ @@ -1474,7 +1474,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto }, { name: "MovesChildAfterAncestorMove", - inputCollections: func(t *testing.T) []data.Collection { + inputCollections: func(t *testing.T) []data.BackupCollection { newInboxPath := makePath( t, []string{testTenant, service, testUser, category, testInboxDir + "2"}, @@ -1494,7 +1494,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto work.PrevPath = workPath work.ColState = data.MovedState - return []data.Collection{inbox, work} + return []data.BackupCollection{inbox, work} }, expected: expectedTreeWithChildren( []string{ @@ -1540,7 +1540,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto }, { name: "MovesChildAfterAncestorDelete", - inputCollections: func(t *testing.T) []data.Collection { + inputCollections: func(t *testing.T) []data.BackupCollection { newWorkPath := makePath( t, []string{testTenant, service, testUser, category, workDir}, @@ -1555,7 +1555,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto work.PrevPath = workPath work.ColState = data.MovedState - return []data.Collection{inbox, work} + return []data.BackupCollection{inbox, work} }, expected: expectedTreeWithChildren( []string{ @@ -1579,7 +1579,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto }, { name: "ReplaceDeletedDirectory", - inputCollections: func(t *testing.T) []data.Collection { + inputCollections: func(t *testing.T) []data.BackupCollection { personal := mockconnector.NewMockExchangeCollection(personalPath, 0) personal.PrevPath = personalPath personal.ColState = data.DeletedState @@ -1588,7 +1588,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto work.PrevPath = workPath work.ColState = data.MovedState - return []data.Collection{personal, work} + return []data.BackupCollection{personal, work} }, expected: expectedTreeWithChildren( []string{ @@ -1620,7 +1620,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto }, { name: "ReplaceDeletedDirectoryWithNew", - inputCollections: func(t *testing.T) []data.Collection { + inputCollections: func(t *testing.T) []data.BackupCollection { personal := mockconnector.NewMockExchangeCollection(personalPath, 0) personal.PrevPath = personalPath personal.ColState = data.DeletedState @@ 
-1630,7 +1630,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto newCol.Names[0] = workFileName2 newCol.Data[0] = workFileData2 - return []data.Collection{personal, newCol} + return []data.BackupCollection{personal, newCol} }, expected: expectedTreeWithChildren( []string{ @@ -1671,7 +1671,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto }, { name: "ReplaceMovedDirectory", - inputCollections: func(t *testing.T) []data.Collection { + inputCollections: func(t *testing.T) []data.BackupCollection { newPersonalPath := makePath( t, []string{testTenant, service, testUser, category, personalDir}, @@ -1686,7 +1686,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto work.PrevPath = workPath work.ColState = data.MovedState - return []data.Collection{personal, work} + return []data.BackupCollection{personal, work} }, expected: expectedTreeWithChildren( []string{ @@ -1729,7 +1729,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto }, { name: "MoveDirectoryAndMergeItems", - inputCollections: func(t *testing.T) []data.Collection { + inputCollections: func(t *testing.T) []data.BackupCollection { newPersonalPath := makePath( t, []string{testTenant, service, testUser, category, workDir}, @@ -1744,7 +1744,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto personal.Names[1] = testFileName4 personal.Data[1] = testFileData4 - return []data.Collection{personal} + return []data.BackupCollection{personal} }, expected: expectedTreeWithChildren( []string{ @@ -1793,7 +1793,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto }, { name: "MoveParentDeleteFileNoMergeSubtreeMerge", - inputCollections: func(t *testing.T) []data.Collection { + inputCollections: func(t *testing.T) []data.BackupCollection { newInboxPath := makePath( t, []string{testTenant, service, testUser, category, personalDir}, @@ -1824,7 +1824,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto work.Names[0] = testFileName6 work.Data[0] = testFileData6 - return []data.Collection{inbox, work} + return []data.BackupCollection{inbox, work} }, expected: expectedTreeWithChildren( []string{ @@ -1876,7 +1876,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto }, { name: "NoMoveParentDeleteFileNoMergeSubtreeMerge", - inputCollections: func(t *testing.T) []data.Collection { + inputCollections: func(t *testing.T) []data.BackupCollection { inbox := mockconnector.NewMockExchangeCollection(inboxPath, 1) inbox.PrevPath = inboxPath inbox.ColState = data.NotMovedState @@ -1892,7 +1892,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto work.Names[0] = testFileName6 work.Data[0] = testFileData6 - return []data.Collection{inbox, work} + return []data.BackupCollection{inbox, work} }, expected: expectedTreeWithChildren( []string{ @@ -2105,7 +2105,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSkipsDeletedSubtre snapshotRoot: getBaseSnapshot(), } - collections := []data.Collection{mc} + collections := []data.BackupCollection{mc} // Returned directory structure should look like: // - a-tenant @@ -2361,7 +2361,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSelectsCorrectSubt }, } - collections := []data.Collection{mc} + collections := []data.BackupCollection{mc} dirTree, err := inflateDirTree( ctx, diff --git 
a/src/internal/kopia/wrapper.go b/src/internal/kopia/wrapper.go index 8c1aaeec7..d78e874c6 100644 --- a/src/internal/kopia/wrapper.go +++ b/src/internal/kopia/wrapper.go @@ -118,7 +118,7 @@ type IncrementalBase struct { func (w Wrapper) BackupCollections( ctx context.Context, previousSnapshots []IncrementalBase, - collections []data.Collection, + collections []data.BackupCollection, globalExcludeSet map[string]struct{}, tags map[string]string, buildTreeWithBase bool, @@ -368,7 +368,7 @@ func (w Wrapper) RestoreMultipleItems( snapshotID string, paths []path.Path, bcounter ByteCounter, -) ([]data.Collection, error) { +) ([]data.RestoreCollection, error) { ctx, end := D.Span(ctx, "kopia:restoreMultipleItems") defer end() @@ -409,7 +409,7 @@ func (w Wrapper) RestoreMultipleItems( c.streams = append(c.streams, ds) } - res := make([]data.Collection, 0, len(cols)) + res := make([]data.RestoreCollection, 0, len(cols)) for _, c := range cols { res = append(res, c) } diff --git a/src/internal/kopia/wrapper_test.go b/src/internal/kopia/wrapper_test.go index 54bbb4c8e..4761adc33 100644 --- a/src/internal/kopia/wrapper_test.go +++ b/src/internal/kopia/wrapper_test.go @@ -52,7 +52,7 @@ var ( func testForFiles( t *testing.T, expected map[string][]byte, - collections []data.Collection, + collections []data.RestoreCollection, ) { t.Helper() @@ -196,7 +196,7 @@ func (suite *KopiaIntegrationSuite) TearDownTest() { } func (suite *KopiaIntegrationSuite) TestBackupCollections() { - collections := []data.Collection{ + collections := []data.BackupCollection{ mockconnector.NewMockExchangeCollection( suite.testPath1, 5, @@ -353,7 +353,7 @@ func (suite *KopiaIntegrationSuite) TestRestoreAfterCompressionChange() { stats, _, _, err := w.BackupCollections( ctx, nil, - []data.Collection{dc1, dc2}, + []data.BackupCollection{dc1, dc2}, nil, tags, true, @@ -382,6 +382,41 @@ func (suite *KopiaIntegrationSuite) TestRestoreAfterCompressionChange() { testForFiles(t, expected, result) } +type mockBackupCollection struct { + path path.Path + streams []data.Stream +} + +func (c *mockBackupCollection) Items() <-chan data.Stream { + res := make(chan data.Stream) + + go func() { + defer close(res) + + for _, s := range c.streams { + res <- s + } + }() + + return res +} + +func (c mockBackupCollection) FullPath() path.Path { + return c.path +} + +func (c mockBackupCollection) PreviousPath() path.Path { + return nil +} + +func (c mockBackupCollection) State() data.CollectionState { + return data.NewState +} + +func (c mockBackupCollection) DoNotMergeItems() bool { + return false +} + func (suite *KopiaIntegrationSuite) TestBackupCollections_ReaderError() { t := suite.T() @@ -396,8 +431,8 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_ReaderError() { tags[k] = "" } - collections := []data.Collection{ - &kopiaDataCollection{ + collections := []data.BackupCollection{ + &mockBackupCollection{ path: suite.testPath1, streams: []data.Stream{ &mockconnector.MockExchangeData{ @@ -410,7 +445,7 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_ReaderError() { }, }, }, - &kopiaDataCollection{ + &mockBackupCollection{ path: suite.testPath2, streams: []data.Stream{ &mockconnector.MockExchangeData{ @@ -477,7 +512,7 @@ type backedupFile struct { func (suite *KopiaIntegrationSuite) TestBackupCollectionsHandlesNoCollections() { table := []struct { name string - collections []data.Collection + collections []data.BackupCollection }{ { name: "NilCollections", @@ -485,7 +520,7 @@ func (suite *KopiaIntegrationSuite) 
TestBackupCollectionsHandlesNoCollections() }, { name: "EmptyCollections", - collections: []data.Collection{}, + collections: []data.BackupCollection{}, }, } @@ -624,10 +659,10 @@ func (suite *KopiaSimpleRepoIntegrationSuite) SetupTest() { suite.w = &Wrapper{c} - collections := []data.Collection{} + collections := []data.BackupCollection{} for _, parent := range []path.Path{suite.testPath1, suite.testPath2} { - collection := &kopiaDataCollection{path: parent} + collection := &mockBackupCollection{path: parent} for _, item := range suite.files[parent.String()] { collection.streams = append( @@ -723,7 +758,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() { excludeItem bool expectedCachedItems int expectedUncachedItems int - cols func() []data.Collection + cols func() []data.BackupCollection backupIDCheck require.ValueAssertionFunc restoreCheck assert.ErrorAssertionFunc }{ @@ -732,7 +767,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() { excludeItem: true, expectedCachedItems: len(suite.filesByPath) - 1, expectedUncachedItems: 0, - cols: func() []data.Collection { + cols: func() []data.BackupCollection { return nil }, backupIDCheck: require.NotEmpty, @@ -743,7 +778,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() { // No snapshot should be made since there were no changes. expectedCachedItems: 0, expectedUncachedItems: 0, - cols: func() []data.Collection { + cols: func() []data.BackupCollection { return nil }, // Backup doesn't run. @@ -753,14 +788,14 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() { name: "NoExcludeItemWithChanges", expectedCachedItems: len(suite.filesByPath), expectedUncachedItems: 1, - cols: func() []data.Collection { + cols: func() []data.BackupCollection { c := mockconnector.NewMockExchangeCollection( suite.testPath1, 1, ) c.ColState = data.NotMovedState - return []data.Collection{c} + return []data.BackupCollection{c} }, backupIDCheck: require.NotEmpty, restoreCheck: assert.NoError, diff --git a/src/internal/operations/backup.go b/src/internal/operations/backup.go index 31912585f..a3fde4cb8 100644 --- a/src/internal/operations/backup.go +++ b/src/internal/operations/backup.go @@ -308,9 +308,9 @@ func produceBackupDataCollections( ctx context.Context, gc *connector.GraphConnector, sel selectors.Selector, - metadata []data.Collection, + metadata []data.RestoreCollection, ctrlOpts control.Options, -) ([]data.Collection, map[string]struct{}, error) { +) ([]data.BackupCollection, map[string]struct{}, error) { complete, closer := observe.MessageWithCompletion(ctx, observe.Safe("Discovering items to backup")) defer func() { complete <- struct{}{} @@ -331,7 +331,7 @@ type backuper interface { BackupCollections( ctx context.Context, bases []kopia.IncrementalBase, - cs []data.Collection, + cs []data.BackupCollection, excluded map[string]struct{}, tags map[string]string, buildTreeWithBase bool, @@ -389,7 +389,7 @@ func consumeBackupDataCollections( tenantID string, reasons []kopia.Reason, mans []*kopia.ManifestEntry, - cs []data.Collection, + cs []data.BackupCollection, excludes map[string]struct{}, backupID model.StableID, isIncremental bool, diff --git a/src/internal/operations/backup_integration_test.go b/src/internal/operations/backup_integration_test.go index b3ea617d9..277e5a40d 100644 --- a/src/internal/operations/backup_integration_test.go +++ b/src/internal/operations/backup_integration_test.go @@ -387,10 +387,10 @@ func buildCollections( tenant, user string, dest 
control.RestoreDestination, colls []incrementalCollection, -) []data.Collection { +) []data.RestoreCollection { t.Helper() - collections := make([]data.Collection, 0, len(colls)) + collections := make([]data.RestoreCollection, 0, len(colls)) for _, c := range colls { pth := toDataLayerPath( diff --git a/src/internal/operations/backup_test.go b/src/internal/operations/backup_test.go index 4adc70b30..6e9afda8a 100644 --- a/src/internal/operations/backup_test.go +++ b/src/internal/operations/backup_test.go @@ -36,20 +36,20 @@ import ( type mockRestorer struct { gotPaths []path.Path - colls []data.Collection - collsByID map[string][]data.Collection // snapshotID: []Collection + colls []data.RestoreCollection + collsByID map[string][]data.RestoreCollection // snapshotID: []RestoreCollection err error onRestore restoreFunc } -type restoreFunc func(id string, ps []path.Path) ([]data.Collection, error) +type restoreFunc func(id string, ps []path.Path) ([]data.RestoreCollection, error) func (mr *mockRestorer) buildRestoreFunc( t *testing.T, oid string, ops []path.Path, ) { - mr.onRestore = func(id string, ps []path.Path) ([]data.Collection, error) { + mr.onRestore = func(id string, ps []path.Path) ([]data.RestoreCollection, error) { assert.Equal(t, oid, id, "manifest id") checkPaths(t, ops, ps) @@ -62,7 +62,7 @@ func (mr *mockRestorer) RestoreMultipleItems( snapshotID string, paths []path.Path, bc kopia.ByteCounter, -) ([]data.Collection, error) { +) ([]data.RestoreCollection, error) { mr.gotPaths = append(mr.gotPaths, paths...) if mr.onRestore != nil { @@ -85,7 +85,7 @@ func checkPaths(t *testing.T, expected, got []path.Path) { type mockBackuper struct { checkFunc func( bases []kopia.IncrementalBase, - cs []data.Collection, + cs []data.BackupCollection, tags map[string]string, buildTreeWithBase bool, ) @@ -94,7 +94,7 @@ type mockBackuper struct { func (mbu mockBackuper) BackupCollections( ctx context.Context, bases []kopia.IncrementalBase, - cs []data.Collection, + cs []data.BackupCollection, excluded map[string]struct{}, tags map[string]string, buildTreeWithBase bool, @@ -559,7 +559,7 @@ func (suite *BackupOpSuite) TestBackupOperation_ConsumeBackupDataCollections_Pat mbu := &mockBackuper{ checkFunc: func( bases []kopia.IncrementalBase, - cs []data.Collection, + cs []data.BackupCollection, tags map[string]string, buildTreeWithBase bool, ) { diff --git a/src/internal/operations/manifests.go b/src/internal/operations/manifests.go index c0ba35e43..dcfb415c9 100644 --- a/src/internal/operations/manifests.go +++ b/src/internal/operations/manifests.go @@ -46,10 +46,10 @@ func produceManifestsAndMetadata( tenantID string, getMetadata bool, errs fault.Adder, -) ([]*kopia.ManifestEntry, []data.Collection, bool, error) { +) ([]*kopia.ManifestEntry, []data.RestoreCollection, bool, error) { var ( metadataFiles = graph.AllMetadataFileNames() - collections []data.Collection + collections []data.RestoreCollection ) ms, err := mr.FetchPrevSnapshotManifests( @@ -183,7 +183,7 @@ func collectMetadata( man *kopia.ManifestEntry, fileNames []string, tenantID string, -) ([]data.Collection, error) { +) ([]data.RestoreCollection, error) { paths := []path.Path{} for _, fn := range fileNames { diff --git a/src/internal/operations/manifests_test.go b/src/internal/operations/manifests_test.go index 93cdb982f..e1be7df54 100644 --- a/src/internal/operations/manifests_test.go +++ b/src/internal/operations/manifests_test.go @@ -49,9 +49,8 @@ func (mg mockGetDetailsIDer) GetDetailsIDFromBackupID( } type mockColl struct { - id 
string // for comparisons - p path.Path - prevP path.Path + id string // for comparisons + p path.Path } func (mc mockColl) Items() <-chan data.Stream { @@ -62,18 +61,6 @@ func (mc mockColl) FullPath() path.Path { return mc.p } -func (mc mockColl) PreviousPath() path.Path { - return mc.prevP -} - -func (mc mockColl) State() data.CollectionState { - return data.NewState -} - -func (mc mockColl) DoNotMergeItems() bool { - return false -} - // --------------------------------------------------------------------------- // tests // --------------------------------------------------------------------------- @@ -447,7 +434,7 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() { getMeta bool assertErr assert.ErrorAssertionFunc assertB assert.BoolAssertionFunc - expectDCS []data.Collection + expectDCS []data.RestoreCollection expectNilMans bool }{ { @@ -550,7 +537,7 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() { { name: "man missing backup id", mr: mockManifestRestorer{ - mockRestorer: mockRestorer{collsByID: map[string][]data.Collection{ + mockRestorer: mockRestorer{collsByID: map[string][]data.RestoreCollection{ "id": {mockColl{id: "id_coll"}}, }}, mans: []*kopia.ManifestEntry{makeMan(path.EmailCategory, "id", "", "")}, @@ -577,7 +564,7 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() { { name: "one complete, one incomplete", mr: mockManifestRestorer{ - mockRestorer: mockRestorer{collsByID: map[string][]data.Collection{ + mockRestorer: mockRestorer{collsByID: map[string][]data.RestoreCollection{ "id": {mockColl{id: "id_coll"}}, "incmpl_id": {mockColl{id: "incmpl_id_coll"}}, }}, @@ -591,12 +578,12 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() { getMeta: true, assertErr: assert.NoError, assertB: assert.True, - expectDCS: []data.Collection{mockColl{id: "id_coll"}}, + expectDCS: []data.RestoreCollection{mockColl{id: "id_coll"}}, }, { name: "single valid man", mr: mockManifestRestorer{ - mockRestorer: mockRestorer{collsByID: map[string][]data.Collection{ + mockRestorer: mockRestorer{collsByID: map[string][]data.RestoreCollection{ "id": {mockColl{id: "id_coll"}}, }}, mans: []*kopia.ManifestEntry{makeMan(path.EmailCategory, "id", "", "bid")}, @@ -606,12 +593,12 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() { getMeta: true, assertErr: assert.NoError, assertB: assert.True, - expectDCS: []data.Collection{mockColl{id: "id_coll"}}, + expectDCS: []data.RestoreCollection{mockColl{id: "id_coll"}}, }, { name: "multiple valid mans", mr: mockManifestRestorer{ - mockRestorer: mockRestorer{collsByID: map[string][]data.Collection{ + mockRestorer: mockRestorer{collsByID: map[string][]data.RestoreCollection{ "mail": {mockColl{id: "mail_coll"}}, "contact": {mockColl{id: "contact_coll"}}, }}, @@ -625,7 +612,7 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() { getMeta: true, assertErr: assert.NoError, assertB: assert.True, - expectDCS: []data.Collection{ + expectDCS: []data.RestoreCollection{ mockColl{id: "mail_coll"}, mockColl{id: "contact_coll"}, }, diff --git a/src/internal/operations/restore.go b/src/internal/operations/restore.go index a87243a9e..d29c0bf40 100644 --- a/src/internal/operations/restore.go +++ b/src/internal/operations/restore.go @@ -88,7 +88,7 @@ func (op RestoreOperation) validate() error { // pointer wrapping the values, while those values // get populated asynchronously. 
type restoreStats struct { - cs []data.Collection + cs []data.RestoreCollection gc *support.ConnectorOperationStatus bytesRead *stats.ByteCounter resourceCount int @@ -104,7 +104,7 @@ type restorer interface { snapshotID string, paths []path.Path, bc kopia.ByteCounter, - ) ([]data.Collection, error) + ) ([]data.RestoreCollection, error) } // Run begins a synchronous restore operation. diff --git a/src/internal/operations/restore_test.go b/src/internal/operations/restore_test.go index 7b0e0d211..a3974ae64 100644 --- a/src/internal/operations/restore_test.go +++ b/src/internal/operations/restore_test.go @@ -10,6 +10,7 @@ import ( "github.com/stretchr/testify/suite" "github.com/alcionai/corso/src/internal/connector/exchange" + "github.com/alcionai/corso/src/internal/connector/mockconnector" "github.com/alcionai/corso/src/internal/connector/support" "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/events" @@ -61,7 +62,7 @@ func (suite *RestoreOpSuite) TestRestoreOperation_PersistResults() { bytesRead: &stats.ByteCounter{ NumBytes: 42, }, - cs: []data.Collection{&exchange.Collection{}}, + cs: []data.RestoreCollection{&mockconnector.MockExchangeDataCollection{}}, gc: &support.ConnectorOperationStatus{ ObjectCount: 1, Successful: 1, @@ -82,7 +83,7 @@ func (suite *RestoreOpSuite) TestRestoreOperation_PersistResults() { expectErr: assert.NoError, stats: restoreStats{ bytesRead: &stats.ByteCounter{}, - cs: []data.Collection{}, + cs: []data.RestoreCollection{}, gc: &support.ConnectorOperationStatus{}, }, }, diff --git a/src/internal/streamstore/streamstore.go b/src/internal/streamstore/streamstore.go index d2fd4b654..a97cace98 100644 --- a/src/internal/streamstore/streamstore.go +++ b/src/internal/streamstore/streamstore.go @@ -76,7 +76,7 @@ func (ss *streamStore) WriteBackupDetails( backupStats, _, _, err := ss.kw.BackupCollections( ctx, nil, - []data.Collection{dc}, + []data.BackupCollection{dc}, nil, nil, false) @@ -164,7 +164,7 @@ func (ss *streamStore) DeleteBackupDetails( return nil } -// streamCollection is a data.Collection used to persist +// streamCollection is a data.BackupCollection used to persist // a single data stream type streamCollection struct { // folderPath indicates what level in the hierarchy this collection From 93059e8430d80a579fb013cf3dfa36adce697e42 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 8 Feb 2023 07:52:30 +0000 Subject: [PATCH 22/45] =?UTF-8?q?=E2=AC=86=EF=B8=8F=20Bump=20github.com/aw?= =?UTF-8?q?s/aws-sdk-go=20from=201.44.195=20to=201.44.196=20in=20/src=20(#?= =?UTF-8?q?2437)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.44.195 to 1.44.196.
Release notes

Sourced from github.com/aws/aws-sdk-go's releases.

Release v1.44.196 (2023-02-07)

Service Client Updates

  • service/transfer: Updates service documentation
    • Updated the documentation for the ImportCertificate API call, and added examples.
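
For readers curious how these automated bumps are produced: updates like this one come from a `dependabot.yml` config checked into the repo. Below is a minimal sketch for the `go_modules` ecosystem named in the compatibility badge; the `/src` directory is taken from this PR's subject, and the daily schedule is purely an assumption.

```yaml
# Hypothetical .github/dependabot.yml excerpt. The gomod ecosystem and
# /src directory are implied by this PR; the schedule is assumed.
version: 2
updates:
  - package-ecosystem: "gomod"
    directory: "/src"
    schedule:
      interval: "daily"
```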
[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/aws/aws-sdk-go&package-manager=go_modules&previous-version=1.44.195&new-version=1.44.196)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)
--- src/go.mod | 2 +- src/go.sum | 6 ++---- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/src/go.mod b/src/go.mod index 8b42d8352..24d305739 100644 --- a/src/go.mod +++ b/src/go.mod @@ -5,7 +5,7 @@ go 1.19 require ( github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0 github.com/alcionai/clues v0.0.0-20230202001016-cbda58c9de9e - github.com/aws/aws-sdk-go v1.44.195 + github.com/aws/aws-sdk-go v1.44.196 github.com/aws/aws-xray-sdk-go v1.8.0 github.com/google/uuid v1.3.0 github.com/hashicorp/go-multierror v1.1.1 diff --git a/src/go.sum b/src/go.sum index 79638570c..61562d958 100644 --- a/src/go.sum +++ b/src/go.sum @@ -62,8 +62,8 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5 github.com/alessio/shellescape v1.4.1 h1:V7yhSDDn8LP4lc4jS8pFkt0zCnzVJlG5JXy9BVKJUX0= github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY3JY= github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= -github.com/aws/aws-sdk-go v1.44.195 h1:d5xFL0N83Fpsq2LFiHgtBUHknCRUPGHdOlCWt/jtOJs= -github.com/aws/aws-sdk-go v1.44.195/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go v1.44.196 h1:e3h9M7fpnRHwHOohYmYjgVbcCBvkxKwZiT7fGrxRn28= +github.com/aws/aws-sdk-go v1.44.196/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/aws/aws-xray-sdk-go v1.8.0 h1:0xncHZ588wB/geLjbM/esoW3FOEThWy2TJyb4VXfLFY= github.com/aws/aws-xray-sdk-go v1.8.0/go.mod h1:7LKe47H+j3evfvS1+q0wzpoaGXGrF3mUsfM+thqVO+A= github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= @@ -611,8 +611,6 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18= -golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= From f782d1b63422f6dcaccae8dafe37697f584b935e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 8 Feb 2023 13:39:49 +0000 Subject: [PATCH 23/45] =?UTF-8?q?=E2=AC=86=EF=B8=8F=20Bump=20docker/build-?= =?UTF-8?q?push-action=20from=203=20to=204=20(#2402)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [docker/build-push-action](https://github.com/docker/build-push-action) from 3 to 4.
Release notes

Sourced from docker/build-push-action's releases.

v4.0.0

Note

Buildx v0.10 enables support for a minimal SLSA Provenance attestation, which requires support for OCI-compliant multi-platform images. This may introduce issues with registry and runtime support (e.g. Google Cloud Run and AWS Lambda). You can optionally disable the default provenance attestation functionality using provenance: false.

Full Changelog: https://github.com/docker/build-push-action/compare/v3.3.1...v4.0.0

v3.3.1

Full Changelog: https://github.com/docker/build-push-action/compare/v3.3.0...v3.3.1

v3.3.0

Note

Buildx v0.10 enables support for a minimal SLSA Provenance attestation, which requires support for OCI-compliant multi-platform images. This may introduce issues with registry and runtime support (e.g. Google Cloud Run and AWS Lambda). You can optionally disable the default provenance attestation functionality using provenance: false.

Full Changelog: https://github.com/docker/build-push-action/compare/v3.2.0...v3.3.0

v3.2.0

Full Changelog: https://github.com/docker/build-push-action/compare/v3.1.1...v3.2.0

v3.1.1

Full Changelog: https://github.com/docker/build-push-action/compare/v3.1.0...v3.1.1

v3.1.0

  • no-cache-filters input by @​crazy-max (#653)
  • Bump @​actions/github from 5.0.1 to 5.0.3 (#619)
  • Bump @​actions/core from 1.6.0 to 1.9.0 (#620 #637)
  • Bump csv-parse from 5.0.4 to 5.3.0 (#623 #650)

Full Changelog: https://github.com/docker/build-push-action/compare/v3.0.0...v3.1.0

Commits
  • 3b5e802 Merge pull request #784 from crazy-max/enable-provenance
  • 02d3266 update generated content
  • f403daf revert disable provenance by default if not set
  • See full diff in compare view
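
The Buildx v0.10 note above is the practical takeaway of this major-version bump: v4 emits a SLSA provenance attestation by default, which some registries and runtimes reject. A minimal sketch of opting out via the documented `provenance` input, modeled on the build step in this PR's workflow diff below (the step shown is illustrative, not part of this PR):

```yaml
# Illustrative GitHub Actions step: disables the default SLSA
# provenance attestation added by Buildx v0.10 / build-push-action v4.
- name: Build image and push to GitHub Container Registry
  uses: docker/build-push-action@v4
  with:
    context: .
    file: ./build/Dockerfile
    provenance: false
```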

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=docker/build-push-action&package-manager=github_actions&previous-version=3&new-version=4)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)
--- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 170a63357..7af8bb5de 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -501,7 +501,7 @@ jobs: # deploy the image - name: Build image and push to GitHub Container Registry - uses: docker/build-push-action@v3 + uses: docker/build-push-action@v4 with: context: . file: ./build/Dockerfile From a4b50a1ec05cd44bb361a861b0203dd8dc9a1c92 Mon Sep 17 00:00:00 2001 From: Danny Date: Wed, 8 Feb 2023 08:58:51 -0500 Subject: [PATCH 24/45] GC: Item mail attachment Handling (#2422) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Description Support for `itemAttachment.Mail` added to GC restore pipeline. Nested attachments within items disabled due to Kiota bug. Issue #2428 created to re-enable `itemAttachment.Item.Attachments` when the bug is patched. ## Does this PR need a docs update or release note? - [x] 🏢 : Yes. Known issues and ChangeLog updates required. ## Type of change - [x] :sunflower: Feature - [x] :bug: Bugfix ## Issue(s) * related to https://github.com/microsoft/kiota-serialization-json-go/issues/61 * closes #2372 ## Test Plan - [x] :zap: Unit test --- CHANGELOG.md | 9 + src/internal/connector/exchange/attachment.go | 5 +- .../connector/exchange/restore_test.go | 50 ++++- .../connector/exchange/service_restore.go | 14 ++ .../mockconnector/mock_data_message.go | 186 ++++++++++++++++++ .../connector/support/m365Transform.go | 123 +++++++++--- website/docs/support/known-issues.md | 2 + 7 files changed, 357 insertions(+), 32 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a8d75a352..cccc745a7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,15 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). ## [Unreleased] (alpha) +### Added + +### Fixed +- Support for item.Attachment:Mail restore + +### Changed + +### Known Issues +- Nested attachments are currently not restored due to an [issue](https://github.com/microsoft/kiota-serialization-json-go/issues/61) discovered in the Graph APIs ## [v0.3.0] (alpha) - 2023-2-07 diff --git a/src/internal/connector/exchange/attachment.go b/src/internal/connector/exchange/attachment.go index 94e6dbc6a..075ab09a6 100644 --- a/src/internal/connector/exchange/attachment.go +++ b/src/internal/connector/exchange/attachment.go @@ -68,8 +68,9 @@ func uploadAttachment( name = *prev.GetName() } - // TODO: Update to support PII protection - logger.Ctx(ctx).Infow("item attachment uploads are not supported ", + // TODO: (rkeepers) Update to support PII protection + msg := "item attachment restore not supported for this type. skipping upload." 
+ logger.Ctx(ctx).Infow(msg, "err", err, "attachment_name", name, "attachment_type", attachmentType, diff --git a/src/internal/connector/exchange/restore_test.go b/src/internal/connector/exchange/restore_test.go index 360d15266..e6db75129 100644 --- a/src/internal/connector/exchange/restore_test.go +++ b/src/internal/connector/exchange/restore_test.go @@ -130,12 +130,13 @@ type containerDeleter interface { // TestRestoreExchangeObject verifies path.Category usage for restored objects func (suite *ExchangeRestoreSuite) TestRestoreExchangeObject() { - a := tester.NewM365Account(suite.T()) + t := suite.T() + a := tester.NewM365Account(t) m365, err := a.M365Config() - require.NoError(suite.T(), err) + require.NoError(t, err) service, err := createService(m365) - require.NoError(suite.T(), err) + require.NoError(t, err) deleters := map[path.CategoryType]containerDeleter{ path.EmailCategory: suite.ac.Mail(), @@ -187,6 +188,48 @@ func (suite *ExchangeRestoreSuite) TestRestoreExchangeObject() { return *folder.GetId() }, }, + { + name: "Test Mail: Item Attachment_Mail", + bytes: mockconnector.GetMockMessageWithItemAttachmentMail("Mail Item Attachment"), + category: path.EmailCategory, + destination: func(t *testing.T, ctx context.Context) string { + folderName := "TestRestoreMailItemAttachment: " + common.FormatSimpleDateTime(now) + folder, err := suite.ac.Mail().CreateMailFolder(ctx, userID, folderName) + require.NoError(t, err) + + return *folder.GetId() + }, + }, + { + name: "Test Mail: Hydrated Item Attachment Mail", + bytes: mockconnector.GetMockMessageWithNestedItemAttachmentMail(t, + mockconnector.GetMockMessageBytes("Basic Item Attachment"), + "Mail Item Attachment", + ), + category: path.EmailCategory, + destination: func(t *testing.T, ctx context.Context) string { + folderName := "TestRestoreMailBasicItemAttachment: " + common.FormatSimpleDateTime(now) + folder, err := suite.ac.Mail().CreateMailFolder(ctx, userID, folderName) + require.NoError(t, err) + + return *folder.GetId() + }, + }, + { + name: "Test Mail: Hydrated Item Attachment Mail One Attach", + bytes: mockconnector.GetMockMessageWithNestedItemAttachmentMail(t, + mockconnector.GetMockMessageWithDirectAttachment("Item Attachment Included"), + "Mail Item Attachment", + ), + category: path.EmailCategory, + destination: func(t *testing.T, ctx context.Context) string { + folderName := "ItemMailAttachmentwAttachment " + common.FormatSimpleDateTime(now) + folder, err := suite.ac.Mail().CreateMailFolder(ctx, userID, folderName) + require.NoError(t, err) + + return *folder.GetId() + }, + }, { // Restore will upload the Message without uploading the attachment name: "Test Mail: Item Attachment_NestedEvent", bytes: mockconnector.GetMockMessageWithNestedItemAttachmentEvent("Nested Item Attachment"), @@ -291,6 +334,7 @@ func (suite *ExchangeRestoreSuite) TestRestoreExchangeObject() { ) assert.NoError(t, err, support.ConnectorStackErrorTrace(err)) assert.NotNil(t, info, "item info was not populated") + assert.NotNil(t, deleters) assert.NoError(t, deleters[test.category].DeleteContainer(ctx, userID, destination)) }) } diff --git a/src/internal/connector/exchange/service_restore.go b/src/internal/connector/exchange/service_restore.go index 6da25450d..bb0179c76 100644 --- a/src/internal/connector/exchange/service_restore.go +++ b/src/internal/connector/exchange/service_restore.go @@ -283,6 +283,20 @@ func SendMailToBackStore( for _, attachment := range attached { if err := uploadAttachment(ctx, uploader, attachment); err != nil { + if 
attachment.GetOdataType() != nil && + *attachment.GetOdataType() == "#microsoft.graph.itemAttachment" { + var name string + if attachment.GetName() != nil { + name = *attachment.GetName() + } + + logger.Ctx(ctx).Infow( + "item attachment upload not successful. content not accepted by M365 server", + "Attachment Name", name) + + continue + } + errs = support.WrapAndAppend( fmt.Sprintf("uploading attachment for message %s: %s", id, support.ConnectorStackErrorTrace(err)), diff --git a/src/internal/connector/mockconnector/mock_data_message.go b/src/internal/connector/mockconnector/mock_data_message.go index 4c2e84235..50ff3345c 100644 --- a/src/internal/connector/mockconnector/mock_data_message.go +++ b/src/internal/connector/mockconnector/mock_data_message.go @@ -3,6 +3,12 @@ package mockconnector import ( "encoding/base64" "fmt" + "testing" + + js "github.com/microsoft/kiota-serialization-json-go" + "github.com/microsoftgraph/msgraph-sdk-go/models" + "github.com/pkg/errors" + "github.com/stretchr/testify/require" "github.com/alcionai/corso/src/internal/common" ) @@ -360,6 +366,143 @@ func GetMockMessageWithItemAttachmentEvent(subject string) []byte { return []byte(message) } +func GetMockMessageWithItemAttachmentMail(subject string) []byte { + //nolint:lll + // Order of fields: + // 1. subject + // 2. alias + // 3. sender address + // 4. from address + // 5. toRecipients email address + template := `{ + "@odata.context": "https://graph.microsoft.com/v1.0/$metadata#users('f435c656-f8b2-4d71-93c3-6e092f52a167')/messages(attachments())/$entity", + "@odata.etag": "W/\"CQAAABYAAAB8wYc0thTTTYl3RpEYIUq+AADKTqr3\"", + "id": "AAMkAGQ1NzViZTdhLTEwMTMtNGJjNi05YWI2LTg4NWRlZDA2Y2UxOABGAAAAAAAPvVwUramXT7jlSGpVU8_7BwB8wYc0thTTTYl3RpEYIUq_AAAAAAEMAAB8wYc0thTTTYl3RpEYIUq_AADKo35SAAA=", + "createdDateTime": "2023-02-06T20:03:40Z", + "lastModifiedDateTime": "2023-02-06T20:03:42Z", + "changeKey": "CQAAABYAAAB8wYc0thTTTYl3RpEYIUq+AADKTqr3", + "categories": [], + "receivedDateTime": "2023-02-06T20:03:40Z", + "sentDateTime": "2023-02-06T20:03:37Z", + "hasAttachments": true, + "internetMessageId": "", + "subject": "%[1]s", + "bodyPreview": "Nested Items are not encapsulated in a trivial manner. Review the findings.\r\n\r\nBest,\r\n\r\nYour Test Case", + "importance": "normal", + "parentFolderId": "AQMkAGQ1NzViZTdhLTEwMTMtNGJjNi05YWI2LTg4ADVkZWQwNmNlMTgALgAAAw_9XBStqZdPuOVIalVTz7sBAHzBhzS2FNNNiXdGkRghSr4AAAIBDAAAAA==", + "conversationId": "AAQkAGQ1NzViZTdhLTEwMTMtNGJjNi05YWI2LTg4NWRlZDA2Y2UxOAAQAPe8pEQOrBxLvFNhfDtMyEI=", + "conversationIndex": "AQHZOmYA97ykRA6sHEu8U2F8O0zIQg==", + "isDeliveryReceiptRequested": false, + "isReadReceiptRequested": false, + "isRead": false, + "isDraft": false, + "webLink": "https://outlook.office365.com/owal=ReadMessageItem", + "inferenceClassification": "focused", + "body": { + "contentType": "html", + "content": "\r\n
Nested Items are not encapsulated in a trivial manner. Review the findings.

Best, 

Your Test Case
" + }, + "sender": { + "emailAddress": { + "name": "%[2]s", + "address": "%[3]s" + } + }, + "from": { + "emailAddress": { + "name": "%[2]s", + "address": "%[4]s" + } + }, + "toRecipients": [ + { + "emailAddress": { + "name": "%[2]s", + "address": "%[5]s" + } + } + ], + "ccRecipients": [], + "bccRecipients": [], + "replyTo": [], + "flag": { + "flagStatus": "notFlagged" + }, + "attachments": [ + { + "@odata.context": "https://graph.microsoft.com/v1.0/$metadata#/attachments(microsoft.graph.itemAttachment/item())/$entity", + "@odata.type": "#microsoft.graph.itemAttachment", + "id": "AAMkAGQ1NzViZTdhLTEwMTMtNGJjNi05YWI2LTg4NWRlZDA2Y2UxOABGAAAAAAAPvVwUramXT7jlSGpVU8_7BwB8wYc0thTTTYl3RpEYIUq_AAAAAAEMAAB8wYc0thTTTYl3RpEYIUq_AADKo35SAAABEgAQABv3spWM8g5IriSvYJe5kO8=", + "lastModifiedDateTime": "2023-02-06T20:03:40Z", + "name": "Not Something Small. 28-Jul-2022_20:53:33 Different", + "contentType": null, + "size": 10959, + "isInline": false, + "item@odata.associationLink": "https://graph.microsoft.com/v1.0/users('f435c656-f8b2-4d71-93c3-6e092f52a167')/messages('')/$ref", + "item@odata.navigationLink": "https://graph.microsoft.com/v1.0/users('f435c656-f8b2-4d71-93c3-6e092f52a167')/messages('')", + "item": { + "@odata.type": "#microsoft.graph.message", + "id": "", + "createdDateTime": "2023-02-06T20:03:40Z", + "lastModifiedDateTime": "2023-02-06T20:03:40Z", + "receivedDateTime": "2022-07-28T20:53:33Z", + "sentDateTime": "2022-07-28T20:53:33Z", + "hasAttachments": false, + "internetMessageId": "", + "subject": "Not Something Small. 28-Jul-2022_20:53:33 Different", + "bodyPreview": "I've been going through with the changing of messages. It shouldn't have the same calls, right? Call Me?\r\n\r\nWe want to be able to send multiple messages and we want to be able to respond and do other things that make sense for our users. In this case. Let", + "importance": "normal", + "conversationId": "AAQkAGQ1NzViZTdhLTEwMTMtNGJjNi05YWI2LTg4NWRlZDA2Y2UxOAAQAOlAM0OrVQlHkhUZeZMPxgg=", + "conversationIndex": "AQHYosQZ6UAzQ6tVCUeSFRl5kw/GCA==", + "isDeliveryReceiptRequested": false, + "isReadReceiptRequested": false, + "isRead": true, + "isDraft": false, + "webLink": "https://outlook.office365.com/owa/?AttachmentItemID=Aviewmodel=ItemAttachment", + "body": { + "contentType": "html", + "content": "\r\n

I've been going through with the changing of messages. It shouldn't have the same calls, right? Call Me?

 

We want to be able to send multiple messages and we want to be able to respond and do other things that make sense for our users. In this case. Let’s consider a Mailbox

" + }, + "sender": { + "emailAddress": { + "name": "%[2]s", + "address": "%[3]s" + } + }, + "from": { + "emailAddress": { + "name": "%[2]s", + "address": "%[4]s" + } + }, + "toRecipients": [ + { + "emailAddress": { + "name": "Direct Report", + "address": "notAvailable@8qzvrj.onmicrosoft.com" + } + } + ], + "flag": { + "flagStatus": "notFlagged" + } + } + } + ] + }` + + message := fmt.Sprintf( + template, + subject, + defaultAlias, + defaultMessageSender, + defaultMessageFrom, + defaultMessageTo, + ) + + return []byte(message) +} + func GetMockMessageWithNestedItemAttachmentEvent(subject string) []byte { //nolint:lll // Order of fields: @@ -545,3 +688,46 @@ func GetMockMessageWithNestedItemAttachmentEvent(subject string) []byte { return []byte(message) } + +func GetMockMessageWithNestedItemAttachmentMail(t *testing.T, nested []byte, subject string) []byte { + base := GetMockMessageBytes(subject) + message, err := hydrateMessage(base) + require.NoError(t, err) + + nestedMessage, err := hydrateMessage(nested) + require.NoError(t, err) + + iaNode := models.NewItemAttachment() + attachmentSize := int32(len(nested)) + iaNode.SetSize(&attachmentSize) + + internalName := "Nested Message" + iaNode.SetName(&internalName) + iaNode.SetItem(nestedMessage) + message.SetAttachments([]models.Attachmentable{iaNode}) + + wtr := js.NewJsonSerializationWriter() + err = wtr.WriteObjectValue("", message) + require.NoError(t, err) + + byteArray, err := wtr.GetSerializedContent() + require.NoError(t, err) + + return byteArray +} + +func hydrateMessage(byteArray []byte) (models.Messageable, error) { + parseNode, err := js.NewJsonParseNodeFactory().GetRootParseNode("application/json", byteArray) + if err != nil { + return nil, errors.Wrap(err, "deserializing bytes into base m365 object") + } + + anObject, err := parseNode.GetObjectValue(models.CreateMessageFromDiscriminatorValue) + if err != nil { + return nil, errors.Wrap(err, "parsing m365 object factory") + } + + message := anObject.(models.Messageable) + + return message, nil +} diff --git a/src/internal/connector/support/m365Transform.go b/src/internal/connector/support/m365Transform.go index 7fa207c9e..4f8227a29 100644 --- a/src/internal/connector/support/m365Transform.go +++ b/src/internal/connector/support/m365Transform.go @@ -7,7 +7,11 @@ import ( "github.com/microsoftgraph/msgraph-sdk-go/models" ) -const itemAttachment = "#microsoft.graph.itemAttachment" +//========================================================== +// m365Transform.go contains utility functions that +// either add, modify, or remove fields from M365 +// objects for interacton with M365 services +//========================================================= // CloneMessageableFields places data from original data into new message object. // SingleLegacyValueProperty is not populated during this operation @@ -282,14 +286,35 @@ func cloneColumnDefinitionable(orig models.ColumnDefinitionable) models.ColumnDe return newColumn } +// =============================================================================================== +// Sanitization section +// Set of functions that support ItemAttachemtable object restoration. +// These attachments can be nested as well as possess one of the other +// reference types. To ensure proper upload, each interior`item` requires +// that certain fields be modified. 
+// ItemAttachment: +// https://learn.microsoft.com/en-us/graph/api/resources/itemattachment?view=graph-rest-1.0 +// https://learn.microsoft.com/en-us/exchange/client-developer/exchange-web-services/attachments-and-ews-in-exchange +// https://learn.microsoft.com/en-us/exchange/client-developer/exchange-web-services/folders-and-items-in-ews-in-exchange +// =============================================================================================== +// M365 models possess a field, OData.Type, which indicates +// the intended model in string format. +// The constants listed here identify the itemAttachment ODataType +// values currently supported for Restore operations. +// +//nolint:lll +const ( + itemAttachment = "#microsoft.graph.itemAttachment" + eventItemType = "#microsoft.graph.event" + mailItemType = "#microsoft.graph.message" +) + // ToItemAttachment transforms internal item, OutlookItemables, into // objects that are able to be uploaded into M365. -// Supported Internal Items: -// - Events func ToItemAttachment(orig models.Attachmentable) (models.Attachmentable, error) { transform, ok := orig.(models.ItemAttachmentable) - supported := "#microsoft.graph.event" - if !ok { // Shouldn't ever happen return nil, fmt.Errorf("transforming attachment to item attachment") } @@ -298,7 +323,7 @@ func ToItemAttachment(orig models.Attachmentable) (models.Attachmentable, error) itemType := item.GetOdataType() switch *itemType { - case supported: + case eventItemType: event := item.(models.Eventable) newEvent, err := sanitizeEvent(event) @@ -308,12 +333,45 @@ func ToItemAttachment(orig models.Attachmentable) (models.Attachmentable, error) transform.SetItem(newEvent) + return transform, nil + case mailItemType: + message := item.(models.Messageable) + + newMessage, err := sanitizeMessage(message) + if err != nil { + return nil, err + } + + transform.SetItem(newMessage) + return transform, nil default: return nil, fmt.Errorf("exiting ToItemAttachment: %s not supported", *itemType) } } + +// TODO #2428 (dadam39): re-apply nested attachments for itemAttachments +// func sanitizeAttachments(attached []models.Attachmentable) ([]models.Attachmentable, error) { +// attachments := make([]models.Attachmentable, len(attached)) +// for _, ax := range attached { +// if *ax.GetOdataType() == itemAttachment { +// newAttachment, err := ToItemAttachment(ax) +// if err != nil { +// return nil, err +// } +// attachments = append(attachments, newAttachment) +// continue +// } +// attachments = append(attachments, ax) +// } +// return attachments, nil +// } + // sanitizeEvent transfers data into event object and // removes unique IDs from the M365 object func sanitizeEvent(orig models.Eventable) (models.Eventable, error) { @@ -324,7 +382,9 @@ func sanitizeEvent(orig models.Eventable) (models.Eventable, error) { newEvent.SetCalendar(orig.GetCalendar()) newEvent.SetCreatedDateTime(orig.GetCreatedDateTime()) newEvent.SetEnd(orig.GetEnd()) - newEvent.SetHasAttachments(orig.GetHasAttachments()) + // TODO: dadams39 Nested attachments not supported + // Upstream: https://github.com/microsoft/kiota-serialization-json-go/issues/61 + newEvent.SetHasAttachments(nil) newEvent.SetHideAttendees(orig.GetHideAttendees()) newEvent.SetImportance(orig.GetImportance()) newEvent.SetIsAllDay(orig.GetIsAllDay()) @@ -337,7 +397,7 @@ func sanitizeEvent(orig models.Eventable) (models.Eventable, error) { newEvent.SetSubject(orig.GetSubject()) newEvent.SetType(orig.GetType()) - //
Sanitation + // Sanitation NOTE // isDraft and isOrganizer *bool ptr's have to be removed completely // from JSON in order for POST method to succeed. // Current as of 2/2/2023 @@ -346,25 +406,34 @@ func sanitizeEvent(orig models.Eventable) (models.Eventable, error) { newEvent.SetIsDraft(nil) newEvent.SetAdditionalData(orig.GetAdditionalData()) - attached := orig.GetAttachments() - attachments := make([]models.Attachmentable, len(attached)) - - for _, ax := range attached { - if *ax.GetOdataType() == itemAttachment { - newAttachment, err := ToItemAttachment(ax) - if err != nil { - return nil, err - } - - attachments = append(attachments, newAttachment) - - continue - } - - attachments = append(attachments, ax) - } - - newEvent.SetAttachments(attachments) + // TODO #2428 (dadam39): re-apply nested attachments for itemAttachments + // Upstream: https://github.com/microsoft/kiota-serialization-json-go/issues/61 + // attachments, err := sanitizeAttachments(message.GetAttachments()) + // if err != nil { + // return nil, err + // } + newEvent.SetAttachments(nil) return newEvent, nil } + +func sanitizeMessage(orig models.Messageable) (models.Messageable, error) { + message := ToMessage(orig) + + // TODO #2428 (dadam39): re-apply nested attachments for itemAttachments + // Upstream: https://github.com/microsoft/kiota-serialization-json-go/issues/61 + // attachments, err := sanitizeAttachments(message.GetAttachments()) + // if err != nil { + // return nil, err + // } + message.SetAttachments(nil) + + // The following fields are set to nil to + // not interfere with M365 guard checks. + message.SetHasAttachments(nil) + message.SetParentFolderId(nil) + message.SetInternetMessageHeaders(nil) + message.SetIsDraft(nil) + + return message, nil +} diff --git a/website/docs/support/known-issues.md b/website/docs/support/known-issues.md index af2bc2db9..7b9c56052 100644 --- a/website/docs/support/known-issues.md +++ b/website/docs/support/known-issues.md @@ -12,6 +12,8 @@ Below is a list of known Corso issues and limitations: Advanced restore options such as in-place restore, or restore to a specific folder or to a different account aren't yet supported. +* Restoration of Nested attachments within Exchange Mail or Calendars aren't yet supported. + * Provides no guarantees about whether data moved, added, or deleted in M365 while a backup is being created will be included in the running backup. Future backups run when the data isn't modified will include the data. From adbe85b47fb3e9dc9536556517907a9a6315a31f Mon Sep 17 00:00:00 2001 From: neha_gupta Date: Wed, 8 Feb 2023 22:07:35 +0530 Subject: [PATCH 25/45] Fixed twice display of "Logging to file" line (#2438) ## Description Issue - https://github.com/alcionai/corso/issues/2404 Fixed display of redundant messages. ## Does this PR need a docs update or release note? 
- [x] :no_entry: No ## Type of change - [x] :bug: Bugfix ## Issue(s) * https://github.com/alcionai/corso/issues/2404 ## Test Plan - [ ] :muscle: Manual --- src/pkg/logger/logger.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/pkg/logger/logger.go b/src/pkg/logger/logger.go index a6b5aa4dd..6438f3563 100644 --- a/src/pkg/logger/logger.go +++ b/src/pkg/logger/logger.go @@ -11,8 +11,6 @@ import ( "github.com/spf13/pflag" "go.uber.org/zap" "go.uber.org/zap/zapcore" - - "github.com/alcionai/corso/src/cli/print" ) // Default location for writing logs, initialized in platform specific files @@ -123,7 +121,6 @@ func PreloadLoggingFlags() (string, string) { if logfile != "stdout" && logfile != "stderr" { LogFile = logfile logdir := filepath.Dir(logfile) - print.Info(context.Background(), "Logging to file: "+logfile) err := os.MkdirAll(logdir, 0o755) if err != nil { From 3d244c9feaa73a3a4b0c0c952c9e91716bc3b922 Mon Sep 17 00:00:00 2001 From: Danny Date: Wed, 8 Feb 2023 13:09:55 -0500 Subject: [PATCH 26/45] GC: SharePoint: Backup: Abstract / Serialize (#2187) ## Description Changes address updates to `sharePoint.Collection.Populate()`. - SharePoint Collections support `Lists` and `Pages`. Drives are supported in OneDrive at this time. - List's serialize function is abstracted to support `Pages`. Collection needs to support List and Pages support. Additionally, needs to use a similar interface as in `exchange` to ensure retry and async collection population ## Does this PR need a docs update or release note? - [x] :white_check_mark: Yes, it's included ## Type of change - [x] :sunflower: Feature ## Issue(s) * related to #2071 ## Test Plan Test can be completed locally. Per Issue #2086, the Beta library interferes with overall caching. `TestSharePointPageCollection_Populate()` inspects changes - [x] :zap: Unit test --- .../connector/sharepoint/api/pages.go | 2 +- .../connector/sharepoint/api/pages_test.go | 4 +- .../connector/sharepoint/collection.go | 134 +++++++++++++++--- .../connector/sharepoint/collection_test.go | 127 ++++++++++++----- .../connector/sharepoint/data_collections.go | 5 +- .../sharepoint/data_collections_test.go | 44 ++++++ .../connector/sharepoint/helper_test.go | 10 ++ 7 files changed, 267 insertions(+), 59 deletions(-) diff --git a/src/internal/connector/sharepoint/api/pages.go b/src/internal/connector/sharepoint/api/pages.go index 16eb3f0ae..a62fbc40a 100644 --- a/src/internal/connector/sharepoint/api/pages.go +++ b/src/internal/connector/sharepoint/api/pages.go @@ -19,7 +19,7 @@ import ( // GetSitePages retrieves a collection of Pages related to the give Site. 
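// Pages are only available through the beta SDK, so callers must supply a discover.BetaService client.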
// Returns error if error experienced during the call -func GetSitePage( +func GetSitePages( ctx context.Context, serv *discover.BetaService, siteID string, diff --git a/src/internal/connector/sharepoint/api/pages_test.go b/src/internal/connector/sharepoint/api/pages_test.go index c6295748f..58d48ef11 100644 --- a/src/internal/connector/sharepoint/api/pages_test.go +++ b/src/internal/connector/sharepoint/api/pages_test.go @@ -61,7 +61,7 @@ func (suite *SharePointPageSuite) TestFetchPages() { } } -func (suite *SharePointPageSuite) TestGetSitePage() { +func (suite *SharePointPageSuite) TestGetSitePages() { ctx, flush := tester.NewContext() defer flush() @@ -71,7 +71,7 @@ func (suite *SharePointPageSuite) TestGetSitePage() { require.NotNil(t, tuples) jobs := []string{tuples[0].ID} - pages, err := api.GetSitePage(ctx, suite.service, suite.siteID, jobs) + pages, err := api.GetSitePages(ctx, suite.service, suite.siteID, jobs) assert.NoError(t, err) assert.NotEmpty(t, pages) } diff --git a/src/internal/connector/sharepoint/collection.go b/src/internal/connector/sharepoint/collection.go index b657c294e..ca07399eb 100644 --- a/src/internal/connector/sharepoint/collection.go +++ b/src/internal/connector/sharepoint/collection.go @@ -3,18 +3,22 @@ package sharepoint import ( "bytes" "context" + "fmt" "io" "time" + absser "github.com/microsoft/kiota-abstractions-go/serialization" kw "github.com/microsoft/kiota-serialization-json-go" - "github.com/microsoftgraph/msgraph-sdk-go/models" + "github.com/pkg/errors" "github.com/alcionai/corso/src/internal/connector/discovery/api" "github.com/alcionai/corso/src/internal/connector/graph" + sapi "github.com/alcionai/corso/src/internal/connector/sharepoint/api" "github.com/alcionai/corso/src/internal/connector/support" "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/observe" "github.com/alcionai/corso/src/pkg/backup/details" + "github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/logger" "github.com/alcionai/corso/src/pkg/path" ) @@ -27,6 +31,7 @@ const ( Unknown DataCategory = iota List Drive + Pages ) var ( @@ -36,6 +41,12 @@ var ( _ data.StreamModTime = &Item{} ) +type numMetrics struct { + attempts int + success int + totalBytes int64 +} + // Collection is the SharePoint.List implementation of data.Collection. SharePoint.Libraries collections are supported // by the oneDrive.Collection as the calls are identical for populating the Collection type Collection struct { @@ -46,7 +57,9 @@ type Collection struct { // jobs contain the SharePoint.Site.ListIDs for the associated list(s). 
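// For Pages collections, the same slice carries the SitePage IDs instead.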
jobs []string // M365 IDs of the items of this collection + category DataCategory service graph.Servicer + ctrl control.Options betaService *api.BetaService statusUpdater support.StatusUpdater } @@ -55,6 +68,7 @@ type Collection struct { func NewCollection( folderPath path.Path, service graph.Servicer, + category DataCategory, statusUpdater support.StatusUpdater, ) *Collection { c := &Collection{ @@ -63,6 +77,7 @@ func NewCollection( data: make(chan data.Stream, collectionChannelBufferSize), service: service, statusUpdater: statusUpdater, + category: category, } return c @@ -160,10 +175,9 @@ func (sc *Collection) finishPopulation(ctx context.Context, attempts, success in // populate utility function to retrieve data from back store for a given collection func (sc *Collection) populate(ctx context.Context) { var ( - objects, success int - totalBytes, arrayLength int64 - errs error - writer = kw.NewJsonSerializationWriter() + metrics numMetrics + errs error + writer = kw.NewJsonSerializationWriter() ) // TODO: Insert correct ID for CollectionProgress @@ -176,25 +190,50 @@ func (sc *Collection) populate(ctx context.Context) { defer func() { close(colProgress) - sc.finishPopulation(ctx, objects, success, totalBytes, errs) + sc.finishPopulation(ctx, metrics.attempts, metrics.success, metrics.totalBytes, errs) }() - // Retrieve list data from M365 + // Switch retrieval function based on category + switch sc.category { + case List: + metrics, errs = sc.retrieveLists(ctx, writer, colProgress) + case Pages: + metrics, errs = sc.retrievePages(ctx, writer, colProgress) + } +} + +// retrieveLists utility function for collection that downloads and serializes +// models.Listable objects based on M365 IDs from the jobs field. +func (sc *Collection) retrieveLists( + ctx context.Context, + wtr *kw.JsonSerializationWriter, + progress chan<- struct{}, +) (numMetrics, error) { + var ( + errs error + metrics numMetrics + ) + lists, err := loadSiteLists(ctx, sc.service, sc.fullPath.ResourceOwner(), sc.jobs) if err != nil { - errs = support.WrapAndAppend(sc.fullPath.ResourceOwner(), err, errs) + return metrics, errors.Wrap(err, sc.fullPath.ResourceOwner()) } - objects += len(lists) - // Write Data and Send + metrics.attempts += len(lists) + // For each models.Listable, object is serialized and the metrics are collected. + // The progress is objected via the passed in channel. 
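// Serialization failures are accumulated into errs unless FailFast is set, so a single bad list does not abort the remaining items.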
for _, lst := range lists { - byteArray, err := serializeListContent(writer, lst) + byteArray, err := serializeContent(wtr, lst) if err != nil { errs = support.WrapAndAppend(*lst.GetId(), err, errs) + if sc.ctrl.FailFast { + return metrics, errs + } + continue } - arrayLength = int64(len(byteArray)) + arrayLength := int64(len(byteArray)) if arrayLength > 0 { t := time.Now() @@ -202,9 +241,9 @@ func (sc *Collection) populate(ctx context.Context) { t = *t1 } - totalBytes += arrayLength + metrics.totalBytes += arrayLength - success++ + metrics.success++ sc.data <- &Item{ id: *lst.GetId(), data: io.NopCloser(bytes.NewReader(byteArray)), @@ -212,15 +251,76 @@ func (sc *Collection) populate(ctx context.Context) { modTime: t, } - colProgress <- struct{}{} + progress <- struct{}{} } } + + return metrics, nil } -func serializeListContent(writer *kw.JsonSerializationWriter, lst models.Listable) ([]byte, error) { +func (sc *Collection) retrievePages( + ctx context.Context, + wtr *kw.JsonSerializationWriter, + progress chan<- struct{}, +) (numMetrics, error) { + var ( + errs error + metrics numMetrics + ) + + betaService := sc.betaService + if betaService == nil { + return metrics, fmt.Errorf("beta service not found in collection") + } + + pages, err := sapi.GetSitePages(ctx, betaService, sc.fullPath.ResourceOwner(), sc.jobs) + if err != nil { + return metrics, errors.Wrap(err, sc.fullPath.ResourceOwner()) + } + + metrics.attempts = len(pages) + // For each models.Pageable, object is serialize and the metrics are collected and returned. + // Pageable objects are not supported in v1.0 of msgraph at this time. + // TODO: Verify Parsable interface supported with modified-Pageable + for _, pg := range pages { + byteArray, err := serializeContent(wtr, pg) + if err != nil { + errs = support.WrapAndAppend(*pg.GetId(), err, errs) + if sc.ctrl.FailFast { + return metrics, errs + } + + continue + } + + arrayLength := int64(len(byteArray)) + + if arrayLength > 0 { + t := time.Now() + if t1 := pg.GetLastModifiedDateTime(); t1 != nil { + t = *t1 + } + + metrics.totalBytes += arrayLength + metrics.success++ + sc.data <- &Item{ + id: *pg.GetId(), + data: io.NopCloser(bytes.NewReader(byteArray)), + info: sharePointPageInfo(pg, arrayLength), + modTime: t, + } + + progress <- struct{}{} + } + } + + return numMetrics{}, nil +} + +func serializeContent(writer *kw.JsonSerializationWriter, obj absser.Parsable) ([]byte, error) { defer writer.Close() - err := writer.WriteObjectValue("", lst) + err := writer.WriteObjectValue("", obj) if err != nil { return nil, err } diff --git a/src/internal/connector/sharepoint/collection_test.go b/src/internal/connector/sharepoint/collection_test.go index c2b1ac830..494287457 100644 --- a/src/internal/connector/sharepoint/collection_test.go +++ b/src/internal/connector/sharepoint/collection_test.go @@ -14,6 +14,7 @@ import ( "github.com/alcionai/corso/src/internal/common" "github.com/alcionai/corso/src/internal/connector/mockconnector" "github.com/alcionai/corso/src/internal/connector/onedrive" + "github.com/alcionai/corso/src/internal/connector/sharepoint/api" "github.com/alcionai/corso/src/internal/connector/support" "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/tester" @@ -50,7 +51,7 @@ func TestSharePointCollectionSuite(t *testing.T) { suite.Run(t, new(SharePointCollectionSuite)) } -func (suite *SharePointCollectionSuite) TestSharePointDataReader_Valid() { +func (suite *SharePointCollectionSuite) TestCollection_Item_Read() { t := suite.T() m := 
[]byte("test message") name := "aFile" @@ -65,50 +66,105 @@ func (suite *SharePointCollectionSuite) TestSharePointDataReader_Valid() { assert.Equal(t, readData, m) } -// TestSharePointListCollection tests basic functionality to create +// TestListCollection tests basic functionality to create // SharePoint collection and to use the data stream channel. -func (suite *SharePointCollectionSuite) TestSharePointListCollection() { +func (suite *SharePointCollectionSuite) TestCollection_Items() { t := suite.T() + tenant := "some" + user := "user" + dirRoot := "directory" + tables := []struct { + name, itemName string + category DataCategory + getDir func(t *testing.T) path.Path + getItem func(t *testing.T, itemName string) *Item + }{ + { + name: "List", + itemName: "MockListing", + category: List, + getDir: func(t *testing.T) path.Path { + dir, err := path.Builder{}.Append(dirRoot). + ToDataLayerSharePointPath( + tenant, + user, + path.ListsCategory, + false) + require.NoError(t, err) - ow := kioser.NewJsonSerializationWriter() - listing := mockconnector.GetMockListDefault("Mock List") - testName := "MockListing" - listing.SetDisplayName(&testName) + return dir + }, + getItem: func(t *testing.T, name string) *Item { + ow := kioser.NewJsonSerializationWriter() + listing := mockconnector.GetMockListDefault(name) + listing.SetDisplayName(&name) - err := ow.WriteObjectValue("", listing) - require.NoError(t, err) + err := ow.WriteObjectValue("", listing) + require.NoError(t, err) - byteArray, err := ow.GetSerializedContent() - require.NoError(t, err) + byteArray, err := ow.GetSerializedContent() + require.NoError(t, err) - dir, err := path.Builder{}.Append("directory"). - ToDataLayerSharePointPath( - "some", - "user", - path.ListsCategory, - false) - require.NoError(t, err) + data := &Item{ + id: name, + data: io.NopCloser(bytes.NewReader(byteArray)), + info: sharePointListInfo(listing, int64(len(byteArray))), + } - col := NewCollection(dir, nil, nil) - col.data <- &Item{ - id: testName, - data: io.NopCloser(bytes.NewReader(byteArray)), - info: sharePointListInfo(listing, int64(len(byteArray))), + return data + }, + }, + { + name: "Pages", + itemName: "MockPages", + category: Pages, + getDir: func(t *testing.T) path.Path { + dir, err := path.Builder{}.Append(dirRoot). 
+ ToDataLayerSharePointPath( + tenant, + user, + path.PagesCategory, + false) + require.NoError(t, err) + + return dir + }, + getItem: func(t *testing.T, itemName string) *Item { + byteArray := mockconnector.GetMockPage(itemName) + page, err := support.CreatePageFromBytes(byteArray) + require.NoError(t, err) + + data := &Item{ + id: itemName, + data: io.NopCloser(bytes.NewReader(byteArray)), + info: api.PageInfo(page, int64(len(byteArray))), + } + + return data + }, + }, } - readItems := []data.Stream{} + for _, test := range tables { + t.Run(test.name, func(t *testing.T) { + col := NewCollection(test.getDir(t), nil, test.category, nil) + col.data <- test.getItem(t, test.itemName) - for item := range col.Items() { - readItems = append(readItems, item) + readItems := []data.Stream{} + + for item := range col.Items() { + readItems = append(readItems, item) + } + + require.Equal(t, len(readItems), 1) + item := readItems[0] + shareInfo, ok := item.(data.StreamInfo) + require.True(t, ok) + require.NotNil(t, shareInfo.Info()) + require.NotNil(t, shareInfo.Info().SharePoint) + assert.Equal(t, test.itemName, shareInfo.Info().SharePoint.ItemName) + }) } - - require.Equal(t, len(readItems), 1) - item := readItems[0] - shareInfo, ok := item.(data.StreamInfo) - require.True(t, ok) - require.NotNil(t, shareInfo.Info()) - require.NotNil(t, shareInfo.Info().SharePoint) - assert.Equal(t, testName, shareInfo.Info().SharePoint.ItemName) } func (suite *SharePointCollectionSuite) TestCollectPages() { @@ -122,7 +178,6 @@ func (suite *SharePointCollectionSuite) TestCollectPages() { nil, account.AzureTenantID, suite.siteID, - nil, &MockGraphService{}, control.Defaults(), ) @@ -131,7 +186,7 @@ func (suite *SharePointCollectionSuite) TestCollectPages() { } // TestRestoreListCollection verifies Graph Restore API for the List Collection -func (suite *SharePointCollectionSuite) TestRestoreListCollection() { +func (suite *SharePointCollectionSuite) TestListCollection_Restore() { ctx, flush := tester.NewContext() defer flush() diff --git a/src/internal/connector/sharepoint/data_collections.go b/src/internal/connector/sharepoint/data_collections.go index adb8a215e..ce17c9c8d 100644 --- a/src/internal/connector/sharepoint/data_collections.go +++ b/src/internal/connector/sharepoint/data_collections.go @@ -118,7 +118,7 @@ func collectLists( return nil, errors.Wrapf(err, "failed to create collection path for site: %s", siteID) } - collection := NewCollection(dir, serv, updater.UpdateStatus) + collection := NewCollection(dir, serv, List, updater.UpdateStatus) collection.AddJob(tuple.id) spcs = append(spcs, collection) @@ -172,7 +172,6 @@ func collectPages( creds account.M365Config, serv graph.Servicer, tenantID, siteID string, - scope selectors.SharePointScope, updater statusUpdater, ctrlOpts control.Options, ) ([]data.BackupCollection, error) { @@ -204,7 +203,7 @@ func collectPages( return nil, errors.Wrapf(err, "failed to create collection path for site: %s", siteID) } - collection := NewCollection(dir, serv, updater.UpdateStatus) + collection := NewCollection(dir, serv, Pages, updater.UpdateStatus) collection.betaService = betaService collection.AddJob(tuple.ID) diff --git a/src/internal/connector/sharepoint/data_collections_test.go b/src/internal/connector/sharepoint/data_collections_test.go index 10a1e25b0..775cda23f 100644 --- a/src/internal/connector/sharepoint/data_collections_test.go +++ b/src/internal/connector/sharepoint/data_collections_test.go @@ -5,10 +5,12 @@ import ( 
"github.com/microsoftgraph/msgraph-sdk-go/models" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "github.com/alcionai/corso/src/internal/connector/graph" "github.com/alcionai/corso/src/internal/connector/onedrive" + "github.com/alcionai/corso/src/internal/connector/support" "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/selectors" @@ -128,3 +130,45 @@ func driveItem(name string, path string, isFile bool) models.DriveItemable { return item } + +type SharePointPagesSuite struct { + suite.Suite +} + +func TestSharePointPagesSuite(t *testing.T) { + tester.RunOnAny( + t, + tester.CorsoCITests, + tester.CorsoGraphConnectorTests, + tester.CorsoGraphConnectorSharePointTests) + suite.Run(t, new(SharePointPagesSuite)) +} + +func (suite *SharePointPagesSuite) TestCollectPages() { + ctx, flush := tester.NewContext() + defer flush() + + t := suite.T() + siteID := tester.M365SiteID(t) + a := tester.NewM365Account(t) + account, err := a.M365Config() + require.NoError(t, err) + + updateFunc := func(*support.ConnectorOperationStatus) { + t.Log("Updater Called ") + } + + updater := &MockUpdater{UpdateState: updateFunc} + + col, err := collectPages( + ctx, + account, + nil, + account.AzureTenantID, + siteID, + updater, + control.Options{}, + ) + assert.NoError(t, err) + assert.NotEmpty(t, col) +} diff --git a/src/internal/connector/sharepoint/helper_test.go b/src/internal/connector/sharepoint/helper_test.go index 30d589389..536ee20df 100644 --- a/src/internal/connector/sharepoint/helper_test.go +++ b/src/internal/connector/sharepoint/helper_test.go @@ -17,6 +17,16 @@ import ( // --------------------------------------------------------------------------- type MockGraphService struct{} +type MockUpdater struct { + UpdateState func(*support.ConnectorOperationStatus) +} + +func (mu *MockUpdater) UpdateStatus(input *support.ConnectorOperationStatus) { + if mu.UpdateState != nil { + mu.UpdateState(input) + } +} + //------------------------------------------------------------ // Interface Functions: @See graph.Service //------------------------------------------------------------ From c63aa94204dd964879be92ae3641c7f3dacdb584 Mon Sep 17 00:00:00 2001 From: ashmrtn Date: Wed, 8 Feb 2023 10:59:41 -0800 Subject: [PATCH 27/45] Wrapper for Collections to make them RestoreCollections (#2431) ## Description Create a wrapper struct that provides a `Fetch(ctx, name) (Stream, error)` function that always returns `ErrNotFound`. A future PR is going to expand the `RestoreCollection` interface to include that function and I wanted to reduce the amount of chaff that would come out of it This PR just creates the wrapper and moves `ErrNotFound` from the kopia package to `data` package to avoid import cycles ## Does this PR need a docs update or release note? 
- [ ] :white_check_mark: Yes, it's included - [ ] :clock1: Yes, but in a later PR - [x] :no_entry: No ## Type of change - [x] :sunflower: Feature - [ ] :bug: Bugfix - [ ] :world_map: Documentation - [x] :robot: Test - [ ] :computer: CI/Deployment - [ ] :broom: Tech Debt/Cleanup ## Issue(s) * #1944 ## Test Plan - [ ] :muscle: Manual - [x] :zap: Unit test - [ ] :green_heart: E2E --- src/cli/backup/exchange.go | 16 ++++++++-------- src/cli/backup/onedrive.go | 6 +++--- src/cli/backup/sharepoint.go | 16 ++++++++-------- src/cli/restore/exchange.go | 4 ++-- src/cli/restore/onedrive.go | 4 ++-- src/cli/restore/sharepoint.go | 4 ++-- src/internal/data/data_collection.go | 14 ++++++++++++++ src/internal/kopia/model_store.go | 18 +++++++++--------- src/internal/kopia/model_store_test.go | 11 ++++++----- src/internal/kopia/wrapper.go | 2 +- src/internal/kopia/wrapper_test.go | 2 +- src/internal/operations/manifests.go | 4 ++-- 12 files changed, 58 insertions(+), 43 deletions(-) diff --git a/src/cli/backup/exchange.go b/src/cli/backup/exchange.go index 5a12dba87..149da9f71 100644 --- a/src/cli/backup/exchange.go +++ b/src/cli/backup/exchange.go @@ -12,7 +12,7 @@ import ( "github.com/alcionai/corso/src/cli/options" . "github.com/alcionai/corso/src/cli/print" "github.com/alcionai/corso/src/cli/utils" - "github.com/alcionai/corso/src/internal/kopia" + "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/model" "github.com/alcionai/corso/src/pkg/backup" "github.com/alcionai/corso/src/pkg/backup/details" @@ -323,16 +323,16 @@ func createExchangeCmd(cmd *cobra.Command, args []string) error { return nil } -func exchangeBackupCreateSelectors(userIDs, data []string) *selectors.ExchangeBackup { +func exchangeBackupCreateSelectors(userIDs, cats []string) *selectors.ExchangeBackup { sel := selectors.NewExchangeBackup(userIDs) - if len(data) == 0 { + if len(cats) == 0 { sel.Include(sel.ContactFolders(selectors.Any())) sel.Include(sel.MailFolders(selectors.Any())) sel.Include(sel.EventCalendars(selectors.Any())) } - for _, d := range data { + for _, d := range cats { switch d { case dataContacts: sel.Include(sel.ContactFolders(selectors.Any())) @@ -346,12 +346,12 @@ func exchangeBackupCreateSelectors(userIDs, data []string) *selectors.ExchangeBa return sel } -func validateExchangeBackupCreateFlags(userIDs, data []string) error { +func validateExchangeBackupCreateFlags(userIDs, cats []string) error { if len(userIDs) == 0 { return errors.New("--user requires one or more email addresses or the wildcard '*'") } - for _, d := range data { + for _, d := range cats { if d != dataContacts && d != dataEmail && d != dataEvents { return errors.New( d + " is an unrecognized data type; must be one of " + dataContacts + ", " + dataEmail + ", or " + dataEvents) @@ -394,7 +394,7 @@ func listExchangeCmd(cmd *cobra.Command, args []string) error { if len(backupID) > 0 { b, err := r.Backup(ctx, model.StableID(backupID)) if err != nil { - if errors.Is(err, kopia.ErrNotFound) { + if errors.Is(err, data.ErrNotFound) { return Only(ctx, errors.Errorf("No backup exists with the id %s", backupID)) } @@ -502,7 +502,7 @@ func runDetailsExchangeCmd( d, _, errs := r.BackupDetails(ctx, backupID) // TODO: log/track recoverable errors if errs.Err() != nil { - if errors.Is(errs.Err(), kopia.ErrNotFound) { + if errors.Is(errs.Err(), data.ErrNotFound) { return nil, errors.Errorf("No backup exists with the id %s", backupID) } diff --git a/src/cli/backup/onedrive.go b/src/cli/backup/onedrive.go index 
2b60432ff..a99ad6d2b 100644 --- a/src/cli/backup/onedrive.go +++ b/src/cli/backup/onedrive.go @@ -12,7 +12,7 @@ import ( "github.com/alcionai/corso/src/cli/options" . "github.com/alcionai/corso/src/cli/print" "github.com/alcionai/corso/src/cli/utils" - "github.com/alcionai/corso/src/internal/kopia" + "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/model" "github.com/alcionai/corso/src/pkg/backup" "github.com/alcionai/corso/src/pkg/backup/details" @@ -294,7 +294,7 @@ func listOneDriveCmd(cmd *cobra.Command, args []string) error { if len(backupID) > 0 { b, err := r.Backup(ctx, model.StableID(backupID)) if err != nil { - if errors.Is(err, kopia.ErrNotFound) { + if errors.Is(err, data.ErrNotFound) { return Only(ctx, errors.Errorf("No backup exists with the id %s", backupID)) } @@ -394,7 +394,7 @@ func runDetailsOneDriveCmd( d, _, errs := r.BackupDetails(ctx, backupID) // TODO: log/track recoverable errors if errs.Err() != nil { - if errors.Is(errs.Err(), kopia.ErrNotFound) { + if errors.Is(errs.Err(), data.ErrNotFound) { return nil, errors.Errorf("no backup exists with the id %s", backupID) } diff --git a/src/cli/backup/sharepoint.go b/src/cli/backup/sharepoint.go index c2a155334..12dd002d4 100644 --- a/src/cli/backup/sharepoint.go +++ b/src/cli/backup/sharepoint.go @@ -14,7 +14,7 @@ import ( "github.com/alcionai/corso/src/cli/utils" "github.com/alcionai/corso/src/internal/connector" "github.com/alcionai/corso/src/internal/connector/graph" - "github.com/alcionai/corso/src/internal/kopia" + "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/model" "github.com/alcionai/corso/src/pkg/backup" "github.com/alcionai/corso/src/pkg/backup/details" @@ -266,7 +266,7 @@ func createSharePointCmd(cmd *cobra.Command, args []string) error { return nil } -func validateSharePointBackupCreateFlags(sites, weburls, data []string) error { +func validateSharePointBackupCreateFlags(sites, weburls, cats []string) error { if len(sites) == 0 && len(weburls) == 0 { return errors.New( "requires one or more --" + @@ -276,7 +276,7 @@ func validateSharePointBackupCreateFlags(sites, weburls, data []string) error { ) } - for _, d := range data { + for _, d := range cats { if d != dataLibraries && d != dataPages { return errors.New( d + " is an unrecognized data type; either " + dataLibraries + "or " + dataPages, @@ -290,7 +290,7 @@ func validateSharePointBackupCreateFlags(sites, weburls, data []string) error { // TODO: users might specify a data type, this only supports AllData(). 
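// sharePointBackupCreateSelectors resolves the given site IDs and web URLs into a single owner set, then includes the requested data categories, defaulting to AllData() when none are specified. // e.g. sharePointBackupCreateSelectors(ctx, sites, nil, []string{dataLibraries}, gc) yields a selector scoped to library data only.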
func sharePointBackupCreateSelectors( ctx context.Context, - sites, weburls, data []string, + sites, weburls, cats []string, gc *connector.GraphConnector, ) (*selectors.SharePointBackup, error) { if len(sites) == 0 && len(weburls) == 0 { @@ -321,13 +321,13 @@ func sharePointBackupCreateSelectors( } sel := selectors.NewSharePointBackup(union) - if len(data) == 0 { + if len(cats) == 0 { sel.Include(sel.AllData()) return sel, nil } - for _, d := range data { + for _, d := range cats { switch d { case dataLibraries: sel.Include(sel.Libraries(selectors.Any())) @@ -372,7 +372,7 @@ func listSharePointCmd(cmd *cobra.Command, args []string) error { if len(backupID) > 0 { b, err := r.Backup(ctx, model.StableID(backupID)) if err != nil { - if errors.Is(err, kopia.ErrNotFound) { + if errors.Is(err, data.ErrNotFound) { return Only(ctx, errors.Errorf("No backup exists with the id %s", backupID)) } @@ -513,7 +513,7 @@ func runDetailsSharePointCmd( d, _, errs := r.BackupDetails(ctx, backupID) // TODO: log/track recoverable errors if errs.Err() != nil { - if errors.Is(errs.Err(), kopia.ErrNotFound) { + if errors.Is(errs.Err(), data.ErrNotFound) { return nil, errors.Errorf("no backup exists with the id %s", backupID) } diff --git a/src/cli/restore/exchange.go b/src/cli/restore/exchange.go index d0801061f..6188ed89c 100644 --- a/src/cli/restore/exchange.go +++ b/src/cli/restore/exchange.go @@ -10,7 +10,7 @@ import ( . "github.com/alcionai/corso/src/cli/print" "github.com/alcionai/corso/src/cli/utils" "github.com/alcionai/corso/src/internal/common" - "github.com/alcionai/corso/src/internal/kopia" + "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/repository" ) @@ -228,7 +228,7 @@ func restoreExchangeCmd(cmd *cobra.Command, args []string) error { ds, err := ro.Run(ctx) if err != nil { - if errors.Is(err, kopia.ErrNotFound) { + if errors.Is(err, data.ErrNotFound) { return Only(ctx, errors.Errorf("Backup or backup details missing for id %s", backupID)) } diff --git a/src/cli/restore/onedrive.go b/src/cli/restore/onedrive.go index bd8dc7816..7c4ac1337 100644 --- a/src/cli/restore/onedrive.go +++ b/src/cli/restore/onedrive.go @@ -10,7 +10,7 @@ import ( . "github.com/alcionai/corso/src/cli/print" "github.com/alcionai/corso/src/cli/utils" "github.com/alcionai/corso/src/internal/common" - "github.com/alcionai/corso/src/internal/kopia" + "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/repository" ) @@ -171,7 +171,7 @@ func restoreOneDriveCmd(cmd *cobra.Command, args []string) error { ds, err := ro.Run(ctx) if err != nil { - if errors.Is(err, kopia.ErrNotFound) { + if errors.Is(err, data.ErrNotFound) { return Only(ctx, errors.Errorf("Backup or backup details missing for id %s", backupID)) } diff --git a/src/cli/restore/sharepoint.go b/src/cli/restore/sharepoint.go index 13414f6b6..fb30d9587 100644 --- a/src/cli/restore/sharepoint.go +++ b/src/cli/restore/sharepoint.go @@ -10,7 +10,7 @@ import ( . 
"github.com/alcionai/corso/src/cli/print" "github.com/alcionai/corso/src/cli/utils" "github.com/alcionai/corso/src/internal/common" - "github.com/alcionai/corso/src/internal/kopia" + "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/repository" ) @@ -166,7 +166,7 @@ func restoreSharePointCmd(cmd *cobra.Command, args []string) error { ds, err := ro.Run(ctx) if err != nil { - if errors.Is(err, kopia.ErrNotFound) { + if errors.Is(err, data.ErrNotFound) { return Only(ctx, errors.Errorf("Backup or backup details missing for id %s", backupID)) } diff --git a/src/internal/data/data_collection.go b/src/internal/data/data_collection.go index 840268169..794b4bc16 100644 --- a/src/internal/data/data_collection.go +++ b/src/internal/data/data_collection.go @@ -1,6 +1,8 @@ package data import ( + "context" + "errors" "io" "time" @@ -12,6 +14,8 @@ import ( // standard ifaces // ------------------------------------------------------------------------------------------------ +var ErrNotFound = errors.New("not found") + type CollectionState int const ( @@ -66,6 +70,16 @@ type RestoreCollection interface { Collection } +// NotFoundRestoreCollection is a wrapper for a Collection that returns +// ErrNotFound for all Fetch calls. +type NotFoundRestoreCollection struct { + Collection +} + +func (c NotFoundRestoreCollection) Fetch(context.Context, string) (Stream, error) { + return nil, ErrNotFound +} + // Stream represents a single item within a Collection // that can be consumed as a stream (it embeds io.Reader) type Stream interface { diff --git a/src/internal/kopia/model_store.go b/src/internal/kopia/model_store.go index 495e94cff..2c6661d22 100644 --- a/src/internal/kopia/model_store.go +++ b/src/internal/kopia/model_store.go @@ -10,6 +10,7 @@ import ( "github.com/pkg/errors" "golang.org/x/exp/maps" + "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/model" ) @@ -20,7 +21,6 @@ const ( ) var ( - ErrNotFound = errors.New("not found") errNoModelStoreID = errors.New("model has no ModelStoreID") errNoStableID = errors.New("model has no StableID") errBadTagKey = errors.New("tag key overlaps with required key") @@ -281,7 +281,7 @@ func (ms *ModelStore) getModelStoreID( } if len(metadata) == 0 { - return "", errors.Wrap(ErrNotFound, "getting ModelStoreID") + return "", errors.Wrap(data.ErrNotFound, "getting ModelStoreID") } if len(metadata) != 1 { @@ -302,7 +302,7 @@ func (ms *ModelStore) Get( ctx context.Context, s model.Schema, id model.StableID, - data model.Model, + m model.Model, ) error { if !s.Valid() { return errors.WithStack(errUnrecognizedSchema) @@ -313,7 +313,7 @@ func (ms *ModelStore) Get( return err } - return transmuteErr(ms.GetWithModelStoreID(ctx, s, modelID, data)) + return transmuteErr(ms.GetWithModelStoreID(ctx, s, modelID, m)) } // GetWithModelStoreID deserializes the model with the given ModelStoreID into @@ -323,7 +323,7 @@ func (ms *ModelStore) GetWithModelStoreID( ctx context.Context, s model.Schema, id manifest.ID, - data model.Model, + m model.Model, ) error { if !s.Valid() { return errors.WithStack(errUnrecognizedSchema) @@ -333,7 +333,7 @@ func (ms *ModelStore) GetWithModelStoreID( return errors.WithStack(errNoModelStoreID) } - metadata, err := ms.c.GetManifest(ctx, id, data) + metadata, err := ms.c.GetManifest(ctx, id, m) if err != nil { return errors.Wrap(transmuteErr(err), "getting model data") } @@ -343,7 +343,7 @@ func (ms *ModelStore) GetWithModelStoreID( } return 
errors.Wrap( - ms.populateBaseModelFromMetadata(data.Base(), metadata), + ms.populateBaseModelFromMetadata(m.Base(), metadata), "getting model by ID", ) } @@ -457,7 +457,7 @@ func (ms *ModelStore) Delete(ctx context.Context, s model.Schema, id model.Stabl latest, err := ms.getModelStoreID(ctx, s, id) if err != nil { - if errors.Is(err, ErrNotFound) { + if errors.Is(err, data.ErrNotFound) { return nil } @@ -490,7 +490,7 @@ func (ms *ModelStore) DeleteWithModelStoreID(ctx context.Context, id manifest.ID func transmuteErr(err error) error { switch { case errors.Is(err, manifest.ErrNotFound): - return ErrNotFound + return data.ErrNotFound default: return err } diff --git a/src/internal/kopia/model_store_test.go b/src/internal/kopia/model_store_test.go index a984cd479..ab697fb76 100644 --- a/src/internal/kopia/model_store_test.go +++ b/src/internal/kopia/model_store_test.go @@ -12,6 +12,7 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/model" "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/pkg/backup" @@ -360,9 +361,9 @@ func (suite *ModelStoreIntegrationSuite) TestPutGet_WithTags() { func (suite *ModelStoreIntegrationSuite) TestGet_NotFoundErrors() { t := suite.T() - assert.ErrorIs(t, suite.m.Get(suite.ctx, model.BackupOpSchema, "baz", nil), ErrNotFound) + assert.ErrorIs(t, suite.m.Get(suite.ctx, model.BackupOpSchema, "baz", nil), data.ErrNotFound) assert.ErrorIs( - t, suite.m.GetWithModelStoreID(suite.ctx, model.BackupOpSchema, "baz", nil), ErrNotFound) + t, suite.m.GetWithModelStoreID(suite.ctx, model.BackupOpSchema, "baz", nil), data.ErrNotFound) } func (suite *ModelStoreIntegrationSuite) TestPutGetOfTypeBadVersion() { @@ -630,7 +631,7 @@ func (suite *ModelStoreIntegrationSuite) TestPutUpdate() { } err = m.GetWithModelStoreID(ctx, theModelType, oldModelID, nil) - assert.ErrorIs(t, err, ErrNotFound) + assert.ErrorIs(t, err, data.ErrNotFound) }) } } @@ -691,7 +692,7 @@ func (suite *ModelStoreIntegrationSuite) TestPutDelete() { returned := &fooModel{} err := suite.m.GetWithModelStoreID(suite.ctx, theModelType, foo.ModelStoreID, returned) - assert.ErrorIs(t, err, ErrNotFound) + assert.ErrorIs(t, err, data.ErrNotFound) } func (suite *ModelStoreIntegrationSuite) TestPutDelete_BadIDsNoop() { @@ -775,7 +776,7 @@ func (suite *ModelStoreRegressionSuite) TestFailDuringWriteSessionHasNoVisibleEf assert.ErrorIs(t, err, assert.AnError) err = m.GetWithModelStoreID(ctx, theModelType, newID, nil) - assert.ErrorIs(t, err, ErrNotFound) + assert.ErrorIs(t, err, data.ErrNotFound) returned := &fooModel{} require.NoError( diff --git a/src/internal/kopia/wrapper.go b/src/internal/kopia/wrapper.go index d78e874c6..db2f8e645 100644 --- a/src/internal/kopia/wrapper.go +++ b/src/internal/kopia/wrapper.go @@ -317,7 +317,7 @@ func getItemStream( ) if err != nil { if strings.Contains(err.Error(), "entry not found") { - err = errors.Wrap(ErrNotFound, err.Error()) + err = errors.Wrap(data.ErrNotFound, err.Error()) } return nil, errors.Wrap(err, "getting nested object handle") diff --git a/src/internal/kopia/wrapper_test.go b/src/internal/kopia/wrapper_test.go index 4761adc33..03f42bcc3 100644 --- a/src/internal/kopia/wrapper_test.go +++ b/src/internal/kopia/wrapper_test.go @@ -500,7 +500,7 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_ReaderError() { // Files that had an error shouldn't make a dir entry in kopia. 
If they do we // may run into kopia-assisted incrementals issues because only mod time and // not file size is checked for StreamingFiles. - assert.ErrorIs(t, err, ErrNotFound, "errored file is restorable") + assert.ErrorIs(t, err, data.ErrNotFound, "errored file is restorable") } type backedupFile struct { diff --git a/src/internal/operations/manifests.go b/src/internal/operations/manifests.go index dcfb415c9..86ad7f1b5 100644 --- a/src/internal/operations/manifests.go +++ b/src/internal/operations/manifests.go @@ -97,7 +97,7 @@ func produceManifestsAndMetadata( if err != nil { // if no backup exists for any of the complete manifests, we want // to fall back to a complete backup. - if errors.Is(err, kopia.ErrNotFound) { + if errors.Is(err, data.ErrNotFound) { logger.Ctx(ctx).Infow("backup missing, falling back to full backup", clues.In(mctx).Slice()...) return ms, nil, false, nil } @@ -118,7 +118,7 @@ func produceManifestsAndMetadata( } colls, err := collectMetadata(mctx, mr, man, metadataFiles, tenantID) - if err != nil && !errors.Is(err, kopia.ErrNotFound) { + if err != nil && !errors.Is(err, data.ErrNotFound) { // prior metadata isn't guaranteed to exist. // if it doesn't, we'll just have to do a // full backup for that data. From d4ecd535075a3d771f886584e7933d80611b100f Mon Sep 17 00:00:00 2001 From: InfraOwner <120140348+InfraOwner@users.noreply.github.com> Date: Wed, 8 Feb 2023 12:00:29 -0700 Subject: [PATCH 28/45] [Snyk] Security upgrade alpine from 3.16 to 3.17 (#2439) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit

This PR was automatically created by Snyk using the credentials of a real user.


Keeping your Docker base image up-to-date means you’ll benefit from security fixes in the latest version of your chosen image. #### Changes included in this PR - build/Dockerfile We recommend upgrading to `alpine:3.17`, as this image has only 0 known vulnerabilities. To do this, merge this pull request, then verify your application still works as expected. --- **Note:** _You are seeing this because you or someone else with access to this repository has authorized Snyk to open fix PRs._ For more information: 🧐 [View latest project report](https://app.snyk.io/org/alcion/project/4c1165db-1d77-4278-b861-48e29b49c4e7?utm_source=github-enterprise&utm_medium=referral&page=fix-pr) 🛠 [Adjust project settings](https://app.snyk.io/org/alcion/project/4c1165db-1d77-4278-b861-48e29b49c4e7?utm_source=github-enterprise&utm_medium=referral&page=fix-pr/settings) [//]: # 'snyk:metadata:{"prId":"c955a1d4-b78e-425c-87ad-3a2f6ea2ae3c","prPublicId":"c955a1d4-b78e-425c-87ad-3a2f6ea2ae3c","dependencies":[{"name":"alpine","from":"3.16","to":"3.17"}],"packageManager":"dockerfile","projectPublicId":"4c1165db-1d77-4278-b861-48e29b49c4e7","projectUrl":"https://app.snyk.io/org/alcion/project/4c1165db-1d77-4278-b861-48e29b49c4e7?utm_source=github-enterprise&utm_medium=referral&page=fix-pr","type":"auto","patch":[],"vulns":[],"upgrade":[],"isBreakingChange":false,"env":"prod","prType":"fix","templateVariants":["updated-fix-title"],"priorityScoreList":[]}' --- **Learn how to fix vulnerabilities with free interactive lessons:** 🦉 [Learn about vulnerability in an interactive lesson of Snyk Learn.](https://learn.snyk.io/?loc=fix-pr) --- build/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build/Dockerfile b/build/Dockerfile index 8a7eaf299..ae88dfc8f 100644 --- a/build/Dockerfile +++ b/build/Dockerfile @@ -6,7 +6,7 @@ COPY src . ARG CORSO_BUILD_LDFLAGS="" RUN go build -o corso -ldflags "$CORSO_BUILD_LDFLAGS" -FROM alpine:3.16 +FROM alpine:3.17 LABEL org.opencontainers.image.title="Corso" LABEL org.opencontainers.image.description="Free, Secure, and Open-Source Backup for Microsoft 365" From a7fd90b2f8b28991cbc20c5e18daa9adc689411f Mon Sep 17 00:00:00 2001 From: Keepers Date: Wed, 8 Feb 2023 13:50:55 -0700 Subject: [PATCH 29/45] add fault/clues to kopia, pt 1 (#2365) ## Description Begins adding fault and clues to kopia. Part 1 just covers the surface kopia/Wrapper, and all the upstream packages that call it. This also replaces the progress multierr with a fault errs. RestoreMultipleItems changes from always handling errors in failFast mode to checking for failFast configuraton, and handling bestEffort otherwise. ## Does this PR need a docs update or release note? 
- [x] :no_entry: No ## Type of change - [x] :broom: Tech Debt/Cleanup ## Issue(s) * #1970 ## Test Plan - [x] :zap: Unit test - [x] :green_heart: E2E --- src/internal/kopia/upload.go | 14 +-- src/internal/kopia/upload_test.go | 43 +++++++-- src/internal/kopia/wrapper.go | 88 +++++++++++-------- src/internal/kopia/wrapper_test.go | 38 ++++---- src/internal/operations/backup.go | 20 +++-- .../operations/backup_integration_test.go | 3 +- src/internal/operations/backup_test.go | 14 +-- src/internal/operations/common.go | 6 +- src/internal/operations/manifests.go | 7 +- src/internal/operations/manifests_test.go | 9 +- src/internal/operations/restore.go | 5 +- src/internal/streamstore/streamstore.go | 8 +- src/internal/streamstore/streamstore_test.go | 6 +- src/pkg/repository/repository.go | 3 +- 14 files changed, 164 insertions(+), 100 deletions(-) diff --git a/src/internal/kopia/upload.go b/src/internal/kopia/upload.go index 7ada05013..a4ae1fbcc 100644 --- a/src/internal/kopia/upload.go +++ b/src/internal/kopia/upload.go @@ -15,6 +15,7 @@ import ( "time" "unsafe" + "github.com/alcionai/clues" "github.com/hashicorp/go-multierror" "github.com/kopia/kopia/fs" "github.com/kopia/kopia/fs/virtualfs" @@ -25,6 +26,7 @@ import ( "github.com/alcionai/corso/src/internal/data" D "github.com/alcionai/corso/src/internal/diagnostics" "github.com/alcionai/corso/src/pkg/backup/details" + "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/logger" "github.com/alcionai/corso/src/pkg/path" ) @@ -137,7 +139,7 @@ type corsoProgress struct { toMerge map[string]path.Path mu sync.RWMutex totalBytes int64 - errs *multierror.Error + errs *fault.Errors } // Kopia interface function used as a callback when kopia finishes processing a @@ -167,11 +169,11 @@ func (cp *corsoProgress) FinishedFile(relativePath string, err error) { // never had to materialize their details in-memory. if d.info == nil { if d.prevPath == nil { - cp.errs = multierror.Append(cp.errs, errors.Errorf( - "item sourced from previous backup with no previous path. Service: %s, Category: %s", - d.repoPath.Service().String(), - d.repoPath.Category().String(), - )) + cp.errs.Add(clues.New("item sourced from previous backup with no previous path"). 
+ WithAll( + "service", d.repoPath.Service().String(), + "category", d.repoPath.Category().String(), + )) return } diff --git a/src/internal/kopia/upload_test.go b/src/internal/kopia/upload_test.go index 55877db17..e284d4d67 100644 --- a/src/internal/kopia/upload_test.go +++ b/src/internal/kopia/upload_test.go @@ -22,6 +22,7 @@ import ( "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/pkg/backup/details" + "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/path" ) @@ -456,6 +457,7 @@ func (suite *CorsoProgressUnitSuite) TestFinishedFile() { UploadProgress: &snapshotfs.NullUploadProgress{}, deets: bd, pending: map[string]*itemDetails{}, + errs: fault.New(true), } ci := test.cachedItems(suite.targetFileName, suite.targetFilePath) @@ -503,6 +505,7 @@ func (suite *CorsoProgressUnitSuite) TestFinishedFileCachedNoPrevPathErrors() { UploadProgress: &snapshotfs.NullUploadProgress{}, deets: bd, pending: map[string]*itemDetails{}, + errs: fault.New(true), } for k, v := range cachedItems { @@ -518,7 +521,7 @@ func (suite *CorsoProgressUnitSuite) TestFinishedFileCachedNoPrevPathErrors() { assert.Empty(t, cp.pending) assert.Empty(t, bd.Details().Entries) - assert.Error(t, cp.errs.ErrorOrNil()) + assert.Error(t, cp.errs.Err()) } func (suite *CorsoProgressUnitSuite) TestFinishedFileBuildsHierarchyNewItem() { @@ -533,6 +536,7 @@ func (suite *CorsoProgressUnitSuite) TestFinishedFileBuildsHierarchyNewItem() { deets: bd, pending: map[string]*itemDetails{}, toMerge: map[string]path.Path{}, + errs: fault.New(true), } deets := &itemDetails{info: &details.ItemInfo{}, repoPath: suite.targetFilePath} @@ -605,6 +609,7 @@ func (suite *CorsoProgressUnitSuite) TestFinishedFileBaseItemDoesntBuildHierarch deets: bd, pending: map[string]*itemDetails{}, toMerge: map[string]path.Path{}, + errs: fault.New(true), } deets := &itemDetails{ @@ -629,6 +634,7 @@ func (suite *CorsoProgressUnitSuite) TestFinishedHashingFile() { UploadProgress: &snapshotfs.NullUploadProgress{}, deets: bd, pending: map[string]*itemDetails{}, + errs: fault.New(true), } ci := test.cachedItems(suite.targetFileName, suite.targetFilePath) @@ -681,7 +687,10 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree() { user2Encoded: 42, } - progress := &corsoProgress{pending: map[string]*itemDetails{}} + progress := &corsoProgress{ + pending: map[string]*itemDetails{}, + errs: fault.New(true), + } collections := []data.BackupCollection{ mockconnector.NewMockExchangeCollection( @@ -791,7 +800,10 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_MixedDirectory() for _, test := range table { suite.T().Run(test.name, func(t *testing.T) { - progress := &corsoProgress{pending: map[string]*itemDetails{}} + progress := &corsoProgress{ + pending: map[string]*itemDetails{}, + errs: fault.New(true), + } dirTree, err := inflateDirTree(ctx, nil, nil, test.layout, nil, progress) require.NoError(t, err) @@ -971,7 +983,10 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeErrors() { ctx, flush := tester.NewContext() defer flush() - progress := &corsoProgress{pending: map[string]*itemDetails{}} + progress := &corsoProgress{ + pending: map[string]*itemDetails{}, + errs: fault.New(true), + } cols := []data.BackupCollection{} for _, s := range test.states { @@ -1249,7 +1264,10 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSingleSubtree() { ctx, flush := tester.NewContext() defer flush() - progress := 
&corsoProgress{pending: map[string]*itemDetails{}} + progress := &corsoProgress{ + pending: map[string]*itemDetails{}, + errs: fault.New(true), + } msw := &mockSnapshotWalker{ snapshotRoot: getBaseSnapshot(), } @@ -1951,7 +1969,10 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto ctx, flush := tester.NewContext() defer flush() - progress := &corsoProgress{pending: map[string]*itemDetails{}} + progress := &corsoProgress{ + pending: map[string]*itemDetails{}, + errs: fault.New(true), + } msw := &mockSnapshotWalker{ snapshotRoot: getBaseSnapshot(), } @@ -2097,7 +2118,10 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSkipsDeletedSubtre }, ) - progress := &corsoProgress{pending: map[string]*itemDetails{}} + progress := &corsoProgress{ + pending: map[string]*itemDetails{}, + errs: fault.New(true), + } mc := mockconnector.NewMockExchangeCollection(suite.testPath, 1) mc.PrevPath = mc.FullPath() mc.ColState = data.DeletedState @@ -2346,7 +2370,10 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSelectsCorrectSubt }, ) - progress := &corsoProgress{pending: map[string]*itemDetails{}} + progress := &corsoProgress{ + pending: map[string]*itemDetails{}, + errs: fault.New(true), + } mc := mockconnector.NewMockExchangeCollection(inboxPath, 1) mc.PrevPath = mc.FullPath() diff --git a/src/internal/kopia/wrapper.go b/src/internal/kopia/wrapper.go index db2f8e645..d49241d36 100644 --- a/src/internal/kopia/wrapper.go +++ b/src/internal/kopia/wrapper.go @@ -4,7 +4,7 @@ import ( "context" "strings" - "github.com/hashicorp/go-multierror" + "github.com/alcionai/clues" "github.com/kopia/kopia/fs" "github.com/kopia/kopia/repo" "github.com/kopia/kopia/repo/manifest" @@ -17,6 +17,7 @@ import ( D "github.com/alcionai/corso/src/internal/diagnostics" "github.com/alcionai/corso/src/internal/stats" "github.com/alcionai/corso/src/pkg/backup/details" + "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/logger" "github.com/alcionai/corso/src/pkg/path" ) @@ -101,7 +102,11 @@ func (w *Wrapper) Close(ctx context.Context) error { err := w.c.Close(ctx) w.c = nil - return errors.Wrap(err, "closing Wrapper") + if err != nil { + return clues.Wrap(err, "closing Wrapper").WithClues(ctx) + } + + return nil } type IncrementalBase struct { @@ -122,9 +127,10 @@ func (w Wrapper) BackupCollections( globalExcludeSet map[string]struct{}, tags map[string]string, buildTreeWithBase bool, + errs *fault.Errors, ) (*BackupStats, *details.Builder, map[string]path.Path, error) { if w.c == nil { - return nil, nil, nil, errNotConnected + return nil, nil, nil, clues.Stack(errNotConnected).WithClues(ctx) } ctx, end := D.Span(ctx, "kopia:backupCollections") @@ -138,6 +144,7 @@ func (w Wrapper) BackupCollections( pending: map[string]*itemDetails{}, deets: &details.Builder{}, toMerge: map[string]path.Path{}, + errs: errs, } // When running an incremental backup, we need to pass the prior @@ -165,14 +172,12 @@ func (w Wrapper) BackupCollections( previousSnapshots, dirTree, tags, - progress, - ) + progress) if err != nil { - combinedErrs := multierror.Append(nil, err, progress.errs) - return nil, nil, nil, combinedErrs.ErrorOrNil() + return nil, nil, nil, err } - return s, progress.deets, progress.toMerge, progress.errs.ErrorOrNil() + return s, progress.deets, progress.toMerge, progress.errs.Err() } func (w Wrapper) makeSnapshotWithRoot( @@ -197,9 +202,7 @@ func (w Wrapper) makeSnapshotWithRoot( logger.Ctx(ctx).Infow( "using snapshots for kopia-assisted incrementals", 
- "snapshot_ids", - snapIDs, - ) + "snapshot_ids", snapIDs) tags := map[string]string{} @@ -224,6 +227,8 @@ func (w Wrapper) makeSnapshotWithRoot( OnUpload: bc.Count, }, func(innerCtx context.Context, rw repo.RepositoryWriter) error { + log := logger.Ctx(innerCtx) + si := snapshot.SourceInfo{ Host: corsoHost, UserName: corsoUser, @@ -240,8 +245,8 @@ func (w Wrapper) makeSnapshotWithRoot( } policyTree, err := policy.TreeForSourceWithOverride(innerCtx, w.c, si, errPolicy) if err != nil { - err = errors.Wrap(err, "get policy tree") - logger.Ctx(innerCtx).Errorw("kopia backup", err) + err = clues.Wrap(err, "get policy tree").WithClues(ctx) + log.With("err", err).Errorw("building kopia backup", clues.InErr(err).Slice()...) return err } @@ -253,16 +258,16 @@ func (w Wrapper) makeSnapshotWithRoot( man, err = u.Upload(innerCtx, root, policyTree, si, prevSnaps...) if err != nil { - err = errors.Wrap(err, "uploading data") - logger.Ctx(innerCtx).Errorw("kopia backup", err) + err = clues.Wrap(err, "uploading data").WithClues(ctx) + log.With("err", err).Errorw("uploading kopia backup", clues.InErr(err).Slice()...) return err } man.Tags = tags if _, err := snapshot.SaveSnapshot(innerCtx, rw, man); err != nil { - err = errors.Wrap(err, "saving snapshot") - logger.Ctx(innerCtx).Errorw("kopia backup", err) + err = clues.Wrap(err, "saving snapshot").WithClues(ctx) + log.With("err", err).Errorw("persisting kopia backup snapshot", clues.InErr(err).Slice()...) return err } @@ -272,7 +277,7 @@ func (w Wrapper) makeSnapshotWithRoot( // Telling kopia to always flush may hide other errors if it fails while // flushing the write session (hence logging above). if err != nil { - return nil, errors.Wrap(err, "kopia backup") + return nil, clues.Wrap(err, "kopia backup") } res := manifestToStats(man, progress, bc) @@ -286,12 +291,15 @@ func (w Wrapper) getSnapshotRoot( ) (fs.Entry, error) { man, err := snapshot.LoadSnapshot(ctx, w.c, manifest.ID(snapshotID)) if err != nil { - return nil, errors.Wrap(err, "getting snapshot handle") + return nil, clues.Wrap(err, "getting snapshot handle").WithClues(ctx) } rootDirEntry, err := snapshotfs.SnapshotRoot(w.c, man) + if err != nil { + return nil, clues.Wrap(err, "getting root directory").WithClues(ctx) + } - return rootDirEntry, errors.Wrap(err, "getting root directory") + return rootDirEntry, nil } // getItemStream looks up the item at the given path starting from snapshotRoot. @@ -306,7 +314,7 @@ func getItemStream( bcounter ByteCounter, ) (data.Stream, error) { if itemPath == nil { - return nil, errors.WithStack(errNoRestorePath) + return nil, clues.Stack(errNoRestorePath).WithClues(ctx) } // GetNestedEntry handles nil properly. 
@@ -317,15 +325,15 @@ func getItemStream( ) if err != nil { if strings.Contains(err.Error(), "entry not found") { - err = errors.Wrap(data.ErrNotFound, err.Error()) + err = clues.Stack(data.ErrNotFound, err).WithClues(ctx) } - return nil, errors.Wrap(err, "getting nested object handle") + return nil, clues.Wrap(err, "getting nested object handle").WithClues(ctx) } f, ok := e.(fs.File) if !ok { - return nil, errors.New("requested object is not a file") + return nil, clues.New("requested object is not a file").WithClues(ctx) } if bcounter != nil { @@ -334,12 +342,12 @@ func getItemStream( r, err := f.Open(ctx) if err != nil { - return nil, errors.Wrap(err, "opening file") + return nil, clues.Wrap(err, "opening file").WithClues(ctx) } decodedName, err := decodeElement(f.Name()) if err != nil { - return nil, errors.Wrap(err, "decoding file name") + return nil, clues.Wrap(err, "decoding file name").WithClues(ctx) } return &kopiaDataStream{ @@ -368,12 +376,13 @@ func (w Wrapper) RestoreMultipleItems( snapshotID string, paths []path.Path, bcounter ByteCounter, + errs *fault.Errors, ) ([]data.RestoreCollection, error) { ctx, end := D.Span(ctx, "kopia:restoreMultipleItems") defer end() if len(paths) == 0 { - return nil, errors.WithStack(errNoRestorePath) + return nil, clues.Stack(errNoRestorePath).WithClues(ctx) } snapshotRoot, err := w.getSnapshotRoot(ctx, snapshotID) @@ -381,22 +390,23 @@ func (w Wrapper) RestoreMultipleItems( return nil, err } - var ( - errs *multierror.Error - // Maps short ID of parent path to data collection for that folder. - cols = map[string]*kopiaDataCollection{} - ) + // Maps short ID of parent path to data collection for that folder. + cols := map[string]*kopiaDataCollection{} for _, itemPath := range paths { + if errs.Err() != nil { + return nil, errs.Err() + } + ds, err := getItemStream(ctx, itemPath, snapshotRoot, bcounter) if err != nil { - errs = multierror.Append(errs, err) + errs.Add(err) continue } parentPath, err := itemPath.Dir() if err != nil { - errs = multierror.Append(errs, errors.Wrap(err, "making directory collection")) + errs.Add(clues.Wrap(err, "making directory collection").WithClues(ctx)) continue } @@ -414,7 +424,7 @@ func (w Wrapper) RestoreMultipleItems( res = append(res, c) } - return res, errs.ErrorOrNil() + return res, errs.Err() } // DeleteSnapshot removes the provided manifest from kopia. @@ -425,7 +435,7 @@ func (w Wrapper) DeleteSnapshot( mid := manifest.ID(snapshotID) if len(mid) == 0 { - return errors.New("attempt to delete unidentified snapshot") + return clues.New("attempt to delete unidentified snapshot").WithClues(ctx) } err := repo.WriteSession( @@ -434,7 +444,7 @@ func (w Wrapper) DeleteSnapshot( repo.WriteSessionOptions{Purpose: "KopiaWrapperBackupDeletion"}, func(innerCtx context.Context, rw repo.RepositoryWriter) error { if err := rw.DeleteManifest(ctx, mid); err != nil { - return errors.Wrap(err, "deleting snapshot") + return clues.Wrap(err, "deleting snapshot").WithClues(ctx) } return nil @@ -443,7 +453,7 @@ func (w Wrapper) DeleteSnapshot( // Telling kopia to always flush may hide other errors if it fails while // flushing the write session (hence logging above). 
if err != nil { - return errors.Wrap(err, "kopia deleting backup manifest") + return clues.Wrap(err, "deleting backup manifest").WithClues(ctx) } return nil @@ -464,7 +474,7 @@ func (w Wrapper) FetchPrevSnapshotManifests( tags map[string]string, ) ([]*ManifestEntry, error) { if w.c == nil { - return nil, errors.WithStack(errNotConnected) + return nil, clues.Stack(errNotConnected).WithClues(ctx) } return fetchPrevSnapshotManifests(ctx, w.c, reasons, tags), nil diff --git a/src/internal/kopia/wrapper_test.go b/src/internal/kopia/wrapper_test.go index 03f42bcc3..e754ff9cf 100644 --- a/src/internal/kopia/wrapper_test.go +++ b/src/internal/kopia/wrapper_test.go @@ -19,6 +19,7 @@ import ( "github.com/alcionai/corso/src/internal/connector/mockconnector" "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/tester" + "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/logger" "github.com/alcionai/corso/src/pkg/path" ) @@ -269,7 +270,7 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections() { nil, tags, true, - ) + fault.New(true)) assert.NoError(t, err) assert.Equal(t, test.expectedUploadedFiles, stats.TotalFileCount, "total files") @@ -357,7 +358,7 @@ func (suite *KopiaIntegrationSuite) TestRestoreAfterCompressionChange() { nil, tags, true, - ) + fault.New(true)) require.NoError(t, err) require.NoError(t, k.Compression(ctx, "gzip")) @@ -374,8 +375,8 @@ func (suite *KopiaIntegrationSuite) TestRestoreAfterCompressionChange() { fp1, fp2, }, - nil) - + nil, + fault.New(true)) require.NoError(t, err) assert.Equal(t, 2, len(result)) @@ -475,7 +476,7 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_ReaderError() { nil, tags, true, - ) + fault.New(true)) require.NoError(t, err) assert.Equal(t, 0, stats.ErrorCount) @@ -496,7 +497,7 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_ReaderError() { string(stats.SnapshotID), []path.Path{failedPath}, &ic, - ) + fault.New(true)) // Files that had an error shouldn't make a dir entry in kopia. If they do we // may run into kopia-assisted incrementals issues because only mod time and // not file size is checked for StreamingFiles. 
@@ -536,7 +537,7 @@ func (suite *KopiaIntegrationSuite) TestBackupCollectionsHandlesNoCollections() nil, nil, true, - ) + fault.New(true)) require.NoError(t, err) assert.Equal(t, BackupStats{}, *s) @@ -695,7 +696,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) SetupTest() { nil, tags, false, - ) + fault.New(true)) require.NoError(t, err) require.Equal(t, stats.ErrorCount, 0) require.Equal(t, stats.TotalFileCount, expectedFiles) @@ -825,7 +826,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() { excluded, tags, true, - ) + fault.New(true)) require.NoError(t, err) assert.Equal(t, test.expectedCachedItems, stats.CachedFileCount) assert.Equal(t, test.expectedUncachedItems, stats.UncachedFileCount) @@ -845,7 +846,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() { suite.files[suite.testPath1.String()][0].itemPath, }, &ic, - ) + fault.New(true)) test.restoreCheck(t, err) }) } @@ -902,7 +903,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestRestoreMultipleItems() { suite.testPath1, suite.files[suite.testPath2.String()][0].itemPath, }, - expectedCollections: 2, + expectedCollections: 0, expectedErr: assert.Error, }, { @@ -912,7 +913,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestRestoreMultipleItems() { doesntExist, suite.files[suite.testPath2.String()][0].itemPath, }, - expectedCollections: 2, + expectedCollections: 0, expectedErr: assert.Error, }, } @@ -939,9 +940,14 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestRestoreMultipleItems() { suite.ctx, string(suite.snapshotID), test.inputPaths, - &ic) + &ic, + fault.New(true)) test.expectedErr(t, err) + if err != nil { + return + } + assert.Len(t, result, test.expectedCollections) assert.Less(t, int64(0), ic.i) testForFiles(t, expected, result) @@ -981,7 +987,8 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestRestoreMultipleItems_Errors() suite.ctx, test.snapshotID, test.paths, - nil) + nil, + fault.New(true)) assert.Error(t, err) assert.Empty(t, c) }) @@ -1001,7 +1008,8 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestDeleteSnapshot() { suite.ctx, string(suite.snapshotID), []path.Path{itemPath}, - &ic) + &ic, + fault.New(true)) assert.Error(t, err, "snapshot should be deleted") assert.Empty(t, c) assert.Zero(t, ic.i) diff --git a/src/internal/operations/backup.go b/src/internal/operations/backup.go index a3fde4cb8..a82c0d30c 100644 --- a/src/internal/operations/backup.go +++ b/src/internal/operations/backup.go @@ -26,6 +26,7 @@ import ( "github.com/alcionai/corso/src/pkg/backup" "github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/control" + "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/logger" "github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/selectors" @@ -100,7 +101,7 @@ type backupStats struct { } type detailsWriter interface { - WriteBackupDetails(context.Context, *details.Details) (string, error) + WriteBackupDetails(context.Context, *details.Details, *fault.Errors) (string, error) } // --------------------------------------------------------------------------- @@ -259,7 +260,8 @@ func (op *BackupOperation) do( cs, excludes, backupID, - op.incremental && canUseMetaData) + op.incremental && canUseMetaData, + op.Errors) if err != nil { return nil, errors.Wrap(err, "persisting collection backups") } @@ -272,7 +274,8 @@ func (op *BackupOperation) do( detailsStore, mans, toMerge, - deets) + deets, + op.Errors) if err != nil { return nil, errors.Wrap(err, "merging details") } @@ 
-335,6 +338,7 @@ type backuper interface { excluded map[string]struct{}, tags map[string]string, buildTreeWithBase bool, + errs *fault.Errors, ) (*kopia.BackupStats, *details.Builder, map[string]path.Path, error) } @@ -393,6 +397,7 @@ func consumeBackupDataCollections( excludes map[string]struct{}, backupID model.StableID, isIncremental bool, + errs *fault.Errors, ) (*kopia.BackupStats, *details.Builder, map[string]path.Path, error) { complete, closer := observe.MessageWithCompletion(ctx, observe.Safe("Backing up data")) defer func() { @@ -460,7 +465,8 @@ func consumeBackupDataCollections( // OneDrive replace this with `excludes`. nil, tags, - isIncremental) + isIncremental, + errs) if err != nil { if kopiaStats == nil { return nil, nil, nil, err @@ -500,6 +506,7 @@ func mergeDetails( mans []*kopia.ManifestEntry, shortRefsFromPrevBackup map[string]path.Path, deets *details.Builder, + errs *fault.Errors, ) error { // Don't bother loading any of the base details if there's nothing we need to // merge. @@ -529,7 +536,8 @@ func mergeDetails( ctx, model.StableID(bID), ms, - detailsStore) + detailsStore, + errs) if err != nil { return clues.New("fetching base details for backup").WithClues(mctx) } @@ -650,7 +658,7 @@ func (op *BackupOperation) createBackupModels( return clues.New("no backup details to record").WithClues(ctx) } - detailsID, err := detailsStore.WriteBackupDetails(ctx, backupDetails) + detailsID, err := detailsStore.WriteBackupDetails(ctx, backupDetails, op.Errors) if err != nil { return clues.Wrap(err, "creating backupDetails model").WithClues(ctx) } diff --git a/src/internal/operations/backup_integration_test.go b/src/internal/operations/backup_integration_test.go index 277e5a40d..83033ab3b 100644 --- a/src/internal/operations/backup_integration_test.go +++ b/src/internal/operations/backup_integration_test.go @@ -31,6 +31,7 @@ import ( "github.com/alcionai/corso/src/pkg/backup" "github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/control" + "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/selectors" "github.com/alcionai/corso/src/pkg/store" @@ -250,7 +251,7 @@ func checkMetadataFilesExist( pathsByRef[dir.ShortRef()] = append(pathsByRef[dir.ShortRef()], fName) } - cols, err := kw.RestoreMultipleItems(ctx, bup.SnapshotID, paths, nil) + cols, err := kw.RestoreMultipleItems(ctx, bup.SnapshotID, paths, nil, fault.New(true)) assert.NoError(t, err) for _, col := range cols { diff --git a/src/internal/operations/backup_test.go b/src/internal/operations/backup_test.go index 6e9afda8a..12ebcef6c 100644 --- a/src/internal/operations/backup_test.go +++ b/src/internal/operations/backup_test.go @@ -23,6 +23,7 @@ import ( "github.com/alcionai/corso/src/pkg/backup" "github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/control" + "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/selectors" "github.com/alcionai/corso/src/pkg/store" @@ -62,6 +63,7 @@ func (mr *mockRestorer) RestoreMultipleItems( snapshotID string, paths []path.Path, bc kopia.ByteCounter, + errs *fault.Errors, ) ([]data.RestoreCollection, error) { mr.gotPaths = append(mr.gotPaths, paths...) 
@@ -98,6 +100,7 @@ func (mbu mockBackuper) BackupCollections( excluded map[string]struct{}, tags map[string]string, buildTreeWithBase bool, + errs *fault.Errors, ) (*kopia.BackupStats, *details.Builder, map[string]path.Path, error) { if mbu.checkFunc != nil { mbu.checkFunc(bases, cs, tags, buildTreeWithBase) @@ -115,6 +118,7 @@ type mockDetailsReader struct { func (mdr mockDetailsReader) ReadBackupDetails( ctx context.Context, detailsID string, + errs *fault.Errors, ) (*details.Details, error) { r := mdr.entries[detailsID] @@ -578,7 +582,7 @@ func (suite *BackupOpSuite) TestBackupOperation_ConsumeBackupDataCollections_Pat nil, model.StableID(""), true, - ) + fault.New(true)) }) } } @@ -1060,7 +1064,6 @@ func (suite *BackupOpSuite) TestBackupOperation_MergeBackupDetails_AddsItems() { mdr := mockDetailsReader{entries: test.populatedDetails} w := &store.Wrapper{Storer: mockBackupStorer{entries: test.populatedModels}} - deets := details.Builder{} err := mergeDetails( @@ -1070,8 +1073,7 @@ func (suite *BackupOpSuite) TestBackupOperation_MergeBackupDetails_AddsItems() { test.inputMans, test.inputShortRefsFromPrevBackup, &deets, - ) - + fault.New(true)) test.errCheck(t, err) if err != nil { return @@ -1168,7 +1170,6 @@ func (suite *BackupOpSuite) TestBackupOperation_MergeBackupDetails_AddsFolders() mdr := mockDetailsReader{entries: populatedDetails} w := &store.Wrapper{Storer: mockBackupStorer{entries: populatedModels}} - deets := details.Builder{} err := mergeDetails( @@ -1178,8 +1179,7 @@ func (suite *BackupOpSuite) TestBackupOperation_MergeBackupDetails_AddsFolders() inputMans, inputToMerge, &deets, - ) - + fault.New(true)) assert.NoError(t, err) assert.ElementsMatch(t, expectedEntries, deets.Details().Entries) } diff --git a/src/internal/operations/common.go b/src/internal/operations/common.go index addbeb5ac..18266a734 100644 --- a/src/internal/operations/common.go +++ b/src/internal/operations/common.go @@ -8,11 +8,12 @@ import ( "github.com/alcionai/corso/src/internal/model" "github.com/alcionai/corso/src/pkg/backup" "github.com/alcionai/corso/src/pkg/backup/details" + "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/store" ) type detailsReader interface { - ReadBackupDetails(ctx context.Context, detailsID string) (*details.Details, error) + ReadBackupDetails(ctx context.Context, detailsID string, errs *fault.Errors) (*details.Details, error) } func getBackupAndDetailsFromID( @@ -20,13 +21,14 @@ func getBackupAndDetailsFromID( backupID model.StableID, ms *store.Wrapper, detailsStore detailsReader, + errs *fault.Errors, ) (*backup.Backup, *details.Details, error) { dID, bup, err := ms.GetDetailsIDFromBackupID(ctx, backupID) if err != nil { return nil, nil, errors.Wrap(err, "getting backup details ID") } - deets, err := detailsStore.ReadBackupDetails(ctx, dID) + deets, err := detailsStore.ReadBackupDetails(ctx, dID, errs) if err != nil { return nil, nil, errors.Wrap(err, "getting backup details data") } diff --git a/src/internal/operations/manifests.go b/src/internal/operations/manifests.go index 86ad7f1b5..a1d7997ed 100644 --- a/src/internal/operations/manifests.go +++ b/src/internal/operations/manifests.go @@ -45,7 +45,7 @@ func produceManifestsAndMetadata( reasons []kopia.Reason, tenantID string, getMetadata bool, - errs fault.Adder, + errs *fault.Errors, ) ([]*kopia.ManifestEntry, []data.RestoreCollection, bool, error) { var ( metadataFiles = graph.AllMetadataFileNames() @@ -117,7 +117,7 @@ func produceManifestsAndMetadata( return ms, nil, false, nil } - 
colls, err := collectMetadata(mctx, mr, man, metadataFiles, tenantID) + colls, err := collectMetadata(mctx, mr, man, metadataFiles, tenantID, errs) if err != nil && !errors.Is(err, data.ErrNotFound) { // prior metadata isn't guaranteed to exist. // if it doesn't, we'll just have to do a @@ -183,6 +183,7 @@ func collectMetadata( man *kopia.ManifestEntry, fileNames []string, tenantID string, + errs *fault.Errors, ) ([]data.RestoreCollection, error) { paths := []path.Path{} @@ -206,7 +207,7 @@ func collectMetadata( } } - dcs, err := r.RestoreMultipleItems(ctx, string(man.ID), paths, nil) + dcs, err := r.RestoreMultipleItems(ctx, string(man.ID), paths, nil, errs) if err != nil { // Restore is best-effort and we want to keep it that way since we want to // return as much metadata as we can to reduce the work we'll need to do. diff --git a/src/internal/operations/manifests_test.go b/src/internal/operations/manifests_test.go index e1be7df54..d30f72726 100644 --- a/src/internal/operations/manifests_test.go +++ b/src/internal/operations/manifests_test.go @@ -14,6 +14,7 @@ import ( "github.com/alcionai/corso/src/internal/model" "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/pkg/backup" + "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/fault/mock" "github.com/alcionai/corso/src/pkg/path" ) @@ -226,7 +227,7 @@ func (suite *OperationsManifestsUnitSuite) TestCollectMetadata() { Reasons: test.reasons, } - _, err := collectMetadata(ctx, &mr, man, test.fileNames, tid) + _, err := collectMetadata(ctx, &mr, man, test.fileNames, tid, fault.New(true)) assert.ErrorIs(t, err, test.expectErr) }) } @@ -637,8 +638,6 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() { ctx, flush := tester.NewContext() defer flush() - ma := mock.NewAdder() - mans, dcs, b, err := produceManifestsAndMetadata( ctx, &test.mr, @@ -646,7 +645,7 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() { test.reasons, tid, test.getMeta, - ma) + fault.New(true)) test.assertErr(t, err) test.assertB(t, b) @@ -936,7 +935,7 @@ func (suite *BackupManifestSuite) TestBackupOperation_CollectMetadata() { mr := &mockRestorer{} - _, err := collectMetadata(ctx, mr, test.inputMan, test.inputFiles, tenant) + _, err := collectMetadata(ctx, mr, test.inputMan, test.inputFiles, tenant, fault.New(true)) assert.NoError(t, err) checkPaths(t, test.expected, mr.gotPaths) diff --git a/src/internal/operations/restore.go b/src/internal/operations/restore.go index d29c0bf40..51f69cbc1 100644 --- a/src/internal/operations/restore.go +++ b/src/internal/operations/restore.go @@ -104,6 +104,7 @@ type restorer interface { snapshotID string, paths []path.Path, bc kopia.ByteCounter, + errs *fault.Errors, ) ([]data.RestoreCollection, error) } @@ -197,7 +198,7 @@ func (op *RestoreOperation) do( op.BackupID, op.store, detailsStore, - ) + op.Errors) if err != nil { return nil, errors.Wrap(err, "getting backup and details") } @@ -228,7 +229,7 @@ func (op *RestoreOperation) do( defer closer() defer close(kopiaComplete) - dcs, err := op.kopia.RestoreMultipleItems(ctx, bup.SnapshotID, paths, opStats.bytesRead) + dcs, err := op.kopia.RestoreMultipleItems(ctx, bup.SnapshotID, paths, opStats.bytesRead, op.Errors) if err != nil { return nil, errors.Wrap(err, "retrieving collections from repository") } diff --git a/src/internal/streamstore/streamstore.go b/src/internal/streamstore/streamstore.go index a97cace98..a0d55136d 100644 --- a/src/internal/streamstore/streamstore.go 
+++ b/src/internal/streamstore/streamstore.go
@@ -15,6 +15,7 @@ import (
 	"github.com/alcionai/corso/src/internal/kopia"
 	"github.com/alcionai/corso/src/internal/stats"
 	"github.com/alcionai/corso/src/pkg/backup/details"
+	"github.com/alcionai/corso/src/pkg/fault"
 	"github.com/alcionai/corso/src/pkg/path"
 )
 
@@ -46,6 +47,7 @@ const (
 func (ss *streamStore) WriteBackupDetails(
 	ctx context.Context,
 	backupDetails *details.Details,
+	errs *fault.Errors,
 ) (string, error) {
 	// construct the path of the container for the `details` item
 	p, err := path.Builder{}.
@@ -79,7 +81,8 @@ func (ss *streamStore) WriteBackupDetails(
 		[]data.BackupCollection{dc},
 		nil,
 		nil,
-		false)
+		false,
+		errs)
 	if err != nil {
 		return "", errors.Wrap(err, "storing details in repository")
 	}
@@ -92,6 +95,7 @@ func (ss *streamStore) ReadBackupDetails(
 func (ss *streamStore) ReadBackupDetails(
 	ctx context.Context,
 	detailsID string,
+	errs *fault.Errors,
 ) (*details.Details, error) {
 	// construct the path for the `details` item
 	detailsPath, err := path.Builder{}.
@@ -108,7 +112,7 @@ func (ss *streamStore) ReadBackupDetails(
 	var bc stats.ByteCounter
 
-	dcs, err := ss.kw.RestoreMultipleItems(ctx, detailsID, []path.Path{detailsPath}, &bc)
+	dcs, err := ss.kw.RestoreMultipleItems(ctx, detailsID, []path.Path{detailsPath}, &bc, errs)
 	if err != nil {
 		return nil, errors.Wrap(err, "retrieving backup details data")
 	}

diff --git a/src/internal/streamstore/streamstore_test.go b/src/internal/streamstore/streamstore_test.go
index c3259606e..cc3309a11 100644
--- a/src/internal/streamstore/streamstore_test.go
+++ b/src/internal/streamstore/streamstore_test.go
@@ -10,6 +10,7 @@ import (
 	"github.com/alcionai/corso/src/internal/kopia"
 	"github.com/alcionai/corso/src/internal/tester"
 	"github.com/alcionai/corso/src/pkg/backup/details"
+	"github.com/alcionai/corso/src/pkg/fault"
 	"github.com/alcionai/corso/src/pkg/path"
 )
 
@@ -51,14 +52,13 @@ func (suite *StreamStoreIntegrationSuite) TestDetails() {
 		})
 
 	deets := deetsBuilder.Details()
-
 	ss := New(kw, "tenant", path.ExchangeService)
 
-	id, err := ss.WriteBackupDetails(ctx, deets)
+	id, err := ss.WriteBackupDetails(ctx, deets, fault.New(true))
 	require.NoError(t, err)
 	require.NotNil(t, id)
 
-	readDeets, err := ss.ReadBackupDetails(ctx, id)
+	readDeets, err := ss.ReadBackupDetails(ctx, id, fault.New(true))
 	require.NoError(t, err)
 	require.NotNil(t, readDeets)

diff --git a/src/pkg/repository/repository.go b/src/pkg/repository/repository.go
index 429b0e7a7..518685134 100644
--- a/src/pkg/repository/repository.go
+++ b/src/pkg/repository/repository.go
@@ -323,7 +323,8 @@ func (r repository) BackupDetails(
 	deets, err := streamstore.New(
 		r.dataLayer,
 		r.Account.ID(),
-		b.Selector.PathService()).ReadBackupDetails(ctx, dID)
+		b.Selector.PathService(),
+	).ReadBackupDetails(ctx, dID, errs)
 	if err != nil {
 		return nil, nil, errs.Fail(err)
 	}

From 47f5ca1d95fde3e0ae99c1758b5e609b0b52022a Mon Sep 17 00:00:00 2001
From: Keepers
Date: Wed, 8 Feb 2023 14:13:45 -0700
Subject: [PATCH 30/45] add pathEquals to filter set (#2395)

## Description

Adds a pathEquals filter, and a corresponding option for setting pathEquals in the scope options. This will enable tests to restrict calendar lookups to only the primary calendar folder, instead of including all children underneath.
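For illustration, a minimal sketch of the new filter in use; the behavior shown is inferred from the unit tests below (matching is case-insensitive and ignores leading/trailing slashes), and the folder names are purely illustrative:

```go
package main

import (
	"fmt"

	"github.com/alcionai/corso/src/pkg/filters"
)

func main() {
	// Matches only the complete path "Calendar", never its children.
	f := filters.PathEquals([]string{"Calendar"})

	fmt.Println(f.Compare("/Calendar/"))          // true: slashes are normalized away
	fmt.Println(f.Compare("/calendar"))           // true: comparison is case-insensitive
	fmt.Println(f.Compare("/Calendar/Birthdays")) // false: child folders are excluded
	fmt.Println(f.Compare("/CalendarArchive"))    // false: no partial element matches
}
```

## Does this PR need a docs update or release note?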
- [x] :no_entry: No

## Type of change

- [x] :robot: Test

## Issue(s)

* #2388

## Test Plan

- [x] :zap: Unit test
---
 CHANGELOG.md                    |  3 +-
 src/pkg/filters/filters.go      | 58 ++++++++++++++++++++++++++++
 src/pkg/filters/filters_test.go | 67 +++++++++++++++++++++++++++++++++
 src/pkg/selectors/selectors.go  | 14 +++++++
 4 files changed, 141 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index cccc745a7..2765f90c4 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -28,9 +28,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 - Add versions to backups so that we can understand/handle older backup formats
 
 ### Fixed
-- Backing up a calendar that has the same name as the default calendar
+
 - Added additional backoff-retry to all OneDrive queries.
 - Users with `null` userType values are no longer excluded from user queries.
+- Fix bug when backing up a calendar that has the same name as the default calendar
 
 ### Known Issues

diff --git a/src/pkg/filters/filters.go b/src/pkg/filters/filters.go
index cc08fd780..4998c6684 100644
--- a/src/pkg/filters/filters.go
+++ b/src/pkg/filters/filters.go
@@ -36,6 +36,8 @@ const (
 	TargetPathContains
 	// "baz" equals any complete element suffix of "foo/bar/baz"
 	TargetPathSuffix
+	// "foo/bar/baz" equals the complete path "foo/bar/baz"
+	TargetPathEquals
 )
 
 func norm(s string) string {
@@ -295,6 +297,44 @@ func NotPathSuffix(targets []string) Filter {
 	return newSliceFilter(TargetPathSuffix, targets, tgts, true)
 }
 
+// PathEquals creates a filter where Compare(v) is true if
+// target.Equals(v) &&
+// split(target)[i].Equals(split(v)[i]) for _all_ i in 0..len(target)-1
+// ex: target "foo" returns true for inputs "/foo/", "/foo", and "foo/"
+// but false for "/foo/bar", "bar/foo/", and "/foobar/"
+//
+// Unlike single-target filters, this filter accepts a
+// slice of targets, will compare an input against each target
+// independently, and returns true if one or more of the
+// comparisons succeed.
+func PathEquals(targets []string) Filter {
+	tgts := make([]string, len(targets))
+	for i := range targets {
+		tgts[i] = normPathElem(targets[i])
+	}
+
+	return newSliceFilter(TargetPathEquals, targets, tgts, false)
+}
+
+// NotPathEquals creates a filter where Compare(v) is true if
+// !target.Equals(v) ||
+// !split(target)[i].Equals(split(v)[i]) for _all_ i in 0..len(target)-1
+// ex: target "foo" returns true for "/foo/bar", "bar/foo/", and "/foobar/"
+// but false for inputs "/foo/", "/foo", and "foo/"
+//
+// Unlike single-target filters, this filter accepts a
+// slice of targets, will compare an input against each target
+// independently, and returns true if one or more of the
+// comparisons succeed.
+func NotPathEquals(targets []string) Filter {
+	tgts := make([]string, len(targets))
+	for i := range targets {
+		tgts[i] = normPathElem(targets[i])
+	}
+
+	return newSliceFilter(TargetPathEquals, targets, tgts, true)
+}
+
 // newFilter is the standard filter constructor.
 func newFilter(c comparator, target string, negate bool) Filter {
 	return Filter{
@@ -367,6 +407,9 @@ func (f Filter) Compare(input string) bool {
 	case TargetPathSuffix:
 		cmp = pathSuffix
 		hasSlice = true
+	case TargetPathEquals:
+		cmp = pathEquals
+		hasSlice = true
 	case Passes:
 		return true
 	case Fails:
@@ -471,6 +514,20 @@ func pathSuffix(target, input string) bool {
 	return strings.HasSuffix(normPathElem(input), target)
 }
 
+// true if target is an _exact_ match on the input, excluding
+// path delimiters. Element-complete means we do not succeed
+// on partial element matches (ex: "/bar" does not match
+// "/foobar").
+//
+// As a precondition, assumes the target value has been
+// passed through normPathElem().
+//
+// The input is assumed to be the complete path that may
+// match the target.
+func pathEquals(target, input string) bool {
+	return normPathElem(input) == target
+}
+
 // ----------------------------------------------------------------------------------------------------
 // Helpers
 // ----------------------------------------------------------------------------------------------------
@@ -487,6 +544,7 @@ var prefixString = map[comparator]string{
 	TargetPathPrefix:   "pathPfx:",
 	TargetPathContains: "pathCont:",
 	TargetPathSuffix:   "pathSfx:",
+	TargetPathEquals:   "pathEq:",
 }
 
 func (f Filter) String() string {

diff --git a/src/pkg/filters/filters_test.go b/src/pkg/filters/filters_test.go
index d6b361db4..0a1585a42 100644
--- a/src/pkg/filters/filters_test.go
+++ b/src/pkg/filters/filters_test.go
@@ -461,3 +461,70 @@ func (suite *FiltersSuite) TestPathSuffix_NormalizedTargets() {
 		})
 	}
 }
+
+func (suite *FiltersSuite) TestPathEquals() {
+	table := []struct {
+		name     string
+		targets  []string
+		input    string
+		expectF  assert.BoolAssertionFunc
+		expectNF assert.BoolAssertionFunc
+	}{
+		{"Exact - same case", []string{"fA"}, "/fA", assert.True, assert.False},
+		{"Exact - different case", []string{"fa"}, "/fA", assert.True, assert.False},
+		{"Exact - multiple folders", []string{"fA/fB"}, "/fA/fB", assert.True, assert.False},
+		{"Exact - target variations - none", []string{"fA"}, "/fA", assert.True, assert.False},
+		{"Exact - target variations - prefix", []string{"/fA"}, "/fA", assert.True, assert.False},
+		{"Exact - target variations - suffix", []string{"fA/"}, "/fA", assert.True, assert.False},
+		{"Exact - target variations - both", []string{"/fA/"}, "/fA", assert.True, assert.False},
+		{"Exact - input variations - none", []string{"fA"}, "fA", assert.True, assert.False},
+		{"Exact - input variations - prefix", []string{"fA"}, "/fA", assert.True, assert.False},
+		{"Exact - input variations - suffix", []string{"fA"}, "fA/", assert.True, assert.False},
+		{"Exact - input variations - both", []string{"fA"}, "/fA/", assert.True, assert.False},
+		{"Partial match", []string{"f"}, "/fA/", assert.False, assert.True},
+		{"Suffix - same case", []string{"fB"}, "/fA/fB", assert.False, assert.True},
+		{"Suffix - different case", []string{"fb"}, "/fA/fB", assert.False, assert.True},
+		{"Prefix - same case", []string{"fA"}, "/fA/fB", assert.False, assert.True},
+		{"Prefix - different case", []string{"fa"}, "/fA/fB", assert.False, assert.True},
+		{"Contains - same case", []string{"fB"}, "/fA/fB/fC", assert.False, assert.True},
+		{"Contains - different case", []string{"fb"}, "/fA/fB/fC", assert.False, assert.True},
+		{"Slice - one matches", []string{"foo", "/fA/fb", "fb"}, "/fA/fb", assert.True, assert.True},
+		{"Slice - none match", []string{"foo", "fa/f", "f"}, "/fA/fb", assert.False, assert.True},
+	}
+	for _, test := range table {
+		suite.T().Run(test.name, func(t *testing.T) {
+			f := filters.PathEquals(test.targets)
+			nf := filters.NotPathEquals(test.targets)
+
+			test.expectF(t, f.Compare(test.input), "filter")
+			test.expectNF(t, nf.Compare(test.input), "negated filter")
+		})
+	}
+}
+
+func (suite *FiltersSuite) TestPathEquals_NormalizedTargets() {
+	table := []struct {
+		name    string
+		targets []string
+		expect  []string
+	}{
+		{"Single - no slash", []string{"fA"}, []string{"/fA/"}},
+		{"Single - pre slash", []string{"/fA"}, []string{"/fA/"}},
+		{"Single - suff slash", []string{"fA/"}, []string{"/fA/"}},
+		{"Single - both slashes", []string{"/fA/"}, []string{"/fA/"}},
+		{"Multipath - no slash", []string{"fA/fB"}, []string{"/fA/fB/"}},
+		{"Multipath - pre slash", []string{"/fA/fB"}, []string{"/fA/fB/"}},
+		{"Multipath - suff slash", []string{"fA/fB/"}, []string{"/fA/fB/"}},
+		{"Multipath - both slashes", []string{"/fA/fB/"}, []string{"/fA/fB/"}},
+		{"Multi input - no slash", []string{"fA", "fB"}, []string{"/fA/", "/fB/"}},
+		{"Multi input - pre slash", []string{"/fA", "/fB"}, []string{"/fA/", "/fB/"}},
+		{"Multi input - suff slash", []string{"fA/", "fB/"}, []string{"/fA/", "/fB/"}},
+		{"Multi input - both slashes", []string{"/fA/", "/fB/"}, []string{"/fA/", "/fB/"}},
+	}
+	for _, test := range table {
+		suite.T().Run(test.name, func(t *testing.T) {
+			f := filters.PathEquals(test.targets)
+			assert.Equal(t, test.expect, f.NormalizedTargets)
+		})
+	}
+}

diff --git a/src/pkg/selectors/selectors.go b/src/pkg/selectors/selectors.go
index 8a76b8f38..8a9c02337 100644
--- a/src/pkg/selectors/selectors.go
+++ b/src/pkg/selectors/selectors.go
@@ -343,6 +343,7 @@ type scopeConfig struct {
 	usePathFilter   bool
 	usePrefixFilter bool
 	useSuffixFilter bool
+	useEqualsFilter bool
 }
 
 type option func(*scopeConfig)
@@ -371,6 +372,15 @@ func SuffixMatch() option {
 	}
 }
 
+// ExactMatch ensures the selector uses an Equals comparator, instead
+// of contains. Will not override a default Any() or None()
+// comparator.
+func ExactMatch() option {
+	return func(sc *scopeConfig) {
+		sc.useEqualsFilter = true
+	}
+}
+
 // pathComparator is an internal-facing option. It is assumed that scope
 // constructors will provide the pathComparator option whenever a folder-
 // level scope (ie, a scope that compares path hierarchies) is created.
@@ -433,6 +443,10 @@ func filterize(sc scopeConfig, s ...string) filters.Filter {
 	}
 
 	if sc.usePathFilter {
+		if sc.useEqualsFilter {
+			return filters.PathEquals(s)
+		}
+
 		if sc.usePrefixFilter {
 			return filters.PathPrefix(s)
 		}

From 129d6b0b0c2d62b0cf6523a951f9f3b7dc992629 Mon Sep 17 00:00:00 2001
From: ashmrtn
Date: Wed, 8 Feb 2023 14:05:38 -0800
Subject: [PATCH 31/45] Add Fetch() to RestoreCollection (#2434)

## Description

Add a function to fetch a file from the collection synchronously. This will help avoid data dependencies on the restore path created by splitting item information across multiple kopia files.

The Fetch function is currently unoptimized, though deeper analysis of memory footprint should be done before changing it.

Viewing by commit will help reduce chaff from updating tests to comply with the new interface.
- [ ] :white_check_mark: Yes, it's included - [ ] :clock1: Yes, but in a later PR - [x] :no_entry: No ## Type of change - [x] :sunflower: Feature - [ ] :bug: Bugfix - [ ] :world_map: Documentation - [ ] :robot: Test - [ ] :computer: CI/Deployment - [ ] :broom: Tech Debt/Cleanup ## Issue(s) * #1535 ## Test Plan - [ ] :muscle: Manual - [x] :zap: Unit test - [ ] :green_heart: E2E --- src/cmd/factory/impl/common.go | 2 +- .../exchange/data_collections_test.go | 8 +- .../connector/graph_connector_helper_test.go | 8 +- .../mockconnector/mock_data_collection.go | 11 +- .../connector/onedrive/collections_test.go | 6 +- src/internal/data/data_collection.go | 4 + src/internal/kopia/data_collection.go | 29 ++- src/internal/kopia/data_collection_test.go | 175 ++++++++++++++++++ src/internal/kopia/wrapper.go | 8 +- .../operations/backup_integration_test.go | 2 +- src/internal/operations/manifests_test.go | 53 ++++-- src/internal/operations/restore_test.go | 6 +- 12 files changed, 275 insertions(+), 37 deletions(-) diff --git a/src/cmd/factory/impl/common.go b/src/cmd/factory/impl/common.go index 2279c71a3..45d2b7a18 100644 --- a/src/cmd/factory/impl/common.go +++ b/src/cmd/factory/impl/common.go @@ -175,7 +175,7 @@ func buildCollections( mc.Data[i] = c.items[i].data } - collections = append(collections, mc) + collections = append(collections, data.NotFoundRestoreCollection{Collection: mc}) } return collections, nil diff --git a/src/internal/connector/exchange/data_collections_test.go b/src/internal/connector/exchange/data_collections_test.go index 42b24c9f3..3df22d030 100644 --- a/src/internal/connector/exchange/data_collections_test.go +++ b/src/internal/connector/exchange/data_collections_test.go @@ -174,7 +174,9 @@ func (suite *DataCollectionsUnitSuite) TestParseMetadataCollections() { ) require.NoError(t, err) - cdps, err := parseMetadataCollections(ctx, []data.RestoreCollection{coll}) + cdps, err := parseMetadataCollections(ctx, []data.RestoreCollection{ + data.NotFoundRestoreCollection{Collection: coll}, + }) test.expectError(t, err) emails := cdps[path.EmailCategory] @@ -345,7 +347,9 @@ func (suite *DataCollectionsIntegrationSuite) TestDelta() { require.NotNil(t, metadata, "collections contains a metadata collection") - cdps, err := parseMetadataCollections(ctx, []data.RestoreCollection{metadata}) + cdps, err := parseMetadataCollections(ctx, []data.RestoreCollection{ + data.NotFoundRestoreCollection{Collection: metadata}, + }) require.NoError(t, err) dps := cdps[test.scope.Category().PathType()] diff --git a/src/internal/connector/graph_connector_helper_test.go b/src/internal/connector/graph_connector_helper_test.go index 299509f96..ad6ea556e 100644 --- a/src/internal/connector/graph_connector_helper_test.go +++ b/src/internal/connector/graph_connector_helper_test.go @@ -989,7 +989,9 @@ func collectionsForInfo( } } - collections = append(collections, c) + collections = append(collections, data.NotFoundRestoreCollection{ + Collection: c, + }) kopiaEntries += len(info.items) } @@ -1034,7 +1036,9 @@ func collectionsForInfoVersion0( baseExpected[info.items[i].lookupKey] = info.items[i].data } - collections = append(collections, c) + collections = append(collections, data.NotFoundRestoreCollection{ + Collection: c, + }) totalItems += len(info.items) kopiaEntries += len(info.items) } diff --git a/src/internal/connector/mockconnector/mock_data_collection.go b/src/internal/connector/mockconnector/mock_data_collection.go index 8cd315d0d..0b643a699 100644 --- 
a/src/internal/connector/mockconnector/mock_data_collection.go +++ b/src/internal/connector/mockconnector/mock_data_collection.go @@ -27,13 +27,10 @@ type MockExchangeDataCollection struct { } var ( - // Needs to implement both backup and restore interfaces so we can use it in - // integration tests. - _ data.BackupCollection = &MockExchangeDataCollection{} - _ data.RestoreCollection = &MockExchangeDataCollection{} - _ data.Stream = &MockExchangeData{} - _ data.StreamInfo = &MockExchangeData{} - _ data.StreamSize = &MockExchangeData{} + _ data.BackupCollection = &MockExchangeDataCollection{} + _ data.Stream = &MockExchangeData{} + _ data.StreamInfo = &MockExchangeData{} + _ data.StreamSize = &MockExchangeData{} ) // NewMockExchangeDataCollection creates an data collection that will return the specified number of diff --git a/src/internal/connector/onedrive/collections_test.go b/src/internal/connector/onedrive/collections_test.go index 1fae8ee9a..3cc5dbcb5 100644 --- a/src/internal/connector/onedrive/collections_test.go +++ b/src/internal/connector/onedrive/collections_test.go @@ -996,7 +996,7 @@ func (suite *OneDriveCollectionsSuite) TestDeserializeMetadata() { ) require.NoError(t, err) - cols = append(cols, mc) + cols = append(cols, data.NotFoundRestoreCollection{Collection: mc}) } deltas, paths, err := deserializeMetadata(ctx, cols) @@ -1529,7 +1529,9 @@ func (suite *OneDriveCollectionsSuite) TestGet() { for _, baseCol := range cols { folderPath := baseCol.FullPath().String() if folderPath == metadataPath.String() { - deltas, paths, err := deserializeMetadata(ctx, []data.RestoreCollection{baseCol}) + deltas, paths, err := deserializeMetadata(ctx, []data.RestoreCollection{ + data.NotFoundRestoreCollection{Collection: baseCol}, + }) if !assert.NoError(t, err, "deserializing metadata") { continue } diff --git a/src/internal/data/data_collection.go b/src/internal/data/data_collection.go index 794b4bc16..beeffd3d7 100644 --- a/src/internal/data/data_collection.go +++ b/src/internal/data/data_collection.go @@ -68,6 +68,10 @@ type BackupCollection interface { // RestoreCollection is an extension of Collection that is used during restores. type RestoreCollection interface { Collection + // Fetch retrieves an item with the given name from the Collection if it + // exists. Items retrieved with Fetch may still appear in the channel returned + // by Items(). 
+ Fetch(ctx context.Context, name string) (Stream, error) } // NotFoundRestoreCollection is a wrapper for a Collection that returns diff --git a/src/internal/kopia/data_collection.go b/src/internal/kopia/data_collection.go index 262ebd849..23ab824a5 100644 --- a/src/internal/kopia/data_collection.go +++ b/src/internal/kopia/data_collection.go @@ -1,8 +1,12 @@ package kopia import ( + "context" "io" + "github.com/alcionai/clues" + "github.com/kopia/kopia/fs" + "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/pkg/path" ) @@ -13,8 +17,10 @@ var ( ) type kopiaDataCollection struct { - path path.Path - streams []data.Stream + path path.Path + streams []data.Stream + snapshotRoot fs.Entry + counter ByteCounter } func (kdc *kopiaDataCollection) Items() <-chan data.Stream { @@ -35,6 +41,25 @@ func (kdc kopiaDataCollection) FullPath() path.Path { return kdc.path } +func (kdc kopiaDataCollection) Fetch( + ctx context.Context, + name string, +) (data.Stream, error) { + if kdc.snapshotRoot == nil { + return nil, clues.New("no snapshot root") + } + + p, err := kdc.FullPath().Append(name, true) + if err != nil { + return nil, clues.Wrap(err, "creating item path") + } + + // TODO(ashmrtn): We could possibly hold a reference to the folder this + // collection corresponds to, but that requires larger changes for the + // creation of these collections. + return getItemStream(ctx, p, kdc.snapshotRoot, kdc.counter) +} + type kopiaDataStream struct { reader io.ReadCloser uuid string diff --git a/src/internal/kopia/data_collection_test.go b/src/internal/kopia/data_collection_test.go index c28c54af8..7c2ed4894 100644 --- a/src/internal/kopia/data_collection_test.go +++ b/src/internal/kopia/data_collection_test.go @@ -2,14 +2,20 @@ package kopia import ( "bytes" + "context" + "errors" "io" "testing" + "github.com/kopia/kopia/fs" + "github.com/kopia/kopia/fs/virtualfs" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + "github.com/alcionai/corso/src/internal/connector/mockconnector" "github.com/alcionai/corso/src/internal/data" + "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/pkg/path" ) @@ -113,3 +119,172 @@ func (suite *KopiaDataCollectionUnitSuite) TestReturnsStreams() { }) } } + +// These types are needed because we check that a fs.File was returned. +// Unfortunately fs.StreamingFile and fs.File have different interfaces so we +// have to fake things. +type mockSeeker struct{} + +func (s mockSeeker) Seek(offset int64, whence int) (int64, error) { + return 0, errors.New("not implemented") +} + +type mockReader struct { + io.ReadCloser + mockSeeker +} + +func (r mockReader) Entry() (fs.Entry, error) { + return nil, errors.New("not implemented") +} + +type mockFile struct { + // Use for Entry interface. + fs.StreamingFile + r io.ReadCloser +} + +func (f *mockFile) Open(ctx context.Context) (fs.Reader, error) { + return mockReader{ReadCloser: f.r}, nil +} + +func (suite *KopiaDataCollectionUnitSuite) TestFetch() { + var ( + tenant = "a-tenant" + user = "a-user" + service = path.ExchangeService.String() + category = path.EmailCategory + folder1 = "folder1" + folder2 = "folder2" + + noErrFileName = "noError" + errFileName = "error" + + noErrFileData = "foo bar baz" + + errReader = &mockconnector.MockExchangeData{ + ReadErr: assert.AnError, + } + ) + + // Needs to be a function so we can switch the serialization version as + // needed. 
+ getLayout := func(serVersion uint32) fs.Entry { + return virtualfs.NewStaticDirectory(encodeAsPath(tenant), []fs.Entry{ + virtualfs.NewStaticDirectory(encodeAsPath(service), []fs.Entry{ + virtualfs.NewStaticDirectory(encodeAsPath(user), []fs.Entry{ + virtualfs.NewStaticDirectory(encodeAsPath(category.String()), []fs.Entry{ + virtualfs.NewStaticDirectory(encodeAsPath(folder1), []fs.Entry{ + virtualfs.NewStaticDirectory(encodeAsPath(folder2), []fs.Entry{ + &mockFile{ + StreamingFile: virtualfs.StreamingFileFromReader( + encodeAsPath(noErrFileName), + nil, + ), + r: newBackupStreamReader( + serVersion, + io.NopCloser(bytes.NewReader([]byte(noErrFileData))), + ), + }, + &mockFile{ + StreamingFile: virtualfs.StreamingFileFromReader( + encodeAsPath(errFileName), + nil, + ), + r: newBackupStreamReader( + serVersion, + errReader.ToReader(), + ), + }, + }), + }), + }), + }), + }), + }) + } + + b := path.Builder{}.Append(folder1, folder2) + pth, err := b.ToDataLayerExchangePathForCategory( + tenant, + user, + category, + false, + ) + require.NoError(suite.T(), err) + + table := []struct { + name string + inputName string + inputSerializationVersion uint32 + expectedData []byte + lookupErr assert.ErrorAssertionFunc + readErr assert.ErrorAssertionFunc + notFoundErr bool + }{ + { + name: "FileFound_NoError", + inputName: noErrFileName, + inputSerializationVersion: serializationVersion, + expectedData: []byte(noErrFileData), + lookupErr: assert.NoError, + readErr: assert.NoError, + }, + { + name: "FileFound_ReadError", + inputName: errFileName, + inputSerializationVersion: serializationVersion, + lookupErr: assert.NoError, + readErr: assert.Error, + }, + { + name: "FileFound_VersionError", + inputName: noErrFileName, + inputSerializationVersion: serializationVersion + 1, + lookupErr: assert.NoError, + readErr: assert.Error, + }, + { + name: "FileNotFound", + inputName: "foo", + inputSerializationVersion: serializationVersion + 1, + lookupErr: assert.Error, + notFoundErr: true, + }, + } + for _, test := range table { + suite.Run(test.name, func() { + ctx, flush := tester.NewContext() + defer flush() + + t := suite.T() + + root := getLayout(test.inputSerializationVersion) + c := &i64counter{} + + col := &kopiaDataCollection{path: pth, snapshotRoot: root, counter: c} + + s, err := col.Fetch(ctx, test.inputName) + + test.lookupErr(t, err) + + if err != nil { + if test.notFoundErr { + assert.ErrorIs(t, err, data.ErrNotFound) + } + + return + } + + fileData, err := io.ReadAll(s.ToReader()) + + test.readErr(t, err) + + if err != nil { + return + } + + assert.Equal(t, test.expectedData, fileData) + }) + } +} diff --git a/src/internal/kopia/wrapper.go b/src/internal/kopia/wrapper.go index d49241d36..c829b5ccf 100644 --- a/src/internal/kopia/wrapper.go +++ b/src/internal/kopia/wrapper.go @@ -412,13 +412,19 @@ func (w Wrapper) RestoreMultipleItems( c, ok := cols[parentPath.ShortRef()] if !ok { - cols[parentPath.ShortRef()] = &kopiaDataCollection{path: parentPath} + cols[parentPath.ShortRef()] = &kopiaDataCollection{ + path: parentPath, + snapshotRoot: snapshotRoot, + counter: bcounter, + } c = cols[parentPath.ShortRef()] } c.streams = append(c.streams, ds) } + // Can't use the maps package to extract the values because we need to convert + // from *kopiaDataCollection to data.RestoreCollection too. 
res := make([]data.RestoreCollection, 0, len(cols)) for _, c := range cols { res = append(res, c) diff --git a/src/internal/operations/backup_integration_test.go b/src/internal/operations/backup_integration_test.go index 83033ab3b..d20be4c31 100644 --- a/src/internal/operations/backup_integration_test.go +++ b/src/internal/operations/backup_integration_test.go @@ -410,7 +410,7 @@ func buildCollections( mc.Data[i] = c.items[i].data } - collections = append(collections, mc) + collections = append(collections, data.NotFoundRestoreCollection{Collection: mc}) } return collections diff --git a/src/internal/operations/manifests_test.go b/src/internal/operations/manifests_test.go index d30f72726..24c948320 100644 --- a/src/internal/operations/manifests_test.go +++ b/src/internal/operations/manifests_test.go @@ -435,7 +435,7 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() { getMeta bool assertErr assert.ErrorAssertionFunc assertB assert.BoolAssertionFunc - expectDCS []data.RestoreCollection + expectDCS []mockColl expectNilMans bool }{ { @@ -539,7 +539,7 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() { name: "man missing backup id", mr: mockManifestRestorer{ mockRestorer: mockRestorer{collsByID: map[string][]data.RestoreCollection{ - "id": {mockColl{id: "id_coll"}}, + "id": {data.NotFoundRestoreCollection{Collection: mockColl{id: "id_coll"}}}, }}, mans: []*kopia.ManifestEntry{makeMan(path.EmailCategory, "id", "", "")}, }, @@ -566,8 +566,8 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() { name: "one complete, one incomplete", mr: mockManifestRestorer{ mockRestorer: mockRestorer{collsByID: map[string][]data.RestoreCollection{ - "id": {mockColl{id: "id_coll"}}, - "incmpl_id": {mockColl{id: "incmpl_id_coll"}}, + "id": {data.NotFoundRestoreCollection{Collection: mockColl{id: "id_coll"}}}, + "incmpl_id": {data.NotFoundRestoreCollection{Collection: mockColl{id: "incmpl_id_coll"}}}, }}, mans: []*kopia.ManifestEntry{ makeMan(path.EmailCategory, "id", "", "bid"), @@ -579,13 +579,13 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() { getMeta: true, assertErr: assert.NoError, assertB: assert.True, - expectDCS: []data.RestoreCollection{mockColl{id: "id_coll"}}, + expectDCS: []mockColl{{id: "id_coll"}}, }, { name: "single valid man", mr: mockManifestRestorer{ mockRestorer: mockRestorer{collsByID: map[string][]data.RestoreCollection{ - "id": {mockColl{id: "id_coll"}}, + "id": {data.NotFoundRestoreCollection{Collection: mockColl{id: "id_coll"}}}, }}, mans: []*kopia.ManifestEntry{makeMan(path.EmailCategory, "id", "", "bid")}, }, @@ -594,14 +594,14 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() { getMeta: true, assertErr: assert.NoError, assertB: assert.True, - expectDCS: []data.RestoreCollection{mockColl{id: "id_coll"}}, + expectDCS: []mockColl{{id: "id_coll"}}, }, { name: "multiple valid mans", mr: mockManifestRestorer{ mockRestorer: mockRestorer{collsByID: map[string][]data.RestoreCollection{ - "mail": {mockColl{id: "mail_coll"}}, - "contact": {mockColl{id: "contact_coll"}}, + "mail": {data.NotFoundRestoreCollection{Collection: mockColl{id: "mail_coll"}}}, + "contact": {data.NotFoundRestoreCollection{Collection: mockColl{id: "contact_coll"}}}, }}, mans: []*kopia.ManifestEntry{ makeMan(path.EmailCategory, "mail", "", "bid"), @@ -613,9 +613,9 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() { getMeta: true, assertErr: 
assert.NoError, assertB: assert.True, - expectDCS: []data.RestoreCollection{ - mockColl{id: "mail_coll"}, - mockColl{id: "contact_coll"}, + expectDCS: []mockColl{ + {id: "mail_coll"}, + {id: "contact_coll"}, }, }, { @@ -658,16 +658,33 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() { expect, got := []string{}, []string{} for _, dc := range test.expectDCS { - mc, ok := dc.(mockColl) - assert.True(t, ok) - - expect = append(expect, mc.id) + expect = append(expect, dc.id) } for _, dc := range dcs { - mc, ok := dc.(mockColl) - assert.True(t, ok) + if !assert.IsTypef( + t, + data.NotFoundRestoreCollection{}, + dc, + "unexpected type returned [%T]", + dc, + ) { + continue + } + tmp := dc.(data.NotFoundRestoreCollection) + + if !assert.IsTypef( + t, + mockColl{}, + tmp.Collection, + "unexpected type returned [%T]", + tmp.Collection, + ) { + continue + } + + mc := tmp.Collection.(mockColl) got = append(got, mc.id) } diff --git a/src/internal/operations/restore_test.go b/src/internal/operations/restore_test.go index a3974ae64..42c78bbbe 100644 --- a/src/internal/operations/restore_test.go +++ b/src/internal/operations/restore_test.go @@ -62,7 +62,11 @@ func (suite *RestoreOpSuite) TestRestoreOperation_PersistResults() { bytesRead: &stats.ByteCounter{ NumBytes: 42, }, - cs: []data.RestoreCollection{&mockconnector.MockExchangeDataCollection{}}, + cs: []data.RestoreCollection{ + data.NotFoundRestoreCollection{ + Collection: &mockconnector.MockExchangeDataCollection{}, + }, + }, gc: &support.ConnectorOperationStatus{ ObjectCount: 1, Successful: 1, From 924d345ace42303a7ba5dbf6b4b53e2602ac5820 Mon Sep 17 00:00:00 2001 From: Keepers Date: Wed, 8 Feb 2023 16:25:26 -0700 Subject: [PATCH 32/45] add fault/clues to the rest of internal/kopia (#2375) ## Does this PR need a docs update or release note? - [x] :no_entry: No ## Type of change - [x] :broom: Tech Debt/Cleanup ## Issue(s) * #1970 ## Test Plan - [x] :zap: Unit test - [x] :green_heart: E2E --- src/internal/kopia/conn.go | 116 +++++++++--------- src/internal/kopia/conn_test.go | 2 +- src/internal/kopia/model_store.go | 161 ++++++++++++++----------- src/internal/kopia/s3.go | 10 +- src/internal/kopia/snapshot_manager.go | 37 ++---- src/internal/kopia/upload.go | 39 +++--- src/pkg/repository/repository.go | 4 +- 7 files changed, 190 insertions(+), 179 deletions(-) diff --git a/src/internal/kopia/conn.go b/src/internal/kopia/conn.go index 1756de6ea..5a775cb4d 100644 --- a/src/internal/kopia/conn.go +++ b/src/internal/kopia/conn.go @@ -6,6 +6,7 @@ import ( "sync" "time" + "github.com/alcionai/clues" "github.com/kopia/kopia/fs" "github.com/kopia/kopia/repo" "github.com/kopia/kopia/repo/blob" @@ -17,7 +18,6 @@ import ( "github.com/kopia/kopia/snapshot/snapshotfs" "github.com/pkg/errors" - "github.com/alcionai/corso/src/internal/common" "github.com/alcionai/corso/src/pkg/storage" ) @@ -29,11 +29,9 @@ const ( defaultSchedulingInterval = time.Second * 0 ) -const defaultConfigErrTmpl = "setting default repo config values" - var ( - errInit = errors.New("initializing repo") - errConnect = errors.New("connecting repo") + ErrSettingDefaultConfig = errors.New("setting default repo config values") + ErrorRepoAlreadyExists = errors.New("repo already exists") ) // Having all fields set to 0 causes it to keep max-int versions of snapshots. 
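The exported sentinels above replace the wrapper error types deleted in the next hunk. Because `clues.Stack` keeps the whole chain visible to `errors.Is`, callers can test for the sentinel with the standard library alone. A minimal sketch of the pattern, with a stand-in cause error in place of kopia's `repo.ErrAlreadyInitialized`:

```go
// Sketch only: the cause error stands in for kopia's repo.ErrAlreadyInitialized.
package main

import (
	"context"
	"errors"
	"fmt"

	"github.com/alcionai/clues"
)

var ErrorRepoAlreadyExists = errors.New("repo already exists")

func initialize(ctx context.Context) error {
	ctx = clues.Add(ctx, "repo_id", "example")

	cause := errors.New("repository already initialized")

	// stack the exported sentinel over the cause, binding the ctx values
	// to the error as it crosses the package boundary.
	return clues.Stack(ErrorRepoAlreadyExists, cause).WithClues(ctx)
}

func main() {
	err := initialize(context.Background())

	// no bespoke IsRepoAlreadyExistsError helper required.
	fmt.Println(errors.Is(err, ErrorRepoAlreadyExists)) // true

	// the structured context rides along for logging, e.g. [repo_id example].
	fmt.Println(clues.InErr(err).Slice())
}
```

The conn_test.go change further down, asserting `assert.ErrorIs(t, err, ErrorRepoAlreadyExists)`, is the test-side half of this same refactor.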
@@ -53,19 +51,6 @@ type snapshotLoader interface {
 	SnapshotRoot(man *snapshot.Manifest) (fs.Entry, error)
 }
 
-type ErrorRepoAlreadyExists struct {
-	common.Err
-}
-
-func RepoAlreadyExistsError(e error) error {
-	return ErrorRepoAlreadyExists{*common.EncapsulateError(e)}
-}
-
-func IsRepoAlreadyExistsError(e error) bool {
-	var erae ErrorRepoAlreadyExists
-	return errors.As(e, &erae)
-}
-
 var (
 	_ snapshotManager = &conn{}
 	_ snapshotLoader  = &conn{}
@@ -87,22 +72,22 @@ func NewConn(s storage.Storage) *conn {
 func (w *conn) Initialize(ctx context.Context) error {
 	bst, err := blobStoreByProvider(ctx, w.storage)
 	if err != nil {
-		return errors.Wrap(err, errInit.Error())
+		return errors.Wrap(err, "initializing storage")
 	}
 	defer bst.Close(ctx)
 
 	cfg, err := w.storage.CommonConfig()
 	if err != nil {
-		return err
+		return clues.Stack(err).WithClues(ctx)
 	}
 
 	// todo - issue #75: nil here should be a storage.NewRepoOptions()
 	if err = repo.Initialize(ctx, bst, nil, cfg.CorsoPassphrase); err != nil {
 		if errors.Is(err, repo.ErrAlreadyInitialized) {
-			return RepoAlreadyExistsError(err)
+			return clues.Stack(ErrorRepoAlreadyExists, err).WithClues(ctx)
 		}
 
-		return errors.Wrap(err, errInit.Error())
+		return clues.Wrap(err, "initializing repo").WithClues(ctx)
 	}
 
 	return w.commonConnect(
@@ -117,13 +102,13 @@ func (w *conn) Initialize(ctx context.Context) error {
 func (w *conn) Connect(ctx context.Context) error {
 	bst, err := blobStoreByProvider(ctx, w.storage)
 	if err != nil {
-		return errors.Wrap(err, errInit.Error())
+		return errors.Wrap(err, "initializing storage")
 	}
 	defer bst.Close(ctx)
 
 	cfg, err := w.storage.CommonConfig()
 	if err != nil {
-		return err
+		return clues.Stack(err).WithClues(ctx)
 	}
 
 	return w.commonConnect(
@@ -162,14 +147,18 @@ func (w *conn) commonConnect(
 		password,
 		opts,
 	); err != nil {
-		return errors.Wrap(err, errConnect.Error())
+		return clues.Wrap(err, "connecting to repo").WithClues(ctx)
 	}
 
 	if err := w.open(ctx, cfgFile, password); err != nil {
-		return err
+		return clues.Stack(err).WithClues(ctx)
 	}
 
-	return w.setDefaultConfigValues(ctx)
+	if err := w.setDefaultConfigValues(ctx); err != nil {
+		return clues.Stack(err).WithClues(ctx)
+	}
+
+	return nil
 }
 
 func blobStoreByProvider(ctx context.Context, s storage.Storage) (blob.Storage, error) {
@@ -177,7 +166,7 @@ func blobStoreByProvider(ctx context.Context, s storage.Storage) (blob.Storage,
 	case storage.ProviderS3:
 		return s3BlobStorage(ctx, s)
 	default:
-		return nil, errors.New("storage provider details are required")
+		return nil, clues.New("storage provider details are required").WithClues(ctx)
 	}
 }
 
@@ -204,7 +193,11 @@ func (w *conn) close(ctx context.Context) error {
 	err := w.Repository.Close(ctx)
 	w.Repository = nil
 
-	return errors.Wrap(err, "closing repository connection")
+	if err != nil {
+		return clues.Wrap(err, "closing repository connection").WithClues(ctx)
+	}
+
+	return nil
 }
 
 func (w *conn) open(ctx context.Context, configPath, password string) error {
@@ -216,7 +209,7 @@ func (w *conn) open(ctx context.Context, configPath, password string) error {
 	// TODO(ashmrtnz): issue #75: nil here should be storage.ConnectionOptions().
rep, err := repo.Open(ctx, configPath, password, nil) if err != nil { - return errors.Wrap(err, "opening repository connection") + return clues.Wrap(err, "opening repository connection").WithClues(ctx) } w.Repository = rep @@ -229,7 +222,7 @@ func (w *conn) wrap() error { defer w.mu.Unlock() if w.refCount == 0 { - return errors.New("conn already closed") + return clues.New("conn already closed") } w.refCount++ @@ -240,12 +233,12 @@ func (w *conn) wrap() error { func (w *conn) setDefaultConfigValues(ctx context.Context) error { p, err := w.getGlobalPolicyOrEmpty(ctx) if err != nil { - return errors.Wrap(err, defaultConfigErrTmpl) + return clues.Stack(ErrSettingDefaultConfig, err) } changed, err := updateCompressionOnPolicy(defaultCompressor, p) if err != nil { - return errors.Wrap(err, defaultConfigErrTmpl) + return clues.Stack(ErrSettingDefaultConfig, err) } if updateRetentionOnPolicy(defaultRetention, p) { @@ -260,10 +253,11 @@ func (w *conn) setDefaultConfigValues(ctx context.Context) error { return nil } - return errors.Wrap( - w.writeGlobalPolicy(ctx, "UpdateGlobalPolicyWithDefaults", p), - "updating global policy with defaults", - ) + if err := w.writeGlobalPolicy(ctx, "UpdateGlobalPolicyWithDefaults", p); err != nil { + return clues.Wrap(err, "updating global policy with defaults") + } + + return nil } // Compression attempts to set the global compression policy for the kopia repo @@ -273,7 +267,7 @@ func (w *conn) Compression(ctx context.Context, compressor string) error { // compressor was given. comp := compression.Name(compressor) if err := checkCompressor(comp); err != nil { - return err + return clues.Stack(err).WithClues(ctx) } p, err := w.getGlobalPolicyOrEmpty(ctx) @@ -283,17 +277,18 @@ func (w *conn) Compression(ctx context.Context, compressor string) error { changed, err := updateCompressionOnPolicy(compressor, p) if err != nil { - return err + return clues.Stack(err).WithClues(ctx) } if !changed { return nil } - return errors.Wrap( - w.writeGlobalPolicy(ctx, "UpdateGlobalCompressionPolicy", p), - "updating global compression policy", - ) + if err := w.writeGlobalPolicy(ctx, "UpdateGlobalCompressionPolicy", p); err != nil { + return clues.Wrap(err, "updating global compression policy") + } + + return nil } func updateCompressionOnPolicy(compressor string, p *policy.Policy) (bool, error) { @@ -349,7 +344,7 @@ func (w *conn) getPolicyOrEmpty(ctx context.Context, si snapshot.SourceInfo) (*p return &policy.Policy{}, nil } - return nil, errors.Wrapf(err, "getting backup policy for %+v", si) + return nil, clues.Wrap(err, "getting backup policy").With("source_info", si).WithClues(ctx) } return p, nil @@ -370,16 +365,22 @@ func (w *conn) writePolicy( si snapshot.SourceInfo, p *policy.Policy, ) error { - err := repo.WriteSession( - ctx, - w.Repository, - repo.WriteSessionOptions{Purpose: purpose}, - func(innerCtx context.Context, rw repo.RepositoryWriter) error { - return policy.SetPolicy(ctx, rw, si, p) - }, - ) + ctx = clues.Add(ctx, "source_info", si) - return errors.Wrapf(err, "updating policy for %+v", si) + writeOpts := repo.WriteSessionOptions{Purpose: purpose} + cb := func(innerCtx context.Context, rw repo.RepositoryWriter) error { + if err := policy.SetPolicy(ctx, rw, si, p); err != nil { + return clues.Stack(err).WithClues(innerCtx) + } + + return nil + } + + if err := repo.WriteSession(ctx, w.Repository, writeOpts, cb); err != nil { + return clues.Wrap(err, "updating policy").WithClues(ctx) + } + + return nil } func checkCompressor(compressor compression.Name) error { @@ 
-389,14 +390,19 @@ func checkCompressor(compressor compression.Name) error { } } - return errors.Errorf("unknown compressor type %s", compressor) + return clues.Stack(clues.New("unknown compressor type"), clues.New(string(compressor))) } func (w *conn) LoadSnapshots( ctx context.Context, ids []manifest.ID, ) ([]*snapshot.Manifest, error) { - return snapshot.LoadSnapshots(ctx, w.Repository, ids) + mans, err := snapshot.LoadSnapshots(ctx, w.Repository, ids) + if err != nil { + return nil, clues.Stack(err).WithClues(ctx) + } + + return mans, nil } func (w *conn) SnapshotRoot(man *snapshot.Manifest) (fs.Entry, error) { diff --git a/src/internal/kopia/conn_test.go b/src/internal/kopia/conn_test.go index 05ff5e110..2b2aaee5c 100644 --- a/src/internal/kopia/conn_test.go +++ b/src/internal/kopia/conn_test.go @@ -85,7 +85,7 @@ func (suite *WrapperIntegrationSuite) TestRepoExistsError() { err := k.Initialize(ctx) assert.Error(t, err) - assert.True(t, IsRepoAlreadyExistsError(err)) + assert.ErrorIs(t, err, ErrorRepoAlreadyExists) } func (suite *WrapperIntegrationSuite) TestBadProviderErrors() { diff --git a/src/internal/kopia/model_store.go b/src/internal/kopia/model_store.go index 2c6661d22..58111dfa9 100644 --- a/src/internal/kopia/model_store.go +++ b/src/internal/kopia/model_store.go @@ -4,6 +4,7 @@ import ( "context" "strconv" + "github.com/alcionai/clues" "github.com/google/uuid" "github.com/kopia/kopia/repo" "github.com/kopia/kopia/repo/manifest" @@ -59,7 +60,7 @@ func (ms *ModelStore) Close(ctx context.Context) error { // bad model type is given. func tagsForModel(s model.Schema, tags map[string]string) (map[string]string, error) { if _, ok := tags[manifest.TypeLabelKey]; ok { - return nil, errors.WithStack(errBadTagKey) + return nil, clues.Stack(errBadTagKey) } res := make(map[string]string, len(tags)+1) @@ -80,11 +81,11 @@ func tagsForModelWithID( tags map[string]string, ) (map[string]string, error) { if !s.Valid() { - return nil, errors.WithStack(errUnrecognizedSchema) + return nil, clues.Stack(errUnrecognizedSchema) } if len(id) == 0 { - return nil, errors.WithStack(errNoStableID) + return nil, clues.Stack(errNoStableID) } res, err := tagsForModel(s, tags) @@ -93,13 +94,13 @@ func tagsForModelWithID( } if _, ok := res[stableIDKey]; ok { - return nil, errors.WithStack(errBadTagKey) + return nil, clues.Stack(errBadTagKey) } res[stableIDKey] = string(id) if _, ok := res[modelVersionKey]; ok { - return nil, errors.WithStack(errBadTagKey) + return nil, clues.Stack(errBadTagKey) } res[modelVersionKey] = strconv.Itoa(version) @@ -117,7 +118,7 @@ func putInner( create bool, ) error { if !s.Valid() { - return errors.WithStack(errUnrecognizedSchema) + return clues.Stack(errUnrecognizedSchema).WithClues(ctx) } base := m.Base() @@ -128,13 +129,13 @@ func putInner( tmpTags, err := tagsForModelWithID(s, base.ID, base.Version, base.Tags) if err != nil { // Will be wrapped at a higher layer. - return err + return clues.Stack(err).WithClues(ctx) } id, err := w.PutManifest(ctx, tmpTags, m) if err != nil { // Will be wrapped at a higher layer. 
- return err + return clues.Stack(err).WithClues(ctx) } base.ModelStoreID = id @@ -150,7 +151,7 @@ func (ms *ModelStore) Put( m model.Model, ) error { if !s.Valid() { - return errors.WithStack(errUnrecognizedSchema) + return clues.Stack(errUnrecognizedSchema) } m.Base().Version = ms.modelVersion @@ -162,14 +163,16 @@ func (ms *ModelStore) Put( func(innerCtx context.Context, w repo.RepositoryWriter) error { err := putInner(innerCtx, w, s, m, true) if err != nil { - return err + return clues.Stack(err).WithClues(innerCtx) } return nil - }, - ) + }) + if err != nil { + return clues.Wrap(err, "putting model").WithClues(ctx) + } - return errors.Wrap(err, "putting model") + return nil } func stripHiddenTags(tags map[string]string) { @@ -184,7 +187,7 @@ func (ms ModelStore) populateBaseModelFromMetadata( ) error { id, ok := m.Labels[stableIDKey] if !ok { - return errors.WithStack(errNoStableID) + return clues.Stack(errNoStableID) } v, err := strconv.Atoi(m.Labels[modelVersionKey]) @@ -193,7 +196,7 @@ func (ms ModelStore) populateBaseModelFromMetadata( } if v != ms.modelVersion { - return errors.Errorf("bad model version %s", m.Labels[modelVersionKey]) + return clues.Wrap(clues.New(m.Labels[modelVersionKey]), "bad model version") } base.ModelStoreID = m.ID @@ -211,7 +214,7 @@ func (ms ModelStore) baseModelFromMetadata( ) (*model.BaseModel, error) { res := &model.BaseModel{} if err := ms.populateBaseModelFromMetadata(res, m); err != nil { - return nil, err + return nil, clues.Stack(err).WithAll("metadata_id", m.ID, "metadata_modtime", m.ModTime) } return res, nil @@ -226,21 +229,21 @@ func (ms *ModelStore) GetIDsForType( tags map[string]string, ) ([]*model.BaseModel, error) { if !s.Valid() { - return nil, errors.WithStack(errUnrecognizedSchema) + return nil, clues.Stack(errUnrecognizedSchema).WithClues(ctx) } if _, ok := tags[stableIDKey]; ok { - return nil, errors.WithStack(errBadTagKey) + return nil, clues.Stack(errBadTagKey).WithClues(ctx) } tmpTags, err := tagsForModel(s, tags) if err != nil { - return nil, errors.Wrap(err, "getting model metadata") + return nil, clues.Wrap(err, "getting model metadata").WithClues(ctx) } metadata, err := ms.c.FindManifests(ctx, tmpTags) if err != nil { - return nil, errors.Wrap(err, "getting model metadata") + return nil, clues.Wrap(err, "getting model metadata").WithClues(ctx) } res := make([]*model.BaseModel, 0, len(metadata)) @@ -248,7 +251,7 @@ func (ms *ModelStore) GetIDsForType( for _, m := range metadata { bm, err := ms.baseModelFromMetadata(m) if err != nil { - return nil, errors.Wrap(err, "parsing model metadata") + return nil, clues.Wrap(err, "parsing model metadata").WithClues(ctx) } res = append(res, bm) @@ -266,30 +269,30 @@ func (ms *ModelStore) getModelStoreID( id model.StableID, ) (manifest.ID, error) { if !s.Valid() { - return "", errors.WithStack(errUnrecognizedSchema) + return "", clues.Stack(errUnrecognizedSchema).WithClues(ctx) } if len(id) == 0 { - return "", errors.WithStack(errNoStableID) + return "", clues.Stack(errNoStableID).WithClues(ctx) } tags := map[string]string{stableIDKey: string(id)} metadata, err := ms.c.FindManifests(ctx, tags) if err != nil { - return "", errors.Wrap(err, "getting ModelStoreID") + return "", clues.Wrap(err, "getting ModelStoreID").WithClues(ctx) } if len(metadata) == 0 { - return "", errors.Wrap(data.ErrNotFound, "getting ModelStoreID") + return "", clues.Wrap(data.ErrNotFound, "getting ModelStoreID").WithClues(ctx) } if len(metadata) != 1 { - return "", errors.New("multiple models with same StableID") + 
return "", clues.New("multiple models with same StableID").WithClues(ctx) } if metadata[0].Labels[manifest.TypeLabelKey] != s.String() { - return "", errors.WithStack(errModelTypeMismatch) + return "", clues.Stack(errModelTypeMismatch).WithClues(ctx) } return metadata[0].ID, nil @@ -305,7 +308,7 @@ func (ms *ModelStore) Get( m model.Model, ) error { if !s.Valid() { - return errors.WithStack(errUnrecognizedSchema) + return clues.Stack(errUnrecognizedSchema).WithClues(ctx) } modelID, err := ms.getModelStoreID(ctx, s, id) @@ -313,7 +316,7 @@ func (ms *ModelStore) Get( return err } - return transmuteErr(ms.GetWithModelStoreID(ctx, s, modelID, m)) + return ms.GetWithModelStoreID(ctx, s, modelID, m) } // GetWithModelStoreID deserializes the model with the given ModelStoreID into @@ -326,26 +329,34 @@ func (ms *ModelStore) GetWithModelStoreID( m model.Model, ) error { if !s.Valid() { - return errors.WithStack(errUnrecognizedSchema) + return clues.Stack(errUnrecognizedSchema).WithClues(ctx) } if len(id) == 0 { - return errors.WithStack(errNoModelStoreID) + return clues.Stack(errNoModelStoreID).WithClues(ctx) } metadata, err := ms.c.GetManifest(ctx, id, m) if err != nil { - return errors.Wrap(transmuteErr(err), "getting model data") + if errors.Is(err, manifest.ErrNotFound) { + err = data.ErrNotFound + } + + return clues.Wrap(err, "getting model data").WithClues(ctx) } - if metadata.Labels[manifest.TypeLabelKey] != s.String() { - return errors.WithStack(errModelTypeMismatch) + mdlbl := metadata.Labels[manifest.TypeLabelKey] + if mdlbl != s.String() { + return clues.Stack(errModelTypeMismatch). + WithClues(ctx). + WithAll("expected_label", s, "got_label", mdlbl) } - return errors.Wrap( - ms.populateBaseModelFromMetadata(m.Base(), metadata), - "getting model by ID", - ) + if err := ms.populateBaseModelFromMetadata(m.Base(), metadata); err != nil { + return clues.Wrap(err, "getting model by ID").WithClues(ctx) + } + + return nil } // checkPrevModelVersion compares the ModelType and ModelStoreID in this model @@ -359,26 +370,31 @@ func (ms *ModelStore) checkPrevModelVersion( b *model.BaseModel, ) error { if !s.Valid() { - return errors.WithStack(errUnrecognizedSchema) + return clues.Stack(errUnrecognizedSchema).WithClues(ctx) } id, err := ms.getModelStoreID(ctx, s, b.ID) if err != nil { - return err + return clues.Stack(err).WithClues(ctx) } // We actually got something back during our lookup. meta, err := ms.c.GetManifest(ctx, id, nil) if err != nil { - return errors.Wrap(err, "getting previous model version") + return clues.Wrap(err, "getting previous model version").WithClues(ctx) } if meta.ID != b.ModelStoreID { - return errors.New("updated model has different ModelStoreID") + return clues.New("updated model has different ModelStoreID"). + WithClues(ctx). + WithAll("expected_id", meta.ID, "model_store_id", b.ModelStoreID) } - if meta.Labels[manifest.TypeLabelKey] != s.String() { - return errors.New("updated model has different model type") + mdlbl := meta.Labels[manifest.TypeLabelKey] + if mdlbl != s.String() { + return clues.New("updated model has different model type"). + WithClues(ctx). 
+ WithAll("expected_label", s, "got_label", mdlbl) } return nil @@ -396,12 +412,12 @@ func (ms *ModelStore) Update( m model.Model, ) error { if !s.Valid() { - return errors.WithStack(errUnrecognizedSchema) + return clues.Stack(errUnrecognizedSchema).WithClues(ctx) } base := m.Base() if len(base.ModelStoreID) == 0 { - return errors.WithStack(errNoModelStoreID) + return clues.Stack(errNoModelStoreID).WithClues(ctx) } base.Version = ms.modelVersion @@ -415,8 +431,11 @@ func (ms *ModelStore) Update( ctx, ms.c, repo.WriteSessionOptions{Purpose: "ModelStoreUpdate"}, - func(innerCtx context.Context, w repo.RepositoryWriter) (innerErr error) { - oldID := base.ModelStoreID + func(innerCtx context.Context, w repo.RepositoryWriter) error { + var ( + innerErr error + oldID = base.ModelStoreID + ) defer func() { if innerErr != nil { @@ -429,19 +448,26 @@ func (ms *ModelStore) Update( return innerErr } + // if equal, everything worked out fine. + // if not, we handle the cleanup below. + if oldID == base.ModelStoreID { + return nil + } + // If we fail at this point no changes will be made to the manifest store // in kopia, making it appear like nothing ever happened. At worst some // orphaned content blobs may be uploaded, but they should be garbage // collected the next time kopia maintenance is run. - if oldID != base.ModelStoreID { - innerErr = w.DeleteManifest(innerCtx, oldID) + innerErr = w.DeleteManifest(innerCtx, oldID) + if innerErr != nil { + return clues.Stack(innerErr).WithClues(ctx) } - return innerErr + return nil }, ) if err != nil { - return errors.Wrap(err, "updating model") + return clues.Wrap(err, "updating model").WithClues(ctx) } return nil @@ -452,7 +478,7 @@ func (ms *ModelStore) Update( // have the same StableID. func (ms *ModelStore) Delete(ctx context.Context, s model.Schema, id model.StableID) error { if !s.Valid() { - return errors.WithStack(errUnrecognizedSchema) + return clues.Stack(errUnrecognizedSchema).WithClues(ctx) } latest, err := ms.getModelStoreID(ctx, s, id) @@ -472,26 +498,17 @@ func (ms *ModelStore) Delete(ctx context.Context, s model.Schema, id model.Stabl // exist. 
func (ms *ModelStore) DeleteWithModelStoreID(ctx context.Context, id manifest.ID) error { if len(id) == 0 { - return errors.WithStack(errNoModelStoreID) + return clues.Stack(errNoModelStoreID).WithClues(ctx) } - err := repo.WriteSession( - ctx, - ms.c, - repo.WriteSessionOptions{Purpose: "ModelStoreDelete"}, - func(innerCtx context.Context, w repo.RepositoryWriter) error { - return w.DeleteManifest(innerCtx, id) - }, - ) - - return errors.Wrap(err, "deleting model") -} - -func transmuteErr(err error) error { - switch { - case errors.Is(err, manifest.ErrNotFound): - return data.ErrNotFound - default: - return err + opts := repo.WriteSessionOptions{Purpose: "ModelStoreDelete"} + cb := func(innerCtx context.Context, w repo.RepositoryWriter) error { + return w.DeleteManifest(innerCtx, id) } + + if err := repo.WriteSession(ctx, ms.c, opts, cb); err != nil { + return clues.Wrap(err, "deleting model").WithClues(ctx) + } + + return nil } diff --git a/src/internal/kopia/s3.go b/src/internal/kopia/s3.go index 3d0f3144e..5810487dc 100644 --- a/src/internal/kopia/s3.go +++ b/src/internal/kopia/s3.go @@ -3,6 +3,7 @@ package kopia import ( "context" + "github.com/alcionai/clues" "github.com/kopia/kopia/repo/blob" "github.com/kopia/kopia/repo/blob/s3" @@ -16,7 +17,7 @@ const ( func s3BlobStorage(ctx context.Context, s storage.Storage) (blob.Storage, error) { cfg, err := s.S3Config() if err != nil { - return nil, err + return nil, clues.Stack(err).WithClues(ctx) } endpoint := defaultS3Endpoint @@ -32,5 +33,10 @@ func s3BlobStorage(ctx context.Context, s storage.Storage) (blob.Storage, error) DoNotVerifyTLS: cfg.DoNotVerifyTLS, } - return s3.New(ctx, &opts, false) + store, err := s3.New(ctx, &opts, false) + if err != nil { + return nil, clues.Stack(err).WithClues(ctx) + } + + return store, nil } diff --git a/src/internal/kopia/snapshot_manager.go b/src/internal/kopia/snapshot_manager.go index f15c8ae99..45b1ffabe 100644 --- a/src/internal/kopia/snapshot_manager.go +++ b/src/internal/kopia/snapshot_manager.go @@ -4,6 +4,7 @@ import ( "context" "sort" + "github.com/alcionai/clues" "github.com/kopia/kopia/repo/manifest" "github.com/kopia/kopia/snapshot" "github.com/pkg/errors" @@ -218,9 +219,7 @@ func fetchPrevManifests( found = append(found, man.Manifest) logger.Ctx(ctx).Infow( "reusing cached complete snapshot", - "snapshot_id", - man.ID, - ) + "snapshot_id", man.ID) } return found, nil @@ -251,29 +250,19 @@ func fetchPrevSnapshotManifests( for _, reason := range reasons { logger.Ctx(ctx).Infow( "searching for previous manifests for reason", - "service", - reason.Service.String(), - "category", - reason.Category.String(), - ) + "service", reason.Service.String(), + "category", reason.Category.String()) - found, err := fetchPrevManifests( - ctx, - sm, - mans, - reason, - tags, - ) + found, err := fetchPrevManifests(ctx, sm, mans, reason, tags) if err != nil { - logger.Ctx(ctx).Warnw( - "fetching previous snapshot manifests for service/category/resource owner", - "error", - err, - "service", - reason.Service.String(), - "category", - reason.Category.String(), - ) + logger.Ctx(ctx). + With( + "err", err, + "service", reason.Service.String(), + "category", reason.Category.String()). + Warnw( + "fetching previous snapshot manifests for service/category/resource owner", + clues.InErr(err).Slice()...) // Snapshot can still complete fine, just not as efficient. 
continue diff --git a/src/internal/kopia/upload.go b/src/internal/kopia/upload.go index a4ae1fbcc..e25e4ed0f 100644 --- a/src/internal/kopia/upload.go +++ b/src/internal/kopia/upload.go @@ -16,7 +16,6 @@ import ( "unsafe" "github.com/alcionai/clues" - "github.com/hashicorp/go-multierror" "github.com/kopia/kopia/fs" "github.com/kopia/kopia/fs/virtualfs" "github.com/kopia/kopia/repo/manifest" @@ -258,29 +257,26 @@ func collectionEntries( cb func(context.Context, fs.Entry) error, streamedEnts data.BackupCollection, progress *corsoProgress, -) (map[string]struct{}, *multierror.Error) { +) (map[string]struct{}, error) { if streamedEnts == nil { return nil, nil } var ( - errs *multierror.Error // Track which items have already been seen so we can skip them if we see // them again in the data from the base snapshot. seen = map[string]struct{}{} items = streamedEnts.Items() - log = logger.Ctx(ctx) ) for { select { case <-ctx.Done(): - errs = multierror.Append(errs, ctx.Err()) - return seen, errs + return seen, clues.Stack(ctx.Err()).WithClues(ctx) case e, ok := <-items: if !ok { - return seen, errs + return seen, nil } encodedName := encodeAsPath(e.UUID()) @@ -304,9 +300,9 @@ func collectionEntries( itemPath, err := streamedEnts.FullPath().Append(e.UUID(), true) if err != nil { err = errors.Wrap(err, "getting full item path") - errs = multierror.Append(errs, err) + progress.errs.Add(err) - log.Error(err) + logger.Ctx(ctx).With("err", err).Errorw("getting full item path", clues.InErr(err).Slice()...) continue } @@ -344,13 +340,12 @@ func collectionEntries( entry := virtualfs.StreamingFileWithModTimeFromReader( encodedName, modTime, - newBackupStreamReader(serializationVersion, e.ToReader()), - ) + newBackupStreamReader(serializationVersion, e.ToReader())) + if err := cb(ctx, entry); err != nil { // Kopia's uploader swallows errors in most cases, so if we see // something here it's probably a big issue and we should return. - errs = multierror.Append(errs, errors.Wrapf(err, "executing callback on %q", itemPath)) - return seen, errs + return seen, clues.Wrap(err, "executing callback").WithClues(ctx).With("item_path", itemPath) } } } @@ -456,11 +451,14 @@ func getStreamItemFunc( // Return static entries in this directory first. 
 		for _, d := range staticEnts {
 			if err := cb(ctx, d); err != nil {
-				return errors.Wrap(err, "executing callback on static directory")
+				return clues.Wrap(err, "executing callback on static directory").WithClues(ctx)
 			}
 		}
 
-		seen, errs := collectionEntries(ctx, cb, streamedEnts, progress)
+		seen, err := collectionEntries(ctx, cb, streamedEnts, progress)
+		if err != nil {
+			return errors.Wrap(err, "streaming collection entries")
+		}
 
 		if err := streamBaseEntries(
 			ctx,
@@ -472,13 +470,10 @@ func getStreamItemFunc(
 			globalExcludeSet,
 			progress,
 		); err != nil {
-			errs = multierror.Append(
-				errs,
-				errors.Wrap(err, "streaming base snapshot entries"),
-			)
+			return errors.Wrap(err, "streaming base snapshot entries")
 		}
 
-		return errs.ErrorOrNil()
+		return nil
 	}
 }
 
@@ -935,9 +930,7 @@ func inflateDirTree(
 
 	logger.Ctx(ctx).Infow(
 		"merging hierarchies from base snapshots",
-		"snapshot_ids",
-		baseIDs,
-	)
+		"snapshot_ids", baseIDs)
 
 	for _, snap := range baseSnaps {
 		if err = inflateBaseTree(ctx, loader, snap, updatedPaths, roots); err != nil {
diff --git a/src/pkg/repository/repository.go b/src/pkg/repository/repository.go
index 518685134..da715f61e 100644
--- a/src/pkg/repository/repository.go
+++ b/src/pkg/repository/repository.go
@@ -98,8 +98,8 @@ func Initialize(
 	kopiaRef := kopia.NewConn(s)
 	if err := kopiaRef.Initialize(ctx); err != nil {
 		// replace common internal errors so that sdk users can check results with errors.Is()
-		if kopia.IsRepoAlreadyExistsError(err) {
-			return nil, clues.Stack(ErrorRepoAlreadyExists).WithClues(ctx)
+		if errors.Is(err, kopia.ErrorRepoAlreadyExists) {
+			return nil, clues.Stack(ErrorRepoAlreadyExists, err).WithClues(ctx)
 		}
 
 		return nil, errors.Wrap(err, "initializing kopia")

From a440aa9a34a455ce944f6889ff40150ac20bd881 Mon Sep 17 00:00:00 2001
From: Vaibhav Kamra
Date: Wed, 8 Feb 2023 17:55:30 -0800
Subject: [PATCH 33/45] Re-enable repository tests (#2451)

## Description

The load tests (`repository_load_test.go`) override the default
[TestMain](https://medium.com/goingogo/why-use-testmain-for-testing-in-go-dafb52b406bc).
This results in the other tests in the repository_test package not running.

The fix is to move the load tests into a sub-package.

## Does this PR need a docs update or release note?
- [ ] :white_check_mark: Yes, it's included
- [ ] :clock1: Yes, but in a later PR
- [x] :no_entry: No

## Type of change

- [ ] :sunflower: Feature
- [ ] :bug: Bugfix
- [ ] :world_map: Documentation
- [x] :robot: Test
- [ ] :computer: CI/Deployment
- [ ] :broom: Tech Debt/Cleanup

## Test Plan

- [ ] :muscle: Manual
- [ ] :zap: Unit test
- [x] :green_heart: E2E
---
 src/Makefile                                              | 2 +-
 src/pkg/repository/{ => loadtest}/repository_load_test.go | 0
 2 files changed, 1 insertion(+), 1 deletion(-)
 rename src/pkg/repository/{ => loadtest}/repository_load_test.go (100%)

diff --git a/src/Makefile b/src/Makefile
index 0d31d26d1..73474de0c 100644
--- a/src/Makefile
+++ b/src/Makefile
@@ -74,4 +74,4 @@ load-test:
 		-mutexprofile=mutex.prof \
 		-trace=trace.out \
 		-outputdir=test_results \
-		./pkg/repository/repository_load_test.go
\ No newline at end of file
+		./pkg/repository/loadtest/repository_load_test.go
\ No newline at end of file
diff --git a/src/pkg/repository/repository_load_test.go b/src/pkg/repository/loadtest/repository_load_test.go
similarity index 100%
rename from src/pkg/repository/repository_load_test.go
rename to src/pkg/repository/loadtest/repository_load_test.go

From 667d2d8e29b292f057acc7ded866e08748d71333 Mon Sep 17 00:00:00 2001
From: Keepers
Date: Wed, 8 Feb 2023 19:50:07 -0700
Subject: [PATCH 34/45] add fault/clues to graph_connector.go (#2376)

## Description

Refactors error handling in graph_connector. Also begins some error
refactoring in support by moving StackTraceError style funcs into a more
normalized handler in graph/errors.go. And removes the (Non)Recoverable
error wraps, which we weren't using anyway.

## Does this PR need a docs update or release note?

- [x] :no_entry: No

## Type of change

- [x] :broom: Tech Debt/Cleanup

## Issue(s)

* #1970

## Test Plan

- [x] :zap: Unit test
- [x] :green_heart: E2E
---
 src/internal/connector/data_collections.go      |  45 ++++++++
 .../connector/exchange/api/contacts.go          |   4 +-
 src/internal/connector/exchange/api/events.go   |   4 +-
 src/internal/connector/exchange/api/mail.go     |   4 +-
 src/internal/connector/graph/errors.go          |  48 ++++++++
 src/internal/connector/graph_connector.go       | 109 ++++--------------
 .../graph_connector_disconnected_test.go        |  52 ---------
 src/internal/connector/support/errors.go        |  23 +---
 src/internal/connector/support/errors_test.go   |  20 ----
 .../operations/backup_integration_test.go       |   3 +-
 src/pkg/fault/fault.go                          |   6 +
 src/pkg/fault/mock/mock.go                      |   7 +-
 12 files changed, 140 insertions(+), 185 deletions(-)

diff --git a/src/internal/connector/data_collections.go b/src/internal/connector/data_collections.go
index 51beb4eb2..4cd167ea6 100644
--- a/src/internal/connector/data_collections.go
+++ b/src/internal/connector/data_collections.go
@@ -16,6 +16,8 @@ import (
 	"github.com/alcionai/corso/src/internal/connector/support"
 	"github.com/alcionai/corso/src/internal/data"
 	D "github.com/alcionai/corso/src/internal/diagnostics"
+	"github.com/alcionai/corso/src/pkg/account"
+	"github.com/alcionai/corso/src/pkg/backup/details"
 	"github.com/alcionai/corso/src/pkg/control"
 	"github.com/alcionai/corso/src/pkg/logger"
 	"github.com/alcionai/corso/src/pkg/path"
@@ -226,3 +228,46 @@ func (gc *GraphConnector) OneDriveDataCollections(
 
 	return collections, allExcludes, errs
 }
+
+// RestoreDataCollections restores data from the specified collections
+// into M365 using the GraphAPI.
+// SideEffect: gc.status is updated at the completion of operation +func (gc *GraphConnector) RestoreDataCollections( + ctx context.Context, + backupVersion int, + acct account.Account, + selector selectors.Selector, + dest control.RestoreDestination, + opts control.Options, + dcs []data.RestoreCollection, +) (*details.Details, error) { + ctx, end := D.Span(ctx, "connector:restore") + defer end() + + var ( + status *support.ConnectorOperationStatus + err error + deets = &details.Builder{} + ) + + creds, err := acct.M365Config() + if err != nil { + return nil, errors.Wrap(err, "malformed azure credentials") + } + + switch selector.Service { + case selectors.ServiceExchange: + status, err = exchange.RestoreExchangeDataCollections(ctx, creds, gc.Service, dest, dcs, deets) + case selectors.ServiceOneDrive: + status, err = onedrive.RestoreCollections(ctx, backupVersion, gc.Service, dest, opts, dcs, deets) + case selectors.ServiceSharePoint: + status, err = sharepoint.RestoreCollections(ctx, backupVersion, creds, gc.Service, dest, dcs, deets) + default: + err = errors.Errorf("restore data from service %s not supported", selector.Service.String()) + } + + gc.incrementAwaitingMessages() + gc.UpdateStatus(status) + + return deets.Details(), err +} diff --git a/src/internal/connector/exchange/api/contacts.go b/src/internal/connector/exchange/api/contacts.go index ac4afeb34..8457c94d1 100644 --- a/src/internal/connector/exchange/api/contacts.go +++ b/src/internal/connector/exchange/api/contacts.go @@ -291,6 +291,8 @@ func (c Contacts) Serialize( return nil, fmt.Errorf("expected Contactable, got %T", item) } + ctx = clues.Add(ctx, "item_id", *contact.GetId()) + var ( err error writer = kioser.NewJsonSerializationWriter() @@ -299,7 +301,7 @@ func (c Contacts) Serialize( defer writer.Close() if err = writer.WriteObjectValue("", contact); err != nil { - return nil, support.SetNonRecoverableError(errors.Wrap(err, itemID)) + return nil, clues.Stack(err).WithClues(ctx) } bs, err := writer.GetSerializedContent() diff --git a/src/internal/connector/exchange/api/events.go b/src/internal/connector/exchange/api/events.go index b9c16f319..adf218685 100644 --- a/src/internal/connector/exchange/api/events.go +++ b/src/internal/connector/exchange/api/events.go @@ -340,6 +340,8 @@ func (c Events) Serialize( return nil, fmt.Errorf("expected Eventable, got %T", item) } + ctx = clues.Add(ctx, "item_id", *event.GetId()) + var ( err error writer = kioser.NewJsonSerializationWriter() @@ -348,7 +350,7 @@ func (c Events) Serialize( defer writer.Close() if err = writer.WriteObjectValue("", event); err != nil { - return nil, support.SetNonRecoverableError(errors.Wrap(err, itemID)) + return nil, clues.Stack(err).WithClues(ctx) } bs, err := writer.GetSerializedContent() diff --git a/src/internal/connector/exchange/api/mail.go b/src/internal/connector/exchange/api/mail.go index 01a485fbb..5ac96b93a 100644 --- a/src/internal/connector/exchange/api/mail.go +++ b/src/internal/connector/exchange/api/mail.go @@ -321,6 +321,8 @@ func (c Mail) Serialize( return nil, fmt.Errorf("expected Messageable, got %T", item) } + ctx = clues.Add(ctx, "item_id", *msg.GetId()) + var ( err error writer = kioser.NewJsonSerializationWriter() @@ -329,7 +331,7 @@ func (c Mail) Serialize( defer writer.Close() if err = writer.WriteObjectValue("", msg); err != nil { - return nil, support.SetNonRecoverableError(errors.Wrap(err, itemID)) + return nil, clues.Stack(err).WithClues(ctx) } bs, err := writer.GetSerializedContent() diff --git 
a/src/internal/connector/graph/errors.go b/src/internal/connector/graph/errors.go index 21116057d..4ac9b3ed5 100644 --- a/src/internal/connector/graph/errors.go +++ b/src/internal/connector/graph/errors.go @@ -2,6 +2,7 @@ package graph import ( "context" + "fmt" "net/url" "os" @@ -176,3 +177,50 @@ func hasErrorCode(err error, codes ...string) bool { return slices.Contains(codes, *oDataError.GetError().GetCode()) } + +// ErrData is a helper function that extracts ODataError metadata from +// the error. If the error is not an ODataError type, returns an empty +// slice. The returned value is guaranteed to be an even-length pairing +// of key, value tuples. +func ErrData(e error) []any { + result := make([]any, 0) + + if e == nil { + return result + } + + odErr, ok := e.(odataerrors.ODataErrorable) + if !ok { + return result + } + + // Get MainError + mainErr := odErr.GetError() + + result = appendIf(result, "odataerror_code", mainErr.GetCode()) + result = appendIf(result, "odataerror_message", mainErr.GetMessage()) + result = appendIf(result, "odataerror_target", mainErr.GetTarget()) + + for i, d := range mainErr.GetDetails() { + pfx := fmt.Sprintf("odataerror_details_%d_", i) + result = appendIf(result, pfx+"code", d.GetCode()) + result = appendIf(result, pfx+"message", d.GetMessage()) + result = appendIf(result, pfx+"target", d.GetTarget()) + } + + inner := mainErr.GetInnererror() + if inner != nil { + result = appendIf(result, "odataerror_inner_cli_req_id", inner.GetClientRequestId()) + result = appendIf(result, "odataerror_inner_req_id", inner.GetRequestId()) + } + + return result +} + +func appendIf(a []any, k string, v *string) []any { + if v == nil { + return a + } + + return append(a, k, *v) +} diff --git a/src/internal/connector/graph_connector.go b/src/internal/connector/graph_connector.go index 7b7b7a072..888749690 100644 --- a/src/internal/connector/graph_connector.go +++ b/src/internal/connector/graph_connector.go @@ -4,11 +4,14 @@ package connector import ( "context" + "fmt" "net/http" "runtime/trace" "strings" "sync" + "github.com/alcionai/clues" + "github.com/hashicorp/go-multierror" "github.com/microsoft/kiota-abstractions-go/serialization" msgraphgocore "github.com/microsoftgraph/msgraph-sdk-go-core" "github.com/microsoftgraph/msgraph-sdk-go/models" @@ -17,18 +20,12 @@ import ( "github.com/alcionai/corso/src/internal/connector/discovery" "github.com/alcionai/corso/src/internal/connector/discovery/api" - "github.com/alcionai/corso/src/internal/connector/exchange" "github.com/alcionai/corso/src/internal/connector/graph" - "github.com/alcionai/corso/src/internal/connector/onedrive" "github.com/alcionai/corso/src/internal/connector/sharepoint" "github.com/alcionai/corso/src/internal/connector/support" - "github.com/alcionai/corso/src/internal/data" D "github.com/alcionai/corso/src/internal/diagnostics" "github.com/alcionai/corso/src/pkg/account" - "github.com/alcionai/corso/src/pkg/backup/details" - "github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/filters" - "github.com/alcionai/corso/src/pkg/selectors" ) // --------------------------------------------------------------------------- @@ -74,7 +71,7 @@ func NewGraphConnector( ) (*GraphConnector, error) { m365, err := acct.M365Config() if err != nil { - return nil, errors.Wrap(err, "retrieving m365 account configuration") + return nil, clues.Wrap(err, "retrieving m365 account configuration").WithClues(ctx) } gc := GraphConnector{ @@ -87,12 +84,12 @@ func NewGraphConnector( gc.Service, err = 
gc.createService() if err != nil { - return nil, errors.Wrap(err, "creating service connection") + return nil, clues.Wrap(err, "creating service connection").WithClues(ctx) } gc.Owners, err = api.NewClient(m365) if err != nil { - return nil, errors.Wrap(err, "creating api client") + return nil, clues.Wrap(err, "creating api client").WithClues(ctx) } // TODO(ashmrtn): When selectors only encapsulate a single resource owner that @@ -174,8 +171,7 @@ func (gc *GraphConnector) setTenantSites(ctx context.Context) error { gc.tenant, sharepoint.GetAllSitesForTenant, models.CreateSiteCollectionResponseFromDiscriminatorValue, - identifySite, - ) + identifySite) if err != nil { return err } @@ -194,22 +190,23 @@ const personalSitePath = "sharepoint.com/personal/" func identifySite(item any) (string, string, error) { m, ok := item.(models.Siteable) if !ok { - return "", "", errors.New("iteration retrieved non-Site item") + return "", "", clues.New("iteration retrieved non-Site item").With("item_type", fmt.Sprintf("%T", item)) } if m.GetName() == nil { // the built-in site at "https://{tenant-domain}/search" never has a name. if m.GetWebUrl() != nil && strings.HasSuffix(*m.GetWebUrl(), "/search") { - return "", "", errKnownSkippableCase + // TODO: pii siteID, on this and all following cases + return "", "", clues.Stack(errKnownSkippableCase).With("site_id", *m.GetId()) } - return "", "", errors.Errorf("no name for Site: %s", *m.GetId()) + return "", "", clues.New("site has no name").With("site_id", *m.GetId()) } // personal (ie: oneDrive) sites have to be filtered out server-side. url := m.GetWebUrl() if url != nil && strings.Contains(*url, personalSitePath) { - return "", "", errKnownSkippableCase + return "", "", clues.Stack(errKnownSkippableCase).With("site_id", *m.GetId()) } return *m.GetWebUrl(), *m.GetId(), nil @@ -261,49 +258,6 @@ func (gc *GraphConnector) UnionSiteIDsAndWebURLs(ctx context.Context, ids, urls return idsl, nil } -// RestoreDataCollections restores data from the specified collections -// into M365 using the GraphAPI. 
-// SideEffect: gc.status is updated at the completion of operation -func (gc *GraphConnector) RestoreDataCollections( - ctx context.Context, - backupVersion int, - acct account.Account, - selector selectors.Selector, - dest control.RestoreDestination, - opts control.Options, - dcs []data.RestoreCollection, -) (*details.Details, error) { - ctx, end := D.Span(ctx, "connector:restore") - defer end() - - var ( - status *support.ConnectorOperationStatus - err error - deets = &details.Builder{} - ) - - creds, err := acct.M365Config() - if err != nil { - return nil, errors.Wrap(err, "malformed azure credentials") - } - - switch selector.Service { - case selectors.ServiceExchange: - status, err = exchange.RestoreExchangeDataCollections(ctx, creds, gc.Service, dest, dcs, deets) - case selectors.ServiceOneDrive: - status, err = onedrive.RestoreCollections(ctx, backupVersion, gc.Service, dest, opts, dcs, deets) - case selectors.ServiceSharePoint: - status, err = sharepoint.RestoreCollections(ctx, backupVersion, creds, gc.Service, dest, dcs, deets) - default: - err = errors.Errorf("restore data from service %s not supported", selector.Service.String()) - } - - gc.incrementAwaitingMessages() - gc.UpdateStatus(status) - - return deets.Details(), err -} - // AwaitStatus waits for all gc tasks to complete and then returns status func (gc *GraphConnector) AwaitStatus() *support.ConnectorOperationStatus { defer func() { @@ -359,30 +313,27 @@ func getResources( response, err := query(ctx, gs) if err != nil { - return nil, errors.Wrapf( - err, - "retrieving resources for tenant %s: %s", - tenantID, - support.ConnectorStackErrorTrace(err), - ) + return nil, clues.Wrap(err, "retrieving tenant's resources"). + WithClues(ctx). + WithAll(graph.ErrData(err)...) } + errs := &multierror.Error{} + iter, err := msgraphgocore.NewPageIterator(response, gs.Adapter(), parser) if err != nil { - return nil, errors.Wrap(err, support.ConnectorStackErrorTrace(err)) + return nil, clues.Stack(err).WithClues(ctx).WithAll(graph.ErrData(err)...) } - var iterErrs error - callbackFunc := func(item any) bool { k, v, err := identify(item) if err != nil { - if errors.Is(err, errKnownSkippableCase) { - return true + if !errors.Is(err, errKnownSkippableCase) { + errs = multierror.Append(errs, clues.Stack(err). + WithClues(ctx). + With("query_url", gs.Adapter().GetBaseUrl())) } - iterErrs = support.WrapAndAppend(gs.Adapter().GetBaseUrl(), err, iterErrs) - return true } @@ -392,20 +343,8 @@ func getResources( } if err := iter.Iterate(ctx, callbackFunc); err != nil { - return nil, errors.Wrap(err, support.ConnectorStackErrorTrace(err)) + return nil, clues.Stack(err).WithClues(ctx).WithAll(graph.ErrData(err)...) 
} - return resources, iterErrs -} - -// IsRecoverableError returns true iff error is a RecoverableGCEerror -func IsRecoverableError(e error) bool { - var recoverable support.RecoverableGCError - return errors.As(e, &recoverable) -} - -// IsNonRecoverableError returns true iff error is a NonRecoverableGCEerror -func IsNonRecoverableError(e error) bool { - var nonRecoverable support.NonRecoverableGCError - return errors.As(e, &nonRecoverable) + return resources, errs.ErrorOrNil() } diff --git a/src/internal/connector/graph_connector_disconnected_test.go b/src/internal/connector/graph_connector_disconnected_test.go index 2f17ae026..506b55345 100644 --- a/src/internal/connector/graph_connector_disconnected_test.go +++ b/src/internal/connector/graph_connector_disconnected_test.go @@ -116,58 +116,6 @@ func (suite *DisconnectedGraphConnectorSuite) TestGraphConnector_Status() { suite.Equal(2, gc.Status().FolderCount) } -func (suite *DisconnectedGraphConnectorSuite) TestGraphConnector_ErrorChecking() { - tests := []struct { - name string - err error - returnRecoverable assert.BoolAssertionFunc - returnNonRecoverable assert.BoolAssertionFunc - }{ - { - name: "Neither Option", - err: errors.New("regular error"), - returnRecoverable: assert.False, - returnNonRecoverable: assert.False, - }, - { - name: "Validate Recoverable", - err: support.SetRecoverableError(errors.New("Recoverable")), - returnRecoverable: assert.True, - returnNonRecoverable: assert.False, - }, - { - name: "Validate NonRecoverable", - err: support.SetNonRecoverableError(errors.New("Non-recoverable")), - returnRecoverable: assert.False, - returnNonRecoverable: assert.True, - }, - { - name: "Wrapped Recoverable", - err: support.WrapAndAppend( - "Wrapped Recoverable", - support.SetRecoverableError(errors.New("Recoverable")), - nil), - returnRecoverable: assert.True, - returnNonRecoverable: assert.False, - }, - { - name: "On Nil", - err: nil, - returnRecoverable: assert.False, - returnNonRecoverable: assert.False, - }, - } - for _, test := range tests { - suite.T().Run(test.name, func(t *testing.T) { - recoverable := IsRecoverableError(test.err) - nonRecoverable := IsNonRecoverableError(test.err) - test.returnRecoverable(suite.T(), recoverable, "Test: %s Recoverable-received %v", test.name, recoverable) - test.returnNonRecoverable(suite.T(), nonRecoverable, "Test: %s non-recoverable: %v", test.name, nonRecoverable) - t.Logf("Is nil: %v", test.err == nil) - }) - } -} - func (suite *DisconnectedGraphConnectorSuite) TestVerifyBackupInputs() { users := []string{ "elliotReid@someHospital.org", diff --git a/src/internal/connector/support/errors.go b/src/internal/connector/support/errors.go index 8f73ea8fa..26c2e9aca 100644 --- a/src/internal/connector/support/errors.go +++ b/src/internal/connector/support/errors.go @@ -8,29 +8,8 @@ import ( multierror "github.com/hashicorp/go-multierror" msgraph_errors "github.com/microsoftgraph/msgraph-sdk-go/models/odataerrors" "github.com/pkg/errors" - - "github.com/alcionai/corso/src/internal/common" ) -// GraphConnector has two types of errors that are exported -// RecoverableGCError is a query error that can be overcome with time -type RecoverableGCError struct { - common.Err -} - -func SetRecoverableError(e error) error { - return RecoverableGCError{*common.EncapsulateError(e)} -} - -// NonRecoverableGCError is a permanent query error -type NonRecoverableGCError struct { - common.Err -} - -func SetNonRecoverableError(e error) error { - return NonRecoverableGCError{*common.EncapsulateError(e)} -} - // 
WrapErrorAndAppend helper function used to attach identifying information to an error // and return it as a mulitierror func WrapAndAppend(identifier string, e, previous error) error { @@ -101,7 +80,7 @@ func ConnectorStackErrorTraceWrap(e error, prefix string) error { return errors.Wrap(e, prefix) } -// ConnectorStackErrorTracew is a helper function that extracts +// ConnectorStackErrorTrace is a helper function that extracts // the stack trace for oDataErrors, if the error has one. func ConnectorStackErrorTrace(e error) string { eMessage := "" diff --git a/src/internal/connector/support/errors_test.go b/src/internal/connector/support/errors_test.go index a81d3c10d..b8d39df7a 100644 --- a/src/internal/connector/support/errors_test.go +++ b/src/internal/connector/support/errors_test.go @@ -41,26 +41,6 @@ func (suite *GraphConnectorErrorSuite) TestWrapAndAppend_OnVar() { suite.True(strings.Contains(received.Error(), id)) } -func (suite *GraphConnectorErrorSuite) TestAsRecoverableError() { - err := assert.AnError - - rcv := RecoverableGCError{} - suite.False(errors.As(err, &rcv)) - - aRecoverable := SetRecoverableError(err) - suite.True(errors.As(aRecoverable, &rcv)) -} - -func (suite *GraphConnectorErrorSuite) TestAsNonRecoverableError() { - err := assert.AnError - - noRecover := NonRecoverableGCError{} - suite.False(errors.As(err, &noRecover)) - - nonRecoverable := SetNonRecoverableError(err) - suite.True(errors.As(nonRecoverable, &noRecover)) -} - func (suite *GraphConnectorErrorSuite) TestWrapAndAppend_Add3() { errOneTwo := WrapAndAppend("user1", assert.AnError, assert.AnError) combined := WrapAndAppend("unix36", assert.AnError, errOneTwo) diff --git a/src/internal/operations/backup_integration_test.go b/src/internal/operations/backup_integration_test.go index d20be4c31..55269c426 100644 --- a/src/internal/operations/backup_integration_test.go +++ b/src/internal/operations/backup_integration_test.go @@ -347,8 +347,7 @@ func generateContainerOfItems( sel, dest, control.Options{RestorePermissions: true}, - dataColls, - ) + dataColls) require.NoError(t, err) return deets diff --git a/src/pkg/fault/fault.go b/src/pkg/fault/fault.go index b8f57108b..d060e969b 100644 --- a/src/pkg/fault/fault.go +++ b/src/pkg/fault/fault.go @@ -84,6 +84,11 @@ func (e *Errors) Fail(err error) *Errors { return e.setErr(err) } +// Failed returns true if e.err != nil, signifying a catastrophic failure. +func (e *Errors) Failed() bool { + return e.err != nil +} + // setErr handles setting errors.err. Sync locking gets // handled upstream of this call. func (e *Errors) setErr(err error) *Errors { @@ -99,6 +104,7 @@ func (e *Errors) setErr(err error) *Errors { type Adder interface { Add(err error) *Errors + Failed() bool } // Add appends the error to the slice of recoverable and diff --git a/src/pkg/fault/mock/mock.go b/src/pkg/fault/mock/mock.go index 4d3fd06cd..7076f134c 100644 --- a/src/pkg/fault/mock/mock.go +++ b/src/pkg/fault/mock/mock.go @@ -4,7 +4,8 @@ import "github.com/alcionai/corso/src/pkg/fault" // Adder mocks an adder interface for testing. 
type Adder struct { - Errs []error + FailFast bool + Errs []error } func NewAdder() *Adder { @@ -15,3 +16,7 @@ func (ma *Adder) Add(err error) *fault.Errors { ma.Errs = append(ma.Errs, err) return fault.New(true) } + +func (ma *Adder) Failed() bool { + return ma.FailFast && len(ma.Errs) > 0 +} From 81ecb072ffa5640afda94b211c790464ec5ab334 Mon Sep 17 00:00:00 2001 From: Abin Simon Date: Thu, 9 Feb 2023 10:55:01 +0530 Subject: [PATCH 35/45] Add `make fmt` for src (#2436) ## Description Adds make command for code formatting. ## Does this PR need a docs update or release note? - [ ] :white_check_mark: Yes, it's included - [ ] :clock1: Yes, but in a later PR - [x] :no_entry: No ## Type of change - [ ] :sunflower: Feature - [ ] :bug: Bugfix - [ ] :world_map: Documentation - [ ] :robot: Test - [ ] :computer: CI/Deployment - [x] :broom: Tech Debt/Cleanup --- src/Makefile | 4 ++++ .../graph/betasdk/models/horizontal_section_layout_type.go | 2 ++ .../connector/graph/betasdk/models/page_layout_type.go | 2 ++ .../connector/graph/betasdk/models/page_promotion_type.go | 2 ++ .../connector/graph/betasdk/models/section_emphasis_type.go | 2 ++ .../connector/graph/betasdk/models/site_access_type.go | 2 ++ .../connector/graph/betasdk/models/site_security_level.go | 2 ++ .../connector/graph/betasdk/models/title_area_layout_type.go | 2 ++ .../graph/betasdk/models/title_area_text_alignment_type.go | 2 ++ src/internal/connector/sharepoint/api/helper_test.go | 2 +- src/pkg/logger/example_logger_test.go | 1 + src/pkg/selectors/scopes.go | 2 +- 12 files changed, 23 insertions(+), 2 deletions(-) diff --git a/src/Makefile b/src/Makefile index 73474de0c..c832d5d79 100644 --- a/src/Makefile +++ b/src/Makefile @@ -15,6 +15,10 @@ lint: check-lint-version golangci-lint run staticcheck ./... +fmt: + gofumpt -w . + gci write --skip-generated -s 'standard,default,prefix(github.com/alcionai/corso)' . 
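The `-s` sections passed to gci above define the import grouping being enforced: standard library first, then third-party ("default") modules, then anything under the `github.com/alcionai/corso` prefix, with each group separated by a blank line. The `helper_test.go` and `scopes.go` hunks further below in this patch show the reordering being applied. A small illustrative file (assuming these module paths) of the shape gci settles on:

```go
// Illustrative only: the grouping the gci flags above enforce.
package example_test

import (
	// standard library first
	"testing"

	// third-party ("default") modules second
	"github.com/stretchr/testify/require"

	// the prefix(github.com/alcionai/corso) group last
	"github.com/alcionai/corso/src/pkg/account"
)

func TestImportGrouping(t *testing.T) {
	require.NotNil(t, account.Account{})
}
```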
+ check-lint-version: check-lint @if [ "$(LINT_VERSION)" != "$(WANTED_LINT_VERSION)" ]; then \ echo >&2 $(BAD_LINT_MSG); \ diff --git a/src/internal/connector/graph/betasdk/models/horizontal_section_layout_type.go b/src/internal/connector/graph/betasdk/models/horizontal_section_layout_type.go index 80e208ffe..dd24cc509 100644 --- a/src/internal/connector/graph/betasdk/models/horizontal_section_layout_type.go +++ b/src/internal/connector/graph/betasdk/models/horizontal_section_layout_type.go @@ -21,6 +21,7 @@ const ( func (i HorizontalSectionLayoutType) String() string { return []string{"none", "oneColumn", "twoColumns", "threeColumns", "oneThirdLeftColumn", "oneThirdRightColumn", "fullWidth", "unknownFutureValue"}[i] } + func ParseHorizontalSectionLayoutType(v string) (interface{}, error) { result := NONE_HORIZONTALSECTIONLAYOUTTYPE switch v { @@ -45,6 +46,7 @@ func ParseHorizontalSectionLayoutType(v string) (interface{}, error) { } return &result, nil } + func SerializeHorizontalSectionLayoutType(values []HorizontalSectionLayoutType) []string { result := make([]string, len(values)) for i, v := range values { diff --git a/src/internal/connector/graph/betasdk/models/page_layout_type.go b/src/internal/connector/graph/betasdk/models/page_layout_type.go index fce795760..896ac6a44 100644 --- a/src/internal/connector/graph/betasdk/models/page_layout_type.go +++ b/src/internal/connector/graph/betasdk/models/page_layout_type.go @@ -17,6 +17,7 @@ const ( func (i PageLayoutType) String() string { return []string{"microsoftReserved", "article", "home", "unknownFutureValue"}[i] } + func ParsePageLayoutType(v string) (interface{}, error) { result := MICROSOFTRESERVED_PAGELAYOUTTYPE switch v { @@ -33,6 +34,7 @@ func ParsePageLayoutType(v string) (interface{}, error) { } return &result, nil } + func SerializePageLayoutType(values []PageLayoutType) []string { result := make([]string, len(values)) for i, v := range values { diff --git a/src/internal/connector/graph/betasdk/models/page_promotion_type.go b/src/internal/connector/graph/betasdk/models/page_promotion_type.go index e78ce63f0..f072e141d 100644 --- a/src/internal/connector/graph/betasdk/models/page_promotion_type.go +++ b/src/internal/connector/graph/betasdk/models/page_promotion_type.go @@ -17,6 +17,7 @@ const ( func (i PagePromotionType) String() string { return []string{"microsoftReserved", "page", "newsPost", "unknownFutureValue"}[i] } + func ParsePagePromotionType(v string) (interface{}, error) { result := MICROSOFTRESERVED_PAGEPROMOTIONTYPE switch v { @@ -33,6 +34,7 @@ func ParsePagePromotionType(v string) (interface{}, error) { } return &result, nil } + func SerializePagePromotionType(values []PagePromotionType) []string { result := make([]string, len(values)) for i, v := range values { diff --git a/src/internal/connector/graph/betasdk/models/section_emphasis_type.go b/src/internal/connector/graph/betasdk/models/section_emphasis_type.go index 301ae839f..184ab8521 100644 --- a/src/internal/connector/graph/betasdk/models/section_emphasis_type.go +++ b/src/internal/connector/graph/betasdk/models/section_emphasis_type.go @@ -18,6 +18,7 @@ const ( func (i SectionEmphasisType) String() string { return []string{"none", "neutral", "soft", "strong", "unknownFutureValue"}[i] } + func ParseSectionEmphasisType(v string) (interface{}, error) { result := NONE_SECTIONEMPHASISTYPE switch v { @@ -36,6 +37,7 @@ func ParseSectionEmphasisType(v string) (interface{}, error) { } return &result, nil } + func SerializeSectionEmphasisType(values 
[]SectionEmphasisType) []string { result := make([]string, len(values)) for i, v := range values { diff --git a/src/internal/connector/graph/betasdk/models/site_access_type.go b/src/internal/connector/graph/betasdk/models/site_access_type.go index 2d4cedffe..ff07d2dfa 100644 --- a/src/internal/connector/graph/betasdk/models/site_access_type.go +++ b/src/internal/connector/graph/betasdk/models/site_access_type.go @@ -16,6 +16,7 @@ const ( func (i SiteAccessType) String() string { return []string{"block", "full", "limited"}[i] } + func ParseSiteAccessType(v string) (interface{}, error) { result := BLOCK_SITEACCESSTYPE switch v { @@ -30,6 +31,7 @@ func ParseSiteAccessType(v string) (interface{}, error) { } return &result, nil } + func SerializeSiteAccessType(values []SiteAccessType) []string { result := make([]string, len(values)) for i, v := range values { diff --git a/src/internal/connector/graph/betasdk/models/site_security_level.go b/src/internal/connector/graph/betasdk/models/site_security_level.go index 0c75c164e..ac12a6696 100644 --- a/src/internal/connector/graph/betasdk/models/site_security_level.go +++ b/src/internal/connector/graph/betasdk/models/site_security_level.go @@ -25,6 +25,7 @@ const ( func (i SiteSecurityLevel) String() string { return []string{"userDefined", "low", "mediumLow", "medium", "mediumHigh", "high"}[i] } + func ParseSiteSecurityLevel(v string) (interface{}, error) { result := USERDEFINED_SITESECURITYLEVEL switch v { @@ -45,6 +46,7 @@ func ParseSiteSecurityLevel(v string) (interface{}, error) { } return &result, nil } + func SerializeSiteSecurityLevel(values []SiteSecurityLevel) []string { result := make([]string, len(values)) for i, v := range values { diff --git a/src/internal/connector/graph/betasdk/models/title_area_layout_type.go b/src/internal/connector/graph/betasdk/models/title_area_layout_type.go index 3621288a4..f766655f0 100644 --- a/src/internal/connector/graph/betasdk/models/title_area_layout_type.go +++ b/src/internal/connector/graph/betasdk/models/title_area_layout_type.go @@ -18,6 +18,7 @@ const ( func (i TitleAreaLayoutType) String() string { return []string{"imageAndTitle", "plain", "colorBlock", "overlap", "unknownFutureValue"}[i] } + func ParseTitleAreaLayoutType(v string) (interface{}, error) { result := IMAGEANDTITLE_TITLEAREALAYOUTTYPE switch v { @@ -36,6 +37,7 @@ func ParseTitleAreaLayoutType(v string) (interface{}, error) { } return &result, nil } + func SerializeTitleAreaLayoutType(values []TitleAreaLayoutType) []string { result := make([]string, len(values)) for i, v := range values { diff --git a/src/internal/connector/graph/betasdk/models/title_area_text_alignment_type.go b/src/internal/connector/graph/betasdk/models/title_area_text_alignment_type.go index a34f41dbe..a56368f58 100644 --- a/src/internal/connector/graph/betasdk/models/title_area_text_alignment_type.go +++ b/src/internal/connector/graph/betasdk/models/title_area_text_alignment_type.go @@ -16,6 +16,7 @@ const ( func (i TitleAreaTextAlignmentType) String() string { return []string{"left", "center", "unknownFutureValue"}[i] } + func ParseTitleAreaTextAlignmentType(v string) (interface{}, error) { result := LEFT_TITLEAREATEXTALIGNMENTTYPE switch v { @@ -30,6 +31,7 @@ func ParseTitleAreaTextAlignmentType(v string) (interface{}, error) { } return &result, nil } + func SerializeTitleAreaTextAlignmentType(values []TitleAreaTextAlignmentType) []string { result := make([]string, len(values)) for i, v := range values { diff --git 
a/src/internal/connector/sharepoint/api/helper_test.go b/src/internal/connector/sharepoint/api/helper_test.go index 1d50263ee..23298e240 100644 --- a/src/internal/connector/sharepoint/api/helper_test.go +++ b/src/internal/connector/sharepoint/api/helper_test.go @@ -3,9 +3,9 @@ package api_test import ( "testing" - discover "github.com/alcionai/corso/src/internal/connector/discovery/api" "github.com/stretchr/testify/require" + discover "github.com/alcionai/corso/src/internal/connector/discovery/api" "github.com/alcionai/corso/src/internal/connector/graph" "github.com/alcionai/corso/src/pkg/account" ) diff --git a/src/pkg/logger/example_logger_test.go b/src/pkg/logger/example_logger_test.go index d04cc02ff..685dae2b4 100644 --- a/src/pkg/logger/example_logger_test.go +++ b/src/pkg/logger/example_logger_test.go @@ -4,6 +4,7 @@ import ( "context" "github.com/alcionai/clues" + "github.com/alcionai/corso/src/pkg/logger" ) diff --git a/src/pkg/selectors/scopes.go b/src/pkg/selectors/scopes.go index 9ea595897..5fc05e789 100644 --- a/src/pkg/selectors/scopes.go +++ b/src/pkg/selectors/scopes.go @@ -3,9 +3,9 @@ package selectors import ( "context" + "github.com/alcionai/clues" "golang.org/x/exp/maps" - "github.com/alcionai/clues" D "github.com/alcionai/corso/src/internal/diagnostics" "github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/fault" From 98fa55ac54efcd15a6031ac3c6e56e7acce1bfc4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 9 Feb 2023 05:29:24 +0000 Subject: [PATCH 36/45] =?UTF-8?q?=E2=AC=86=EF=B8=8F=20Bump=20tailwindcss?= =?UTF-8?q?=20from=203.2.4=20to=203.2.6=20in=20/website=20(#2452)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [tailwindcss](https://github.com/tailwindlabs/tailwindcss) from 3.2.4 to 3.2.6.
Release notes

Sourced from tailwindcss's releases.

v3.2.6

Fixed

v3.2.5

Added

  • Add standalone CLI build for 64-bit Windows on ARM (node16-win-arm64) (#10001)

Fixed

  • Cleanup unused variantOrder (#9829)
  • Fix foo-[abc]/[def] not being handled correctly (#9866)
  • Add container queries plugin to standalone CLI (#9865)
  • Support renaming of output files by PostCSS plugin. (#9944)
  • Improve return value of resolveConfig, unwrap ResolvableTo (#9972)
  • Clip unbalanced brackets in arbitrary values (#9973)
  • Don’t reorder webkit scrollbar pseudo elements (#9991)
  • Deterministic sorting of arbitrary variants (#10016)
  • Add data key to theme types (#10023)
  • Prevent invalid arbitrary variant selectors from failing the build (#10059)
  • Properly handle subtraction followed by a variable (#10074)
  • Fix missing string[] in the theme.dropShadow types (#10072)
  • Update list of length units (#10100)
  • Fix not matching arbitrary properties when closely followed by square brackets (#10212)
  • Allow direct nesting in root or @layer nodes (#10229)
  • Don't prefix classes in arbitrary variants (#10214)
  • Fix perf regression when checking for changed content (#10234)
  • Fix missing blocklist member in the Config type (#10239)
  • Escape group names in selectors (#10276)
  • Consider earlier variants before sorting functions (#10288)
  • Allow variants with slashes (#10336)
  • Ensure generated CSS is always sorted in the same order for a given set of templates (#10382)
  • Handle variants when the same class appears multiple times in a selector (#10397)
  • Handle group/peer variants with quoted strings (#10400)
  • Parse alpha value from rgba/hsla colors when using variables (#10429)
  • Sort by layer inside variants layer (#10505)
  • Add --watch=always option to prevent exit when stdin closes (#9966)

Changed

  • Alphabetize theme keys in default config (#9953)
  • Update esbuild to v17 (#10368)
  • Include outline-color in transition and transition-colors utilities (#10385)

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=tailwindcss&package-manager=npm_and_yarn&previous-version=3.2.4&new-version=3.2.6)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) You can trigger a rebase of this PR by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- website/package-lock.json | 35 +++++++++++++++++------------------ website/package.json | 2 +- 2 files changed, 18 insertions(+), 19 deletions(-) diff --git a/website/package-lock.json b/website/package-lock.json index 4ef68bb92..aec922253 100644 --- a/website/package-lock.json +++ b/website/package-lock.json @@ -33,7 +33,7 @@ "@iconify/react": "^4.1.0", "autoprefixer": "^10.4.13", "postcss": "^8.4.21", - "tailwindcss": "^3.2.4" + "tailwindcss": "^3.2.6" } }, "node_modules/@algolia/autocomplete-core": { @@ -10569,10 +10569,9 @@ } }, "node_modules/postcss-selector-parser": { - "version": "6.0.10", - "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.0.10.tgz", - "integrity": "sha512-IQ7TZdoaqbT+LCpShg46jnZVlhWD2w6iQYAcYXfHARZ7X1t/UGhhceQDs5X0cGqKvYlHNOuv7Oa1xmb0oQuA3w==", - "license": "MIT", + "version": "6.0.11", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.0.11.tgz", + "integrity": "sha512-zbARubNdogI9j7WY4nQJBiNqQf3sLS3wCP4WfOidu+p28LofJqDH1tcXypGrcmMHhDk2t9wGhCsYe/+szLTy1g==", "dependencies": { "cssesc": "^3.0.0", "util-deprecate": "^1.0.2" @@ -12670,9 +12669,9 @@ } }, "node_modules/tailwindcss": { - "version": "3.2.4", - "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-3.2.4.tgz", - "integrity": "sha512-AhwtHCKMtR71JgeYDaswmZXhPcW9iuI9Sp2LvZPo9upDZ7231ZJ7eA9RaURbhpXGVlrjX4cFNlB4ieTetEb7hQ==", + "version": "3.2.6", + "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-3.2.6.tgz", + "integrity": "sha512-BfgQWZrtqowOQMC2bwaSNe7xcIjdDEgixWGYOd6AL0CbKHJlvhfdbINeAW76l1sO+1ov/MJ93ODJ9yluRituIw==", "dev": true, "dependencies": { "arg": "^5.0.2", @@ -12689,12 +12688,12 @@ "normalize-path": "^3.0.0", "object-hash": "^3.0.0", "picocolors": "^1.0.0", - "postcss": "^8.4.18", + "postcss": "^8.0.9", "postcss-import": "^14.1.0", "postcss-js": "^4.0.0", "postcss-load-config": "^3.1.4", "postcss-nested": "6.0.0", - "postcss-selector-parser": "^6.0.10", + "postcss-selector-parser": "^6.0.11", "postcss-value-parser": "^4.2.0", "quick-lru": "^5.1.1", "resolve": "^1.22.1" @@ -21556,9 +21555,9 @@ } }, "postcss-selector-parser": { - "version": "6.0.10", - "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.0.10.tgz", - "integrity": "sha512-IQ7TZdoaqbT+LCpShg46jnZVlhWD2w6iQYAcYXfHARZ7X1t/UGhhceQDs5X0cGqKvYlHNOuv7Oa1xmb0oQuA3w==", + "version": "6.0.11", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.0.11.tgz", + "integrity": "sha512-zbARubNdogI9j7WY4nQJBiNqQf3sLS3wCP4WfOidu+p28LofJqDH1tcXypGrcmMHhDk2t9wGhCsYe/+szLTy1g==", "requires": { "cssesc": "^3.0.0", "util-deprecate": "^1.0.2" @@ -23009,9 +23008,9 @@ } }, "tailwindcss": { - "version": "3.2.4", - "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-3.2.4.tgz", - "integrity": "sha512-AhwtHCKMtR71JgeYDaswmZXhPcW9iuI9Sp2LvZPo9upDZ7231ZJ7eA9RaURbhpXGVlrjX4cFNlB4ieTetEb7hQ==", + "version": "3.2.6", + "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-3.2.6.tgz", + "integrity": "sha512-BfgQWZrtqowOQMC2bwaSNe7xcIjdDEgixWGYOd6AL0CbKHJlvhfdbINeAW76l1sO+1ov/MJ93ODJ9yluRituIw==", "dev": true, "requires": { "arg": "^5.0.2", @@ -23028,12 +23027,12 @@ "normalize-path": "^3.0.0", "object-hash": "^3.0.0", "picocolors": "^1.0.0", - "postcss": "^8.4.18", + "postcss": "^8.0.9", "postcss-import": "^14.1.0", "postcss-js": "^4.0.0", "postcss-load-config": "^3.1.4", "postcss-nested": "6.0.0", - "postcss-selector-parser": "^6.0.10", + 
"postcss-selector-parser": "^6.0.11", "postcss-value-parser": "^4.2.0", "quick-lru": "^5.1.1", "resolve": "^1.22.1" diff --git a/website/package.json b/website/package.json index 63f86b146..1ae1bc561 100644 --- a/website/package.json +++ b/website/package.json @@ -39,7 +39,7 @@ "@iconify/react": "^4.1.0", "autoprefixer": "^10.4.13", "postcss": "^8.4.21", - "tailwindcss": "^3.2.4" + "tailwindcss": "^3.2.6" }, "browserslist": { "production": [ From 78e8e1a4a83ce1f6cbd5a3a05549296ec2a6ed91 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 9 Feb 2023 06:43:56 +0000 Subject: [PATCH 37/45] =?UTF-8?q?=E2=AC=86=EF=B8=8F=20Bump=20@sideway/form?= =?UTF-8?q?ula=20from=203.0.0=20to=203.0.1=20in=20/website=20(#2454)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [@sideway/formula](https://github.com/sideway/formula) from 3.0.0 to 3.0.1.
Maintainer changes

This version was pushed to npm by marsup, a new releaser for @sideway/formula since your current version.


[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=@sideway/formula&package-manager=npm_and_yarn&previous-version=3.0.0&new-version=3.0.1)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) You can trigger a rebase of this PR by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
--- website/package-lock.json | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/website/package-lock.json b/website/package-lock.json index aec922253..36768e41c 100644 --- a/website/package-lock.json +++ b/website/package-lock.json @@ -3023,10 +3023,9 @@ } }, "node_modules/@sideway/formula": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/@sideway/formula/-/formula-3.0.0.tgz", - "integrity": "sha512-vHe7wZ4NOXVfkoRb8T5otiENVlT7a3IAiw7H5M2+GO+9CDgcVUUsX1zalAztCmwyOr2RUTGJdgB+ZvSVqmdHmg==", - "license": "BSD-3-Clause" + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@sideway/formula/-/formula-3.0.1.tgz", + "integrity": "sha512-/poHZJJVjx3L+zVD6g9KgHfYnb443oi7wLu/XKojDviHy6HOEOA6z1Trk5aR1dGcmPenJEgb2sK2I80LeS3MIg==" }, "node_modules/@sideway/pinpoint": { "version": "2.0.0", @@ -16537,9 +16536,9 @@ } }, "@sideway/formula": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/@sideway/formula/-/formula-3.0.0.tgz", - "integrity": "sha512-vHe7wZ4NOXVfkoRb8T5otiENVlT7a3IAiw7H5M2+GO+9CDgcVUUsX1zalAztCmwyOr2RUTGJdgB+ZvSVqmdHmg==" + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@sideway/formula/-/formula-3.0.1.tgz", + "integrity": "sha512-/poHZJJVjx3L+zVD6g9KgHfYnb443oi7wLu/XKojDviHy6HOEOA6z1Trk5aR1dGcmPenJEgb2sK2I80LeS3MIg==" }, "@sideway/pinpoint": { "version": "2.0.0", From 6c2c873cc58d16ab62b851cb39042cf5666d7266 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 9 Feb 2023 13:09:31 +0000 Subject: [PATCH 38/45] =?UTF-8?q?=E2=AC=86=EF=B8=8F=20Bump=20github.com/aw?= =?UTF-8?q?s/aws-sdk-go=20from=201.44.196=20to=201.44.197=20in=20/src=20(#?= =?UTF-8?q?2453)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.44.196 to 1.44.197.
Release notes

Sourced from github.com/aws/aws-sdk-go's releases.

Release v1.44.197 (2023-02-08)

Service Client Updates

  • service/backup: Updates service API and documentation
  • service/cloudfront: Updates service API and documentation
    • CloudFront Origin Access Control extends support to AWS Elemental MediaStore origins.
  • service/glue: Updates service API and documentation
    • DirectJDBCSource + Glue 4.0 streaming options
  • service/lakeformation: Updates service API

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/aws/aws-sdk-go&package-manager=go_modules&previous-version=1.44.196&new-version=1.44.197)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) You can trigger a rebase of this PR by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
--- src/go.mod | 2 +- src/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/go.mod b/src/go.mod index 24d305739..4056edfbc 100644 --- a/src/go.mod +++ b/src/go.mod @@ -5,7 +5,7 @@ go 1.19 require ( github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0 github.com/alcionai/clues v0.0.0-20230202001016-cbda58c9de9e - github.com/aws/aws-sdk-go v1.44.196 + github.com/aws/aws-sdk-go v1.44.197 github.com/aws/aws-xray-sdk-go v1.8.0 github.com/google/uuid v1.3.0 github.com/hashicorp/go-multierror v1.1.1 diff --git a/src/go.sum b/src/go.sum index 61562d958..da19f8a2e 100644 --- a/src/go.sum +++ b/src/go.sum @@ -62,8 +62,8 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5 github.com/alessio/shellescape v1.4.1 h1:V7yhSDDn8LP4lc4jS8pFkt0zCnzVJlG5JXy9BVKJUX0= github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY3JY= github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= -github.com/aws/aws-sdk-go v1.44.196 h1:e3h9M7fpnRHwHOohYmYjgVbcCBvkxKwZiT7fGrxRn28= -github.com/aws/aws-sdk-go v1.44.196/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go v1.44.197 h1:pkg/NZsov9v/CawQWy+qWVzJMIZRQypCtYjUBXFomF8= +github.com/aws/aws-sdk-go v1.44.197/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/aws/aws-xray-sdk-go v1.8.0 h1:0xncHZ588wB/geLjbM/esoW3FOEThWy2TJyb4VXfLFY= github.com/aws/aws-xray-sdk-go v1.8.0/go.mod h1:7LKe47H+j3evfvS1+q0wzpoaGXGrF3mUsfM+thqVO+A= github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= From 2643fc2c89368cdf3f152ddc98d89a4406d4a10b Mon Sep 17 00:00:00 2001 From: Danny Date: Thu, 9 Feb 2023 09:39:00 -0500 Subject: [PATCH 39/45] GC: Backup: SharePoint: Pages Connect Pipeline (#2220) ## Description Finalize the backup workflow for `SharePoint.Pages.` Populate functions parallelizes Fix for Incorrect Status during backup ## Does this PR need a docs update or release note? 
- [x] :no_entry: No ## Type of change - [x] :sunflower: Feature ## Issue(s) * closes #2071 * closes #2257 * related to #2173 ## Test Plan - [x] :zap: Unit test --- src/internal/connector/data_collections.go | 2 +- .../connector/data_collections_test.go | 2 +- src/internal/connector/sharepoint/api/api.go | 2 + .../connector/sharepoint/api/pages.go | 62 +++++++++++++--- .../connector/sharepoint/collection.go | 11 ++- .../connector/sharepoint/collection_test.go | 20 +----- .../connector/sharepoint/data_collections.go | 30 +++++--- .../sharepoint/data_collections_test.go | 12 +--- .../sharepoint/datacategory_string.go | 15 ++-- src/internal/connector/sharepoint/list.go | 71 ++++++++++++++----- 10 files changed, 150 insertions(+), 77 deletions(-) diff --git a/src/internal/connector/data_collections.go b/src/internal/connector/data_collections.go index 4cd167ea6..4c1bd4461 100644 --- a/src/internal/connector/data_collections.go +++ b/src/internal/connector/data_collections.go @@ -92,7 +92,7 @@ func (gc *GraphConnector) DataCollections( ctx, gc.itemClient, sels, - gc.credentials.AzureTenantID, + gc.credentials, gc.Service, gc, ctrlOpts) diff --git a/src/internal/connector/data_collections_test.go b/src/internal/connector/data_collections_test.go index c90bee511..4484a92aa 100644 --- a/src/internal/connector/data_collections_test.go +++ b/src/internal/connector/data_collections_test.go @@ -249,7 +249,7 @@ func (suite *ConnectorDataCollectionIntegrationSuite) TestSharePointDataCollecti ctx, graph.HTTPClient(graph.NoTimeout()), test.getSelector(), - connector.credentials.AzureTenantID, + connector.credentials, connector.Service, connector, control.Options{}) diff --git a/src/internal/connector/sharepoint/api/api.go b/src/internal/connector/sharepoint/api/api.go index c05eaad6b..6c9658418 100644 --- a/src/internal/connector/sharepoint/api/api.go +++ b/src/internal/connector/sharepoint/api/api.go @@ -4,3 +4,5 @@ type Tuple struct { Name string ID string } + +const fetchChannelSize = 5 diff --git a/src/internal/connector/sharepoint/api/pages.go b/src/internal/connector/sharepoint/api/pages.go index a62fbc40a..45b32ddd5 100644 --- a/src/internal/connector/sharepoint/api/pages.go +++ b/src/internal/connector/sharepoint/api/pages.go @@ -4,11 +4,13 @@ import ( "context" "fmt" "io" + "sync" "time" "github.com/pkg/errors" discover "github.com/alcionai/corso/src/internal/connector/discovery/api" + "github.com/alcionai/corso/src/internal/connector/graph" "github.com/alcionai/corso/src/internal/connector/graph/betasdk/models" "github.com/alcionai/corso/src/internal/connector/graph/betasdk/sites" "github.com/alcionai/corso/src/internal/connector/support" @@ -25,16 +27,55 @@ func GetSitePages( siteID string, pages []string, ) ([]models.SitePageable, error) { - col := make([]models.SitePageable, 0) - opts := retrieveSitePageOptions() + var ( + col = make([]models.SitePageable, 0) + semaphoreCh = make(chan struct{}, fetchChannelSize) + opts = retrieveSitePageOptions() + err, errs error + wg sync.WaitGroup + m sync.Mutex + ) + + defer close(semaphoreCh) + + errUpdater := func(id string, err error) { + m.Lock() + errs = support.WrapAndAppend(id, err, errs) + m.Unlock() + } + updatePages := func(page models.SitePageable) { + m.Lock() + col = append(col, page) + m.Unlock() + } for _, entry := range pages { - page, err := serv.Client().SitesById(siteID).PagesById(entry).Get(ctx, opts) - if err != nil { - return nil, support.ConnectorStackErrorTraceWrap(err, "fetching page: "+entry) - } + semaphoreCh <- struct{}{} - col 
= append(col, page) + wg.Add(1) + + go func(pageID string) { + defer wg.Done() + defer func() { <-semaphoreCh }() + + var page models.SitePageable + + err = graph.RunWithRetry(func() error { + page, err = serv.Client().SitesById(siteID).PagesById(pageID).Get(ctx, opts) + return err + }) + if err != nil { + errUpdater(pageID, errors.Wrap(err, support.ConnectorStackErrorTrace(err)+" fetching page")) + } else { + updatePages(page) + } + }(entry) + } + + wg.Wait() + + if errs != nil { + return nil, errs } return col, nil @@ -46,10 +87,15 @@ func FetchPages(ctx context.Context, bs *discover.BetaService, siteID string) ([ builder = bs.Client().SitesById(siteID).Pages() opts = fetchPageOptions() pageTuples = make([]Tuple, 0) + resp models.SitePageCollectionResponseable + err error ) for { - resp, err := builder.Get(ctx, opts) + err = graph.RunWithRetry(func() error { + resp, err = builder.Get(ctx, opts) + return err + }) if err != nil { return nil, support.ConnectorStackErrorTraceWrap(err, "failed fetching site page") } diff --git a/src/internal/connector/sharepoint/collection.go b/src/internal/connector/sharepoint/collection.go index ca07399eb..91ebf5d65 100644 --- a/src/internal/connector/sharepoint/collection.go +++ b/src/internal/connector/sharepoint/collection.go @@ -28,6 +28,7 @@ type DataCategory int //go:generate stringer -type=DataCategory const ( collectionChannelBufferSize = 50 + fetchChannelSize = 5 Unknown DataCategory = iota List Drive @@ -70,6 +71,7 @@ func NewCollection( service graph.Servicer, category DataCategory, statusUpdater support.StatusUpdater, + ctrlOpts control.Options, ) *Collection { c := &Collection{ fullPath: folderPath, @@ -78,6 +80,7 @@ func NewCollection( service: service, statusUpdater: statusUpdater, category: category, + ctrl: ctrlOpts, } return c @@ -157,7 +160,7 @@ func (sc *Collection) finishPopulation(ctx context.Context, attempts, success in status := support.CreateStatus( ctx, support.Backup, - len(sc.jobs), + 1, // 1 folder support.CollectionMetrics{ Objects: attempted, Successes: success, @@ -180,6 +183,9 @@ func (sc *Collection) populate(ctx context.Context) { writer = kw.NewJsonSerializationWriter() ) + defer func() { + sc.finishPopulation(ctx, metrics.attempts, metrics.success, int64(metrics.totalBytes), errs) + }() // TODO: Insert correct ID for CollectionProgress colProgress, closer := observe.CollectionProgress( ctx, @@ -190,7 +196,6 @@ func (sc *Collection) populate(ctx context.Context) { defer func() { close(colProgress) - sc.finishPopulation(ctx, metrics.attempts, metrics.success, metrics.totalBytes, errs) }() // Switch retrieval function based on category @@ -314,7 +319,7 @@ func (sc *Collection) retrievePages( } } - return numMetrics{}, nil + return metrics, nil } func serializeContent(writer *kw.JsonSerializationWriter, obj absser.Parsable) ([]byte, error) { diff --git a/src/internal/connector/sharepoint/collection_test.go b/src/internal/connector/sharepoint/collection_test.go index 494287457..2f2a2c472 100644 --- a/src/internal/connector/sharepoint/collection_test.go +++ b/src/internal/connector/sharepoint/collection_test.go @@ -147,7 +147,7 @@ func (suite *SharePointCollectionSuite) TestCollection_Items() { for _, test := range tables { t.Run(test.name, func(t *testing.T) { - col := NewCollection(test.getDir(t), nil, test.category, nil) + col := NewCollection(test.getDir(t), nil, test.category, nil, control.Defaults()) col.data <- test.getItem(t, test.itemName) readItems := []data.Stream{} @@ -167,24 +167,6 @@ func (suite 
*SharePointCollectionSuite) TestCollection_Items() { } } -func (suite *SharePointCollectionSuite) TestCollectPages() { - ctx, flush := tester.NewContext() - defer flush() - - t := suite.T() - col, err := collectPages( - ctx, - suite.creds, - nil, - account.AzureTenantID, - suite.siteID, - &MockGraphService{}, - control.Defaults(), - ) - assert.NoError(t, err) - assert.NotEmpty(t, col) -} - // TestRestoreListCollection verifies Graph Restore API for the List Collection func (suite *SharePointCollectionSuite) TestListCollection_Restore() { ctx, flush := tester.NewContext() diff --git a/src/internal/connector/sharepoint/data_collections.go b/src/internal/connector/sharepoint/data_collections.go index ce17c9c8d..f98344dab 100644 --- a/src/internal/connector/sharepoint/data_collections.go +++ b/src/internal/connector/sharepoint/data_collections.go @@ -30,7 +30,7 @@ func DataCollections( ctx context.Context, itemClient *http.Client, selector selectors.Selector, - tenantID string, + creds account.M365Config, serv graph.Servicer, su statusUpdater, ctrlOpts control.Options, @@ -61,7 +61,7 @@ func DataCollections( spcs, err = collectLists( ctx, serv, - tenantID, + creds.AzureTenantID, site, su, ctrlOpts) @@ -74,7 +74,7 @@ func DataCollections( ctx, itemClient, serv, - tenantID, + creds.AzureTenantID, site, scope, su, @@ -82,6 +82,17 @@ func DataCollections( if err != nil { return nil, nil, support.WrapAndAppend(site, err, errs) } + case path.PagesCategory: + spcs, err = collectPages( + ctx, + creds, + serv, + site, + su, + ctrlOpts) + if err != nil { + return nil, nil, support.WrapAndAppend(site, err, errs) + } } collections = append(collections, spcs...) @@ -118,7 +129,7 @@ func collectLists( return nil, errors.Wrapf(err, "failed to create collection path for site: %s", siteID) } - collection := NewCollection(dir, serv, List, updater.UpdateStatus) + collection := NewCollection(dir, serv, List, updater.UpdateStatus, ctrlOpts) collection.AddJob(tuple.id) spcs = append(spcs, collection) @@ -166,12 +177,12 @@ func collectLibraries( } // collectPages constructs a sharepoint Collections struct and Get()s the associated -// M365 IDs for the associated Pages +// M365 IDs for the associated Pages. func collectPages( ctx context.Context, creds account.M365Config, serv graph.Servicer, - tenantID, siteID string, + siteID string, updater statusUpdater, ctrlOpts control.Options, ) ([]data.BackupCollection, error) { @@ -180,9 +191,10 @@ func collectPages( spcs := make([]data.BackupCollection, 0) // make the betaClient + // Need to receive From DataCollection Call adpt, err := graph.CreateAdapter(creds.AzureTenantID, creds.AzureClientID, creds.AzureClientSecret) if err != nil { - return nil, errors.Wrap(err, "adapter for betaservice not created") + return nil, errors.New("unable to create adapter w/ env credentials") } betaService := api.NewBetaService(adpt) @@ -195,7 +207,7 @@ func collectPages( for _, tuple := range tuples { dir, err := path.Builder{}.Append(tuple.Name). 
ToDataLayerSharePointPath( - tenantID, + creds.AzureTenantID, siteID, path.PagesCategory, false) @@ -203,7 +215,7 @@ func collectPages( return nil, errors.Wrapf(err, "failed to create collection path for site: %s", siteID) } - collection := NewCollection(dir, serv, Pages, updater.UpdateStatus) + collection := NewCollection(dir, serv, Pages, updater.UpdateStatus, ctrlOpts) collection.betaService = betaService collection.AddJob(tuple.ID) diff --git a/src/internal/connector/sharepoint/data_collections_test.go b/src/internal/connector/sharepoint/data_collections_test.go index 775cda23f..623b5c2e7 100644 --- a/src/internal/connector/sharepoint/data_collections_test.go +++ b/src/internal/connector/sharepoint/data_collections_test.go @@ -10,7 +10,6 @@ import ( "github.com/alcionai/corso/src/internal/connector/graph" "github.com/alcionai/corso/src/internal/connector/onedrive" - "github.com/alcionai/corso/src/internal/connector/support" "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/selectors" @@ -154,20 +153,13 @@ func (suite *SharePointPagesSuite) TestCollectPages() { account, err := a.M365Config() require.NoError(t, err) - updateFunc := func(*support.ConnectorOperationStatus) { - t.Log("Updater Called ") - } - - updater := &MockUpdater{UpdateState: updateFunc} - col, err := collectPages( ctx, account, nil, - account.AzureTenantID, siteID, - updater, - control.Options{}, + &MockGraphService{}, + control.Defaults(), ) assert.NoError(t, err) assert.NotEmpty(t, col) diff --git a/src/internal/connector/sharepoint/datacategory_string.go b/src/internal/connector/sharepoint/datacategory_string.go index c75c0ad92..b3281ff7f 100644 --- a/src/internal/connector/sharepoint/datacategory_string.go +++ b/src/internal/connector/sharepoint/datacategory_string.go @@ -8,19 +8,20 @@ func _() { // An "invalid array index" compiler error signifies that the constant values have changed. // Re-run the stringer command to generate them again. 
var x [1]struct{} - _ = x[Unknown-1] - _ = x[List-2] - _ = x[Drive-3] + _ = x[Unknown-2] + _ = x[List-3] + _ = x[Drive-4] + _ = x[Pages-5] } -const _DataCategory_name = "UnknownListDrive" +const _DataCategory_name = "UnknownListDrivePages" -var _DataCategory_index = [...]uint8{0, 7, 11, 16} +var _DataCategory_index = [...]uint8{0, 7, 11, 16, 21} func (i DataCategory) String() string { - i -= 1 + i -= 2 if i < 0 || i >= DataCategory(len(_DataCategory_index)-1) { - return "DataCategory(" + strconv.FormatInt(int64(i+1), 10) + ")" + return "DataCategory(" + strconv.FormatInt(int64(i+2), 10) + ")" } return _DataCategory_name[_DataCategory_index[i]:_DataCategory_index[i+1]] } diff --git a/src/internal/connector/sharepoint/list.go b/src/internal/connector/sharepoint/list.go index 101de9722..64183c3a9 100644 --- a/src/internal/connector/sharepoint/list.go +++ b/src/internal/connector/sharepoint/list.go @@ -3,6 +3,7 @@ package sharepoint import ( "context" "fmt" + "sync" "github.com/microsoftgraph/msgraph-sdk-go/models" mssite "github.com/microsoftgraph/msgraph-sdk-go/sites" @@ -91,33 +92,65 @@ func loadSiteLists( listIDs []string, ) ([]models.Listable, error) { var ( - results = make([]models.Listable, 0) - errs error + results = make([]models.Listable, 0) + semaphoreCh = make(chan struct{}, fetchChannelSize) + errs error + wg sync.WaitGroup + m sync.Mutex ) - for _, listID := range listIDs { - entry, err := gs.Client().SitesById(siteID).ListsById(listID).Get(ctx, nil) - if err != nil { - errs = support.WrapAndAppend( - listID, - errors.Wrap(err, support.ConnectorStackErrorTrace(err)), - errs, - ) - } + defer close(semaphoreCh) + + errUpdater := func(id string, err error) { + m.Lock() + errs = support.WrapAndAppend(id, err, errs) + m.Unlock() + } + + updateLists := func(list models.Listable) { + m.Lock() + results = append(results, list) + m.Unlock() + } + + for _, listID := range listIDs { + semaphoreCh <- struct{}{} + + wg.Add(1) + + go func(id string) { + defer wg.Done() + defer func() { <-semaphoreCh }() + + var ( + entry models.Listable + err error + ) + + err = graph.RunWithRetry(func() error { + entry, err = gs.Client().SitesById(siteID).ListsById(id).Get(ctx, nil) + return err + }) + if err != nil { + errUpdater(id, support.ConnectorStackErrorTraceWrap(err, "")) + return + } + + cols, cTypes, lItems, err := fetchListContents(ctx, gs, siteID, id) + if err != nil { + errUpdater(id, errors.Wrap(err, "unable to fetchRelationships during loadSiteLists")) + return + } - cols, cTypes, lItems, err := fetchListContents(ctx, gs, siteID, listID) - if err == nil { entry.SetColumns(cols) entry.SetContentTypes(cTypes) entry.SetItems(lItems) - } else { - errs = support.WrapAndAppend("unable to fetchRelationships during loadSiteLists", err, errs) - continue - } - - results = append(results, entry) + updateLists(entry) + }(listID) } + wg.Wait() + if errs != nil { return nil, errs } From 057e0e97c0a4614ab4ce8b388d7a652ad22b566c Mon Sep 17 00:00:00 2001 From: Abin Simon Date: Thu, 9 Feb 2023 21:56:58 +0530 Subject: [PATCH 40/45] Add goimports to make fmt (#2456) ## Description Missed adding it to `make fmt` earlier ## Does this PR need a docs update or release note? 
- [ ] :white_check_mark: Yes, it's included - [ ] :clock1: Yes, but in a later PR - [x] :no_entry: No ## Type of change - [ ] :sunflower: Feature - [ ] :bug: Bugfix - [ ] :world_map: Documentation - [ ] :robot: Test - [ ] :computer: CI/Deployment - [x] :broom: Tech Debt/Cleanup --- src/Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Makefile b/src/Makefile index c832d5d79..6f75be16b 100644 --- a/src/Makefile +++ b/src/Makefile @@ -17,6 +17,7 @@ lint: check-lint-version fmt: gofumpt -w . + goimports -w . gci write --skip-generated -s 'standard,default,prefix(github.com/alcionai/corso)' . check-lint-version: check-lint From 47d0eeb700834ae41eb9d2af864581a4f3bd259d Mon Sep 17 00:00:00 2001 From: Abin Simon Date: Thu, 9 Feb 2023 22:52:23 +0530 Subject: [PATCH 41/45] Move stateOf to data (#2455) ## Description stateOf needs to be used in more places. Move it to a common location so as to expose it. ## Does this PR need a docs update or release note? - [ ] :white_check_mark: Yes, it's included - [ ] :clock1: Yes, but in a later PR - [x] :no_entry: No ## Type of change - [ ] :sunflower: Feature - [ ] :bug: Bugfix - [ ] :world_map: Documentation - [ ] :robot: Test - [ ] :computer: CI/Deployment - [x] :broom: Tech Debt/Cleanup ## Issue(s) *https://github.com/alcionai/corso/pull/2407 ## Test Plan - [ ] :muscle: Manual - [x] :zap: Unit test - [ ] :green_heart: E2E --- .../exchange/exchange_data_collection.go | 18 +---- .../exchange/exchange_data_collection_test.go | 55 ---------------- src/internal/data/data_collection.go | 18 +++++ src/internal/data/data_collection_test.go | 66 +++++++++++++++++++ 4 files changed, 85 insertions(+), 72 deletions(-) create mode 100644 src/internal/data/data_collection_test.go diff --git a/src/internal/connector/exchange/exchange_data_collection.go b/src/internal/connector/exchange/exchange_data_collection.go index ecb85521b..07ce33b52 100644 --- a/src/internal/connector/exchange/exchange_data_collection.go +++ b/src/internal/connector/exchange/exchange_data_collection.go @@ -107,7 +107,7 @@ func NewCollection( added: make(map[string]struct{}, 0), removed: make(map[string]struct{}, 0), prevPath: prev, - state: stateOf(prev, curr), + state: data.StateOf(prev, curr), statusUpdater: statusUpdater, user: user, items: items, @@ -116,22 +116,6 @@ func NewCollection( return collection } -func stateOf(prev, curr path.Path) data.CollectionState { - if curr == nil || len(curr.String()) == 0 { - return data.DeletedState - } - - if prev == nil || len(prev.String()) == 0 { - return data.NewState - } - - if curr.Folder() != prev.Folder() { - return data.MovedState - } - - return data.NotMovedState -} - // Items utility function to asynchronously execute process to fill data channel with // M365 exchange objects and returns the data channel func (col *Collection) Items() <-chan data.Stream { diff --git a/src/internal/connector/exchange/exchange_data_collection_test.go b/src/internal/connector/exchange/exchange_data_collection_test.go index e45f3d80c..b327c70c8 100644 --- a/src/internal/connector/exchange/exchange_data_collection_test.go +++ b/src/internal/connector/exchange/exchange_data_collection_test.go @@ -12,10 +12,8 @@ import ( "github.com/alcionai/corso/src/internal/common" "github.com/alcionai/corso/src/internal/connector/graph" - "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/pkg/backup/details" - "github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/path" ) 
@@ -118,59 +116,6 @@ func (suite *ExchangeDataCollectionSuite) TestExchangeDataCollection_NewExchange suite.Equal(fullPath, edc.FullPath()) } -func (suite *ExchangeDataCollectionSuite) TestNewCollection_state() { - fooP, err := path.Builder{}. - Append("foo"). - ToDataLayerExchangePathForCategory("t", "u", path.EmailCategory, false) - require.NoError(suite.T(), err) - barP, err := path.Builder{}. - Append("bar"). - ToDataLayerExchangePathForCategory("t", "u", path.EmailCategory, false) - require.NoError(suite.T(), err) - - table := []struct { - name string - prev path.Path - curr path.Path - expect data.CollectionState - }{ - { - name: "new", - curr: fooP, - expect: data.NewState, - }, - { - name: "not moved", - prev: fooP, - curr: fooP, - expect: data.NotMovedState, - }, - { - name: "moved", - prev: fooP, - curr: barP, - expect: data.MovedState, - }, - { - name: "deleted", - prev: fooP, - expect: data.DeletedState, - }, - } - for _, test := range table { - suite.T().Run(test.name, func(t *testing.T) { - c := NewCollection( - "u", - test.curr, test.prev, - 0, - &mockItemer{}, nil, - control.Options{}, - false) - assert.Equal(t, test.expect, c.State()) - }) - } -} - func (suite *ExchangeDataCollectionSuite) TestGetItemWithRetries() { table := []struct { name string diff --git a/src/internal/data/data_collection.go b/src/internal/data/data_collection.go index beeffd3d7..764a7b886 100644 --- a/src/internal/data/data_collection.go +++ b/src/internal/data/data_collection.go @@ -112,3 +112,21 @@ type StreamSize interface { type StreamModTime interface { ModTime() time.Time } + +// StateOf lets us figure out the state of the collection from the +// previous and current path +func StateOf(prev, curr path.Path) CollectionState { + if curr == nil || len(curr.String()) == 0 { + return DeletedState + } + + if prev == nil || len(prev.String()) == 0 { + return NewState + } + + if curr.Folder() != prev.Folder() { + return MovedState + } + + return NotMovedState +} diff --git a/src/internal/data/data_collection_test.go b/src/internal/data/data_collection_test.go new file mode 100644 index 000000000..9ca093032 --- /dev/null +++ b/src/internal/data/data_collection_test.go @@ -0,0 +1,66 @@ +package data + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + "github.com/alcionai/corso/src/pkg/path" +) + +type DataCollectionSuite struct { + suite.Suite +} + +func TestDataCollectionSuite(t *testing.T) { + suite.Run(t, new(DataCollectionSuite)) +} + +func (suite *DataCollectionSuite) TestStateOf() { + fooP, err := path.Builder{}. + Append("foo"). + ToDataLayerExchangePathForCategory("t", "u", path.EmailCategory, false) + require.NoError(suite.T(), err) + barP, err := path.Builder{}. + Append("bar"). 
+ ToDataLayerExchangePathForCategory("t", "u", path.EmailCategory, false) + require.NoError(suite.T(), err) + + table := []struct { + name string + prev path.Path + curr path.Path + expect CollectionState + }{ + { + name: "new", + curr: fooP, + expect: NewState, + }, + { + name: "not moved", + prev: fooP, + curr: fooP, + expect: NotMovedState, + }, + { + name: "moved", + prev: fooP, + curr: barP, + expect: MovedState, + }, + { + name: "deleted", + prev: fooP, + expect: DeletedState, + }, + } + for _, test := range table { + suite.T().Run(test.name, func(t *testing.T) { + state := StateOf(test.prev, test.curr) + assert.Equal(t, test.expect, state) + }) + } +} From 754981d0d2368da92d86ea43dc5999a3edfcba92 Mon Sep 17 00:00:00 2001 From: ashmrtn Date: Thu, 9 Feb 2023 09:44:10 -0800 Subject: [PATCH 42/45] Make order insensitive permission compare (#2448) ## Description Update the comparison for OneDrive permissions so it doesn't assume a fixed order. Also fix index out of bounds errors if backup did not retrieve the expected permissions ## Does this PR need a docs update or release note? - [ ] :white_check_mark: Yes, it's included - [ ] :clock1: Yes, but in a later PR - [x] :no_entry: No ## Type of change - [ ] :sunflower: Feature - [ ] :bug: Bugfix - [ ] :world_map: Documentation - [x] :robot: Test - [ ] :computer: CI/Deployment - [ ] :broom: Tech Debt/Cleanup ## Issue(s) * #2447 ## Test Plan - [x] :muscle: Manual - [ ] :zap: Unit test - [ ] :green_heart: E2E --- .../connector/graph_connector_helper_test.go | 38 +++++++++++++++---- 1 file changed, 31 insertions(+), 7 deletions(-) diff --git a/src/internal/connector/graph_connector_helper_test.go b/src/internal/connector/graph_connector_helper_test.go index ad6ea556e..cbbb9b0f6 100644 --- a/src/internal/connector/graph_connector_helper_test.go +++ b/src/internal/connector/graph_connector_helper_test.go @@ -14,6 +14,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "golang.org/x/exp/maps" + "golang.org/x/exp/slices" "github.com/alcionai/corso/src/internal/connector/mockconnector" "github.com/alcionai/corso/src/internal/connector/onedrive" @@ -652,6 +653,35 @@ func compareExchangeEvent( checkEvent(t, expectedEvent, itemEvent) } +func permissionEqual(expected onedrive.UserPermission, got onedrive.UserPermission) bool { + if !strings.EqualFold(expected.Email, got.Email) { + return false + } + + if (expected.Expiration == nil && got.Expiration != nil) || + (expected.Expiration != nil && got.Expiration == nil) { + return false + } + + if expected.Expiration != nil && + got.Expiration != nil && + !expected.Expiration.Equal(*got.Expiration) { + return false + } + + if len(expected.Roles) != len(got.Roles) { + return false + } + + for _, r := range got.Roles { + if !slices.Contains(expected.Roles, r) { + return false + } + } + + return true +} + func compareOneDriveItem( t *testing.T, expected map[string][]byte, @@ -695,13 +725,7 @@ func compareOneDriveItem( } assert.Equal(t, len(expectedMeta.Permissions), len(itemMeta.Permissions), "number of permissions after restore") - - // FIXME(meain): The permissions before and after might not be in the same order. 
- for i, p := range expectedMeta.Permissions { - assert.Equal(t, strings.ToLower(p.Email), strings.ToLower(itemMeta.Permissions[i].Email)) - assert.Equal(t, p.Roles, itemMeta.Permissions[i].Roles) - assert.Equal(t, p.Expiration, itemMeta.Permissions[i].Expiration) - } + testElementsMatch(t, expectedMeta.Permissions, itemMeta.Permissions, permissionEqual) } func compareItem( From dad6776861e2a02ba34e1c8465172d263cd421d3 Mon Sep 17 00:00:00 2001 From: Keepers Date: Thu, 9 Feb 2023 11:11:59 -0700 Subject: [PATCH 43/45] replace multierror with fault in setSites (#2377) ## Does this PR need a docs update or release note? - [x] :no_entry: No ## Type of change - [x] :broom: Tech Debt/Cleanup ## Issue(s) * #1970 ## Test Plan - [x] :zap: Unit test - [x] :green_heart: E2E --- src/cli/backup/exchange.go | 16 ++++++---- src/cli/backup/onedrive.go | 16 ++++++---- src/cli/backup/sharepoint.go | 21 +++++++++----- src/cmd/factory/impl/common.go | 6 +++- src/cmd/getM365/getItem.go | 6 +++- src/cmd/purge/purge.go | 6 +++- src/internal/connector/graph_connector.go | 29 ++++++++++++------- .../graph_connector_disconnected_test.go | 5 ++-- .../connector/graph_connector_helper_test.go | 4 ++- .../connector/graph_connector_test.go | 29 ++++++++++++++----- src/internal/operations/backup.go | 2 +- .../operations/backup_integration_test.go | 7 ++++- src/internal/operations/operation.go | 3 +- src/internal/operations/restore.go | 2 +- src/pkg/services/m365/m365.go | 21 +++++++------- src/pkg/services/m365/m365_test.go | 27 ++++++++++------- 16 files changed, 133 insertions(+), 67 deletions(-) diff --git a/src/cli/backup/exchange.go b/src/cli/backup/exchange.go index 149da9f71..b6bffef4c 100644 --- a/src/cli/backup/exchange.go +++ b/src/cli/backup/exchange.go @@ -16,6 +16,7 @@ import ( "github.com/alcionai/corso/src/internal/model" "github.com/alcionai/corso/src/pkg/backup" "github.com/alcionai/corso/src/pkg/backup/details" + "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/repository" "github.com/alcionai/corso/src/pkg/selectors" @@ -272,20 +273,23 @@ func createExchangeCmd(cmd *cobra.Command, args []string) error { sel := exchangeBackupCreateSelectors(user, exchangeData) - users, err := m365.UserPNs(ctx, acct) + // TODO: log/print recoverable errors + errs := fault.New(false) + + users, err := m365.UserPNs(ctx, acct, errs) if err != nil { return Only(ctx, errors.Wrap(err, "Failed to retrieve M365 user(s)")) } var ( - errs *multierror.Error - bIDs []model.StableID + merrs *multierror.Error + bIDs []model.StableID ) for _, discSel := range sel.SplitByResourceOwner(users) { bo, err := r.NewBackup(ctx, discSel.Selector) if err != nil { - errs = multierror.Append(errs, errors.Wrapf( + merrs = multierror.Append(merrs, errors.Wrapf( err, "Failed to initialize Exchange backup for user %s", discSel.DiscreteOwner, @@ -296,7 +300,7 @@ func createExchangeCmd(cmd *cobra.Command, args []string) error { err = bo.Run(ctx) if err != nil { - errs = multierror.Append(errs, errors.Wrapf( + merrs = multierror.Append(merrs, errors.Wrapf( err, "Failed to run Exchange backup for user %s", discSel.DiscreteOwner, @@ -316,7 +320,7 @@ func createExchangeCmd(cmd *cobra.Command, args []string) error { backup.PrintAll(ctx, bups) - if e := errs.ErrorOrNil(); e != nil { + if e := merrs.ErrorOrNil(); e != nil { return Only(ctx, e) } diff --git a/src/cli/backup/onedrive.go b/src/cli/backup/onedrive.go index a99ad6d2b..64f328c7d 100644 --- a/src/cli/backup/onedrive.go +++ 
b/src/cli/backup/onedrive.go @@ -16,6 +16,7 @@ import ( "github.com/alcionai/corso/src/internal/model" "github.com/alcionai/corso/src/pkg/backup" "github.com/alcionai/corso/src/pkg/backup/details" + "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/repository" "github.com/alcionai/corso/src/pkg/selectors" @@ -195,20 +196,23 @@ func createOneDriveCmd(cmd *cobra.Command, args []string) error { sel := oneDriveBackupCreateSelectors(user) - users, err := m365.UserPNs(ctx, acct) + // TODO: log/print recoverable errors + errs := fault.New(false) + + users, err := m365.UserPNs(ctx, acct, errs) if err != nil { return Only(ctx, errors.Wrap(err, "Failed to retrieve M365 users")) } var ( - errs *multierror.Error - bIDs []model.StableID + merrs *multierror.Error + bIDs []model.StableID ) for _, discSel := range sel.SplitByResourceOwner(users) { bo, err := r.NewBackup(ctx, discSel.Selector) if err != nil { - errs = multierror.Append(errs, errors.Wrapf( + merrs = multierror.Append(merrs, errors.Wrapf( err, "Failed to initialize OneDrive backup for user %s", discSel.DiscreteOwner, @@ -219,7 +223,7 @@ func createOneDriveCmd(cmd *cobra.Command, args []string) error { err = bo.Run(ctx) if err != nil { - errs = multierror.Append(errs, errors.Wrapf( + merrs = multierror.Append(merrs, errors.Wrapf( err, "Failed to run OneDrive backup for user %s", discSel.DiscreteOwner, @@ -239,7 +243,7 @@ func createOneDriveCmd(cmd *cobra.Command, args []string) error { backup.PrintAll(ctx, bups) - if e := errs.ErrorOrNil(); e != nil { + if e := merrs.ErrorOrNil(); e != nil { return Only(ctx, e) } diff --git a/src/cli/backup/sharepoint.go b/src/cli/backup/sharepoint.go index 12dd002d4..690b670fb 100644 --- a/src/cli/backup/sharepoint.go +++ b/src/cli/backup/sharepoint.go @@ -18,6 +18,7 @@ import ( "github.com/alcionai/corso/src/internal/model" "github.com/alcionai/corso/src/pkg/backup" "github.com/alcionai/corso/src/pkg/backup/details" + "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/repository" "github.com/alcionai/corso/src/pkg/selectors" @@ -210,7 +211,10 @@ func createSharePointCmd(cmd *cobra.Command, args []string) error { defer utils.CloseRepo(ctx, r) - gc, err := connector.NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), acct, connector.Sites) + // TODO: log/print recoverable errors + errs := fault.New(false) + + gc, err := connector.NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), acct, connector.Sites, errs) if err != nil { return Only(ctx, errors.Wrap(err, "Failed to connect to Microsoft APIs")) } @@ -221,14 +225,14 @@ func createSharePointCmd(cmd *cobra.Command, args []string) error { } var ( - errs *multierror.Error - bIDs []model.StableID + merrs *multierror.Error + bIDs []model.StableID ) for _, discSel := range sel.SplitByResourceOwner(gc.GetSiteIDs()) { bo, err := r.NewBackup(ctx, discSel.Selector) if err != nil { - errs = multierror.Append(errs, errors.Wrapf( + merrs = multierror.Append(merrs, errors.Wrapf( err, "Failed to initialize SharePoint backup for site %s", discSel.DiscreteOwner, @@ -239,7 +243,7 @@ func createSharePointCmd(cmd *cobra.Command, args []string) error { err = bo.Run(ctx) if err != nil { - errs = multierror.Append(errs, errors.Wrapf( + merrs = multierror.Append(merrs, errors.Wrapf( err, "Failed to run SharePoint backup for site %s", discSel.DiscreteOwner, @@ -259,7 +263,7 @@ func createSharePointCmd(cmd *cobra.Command, args 
[]string) error { backup.PrintAll(ctx, bups) - if e := errs.ErrorOrNil(); e != nil { + if e := merrs.ErrorOrNil(); e != nil { return Only(ctx, e) } @@ -315,7 +319,10 @@ func sharePointBackupCreateSelectors( } } - union, err := gc.UnionSiteIDsAndWebURLs(ctx, sites, weburls) + // TODO: log/print recoverable errors + errs := fault.New(false) + + union, err := gc.UnionSiteIDsAndWebURLs(ctx, sites, weburls, errs) if err != nil { return nil, err } diff --git a/src/cmd/factory/impl/common.go b/src/cmd/factory/impl/common.go index 45d2b7a18..307ea65e0 100644 --- a/src/cmd/factory/impl/common.go +++ b/src/cmd/factory/impl/common.go @@ -20,6 +20,7 @@ import ( "github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/credentials" + "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/selectors" ) @@ -114,7 +115,10 @@ func getGCAndVerifyUser(ctx context.Context, userID string) (*connector.GraphCon } // build a graph connector - gc, err := connector.NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), acct, connector.Users) + // TODO: log/print recoverable errors + errs := fault.New(false) + + gc, err := connector.NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), acct, connector.Users, errs) if err != nil { return nil, account.Account{}, errors.Wrap(err, "connecting to graph api") } diff --git a/src/cmd/getM365/getItem.go b/src/cmd/getM365/getItem.go index d24b27d38..7f1936206 100644 --- a/src/cmd/getM365/getItem.go +++ b/src/cmd/getM365/getItem.go @@ -23,6 +23,7 @@ import ( "github.com/alcionai/corso/src/pkg/account" "github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/credentials" + "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/logger" "github.com/alcionai/corso/src/pkg/path" ) @@ -178,7 +179,10 @@ func getGC(ctx context.Context) (*connector.GraphConnector, account.M365Config, return nil, m365Cfg, Only(ctx, errors.Wrap(err, "finding m365 account details")) } - gc, err := connector.NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), acct, connector.Users) + // TODO: log/print recoverable errors + errs := fault.New(false) + + gc, err := connector.NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), acct, connector.Users, errs) if err != nil { return nil, m365Cfg, Only(ctx, errors.Wrap(err, "connecting to graph API")) } diff --git a/src/cmd/purge/purge.go b/src/cmd/purge/purge.go index cb9c9976f..fea9f5d0f 100644 --- a/src/cmd/purge/purge.go +++ b/src/cmd/purge/purge.go @@ -17,6 +17,7 @@ import ( "github.com/alcionai/corso/src/internal/connector/onedrive" "github.com/alcionai/corso/src/pkg/account" "github.com/alcionai/corso/src/pkg/credentials" + "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/logger" ) @@ -260,7 +261,10 @@ func getGC(ctx context.Context) (*connector.GraphConnector, error) { } // build a graph connector - gc, err := connector.NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), acct, connector.Users) + // TODO: log/print recoverable errors + errs := fault.New(false) + + gc, err := connector.NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), acct, connector.Users, errs) if err != nil { return nil, Only(ctx, errors.Wrap(err, "connecting to graph api")) } diff --git a/src/internal/connector/graph_connector.go b/src/internal/connector/graph_connector.go index 888749690..06fb86188 100644 --- 
a/src/internal/connector/graph_connector.go +++ b/src/internal/connector/graph_connector.go @@ -11,7 +11,6 @@ import ( "sync" "github.com/alcionai/clues" - "github.com/hashicorp/go-multierror" "github.com/microsoft/kiota-abstractions-go/serialization" msgraphgocore "github.com/microsoftgraph/msgraph-sdk-go-core" "github.com/microsoftgraph/msgraph-sdk-go/models" @@ -25,6 +24,7 @@ import ( "github.com/alcionai/corso/src/internal/connector/support" D "github.com/alcionai/corso/src/internal/diagnostics" "github.com/alcionai/corso/src/pkg/account" + "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/filters" ) @@ -68,6 +68,7 @@ func NewGraphConnector( itemClient *http.Client, acct account.Account, r resource, + errs *fault.Errors, ) (*GraphConnector, error) { m365, err := acct.M365Config() if err != nil { @@ -103,7 +104,7 @@ func NewGraphConnector( } if r == AllResources || r == Sites { - if err = gc.setTenantSites(ctx); err != nil { + if err = gc.setTenantSites(ctx, errs); err != nil { return nil, errors.Wrap(err, "retrieveing tenant site list") } } @@ -159,7 +160,7 @@ func (gc *GraphConnector) GetUsersIds() []string { // setTenantSites queries the M365 to identify the sites in the // workspace. The sites field is updated during this method // iff the returned error is nil. -func (gc *GraphConnector) setTenantSites(ctx context.Context) error { +func (gc *GraphConnector) setTenantSites(ctx context.Context, errs *fault.Errors) error { gc.Sites = map[string]string{} ctx, end := D.Span(ctx, "gc:setTenantSites") @@ -171,7 +172,8 @@ func (gc *GraphConnector) setTenantSites(ctx context.Context) error { gc.tenant, sharepoint.GetAllSitesForTenant, models.CreateSiteCollectionResponseFromDiscriminatorValue, - identifySite) + identifySite, + errs) if err != nil { return err } @@ -227,9 +229,13 @@ func (gc *GraphConnector) GetSiteIDs() []string { // each element in the url must fully match. Ex: the webURL value "foo" will match "www.ex.com/foo", // but not match "www.ex.com/foobar". // The returned IDs are reduced to a set of unique values. -func (gc *GraphConnector) UnionSiteIDsAndWebURLs(ctx context.Context, ids, urls []string) ([]string, error) { +func (gc *GraphConnector) UnionSiteIDsAndWebURLs( + ctx context.Context, + ids, urls []string, + errs *fault.Errors, +) ([]string, error) { if len(gc.Sites) == 0 { - if err := gc.setTenantSites(ctx); err != nil { + if err := gc.setTenantSites(ctx, errs); err != nil { return nil, err } } @@ -308,6 +314,7 @@ func getResources( query func(context.Context, graph.Servicer) (serialization.Parsable, error), parser func(parseNode serialization.ParseNode) (serialization.Parsable, error), identify func(any) (string, string, error), + errs *fault.Errors, ) (map[string]string, error) { resources := map[string]string{} @@ -318,18 +325,20 @@ func getResources( WithAll(graph.ErrData(err)...) } - errs := &multierror.Error{} - iter, err := msgraphgocore.NewPageIterator(response, gs.Adapter(), parser) if err != nil { return nil, clues.Stack(err).WithClues(ctx).WithAll(graph.ErrData(err)...) } callbackFunc := func(item any) bool { + if errs.Failed() { + return false + } + k, v, err := identify(item) if err != nil { if !errors.Is(err, errKnownSkippableCase) { - errs = multierror.Append(errs, clues.Stack(err). + errs.Add(clues.Stack(err). WithClues(ctx). With("query_url", gs.Adapter().GetBaseUrl())) } @@ -346,5 +355,5 @@ func getResources( return nil, clues.Stack(err).WithClues(ctx).WithAll(graph.ErrData(err)...) 
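To make the callback contract in getResources concrete, here is a minimal sketch, assuming a simplified, hypothetical errBucket in place of corso's fault.Errors: the iterator callback short-circuits once a hard failure is recorded, while recoverable errors are collected and surfaced at the end.

```go
package main

import (
	"errors"
	"fmt"
)

// errBucket is a hypothetical, simplified stand-in for fault.Errors.
type errBucket struct {
	failFast bool
	errs     []error
}

func (b *errBucket) Add(err error) { b.errs = append(b.errs, err) }
func (b *errBucket) Failed() bool  { return b.failFast && len(b.errs) > 0 }
func (b *errBucket) Err() error    { return errors.Join(b.errs...) }

func main() {
	errs := &errBucket{failFast: false}
	resources := map[string]string{}

	// Mirrors the shape of the page-iterator callback: returning
	// false halts paging.
	callbackFunc := func(item string) bool {
		if errs.Failed() {
			return false
		}
		if item == "bad" {
			errs.Add(fmt.Errorf("identifying %q", item))
			return true // recoverable: record it and keep going
		}
		resources[item] = item
		return true
	}

	for _, item := range []string{"ok-1", "bad", "ok-2"} {
		if !callbackFunc(item) {
			break
		}
	}

	fmt.Println(resources, errs.Err()) // ok-1 and ok-2 collected; one recoverable error
}
```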
} - return resources, errs.ErrorOrNil() + return resources, errs.Err() } diff --git a/src/internal/connector/graph_connector_disconnected_test.go b/src/internal/connector/graph_connector_disconnected_test.go index 506b55345..774e8a050 100644 --- a/src/internal/connector/graph_connector_disconnected_test.go +++ b/src/internal/connector/graph_connector_disconnected_test.go @@ -14,6 +14,7 @@ import ( "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/pkg/account" "github.com/alcionai/corso/src/pkg/credentials" + "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/selectors" ) @@ -66,9 +67,9 @@ func (suite *DisconnectedGraphConnectorSuite) TestBadConnection() { for _, test := range table { suite.T().Run(test.name, func(t *testing.T) { - gc, err := NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), test.acct(t), Users) + gc, err := NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), test.acct(t), Users, fault.New(true)) assert.Nil(t, gc, test.name+" failed") - assert.NotNil(t, err, test.name+"failed") + assert.NotNil(t, err, test.name+" failed") }) } } diff --git a/src/internal/connector/graph_connector_helper_test.go b/src/internal/connector/graph_connector_helper_test.go index cbbb9b0f6..cdd9806b1 100644 --- a/src/internal/connector/graph_connector_helper_test.go +++ b/src/internal/connector/graph_connector_helper_test.go @@ -22,6 +22,7 @@ import ( "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/pkg/control" + "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/selectors" ) @@ -1107,7 +1108,8 @@ func getSelectorWith( func loadConnector(ctx context.Context, t *testing.T, itemClient *http.Client, r resource) *GraphConnector { a := tester.NewM365Account(t) - connector, err := NewGraphConnector(ctx, itemClient, a, r) + + connector, err := NewGraphConnector(ctx, itemClient, a, r, fault.New(true)) require.NoError(t, err) return connector diff --git a/src/internal/connector/graph_connector_test.go b/src/internal/connector/graph_connector_test.go index 7a3d703a0..87c2343d9 100644 --- a/src/internal/connector/graph_connector_test.go +++ b/src/internal/connector/graph_connector_test.go @@ -24,6 +24,7 @@ import ( "github.com/alcionai/corso/src/pkg/account" "github.com/alcionai/corso/src/pkg/backup" "github.com/alcionai/corso/src/pkg/control" + "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/selectors" ) @@ -125,9 +126,15 @@ func (suite *GraphConnectorUnitSuite) TestUnionSiteIDsAndWebURLs() { } for _, test := range table { suite.T().Run(test.name, func(t *testing.T) { - //nolint - result, err := gc.UnionSiteIDsAndWebURLs(context.Background(), test.ids, test.urls) + ctx, flush := tester.NewContext() + defer flush() + + errs := fault.New(true) + + result, err := gc.UnionSiteIDsAndWebURLs(ctx, test.ids, test.urls, errs) assert.NoError(t, err) + assert.NoError(t, errs.Err()) + assert.Empty(t, errs.Errs()) assert.ElementsMatch(t, test.expect, result) }) } @@ -204,18 +211,24 @@ func (suite *GraphConnectorIntegrationSuite) TestSetTenantSites() { ctx, flush := tester.NewContext() defer flush() + t := suite.T() + service, err := newConnector.createService() - require.NoError(suite.T(), err) + require.NoError(t, err) newConnector.Service = service + assert.Equal(t, 0, len(newConnector.Sites)) - suite.Equal(0, len(newConnector.Sites)) - 
err = newConnector.setTenantSites(ctx) - suite.NoError(err) - suite.Less(0, len(newConnector.Sites)) + errs := fault.New(true) + + err = newConnector.setTenantSites(ctx, errs) + assert.NoError(t, err) + assert.NoError(t, errs.Err()) + assert.Empty(t, errs.Errs()) + assert.Less(t, 0, len(newConnector.Sites)) for _, site := range newConnector.Sites { - suite.NotContains("sharepoint.com/personal/", site) + assert.NotContains(t, "sharepoint.com/personal/", site) } } diff --git a/src/internal/operations/backup.go b/src/internal/operations/backup.go index a82c0d30c..5ffec2456 100644 --- a/src/internal/operations/backup.go +++ b/src/internal/operations/backup.go @@ -239,7 +239,7 @@ func (op *BackupOperation) do( return nil, errors.Wrap(err, "producing manifests and metadata") } - gc, err := connectToM365(ctx, op.Selectors, op.account) + gc, err := connectToM365(ctx, op.Selectors, op.account, op.Errors) if err != nil { return nil, errors.Wrap(err, "connectng to m365") } diff --git a/src/internal/operations/backup_integration_test.go b/src/internal/operations/backup_integration_test.go index 55269c426..9c3edb4f2 100644 --- a/src/internal/operations/backup_integration_test.go +++ b/src/internal/operations/backup_integration_test.go @@ -669,7 +669,12 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_exchangeIncrementals() { m365, err := acct.M365Config() require.NoError(t, err) - gc, err := connector.NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), acct, connector.Users) + gc, err := connector.NewGraphConnector( + ctx, + graph.HTTPClient(graph.NoTimeout()), + acct, + connector.Users, + fault.New(true)) require.NoError(t, err) ac, err := api.NewClient(m365) diff --git a/src/internal/operations/operation.go b/src/internal/operations/operation.go index 32122d682..ba68a3bdd 100644 --- a/src/internal/operations/operation.go +++ b/src/internal/operations/operation.go @@ -100,6 +100,7 @@ func connectToM365( ctx context.Context, sel selectors.Selector, acct account.Account, + errs *fault.Errors, ) (*connector.GraphConnector, error) { complete, closer := observe.MessageWithCompletion(ctx, observe.Safe("Connecting to M365")) defer func() { @@ -114,7 +115,7 @@ func connectToM365( resource = connector.Sites } - gc, err := connector.NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), acct, resource) + gc, err := connector.NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), acct, resource, errs) if err != nil { return nil, err } diff --git a/src/internal/operations/restore.go b/src/internal/operations/restore.go index 51f69cbc1..d9cdfe1a3 100644 --- a/src/internal/operations/restore.go +++ b/src/internal/operations/restore.go @@ -242,7 +242,7 @@ func (op *RestoreOperation) do( opStats.resourceCount = 1 opStats.cs = dcs - gc, err := connectToM365(ctx, op.Selectors, op.account) + gc, err := connectToM365(ctx, op.Selectors, op.account, op.Errors) if err != nil { return nil, errors.Wrap(err, "connecting to M365") } diff --git a/src/pkg/services/m365/m365.go b/src/pkg/services/m365/m365.go index 19d37f0d5..9379cc028 100644 --- a/src/pkg/services/m365/m365.go +++ b/src/pkg/services/m365/m365.go @@ -11,6 +11,7 @@ import ( "github.com/alcionai/corso/src/internal/connector/discovery" "github.com/alcionai/corso/src/internal/connector/graph" "github.com/alcionai/corso/src/pkg/account" + "github.com/alcionai/corso/src/pkg/fault" ) type User struct { @@ -21,8 +22,8 @@ type User struct { // Users returns a list of users in the specified M365 tenant // TODO: Implement paging support -func 
Users(ctx context.Context, acct account.Account) ([]*User, error) { - gc, err := connector.NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), acct, connector.Users) +func Users(ctx context.Context, acct account.Account, errs *fault.Errors) ([]*User, error) { + gc, err := connector.NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), acct, connector.Users, errs) if err != nil { return nil, errors.Wrap(err, "initializing M365 graph connection") } @@ -46,8 +47,8 @@ func Users(ctx context.Context, acct account.Account) ([]*User, error) { return ret, nil } -func UserIDs(ctx context.Context, acct account.Account) ([]string, error) { - users, err := Users(ctx, acct) +func UserIDs(ctx context.Context, acct account.Account, errs *fault.Errors) ([]string, error) { + users, err := Users(ctx, acct, errs) if err != nil { return nil, err } @@ -62,8 +63,8 @@ func UserIDs(ctx context.Context, acct account.Account) ([]string, error) { // UserPNs retrieves all user principleNames in the tenant. Principle Names // can be used analogous userIDs in graph API queries. -func UserPNs(ctx context.Context, acct account.Account) ([]string, error) { - users, err := Users(ctx, acct) +func UserPNs(ctx context.Context, acct account.Account, errs *fault.Errors) ([]string, error) { + users, err := Users(ctx, acct, errs) if err != nil { return nil, err } @@ -77,8 +78,8 @@ func UserPNs(ctx context.Context, acct account.Account) ([]string, error) { } // SiteURLs returns a list of SharePoint site WebURLs in the specified M365 tenant -func SiteURLs(ctx context.Context, acct account.Account) ([]string, error) { - gc, err := connector.NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), acct, connector.Sites) +func SiteURLs(ctx context.Context, acct account.Account, errs *fault.Errors) ([]string, error) { + gc, err := connector.NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), acct, connector.Sites, errs) if err != nil { return nil, errors.Wrap(err, "initializing M365 graph connection") } @@ -87,8 +88,8 @@ func SiteURLs(ctx context.Context, acct account.Account) ([]string, error) { } // SiteURLs returns a list of SharePoint sites IDs in the specified M365 tenant -func SiteIDs(ctx context.Context, acct account.Account) ([]string, error) { - gc, err := connector.NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), acct, connector.Sites) +func SiteIDs(ctx context.Context, acct account.Account, errs *fault.Errors) ([]string, error) { + gc, err := connector.NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), acct, connector.Sites, errs) if err != nil { return nil, errors.Wrap(err, "initializing graph connection") } diff --git a/src/pkg/services/m365/m365_test.go b/src/pkg/services/m365/m365_test.go index 0441251d0..f5f040dfa 100644 --- a/src/pkg/services/m365/m365_test.go +++ b/src/pkg/services/m365/m365_test.go @@ -8,6 +8,7 @@ import ( "github.com/stretchr/testify/suite" "github.com/alcionai/corso/src/internal/tester" + "github.com/alcionai/corso/src/pkg/fault" ) type M365IntegrationSuite struct { @@ -27,18 +28,24 @@ func (suite *M365IntegrationSuite) TestUsers() { ctx, flush := tester.NewContext() defer flush() - acct := tester.NewM365Account(suite.T()) + var ( + t = suite.T() + acct = tester.NewM365Account(suite.T()) + errs = fault.New(true) + ) - users, err := Users(ctx, acct) - require.NoError(suite.T(), err) - - require.NotNil(suite.T(), users) - require.Greater(suite.T(), len(users), 0) + users, err := Users(ctx, acct, errs) + require.NoError(t, err) + require.NoError(t, 
errs.Err()) + require.Empty(t, errs.Errs()) + require.NotNil(t, users) + require.Greater(t, len(users), 0) for _, u := range users { - suite.T().Log(u) - assert.NotEmpty(suite.T(), u.ID) - assert.NotEmpty(suite.T(), u.PrincipalName) - assert.NotEmpty(suite.T(), u.Name) + t.Run("user_"+u.ID, func(t *testing.T) { + assert.NotEmpty(t, u.ID) + assert.NotEmpty(t, u.PrincipalName) + assert.NotEmpty(t, u.Name) + }) } } From d9d0158b6f51dcbf5fb4589718e8890e064b481a Mon Sep 17 00:00:00 2001 From: ashmrtn Date: Thu, 9 Feb 2023 10:30:04 -0800 Subject: [PATCH 44/45] Break file order dependency for OneDrive .meta files (#2450) ## Description Begin using the Fetch() interface to retrieve OneDrive meta files inline when restoring the file. This removes the ordering dependency between .data and .meta files. This does not stop .meta files from being returned over the Items() channel. That can be disabled in a future PR. ## Does this PR need a docs update or release note? - [ ] :white_check_mark: Yes, it's included - [ ] :clock1: Yes, but in a later PR - [x] :no_entry: No ## Type of change - [x] :sunflower: Feature - [ ] :bug: Bugfix - [ ] :world_map: Documentation - [ ] :robot: Test - [ ] :computer: CI/Deployment - [ ] :broom: Tech Debt/Cleanup ## Issue(s) * #2447 ## Test Plan - [ ] :muscle: Manual - [x] :zap: Unit test - [ ] :green_heart: E2E --- .../connector/graph_connector_helper_test.go | 43 +++++- .../connector/graph_connector_test.go | 140 ++++++++++++++++++ src/internal/connector/onedrive/restore.go | 48 +++--- 3 files changed, 205 insertions(+), 26 deletions(-) diff --git a/src/internal/connector/graph_connector_helper_test.go b/src/internal/connector/graph_connector_helper_test.go index cdd9806b1..7e49f2c08 100644 --- a/src/internal/connector/graph_connector_helper_test.go +++ b/src/internal/connector/graph_connector_helper_test.go @@ -1,6 +1,7 @@ package connector import ( + "bytes" "context" "encoding/json" "io" @@ -165,6 +166,10 @@ type colInfo struct { pathElements []string category path.CategoryType items []itemInfo + // auxItems are items that can be retrieved with Fetch but won't be returned + // by Items(). These files do not directly participate in comparisons at the + // end of a test. + auxItems []itemInfo } type restoreBackupInfo struct { @@ -969,6 +974,25 @@ func backupOutputPathFromRestore( ) } +// TODO(ashmrtn): Make this an actual mock class that can be used in other +// packages.
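As a hedged sketch of the contract the mock below satisfies (interface and error names here are simplified assumptions, not corso's actual data package API), a Fetch-style collection lets restore code pull a named sidecar item on demand instead of depending on channel ordering:

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// Stream and fetcher are assumed, pared-down shapes of corso's
// data.Stream and fetch interfaces.
type Stream interface{ UUID() string }

type fetcher interface {
	Fetch(ctx context.Context, name string) (Stream, error)
}

var errNotFound = errors.New("not found")

type item string

func (i item) UUID() string { return string(i) }

// mapCollection is a toy collection backed by a name->item map.
type mapCollection map[string]Stream

func (c mapCollection) Fetch(_ context.Context, name string) (Stream, error) {
	if s, ok := c[name]; ok {
		return s, nil
	}
	return nil, errNotFound
}

// fetchMeta pulls a .meta sidecar by name, independent of the order in
// which items arrive over a channel.
func fetchMeta(ctx context.Context, dc fetcher, trimmedName string) (Stream, error) {
	s, err := dc.Fetch(ctx, trimmedName+".meta")
	if err != nil {
		return nil, fmt.Errorf("getting item metadata: %w", err)
	}
	return s, nil
}

func main() {
	dc := mapCollection{"file.txt.meta": item("file.txt.meta")}
	s, err := fetchMeta(context.Background(), dc, "file.txt")
	fmt.Println(s, err)
}
```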
+type mockRestoreCollection struct { + data.Collection + auxItems map[string]data.Stream +} + +func (rc mockRestoreCollection) Fetch( + ctx context.Context, + name string, +) (data.Stream, error) { + res := rc.auxItems[name] + if res == nil { + return nil, data.ErrNotFound + } + + return res, nil +} + func collectionsForInfo( t *testing.T, service path.ServiceType, @@ -991,7 +1015,7 @@ func collectionsForInfo( info.pathElements, false, ) - c := mockconnector.NewMockExchangeCollection(pth, len(info.items)) + mc := mockconnector.NewMockExchangeCollection(pth, len(info.items)) baseDestPath := backupOutputPathFromRestore(t, dest, pth) baseExpected := expectedData[baseDestPath.String()] @@ -1001,8 +1025,8 @@ func collectionsForInfo( } for i := 0; i < len(info.items); i++ { - c.Names[i] = info.items[i].name - c.Data[i] = info.items[i].data + mc.Names[i] = info.items[i].name + mc.Data[i] = info.items[i].data baseExpected[info.items[i].lookupKey] = info.items[i].data @@ -1014,9 +1038,16 @@ func collectionsForInfo( } } - collections = append(collections, data.NotFoundRestoreCollection{ - Collection: c, - }) + c := mockRestoreCollection{Collection: mc, auxItems: map[string]data.Stream{}} + + for _, aux := range info.auxItems { + c.auxItems[aux.name] = &mockconnector.MockExchangeData{ + ID: aux.name, + Reader: io.NopCloser(bytes.NewReader(aux.data)), + } + } + + collections = append(collections, c) kopiaEntries += len(info.items) } diff --git a/src/internal/connector/graph_connector_test.go b/src/internal/connector/graph_connector_test.go index 87c2343d9..8a2a2a39e 100644 --- a/src/internal/connector/graph_connector_test.go +++ b/src/internal/connector/graph_connector_test.go @@ -898,6 +898,13 @@ func (suite *GraphConnectorIntegrationSuite) TestRestoreAndBackup() { lookupKey: "b" + onedrive.DirMetaFileSuffix, }, }, + auxItems: []itemInfo{ + { + name: "test-file.txt" + onedrive.MetaFileSuffix, + data: []byte("{}"), + lookupKey: "test-file.txt" + onedrive.MetaFileSuffix, + }, + }, }, { pathElements: []string{ @@ -924,6 +931,13 @@ func (suite *GraphConnectorIntegrationSuite) TestRestoreAndBackup() { lookupKey: "b" + onedrive.DirMetaFileSuffix, }, }, + auxItems: []itemInfo{ + { + name: "test-file.txt" + onedrive.MetaFileSuffix, + data: []byte("{}"), + lookupKey: "test-file.txt" + onedrive.MetaFileSuffix, + }, + }, }, { pathElements: []string{ @@ -951,6 +965,13 @@ func (suite *GraphConnectorIntegrationSuite) TestRestoreAndBackup() { lookupKey: "folder-a" + onedrive.DirMetaFileSuffix, }, }, + auxItems: []itemInfo{ + { + name: "test-file.txt" + onedrive.MetaFileSuffix, + data: []byte("{}"), + lookupKey: "test-file.txt" + onedrive.MetaFileSuffix, + }, + }, }, { pathElements: []string{ @@ -974,6 +995,13 @@ func (suite *GraphConnectorIntegrationSuite) TestRestoreAndBackup() { lookupKey: "test-file.txt" + onedrive.MetaFileSuffix, }, }, + auxItems: []itemInfo{ + { + name: "test-file.txt" + onedrive.MetaFileSuffix, + data: []byte("{}"), + lookupKey: "test-file.txt" + onedrive.MetaFileSuffix, + }, + }, }, { pathElements: []string{ @@ -995,6 +1023,13 @@ func (suite *GraphConnectorIntegrationSuite) TestRestoreAndBackup() { lookupKey: "test-file.txt" + onedrive.MetaFileSuffix, }, }, + auxItems: []itemInfo{ + { + name: "test-file.txt" + onedrive.MetaFileSuffix, + data: []byte("{}"), + lookupKey: "test-file.txt" + onedrive.MetaFileSuffix, + }, + }, }, }, }, @@ -1027,6 +1062,13 @@ func (suite *GraphConnectorIntegrationSuite) TestRestoreAndBackup() { lookupKey: "b" + onedrive.DirMetaFileSuffix, }, }, + auxItems: 
[]itemInfo{ + { + name: "test-file.txt" + onedrive.MetaFileSuffix, + data: getTestMetaJSON(suite.T(), suite.secondaryUser, []string{"write"}), + lookupKey: "test-file.txt" + onedrive.MetaFileSuffix, + }, + }, }, { pathElements: []string{ @@ -1048,6 +1090,13 @@ func (suite *GraphConnectorIntegrationSuite) TestRestoreAndBackup() { lookupKey: "test-file.txt" + onedrive.MetaFileSuffix, }, }, + auxItems: []itemInfo{ + { + name: "test-file.txt" + onedrive.MetaFileSuffix, + data: getTestMetaJSON(suite.T(), suite.secondaryUser, []string{"read"}), + lookupKey: "test-file.txt" + onedrive.MetaFileSuffix, + }, + }, }, }, }, @@ -1203,6 +1252,13 @@ func (suite *GraphConnectorIntegrationSuite) TestRestoreAndBackupVersion0() { lookupKey: "b" + onedrive.DirMetaFileSuffix, }, }, + auxItems: []itemInfo{ + { + name: "test-file.txt" + onedrive.MetaFileSuffix, + data: []byte("{}"), + lookupKey: "test-file.txt" + onedrive.MetaFileSuffix, + }, + }, }, { pathElements: []string{ @@ -1229,6 +1285,13 @@ func (suite *GraphConnectorIntegrationSuite) TestRestoreAndBackupVersion0() { lookupKey: "b" + onedrive.DirMetaFileSuffix, }, }, + auxItems: []itemInfo{ + { + name: "test-file.txt" + onedrive.MetaFileSuffix, + data: []byte("{}"), + lookupKey: "test-file.txt" + onedrive.MetaFileSuffix, + }, + }, }, { pathElements: []string{ @@ -1256,6 +1319,13 @@ func (suite *GraphConnectorIntegrationSuite) TestRestoreAndBackupVersion0() { lookupKey: "folder-a" + onedrive.DirMetaFileSuffix, }, }, + auxItems: []itemInfo{ + { + name: "test-file.txt" + onedrive.MetaFileSuffix, + data: []byte("{}"), + lookupKey: "test-file.txt" + onedrive.MetaFileSuffix, + }, + }, }, { pathElements: []string{ @@ -1279,6 +1349,13 @@ func (suite *GraphConnectorIntegrationSuite) TestRestoreAndBackupVersion0() { lookupKey: "test-file.txt" + onedrive.MetaFileSuffix, }, }, + auxItems: []itemInfo{ + { + name: "test-file.txt" + onedrive.MetaFileSuffix, + data: []byte("{}"), + lookupKey: "test-file.txt" + onedrive.MetaFileSuffix, + }, + }, }, { pathElements: []string{ @@ -1300,6 +1377,13 @@ func (suite *GraphConnectorIntegrationSuite) TestRestoreAndBackupVersion0() { lookupKey: "test-file.txt" + onedrive.MetaFileSuffix, }, }, + auxItems: []itemInfo{ + { + name: "test-file.txt" + onedrive.MetaFileSuffix, + data: []byte("{}"), + lookupKey: "test-file.txt" + onedrive.MetaFileSuffix, + }, + }, }, }, }, @@ -1521,6 +1605,13 @@ func (suite *GraphConnectorIntegrationSuite) TestPermissionsRestoreAndBackup() { lookupKey: "test-file.txt" + onedrive.MetaFileSuffix, }, }, + auxItems: []itemInfo{ + { + name: "test-file.txt" + onedrive.MetaFileSuffix, + data: getTestMetaJSON(suite.T(), suite.secondaryUser, []string{"write"}), + lookupKey: "test-file.txt" + onedrive.MetaFileSuffix, + }, + }, }, }, }, @@ -1554,6 +1645,13 @@ func (suite *GraphConnectorIntegrationSuite) TestPermissionsRestoreAndBackup() { lookupKey: "b" + onedrive.DirMetaFileSuffix, }, }, + auxItems: []itemInfo{ + { + name: "test-file.txt" + onedrive.MetaFileSuffix, + data: []byte("{}"), + lookupKey: "test-file.txt" + onedrive.MetaFileSuffix, + }, + }, }, { pathElements: []string{ @@ -1575,6 +1673,13 @@ func (suite *GraphConnectorIntegrationSuite) TestPermissionsRestoreAndBackup() { lookupKey: "test-file.txt" + onedrive.MetaFileSuffix, }, }, + auxItems: []itemInfo{ + { + name: "test-file.txt" + onedrive.MetaFileSuffix, + data: getTestMetaJSON(suite.T(), suite.secondaryUser, []string{"read"}), + lookupKey: "test-file.txt" + onedrive.MetaFileSuffix, + }, + }, }, }, }, @@ -1608,6 +1713,13 @@ func (suite 
*GraphConnectorIntegrationSuite) TestPermissionsRestoreAndBackup() { lookupKey: "b" + onedrive.DirMetaFileSuffix, }, }, + auxItems: []itemInfo{ + { + name: "test-file.txt" + onedrive.MetaFileSuffix, + data: getTestMetaJSON(suite.T(), suite.secondaryUser, []string{"write"}), + lookupKey: "test-file.txt" + onedrive.MetaFileSuffix, + }, + }, }, { pathElements: []string{ @@ -1629,6 +1741,13 @@ func (suite *GraphConnectorIntegrationSuite) TestPermissionsRestoreAndBackup() { lookupKey: "test-file.txt" + onedrive.MetaFileSuffix, }, }, + auxItems: []itemInfo{ + { + name: "test-file.txt" + onedrive.MetaFileSuffix, + data: getTestMetaJSON(suite.T(), suite.secondaryUser, []string{"read"}), + lookupKey: "test-file.txt" + onedrive.MetaFileSuffix, + }, + }, }, }, }, @@ -1673,6 +1792,13 @@ func (suite *GraphConnectorIntegrationSuite) TestPermissionsRestoreAndBackup() { lookupKey: "test-file.txt" + onedrive.MetaFileSuffix, }, }, + auxItems: []itemInfo{ + { + name: "test-file.txt" + onedrive.MetaFileSuffix, + data: getTestMetaJSON(suite.T(), suite.secondaryUser, []string{"write"}), + lookupKey: "test-file.txt" + onedrive.MetaFileSuffix, + }, + }, }, }, }, @@ -1717,6 +1843,13 @@ func (suite *GraphConnectorIntegrationSuite) TestPermissionsRestoreAndBackup() { lookupKey: "test-file.txt" + onedrive.MetaFileSuffix, }, }, + auxItems: []itemInfo{ + { + name: "test-file.txt" + onedrive.MetaFileSuffix, + data: []byte("{}"), + lookupKey: "test-file.txt" + onedrive.MetaFileSuffix, + }, + }, }, }, }, @@ -1775,6 +1908,13 @@ func (suite *GraphConnectorIntegrationSuite) TestPermissionsBackupAndNoRestore() lookupKey: "test-file.txt" + onedrive.MetaFileSuffix, }, }, + auxItems: []itemInfo{ + { + name: "test-file.txt" + onedrive.MetaFileSuffix, + data: getTestMetaJSON(suite.T(), suite.secondaryUser, []string{"write"}), + lookupKey: "test-file.txt" + onedrive.MetaFileSuffix, + }, + }, }, }, }, diff --git a/src/internal/connector/onedrive/restore.go b/src/internal/connector/onedrive/restore.go index e2029f4cc..d5df9dedc 100644 --- a/src/internal/connector/onedrive/restore.go +++ b/src/internal/connector/onedrive/restore.go @@ -9,6 +9,7 @@ import ( "sort" "strings" + "github.com/alcionai/clues" msdrive "github.com/microsoftgraph/msgraph-sdk-go/drive" "github.com/microsoftgraph/msgraph-sdk-go/models" "github.com/pkg/errors" @@ -164,7 +165,6 @@ func RestoreCollection( metrics = support.CollectionMetrics{} copyBuffer = make([]byte, copyBufferSize) directory = dc.FullPath() - restoredIDs = map[string]string{} itemInfo details.ItemInfo itemID string folderPerms = map[string][]UserPermission{} @@ -226,37 +226,44 @@ func RestoreCollection( metrics.TotalBytes += int64(len(copyBuffer)) trimmedName := strings.TrimSuffix(name, DataFileSuffix) - itemID, itemInfo, err = restoreData(ctx, service, trimmedName, itemData, - drivePath.DriveID, restoreFolderID, copyBuffer, source) + itemID, itemInfo, err = restoreData( + ctx, + service, + trimmedName, + itemData, + drivePath.DriveID, + restoreFolderID, + copyBuffer, + source) if err != nil { errUpdater(itemData.UUID(), err) continue } - restoredIDs[trimmedName] = itemID - deets.Add(itemPath.String(), itemPath.ShortRef(), "", true, itemInfo) // Mark it as success without processing .meta // file if we are not restoring permissions if !restorePerms { metrics.Successes++ - } - } else if strings.HasSuffix(name, MetaFileSuffix) { - if !restorePerms { continue } - meta, err := getMetadata(itemData.ToReader()) + // Fetch item permissions from the collection and restore them. 
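The getMetadata call in the hunk below decodes the fetched .meta stream; a plausible sketch follows, with field names assumed (the real Metadata and UserPermission types live in the onedrive package):

```go
package onedrivesketch

import (
	"encoding/json"
	"fmt"
	"io"
)

// UserPermission and Metadata are assumed shapes, inferred from how
// meta.Permissions is consumed by restorePermissions in the diff.
type UserPermission struct {
	EntityID string   `json:"entityId"`
	Roles    []string `json:"roles"`
}

type Metadata struct {
	Permissions []UserPermission `json:"permissions"`
}

// getMetadata deserializes a .meta payload from the fetched stream's reader.
func getMetadata(r io.Reader) (Metadata, error) {
	var m Metadata
	if err := json.NewDecoder(r).Decode(&m); err != nil {
		return Metadata{}, fmt.Errorf("deserializing item metadata: %w", err)
	}

	return m, nil
}
```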
+ metaName := trimmedName + MetaFileSuffix + + permsFile, err := dc.Fetch(ctx, metaName) if err != nil { - errUpdater(itemData.UUID(), err) + errUpdater(metaName, clues.Wrap(err, "getting item metadata")) continue } - trimmedName := strings.TrimSuffix(name, MetaFileSuffix) - restoreID, ok := restoredIDs[trimmedName] - if !ok { - errUpdater(itemData.UUID(), fmt.Errorf("item not available to restore permissions")) + metaReader := permsFile.ToReader() + meta, err := getMetadata(metaReader) + metaReader.Close() + + if err != nil { + errUpdater(metaName, clues.Wrap(err, "deserializing item metadata")) continue } @@ -264,21 +271,22 @@ func RestoreCollection( ctx, service, drivePath.DriveID, - restoreID, + itemID, parentPerms, meta.Permissions, permissionIDMappings, ) if err != nil { - errUpdater(itemData.UUID(), err) + errUpdater(trimmedName, clues.Wrap(err, "restoring item permissions")) continue } - // Objects count is incremented when we restore a - // data file and success count is incremented when - // we restore a meta file as every data file - // should have an associated meta file metrics.Successes++ + } else if strings.HasSuffix(name, MetaFileSuffix) { + // Just skip this for the moment since we moved the code to the above + // item restore path. We haven't yet stopped fetching these items in + // RestoreOp, so we still need to handle them in some way. + continue } else if strings.HasSuffix(name, DirMetaFileSuffix) { trimmedName := strings.TrimSuffix(name, DirMetaFileSuffix) folderID, err := createRestoreFolder( From b3a1de89bb26302940f4d324c39fb1c956abdcfe Mon Sep 17 00:00:00 2001 From: Danny Date: Fri, 10 Feb 2023 09:23:18 -0500 Subject: [PATCH 45/45] GC: Item attachment contact support (#2465) ## Description Adds the logic for sanitizing Contactable data for restoration of `ItemAttachable.Contact` types. Contact `Item.Attachment`s required the removal of: - `odata.Context` - `ETag` - `ParentFolder` Otherwise, the following error occurs on POST. ```bash UnableToDeserializePostBody were unable to deserialize ``` ## Does this PR need a docs update or release note? - [x] :no_entry: No ## Type of change - [x] :sunflower: Feature - [x] :bug: Bugfix ## Issue(s) * closes #2426 ## Test Plan - [x] :zap: Unit test --- src/internal/common/ptr/pointer.go | 14 +++++++++ src/internal/connector/exchange/attachment.go | 15 ++++------ .../connector/exchange/restore_test.go | 15 ++++++++++ .../mockconnector/mock_data_message.go | 30 ++++++++++++++++++- .../connector/support/m365Transform.go | 23 ++++++++++++-- 5 files changed, 83 insertions(+), 14 deletions(-) create mode 100644 src/internal/common/ptr/pointer.go diff --git a/src/internal/common/ptr/pointer.go b/src/internal/common/ptr/pointer.go new file mode 100644 index 000000000..68d15b109 --- /dev/null +++ b/src/internal/common/ptr/pointer.go @@ -0,0 +1,14 @@ +package ptr + +// Val is a helper for unwrapping string pointers. +// Microsoft Graph saves many variables as string pointers. +// The function safely checks whether the pointer is nil prior to +// dereferencing it. If the pointer is nil, +// an empty string is returned.
+func Val(ptr *string) string { + if ptr == nil { + return "" + } + + return *ptr +} diff --git a/src/internal/connector/exchange/attachment.go b/src/internal/connector/exchange/attachment.go index 075ab09a6..ed8828930 100644 --- a/src/internal/connector/exchange/attachment.go +++ b/src/internal/connector/exchange/attachment.go @@ -8,6 +8,7 @@ import ( "github.com/microsoftgraph/msgraph-sdk-go/models" "github.com/pkg/errors" + "github.com/alcionai/corso/src/internal/common/ptr" "github.com/alcionai/corso/src/internal/connector/support" "github.com/alcionai/corso/src/internal/connector/uploadsession" "github.com/alcionai/corso/src/pkg/logger" @@ -63,19 +64,16 @@ func uploadAttachment( attachment, err = support.ToItemAttachment(attachment) if err != nil { - name := "" - if prev.GetName() != nil { - name = *prev.GetName() - } + name := ptr.Val(prev.GetName()) + msg := "item attachment restore not supported for this type. skipping upload." // TODO: (rkeepers) Update to support PII protection - msg := "item attachment restore not supported for this type. skipping upload." logger.Ctx(ctx).Infow(msg, "err", err, "attachment_name", name, "attachment_type", attachmentType, "internal_item_type", getItemAttachmentItemType(prev), - "attachment_id", *prev.GetId(), + "attachment_id", ptr.Val(prev.GetId()), ) return nil @@ -129,9 +127,6 @@ func getItemAttachmentItemType(query models.Attachmentable) string { } item := attachment.GetItem() - if item.GetOdataType() == nil { - return empty - } - return *item.GetOdataType() + return ptr.Val(item.GetOdataType()) } diff --git a/src/internal/connector/exchange/restore_test.go b/src/internal/connector/exchange/restore_test.go index e6db75129..ad0c6b192 100644 --- a/src/internal/connector/exchange/restore_test.go +++ b/src/internal/connector/exchange/restore_test.go @@ -230,6 +230,21 @@ func (suite *ExchangeRestoreSuite) TestRestoreExchangeObject() { return *folder.GetId() }, }, + { + name: "Test Mail: Item Attachment_Contact", + bytes: mockconnector.GetMockMessageWithNestedItemAttachmentContact(t, + mockconnector.GetMockContactBytes("Victor"), + "Contact Item Attachment", + ), + category: path.EmailCategory, + destination: func(t *testing.T, ctx context.Context) string { + folderName := "ItemMailAttachment_Contact " + common.FormatSimpleDateTime(now) + folder, err := suite.ac.Mail().CreateMailFolder(ctx, userID, folderName) + require.NoError(t, err) + + return *folder.GetId() + }, + }, { // Restore will upload the Message without uploading the attachment name: "Test Mail: Item Attachment_NestedEvent", bytes: mockconnector.GetMockMessageWithNestedItemAttachmentEvent("Nested Item Attachment"), diff --git a/src/internal/connector/mockconnector/mock_data_message.go b/src/internal/connector/mockconnector/mock_data_message.go index 50ff3345c..9f697b80a 100644 --- a/src/internal/connector/mockconnector/mock_data_message.go +++ b/src/internal/connector/mockconnector/mock_data_message.go @@ -5,6 +5,7 @@ import ( "fmt" "testing" + absser "github.com/microsoft/kiota-abstractions-go/serialization" js "github.com/microsoft/kiota-serialization-json-go" "github.com/microsoftgraph/msgraph-sdk-go/models" "github.com/pkg/errors" @@ -706,8 +707,35 @@ func GetMockMessageWithNestedItemAttachmentMail(t *testing.T, nested []byte, sub iaNode.SetItem(nestedMessage) message.SetAttachments([]models.Attachmentable{iaNode}) + return serialize(t, message) +} + +func GetMockMessageWithNestedItemAttachmentContact(t *testing.T, nested []byte, subject string) []byte { + base := 
GetMockMessageBytes(subject) + message, err := hydrateMessage(base) + require.NoError(t, err) + + parseNode, err := js.NewJsonParseNodeFactory().GetRootParseNode("application/json", nested) + require.NoError(t, err) + + anObject, err := parseNode.GetObjectValue(models.CreateContactFromDiscriminatorValue) + require.NoError(t, err) + + contact := anObject.(models.Contactable) + internalName := "Nested Contact" + iaNode := models.NewItemAttachment() + attachmentSize := int32(len(nested)) + iaNode.SetSize(&attachmentSize) + iaNode.SetName(&internalName) + iaNode.SetItem(contact) + message.SetAttachments([]models.Attachmentable{iaNode}) + + return serialize(t, message) +} + +func serialize(t *testing.T, item absser.Parsable) []byte { wtr := js.NewJsonSerializationWriter() - err = wtr.WriteObjectValue("", message) + err := wtr.WriteObjectValue("", item) require.NoError(t, err) byteArray, err := wtr.GetSerializedContent() diff --git a/src/internal/connector/support/m365Transform.go b/src/internal/connector/support/m365Transform.go index 4f8227a29..bcbdac898 100644 --- a/src/internal/connector/support/m365Transform.go +++ b/src/internal/connector/support/m365Transform.go @@ -306,9 +306,10 @@ func cloneColumnDefinitionable(orig models.ColumnDefinitionable) models.ColumnDe // //nolint:lll const ( - itemAttachment = "#microsoft.graph.itemAttachment" - eventItemType = "#microsoft.graph.event" - mailItemType = "#microsoft.graph.message" + itemAttachment = "#microsoft.graph.itemAttachment" + eventItemType = "#microsoft.graph.event" + mailItemType = "#microsoft.graph.message" + contactItemType = "#microsoft.graph.contact" ) // ToItemAttachment transforms internal item, OutlookItemables, into @@ -323,6 +324,13 @@ func ToItemAttachment(orig models.Attachmentable) (models.Attachmentable, error) itemType := item.GetOdataType() switch *itemType { + case contactItemType: + contact := item.(models.Contactable) + revised := sanitizeContact(contact) + + transform.SetItem(revised) + + return transform, nil case eventItemType: event := item.(models.Eventable) @@ -372,6 +380,15 @@ func ToItemAttachment(orig models.Attachmentable) (models.Attachmentable, error) // return attachments, nil // } +// sanitizeContact removes fields which prevent a Contact from +// being uploaded as an attachment. +func sanitizeContact(orig models.Contactable) models.Contactable { + orig.SetParentFolderId(nil) + orig.SetAdditionalData(nil) + + return orig +} + // sanitizeEvent transfers data into event object and // removes unique IDs from the M365 object func sanitizeEvent(orig models.Eventable) (models.Eventable, error) {
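Taken together, PATCH 45 boils down to the following flow when a contact rides along as an item attachment. This sketch reuses only model calls that appear in the diffs above (models.NewItemAttachment, SetItem, SetParentFolderId, SetAdditionalData, SetName, SetSize); the wrapper function and its literal values are illustrative assumptions:

```go
package sketch

import (
	"github.com/microsoftgraph/msgraph-sdk-go/models"
)

// sanitizeContact mirrors the helper added in m365Transform.go: it strips
// the fields that cause Graph to reject the POST body.
func sanitizeContact(orig models.Contactable) models.Contactable {
	orig.SetParentFolderId(nil)
	orig.SetAdditionalData(nil)

	return orig
}

// contactAsAttachment wraps a sanitized contact in an ItemAttachment, the
// same shape GetMockMessageWithNestedItemAttachmentContact builds in the
// test helpers above. The name and size values are illustrative.
func contactAsAttachment(c models.Contactable, rawLen int) models.Attachmentable {
	att := models.NewItemAttachment()

	name := "Nested Contact"
	size := int32(rawLen)

	att.SetName(&name)
	att.SetSize(&size)
	att.SetItem(sanitizeContact(c))

	return att
}
```

Stripping the parent folder ID and the additional-data map before the upload is what avoids the UnableToDeserializePostBody failure quoted in the commit message.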