From 4ace4bee761ccfcee1f2feb759d40dece1b52811 Mon Sep 17 00:00:00 2001 From: Abhishek Pandey Date: Tue, 22 Aug 2023 18:25:08 +0530 Subject: [PATCH 1/6] Remove duplicate mocks from kopia wrapper tests (#4083) No logic changes. Only removing duplicate test code. --- #### Does this PR need a docs update or release note? - [ ] :white_check_mark: Yes, it's included - [ ] :clock1: Yes, but in a later PR - [x] :no_entry: No #### Type of change - [ ] :sunflower: Feature - [ ] :bug: Bugfix - [ ] :world_map: Documentation - [ ] :robot: Supportability/Tests - [ ] :computer: CI/Deployment - [x] :broom: Tech Debt/Cleanup #### Issue(s) * # #### Test Plan - [ ] :muscle: Manual - [x] :zap: Unit test - [ ] :green_heart: E2E --- src/internal/kopia/wrapper_test.go | 83 ++++++++---------------------- 1 file changed, 21 insertions(+), 62 deletions(-) diff --git a/src/internal/kopia/wrapper_test.go b/src/internal/kopia/wrapper_test.go index a21b954a9..8c511a6f0 100644 --- a/src/internal/kopia/wrapper_test.go +++ b/src/internal/kopia/wrapper_test.go @@ -25,6 +25,7 @@ import ( "github.com/alcionai/corso/src/internal/data" dataMock "github.com/alcionai/corso/src/internal/data/mock" "github.com/alcionai/corso/src/internal/m365/collection/drive/metadata" + m365Mock "github.com/alcionai/corso/src/internal/m365/mock" exchMock "github.com/alcionai/corso/src/internal/m365/service/exchange/mock" "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/pkg/backup/details" @@ -1128,10 +1129,10 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_NoDetailsForMeta() { streams = append(streams, ms) } - mc := &mockBackupCollection{ - path: storePath, - loc: locPath, - streams: streams, + mc := &m365Mock.BackupCollection{ + Path: storePath, + Loc: locPath, + Streams: streams, } return []data.BackupCollection{mc} @@ -1155,11 +1156,11 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_NoDetailsForMeta() { ItemInfo: details.ItemInfo{OneDrive: &info}, } - mc := &mockBackupCollection{ - path: storePath, - loc: locPath, - streams: []data.Item{ms}, - state: data.NotMovedState, + mc := &m365Mock.BackupCollection{ + Path: storePath, + Loc: locPath, + Streams: []data.Item{ms}, + CState: data.NotMovedState, } return []data.BackupCollection{mc} @@ -1293,48 +1294,6 @@ func (suite *KopiaIntegrationSuite) TestRestoreAfterCompressionChange() { testForFiles(t, ctx, expected, result) } -// TODO(pandeyabs): Switch to m365/mock/BackupCollection. 
-type mockBackupCollection struct { - path path.Path - loc *path.Builder - streams []data.Item - state data.CollectionState -} - -func (c *mockBackupCollection) Items(context.Context, *fault.Bus) <-chan data.Item { - res := make(chan data.Item) - - go func() { - defer close(res) - - for _, s := range c.streams { - res <- s - } - }() - - return res -} - -func (c mockBackupCollection) FullPath() path.Path { - return c.path -} - -func (c mockBackupCollection) PreviousPath() path.Path { - return c.path -} - -func (c mockBackupCollection) LocationPath() *path.Builder { - return c.loc -} - -func (c mockBackupCollection) State() data.CollectionState { - return c.state -} - -func (c mockBackupCollection) DoNotMergeItems() bool { - return false -} - func (suite *KopiaIntegrationSuite) TestBackupCollections_ReaderError() { t := suite.T() @@ -1343,10 +1302,10 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_ReaderError() { r := NewReason(testTenant, testUser, path.ExchangeService, path.EmailCategory) collections := []data.BackupCollection{ - &mockBackupCollection{ - path: suite.storePath1, - loc: loc1, - streams: []data.Item{ + &m365Mock.BackupCollection{ + Path: suite.storePath1, + Loc: loc1, + Streams: []data.Item{ &dataMock.Item{ ItemID: testFileName, Reader: io.NopCloser(bytes.NewReader(testFileData)), @@ -1359,10 +1318,10 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_ReaderError() { }, }, }, - &mockBackupCollection{ - path: suite.storePath2, - loc: loc2, - streams: []data.Item{ + &m365Mock.BackupCollection{ + Path: suite.storePath2, + Loc: loc2, + Streams: []data.Item{ &dataMock.Item{ ItemID: testFileName3, Reader: io.NopCloser(bytes.NewReader(testFileData3)), @@ -1603,11 +1562,11 @@ func (suite *KopiaSimpleRepoIntegrationSuite) SetupTest() { for _, parent := range []path.Path{suite.testPath1, suite.testPath2} { loc := path.Builder{}.Append(parent.Folders()...) - collection := &mockBackupCollection{path: parent, loc: loc} + collection := &m365Mock.BackupCollection{Path: parent, Loc: loc} for _, item := range suite.files[parent.String()] { - collection.streams = append( - collection.streams, + collection.Streams = append( + collection.Streams, &dataMock.Item{ ItemID: item.itemPath.Item(), Reader: io.NopCloser(bytes.NewReader(item.data)), From 9255013d6f2c52944447ddb70607ab6a9b56cab7 Mon Sep 17 00:00:00 2001 From: ashmrtn <3891298+ashmrtn@users.noreply.github.com> Date: Tue, 22 Aug 2023 08:29:38 -0700 Subject: [PATCH 2/6] Refactor backup cleanup test code slightly (#4080) Switch to using functions that always return a new instance of the struct in question. Upcoming tests were having issues with state carrying over between individual tests. --- #### Does this PR need a docs update or release note? 
- [ ] :white_check_mark: Yes, it's included - [ ] :clock1: Yes, but in a later PR - [x] :no_entry: No #### Type of change - [ ] :sunflower: Feature - [ ] :bug: Bugfix - [ ] :world_map: Documentation - [x] :robot: Supportability/Tests - [ ] :computer: CI/Deployment - [x] :broom: Tech Debt/Cleanup #### Issue(s) * #3217 #### Test Plan - [x] :muscle: Manual - [ ] :zap: Unit test - [ ] :green_heart: E2E --- src/internal/kopia/cleanup_backups_test.go | 298 +++++++++++---------- 1 file changed, 162 insertions(+), 136 deletions(-) diff --git a/src/internal/kopia/cleanup_backups_test.go b/src/internal/kopia/cleanup_backups_test.go index 895d9226e..ecd36848d 100644 --- a/src/internal/kopia/cleanup_backups_test.go +++ b/src/internal/kopia/cleanup_backups_test.go @@ -137,89 +137,113 @@ func (suite *BackupCleanupUnitSuite) TestCleanupOrphanedData() { backupTag, _ := makeTagKV(TagBackupCategory) // Current backup and snapshots. - bupCurrent := &backup.Backup{ - BaseModel: model.BaseModel{ - ID: model.StableID("current-bup-id"), - ModelStoreID: manifest.ID("current-bup-msid"), - }, - SnapshotID: "current-snap-msid", - StreamStoreID: "current-deets-msid", + bupCurrent := func() *backup.Backup { + return &backup.Backup{ + BaseModel: model.BaseModel{ + ID: model.StableID("current-bup-id"), + ModelStoreID: manifest.ID("current-bup-msid"), + }, + SnapshotID: "current-snap-msid", + StreamStoreID: "current-deets-msid", + } } - snapCurrent := &manifest.EntryMetadata{ - ID: "current-snap-msid", - Labels: map[string]string{ - backupTag: "0", - }, + snapCurrent := func() *manifest.EntryMetadata { + return &manifest.EntryMetadata{ + ID: "current-snap-msid", + Labels: map[string]string{ + backupTag: "0", + }, + } } - deetsCurrent := &manifest.EntryMetadata{ - ID: "current-deets-msid", + deetsCurrent := func() *manifest.EntryMetadata { + return &manifest.EntryMetadata{ + ID: "current-deets-msid", + } } // Legacy backup with details in separate model. - bupLegacy := &backup.Backup{ - BaseModel: model.BaseModel{ - ID: model.StableID("legacy-bup-id"), - ModelStoreID: manifest.ID("legacy-bup-msid"), - }, - SnapshotID: "legacy-snap-msid", - DetailsID: "legacy-deets-msid", + bupLegacy := func() *backup.Backup { + return &backup.Backup{ + BaseModel: model.BaseModel{ + ID: model.StableID("legacy-bup-id"), + ModelStoreID: manifest.ID("legacy-bup-msid"), + }, + SnapshotID: "legacy-snap-msid", + DetailsID: "legacy-deets-msid", + } } - snapLegacy := &manifest.EntryMetadata{ - ID: "legacy-snap-msid", - Labels: map[string]string{ - backupTag: "0", - }, + snapLegacy := func() *manifest.EntryMetadata { + return &manifest.EntryMetadata{ + ID: "legacy-snap-msid", + Labels: map[string]string{ + backupTag: "0", + }, + } } - deetsLegacy := &model.BaseModel{ - ID: "legacy-deets-id", - ModelStoreID: "legacy-deets-msid", + deetsLegacy := func() *model.BaseModel { + return &model.BaseModel{ + ID: "legacy-deets-id", + ModelStoreID: "legacy-deets-msid", + } } // Incomplete backup missing data snapshot. 
- bupNoSnapshot := &backup.Backup{ - BaseModel: model.BaseModel{ - ID: model.StableID("ns-bup-id"), - ModelStoreID: manifest.ID("ns-bup-id-msid"), - }, - StreamStoreID: "ns-deets-msid", + bupNoSnapshot := func() *backup.Backup { + return &backup.Backup{ + BaseModel: model.BaseModel{ + ID: model.StableID("ns-bup-id"), + ModelStoreID: manifest.ID("ns-bup-id-msid"), + }, + StreamStoreID: "ns-deets-msid", + } } - deetsNoSnapshot := &manifest.EntryMetadata{ - ID: "ns-deets-msid", + deetsNoSnapshot := func() *manifest.EntryMetadata { + return &manifest.EntryMetadata{ + ID: "ns-deets-msid", + } } // Legacy incomplete backup missing data snapshot. - bupLegacyNoSnapshot := &backup.Backup{ - BaseModel: model.BaseModel{ - ID: model.StableID("ns-legacy-bup-id"), - ModelStoreID: manifest.ID("ns-legacy-bup-id-msid"), - }, - DetailsID: "ns-legacy-deets-msid", + bupLegacyNoSnapshot := func() *backup.Backup { + return &backup.Backup{ + BaseModel: model.BaseModel{ + ID: model.StableID("ns-legacy-bup-id"), + ModelStoreID: manifest.ID("ns-legacy-bup-id-msid"), + }, + DetailsID: "ns-legacy-deets-msid", + } } - deetsLegacyNoSnapshot := &model.BaseModel{ - ID: "ns-legacy-deets-id", - ModelStoreID: "ns-legacy-deets-msid", + deetsLegacyNoSnapshot := func() *model.BaseModel { + return &model.BaseModel{ + ID: "ns-legacy-deets-id", + ModelStoreID: "ns-legacy-deets-msid", + } } // Incomplete backup missing details. - bupNoDetails := &backup.Backup{ - BaseModel: model.BaseModel{ - ID: model.StableID("nssid-bup-id"), - ModelStoreID: manifest.ID("nssid-bup-msid"), - }, - SnapshotID: "nssid-snap-msid", + bupNoDetails := func() *backup.Backup { + return &backup.Backup{ + BaseModel: model.BaseModel{ + ID: model.StableID("nssid-bup-id"), + ModelStoreID: manifest.ID("nssid-bup-msid"), + }, + SnapshotID: "nssid-snap-msid", + } } - snapNoDetails := &manifest.EntryMetadata{ - ID: "nssid-snap-msid", - Labels: map[string]string{ - backupTag: "0", - }, + snapNoDetails := func() *manifest.EntryMetadata { + return &manifest.EntryMetadata{ + ID: "nssid-snap-msid", + Labels: map[string]string{ + backupTag: "0", + }, + } } // Get some stable time so that we can do everything relative to this in the @@ -268,16 +292,16 @@ func (suite *BackupCleanupUnitSuite) TestCleanupOrphanedData() { { name: "OnlyCompleteBackups Noops", snapshots: []*manifest.EntryMetadata{ - snapCurrent, - deetsCurrent, - snapLegacy, + snapCurrent(), + deetsCurrent(), + snapLegacy(), }, detailsModels: []*model.BaseModel{ - deetsLegacy, + deetsLegacy(), }, backups: []backupRes{ - {bup: bupCurrent}, - {bup: bupLegacy}, + {bup: bupCurrent()}, + {bup: bupLegacy()}, }, time: baseTime, expectErr: assert.NoError, @@ -285,24 +309,24 @@ func (suite *BackupCleanupUnitSuite) TestCleanupOrphanedData() { { name: "MissingFieldsInBackup CausesCleanup", snapshots: []*manifest.EntryMetadata{ - snapNoDetails, - deetsNoSnapshot, + snapNoDetails(), + deetsNoSnapshot(), }, detailsModels: []*model.BaseModel{ - deetsLegacyNoSnapshot, + deetsLegacyNoSnapshot(), }, backups: []backupRes{ - {bup: bupNoSnapshot}, - {bup: bupLegacyNoSnapshot}, - {bup: bupNoDetails}, + {bup: bupNoSnapshot()}, + {bup: bupLegacyNoSnapshot()}, + {bup: bupNoDetails()}, }, expectDeleteIDs: []manifest.ID{ - manifest.ID(bupNoSnapshot.ModelStoreID), - manifest.ID(bupLegacyNoSnapshot.ModelStoreID), - manifest.ID(bupNoDetails.ModelStoreID), - manifest.ID(deetsLegacyNoSnapshot.ModelStoreID), - snapNoDetails.ID, - deetsNoSnapshot.ID, + manifest.ID(bupNoSnapshot().ModelStoreID), + 
manifest.ID(bupLegacyNoSnapshot().ModelStoreID), + manifest.ID(bupNoDetails().ModelStoreID), + manifest.ID(deetsLegacyNoSnapshot().ModelStoreID), + snapNoDetails().ID, + deetsNoSnapshot().ID, }, time: baseTime, expectErr: assert.NoError, @@ -310,20 +334,20 @@ func (suite *BackupCleanupUnitSuite) TestCleanupOrphanedData() { { name: "MissingSnapshot CausesCleanup", snapshots: []*manifest.EntryMetadata{ - deetsCurrent, + deetsCurrent(), }, detailsModels: []*model.BaseModel{ - deetsLegacy, + deetsLegacy(), }, backups: []backupRes{ - {bup: bupCurrent}, - {bup: bupLegacy}, + {bup: bupCurrent()}, + {bup: bupLegacy()}, }, expectDeleteIDs: []manifest.ID{ - manifest.ID(bupCurrent.ModelStoreID), - deetsCurrent.ID, - manifest.ID(bupLegacy.ModelStoreID), - manifest.ID(deetsLegacy.ModelStoreID), + manifest.ID(bupCurrent().ModelStoreID), + deetsCurrent().ID, + manifest.ID(bupLegacy().ModelStoreID), + manifest.ID(deetsLegacy().ModelStoreID), }, time: baseTime, expectErr: assert.NoError, @@ -331,38 +355,39 @@ func (suite *BackupCleanupUnitSuite) TestCleanupOrphanedData() { { name: "MissingDetails CausesCleanup", snapshots: []*manifest.EntryMetadata{ - snapCurrent, - snapLegacy, + snapCurrent(), + snapLegacy(), }, backups: []backupRes{ - {bup: bupCurrent}, - {bup: bupLegacy}, + {bup: bupCurrent()}, + {bup: bupLegacy()}, }, expectDeleteIDs: []manifest.ID{ - manifest.ID(bupCurrent.ModelStoreID), - manifest.ID(bupLegacy.ModelStoreID), - snapCurrent.ID, - snapLegacy.ID, + manifest.ID(bupCurrent().ModelStoreID), + manifest.ID(bupLegacy().ModelStoreID), + snapCurrent().ID, + snapLegacy().ID, }, time: baseTime, expectErr: assert.NoError, }, + // Tests with various errors from Storer. { name: "SnapshotsListError Fails", snapshotFetchErr: assert.AnError, backups: []backupRes{ - {bup: bupCurrent}, + {bup: bupCurrent()}, }, expectErr: assert.Error, }, { name: "LegacyDetailsListError Fails", snapshots: []*manifest.EntryMetadata{ - snapCurrent, + snapCurrent(), }, detailsModelListErr: assert.AnError, backups: []backupRes{ - {bup: bupCurrent}, + {bup: bupCurrent()}, }, time: baseTime, expectErr: assert.Error, @@ -370,8 +395,8 @@ func (suite *BackupCleanupUnitSuite) TestCleanupOrphanedData() { { name: "BackupIDsListError Fails", snapshots: []*manifest.EntryMetadata{ - snapCurrent, - deetsCurrent, + snapCurrent(), + deetsCurrent(), }, backupListErr: assert.AnError, time: baseTime, @@ -380,22 +405,22 @@ func (suite *BackupCleanupUnitSuite) TestCleanupOrphanedData() { { name: "BackupModelGetErrorNotFound CausesCleanup", snapshots: []*manifest.EntryMetadata{ - snapCurrent, - deetsCurrent, - snapLegacy, - snapNoDetails, + snapCurrent(), + deetsCurrent(), + snapLegacy(), + snapNoDetails(), }, detailsModels: []*model.BaseModel{ - deetsLegacy, + deetsLegacy(), }, backups: []backupRes{ - {bup: bupCurrent}, + {bup: bupCurrent()}, { - bup: bupLegacy, + bup: bupLegacy(), err: data.ErrNotFound, }, { - bup: bupNoDetails, + bup: bupNoDetails(), err: data.ErrNotFound, }, }, @@ -404,11 +429,11 @@ func (suite *BackupCleanupUnitSuite) TestCleanupOrphanedData() { // delete operation should ignore missing models though so there's no // issue. 
expectDeleteIDs: []manifest.ID{ - snapLegacy.ID, - manifest.ID(deetsLegacy.ModelStoreID), - manifest.ID(bupLegacy.ModelStoreID), - snapNoDetails.ID, - manifest.ID(bupNoDetails.ModelStoreID), + snapLegacy().ID, + manifest.ID(deetsLegacy().ModelStoreID), + manifest.ID(bupLegacy().ModelStoreID), + snapNoDetails().ID, + manifest.ID(bupNoDetails().ModelStoreID), }, time: baseTime, expectErr: assert.NoError, @@ -416,21 +441,21 @@ func (suite *BackupCleanupUnitSuite) TestCleanupOrphanedData() { { name: "BackupModelGetError Fails", snapshots: []*manifest.EntryMetadata{ - snapCurrent, - deetsCurrent, - snapLegacy, - snapNoDetails, + snapCurrent(), + deetsCurrent(), + snapLegacy(), + snapNoDetails(), }, detailsModels: []*model.BaseModel{ - deetsLegacy, + deetsLegacy(), }, backups: []backupRes{ - {bup: bupCurrent}, + {bup: bupCurrent()}, { - bup: bupLegacy, + bup: bupLegacy(), err: assert.AnError, }, - {bup: bupNoDetails}, + {bup: bupNoDetails()}, }, time: baseTime, expectErr: assert.Error, @@ -438,34 +463,35 @@ func (suite *BackupCleanupUnitSuite) TestCleanupOrphanedData() { { name: "DeleteError Fails", snapshots: []*manifest.EntryMetadata{ - snapCurrent, - deetsCurrent, - snapLegacy, - snapNoDetails, + snapCurrent(), + deetsCurrent(), + snapLegacy(), + snapNoDetails(), }, detailsModels: []*model.BaseModel{ - deetsLegacy, + deetsLegacy(), }, backups: []backupRes{ - {bup: bupCurrent}, - {bup: bupLegacy}, - {bup: bupNoDetails}, + {bup: bupCurrent()}, + {bup: bupLegacy()}, + {bup: bupNoDetails()}, }, expectDeleteIDs: []manifest.ID{ - snapNoDetails.ID, - manifest.ID(bupNoDetails.ModelStoreID), + snapNoDetails().ID, + manifest.ID(bupNoDetails().ModelStoreID), }, deleteErr: assert.AnError, time: baseTime, expectErr: assert.Error, }, + // Tests dealing with buffer times. { name: "MissingSnapshot BarelyTooYoungForCleanup Noops", snapshots: []*manifest.EntryMetadata{ - manifestWithTime(baseTime, deetsCurrent), + manifestWithTime(baseTime, deetsCurrent()), }, backups: []backupRes{ - {bup: backupWithTime(baseTime, bupCurrent)}, + {bup: backupWithTime(baseTime, bupCurrent())}, }, time: baseTime.Add(24 * time.Hour), buffer: 24 * time.Hour, @@ -474,14 +500,14 @@ func (suite *BackupCleanupUnitSuite) TestCleanupOrphanedData() { { name: "MissingSnapshot BarelyOldEnough CausesCleanup", snapshots: []*manifest.EntryMetadata{ - manifestWithTime(baseTime, deetsCurrent), + manifestWithTime(baseTime, deetsCurrent()), }, backups: []backupRes{ - {bup: backupWithTime(baseTime, bupCurrent)}, + {bup: backupWithTime(baseTime, bupCurrent())}, }, expectDeleteIDs: []manifest.ID{ - deetsCurrent.ID, - manifest.ID(bupCurrent.ModelStoreID), + deetsCurrent().ID, + manifest.ID(bupCurrent().ModelStoreID), }, time: baseTime.Add((24 * time.Hour) + time.Second), buffer: 24 * time.Hour, @@ -490,12 +516,12 @@ func (suite *BackupCleanupUnitSuite) TestCleanupOrphanedData() { { name: "BackupGetErrorNotFound TooYoung Noops", snapshots: []*manifest.EntryMetadata{ - manifestWithTime(baseTime, snapCurrent), - manifestWithTime(baseTime, deetsCurrent), + manifestWithTime(baseTime, snapCurrent()), + manifestWithTime(baseTime, deetsCurrent()), }, backups: []backupRes{ { - bup: backupWithTime(baseTime, bupCurrent), + bup: backupWithTime(baseTime, bupCurrent()), err: data.ErrNotFound, }, }, From 9f9ce34add7075ee2a218f05753d57f76eeaa755 Mon Sep 17 00:00:00 2001 From: neha_gupta Date: Tue, 22 Aug 2023 22:03:59 +0530 Subject: [PATCH 3/6] add handlers for channels (#4050) add Handlers interface for Channels. 
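For reference, the enumerators in this patch only expose `GetPage`; the concrete pagers (`messagePageCtrl`, `channelsPageCtrl`) are still commented-out TODOs. Below is a minimal, hypothetical sketch of how a caller might drain one of these pagers. It assumes the enumerator advances itself between `GetPage` calls and that the final page carries the delta link; `drainMessagePages` itself is an illustrative assumption, not part of this patch.

```go
package groups

import (
	"context"

	"github.com/alcionai/corso/src/internal/common/ptr"
	"github.com/alcionai/corso/src/pkg/services/m365/api"
)

// drainMessagePages is a hypothetical consumer of the new pager interface:
// it requests pages until no next link remains, and records the delta link
// (if any) for use in a later incremental backup.
func drainMessagePages(
	ctx context.Context,
	pager api.MessageItemDeltaEnumerator,
) ([]api.DeltaPageLinker, string, error) {
	var (
		pages []api.DeltaPageLinker
		delta string
	)

	for {
		page, err := pager.GetPage(ctx)
		if err != nil {
			return nil, "", err
		}

		pages = append(pages, page)

		// graph deltas typically surface the delta link on the last page
		if d := ptr.Val(page.GetOdataDeltaLink()); len(d) > 0 {
			delta = d
		}

		// no next link means the enumeration is complete
		if len(ptr.Val(page.GetOdataNextLink())) == 0 {
			break
		}
	}

	return pages, delta, nil
}
```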
#### Does this PR need a docs update or release note? - [ ] :no_entry: No #### Type of change - [ ] :sunflower: Feature #### Issue(s) * # #### Test Plan --- .../m365/collection/groups/handler.go | 18 +++++++++ src/pkg/services/m365/api/channels.go | 1 + src/pkg/services/m365/api/channels_pager.go | 39 +++++++++++++++++++ 3 files changed, 58 insertions(+) create mode 100644 src/internal/m365/collection/groups/handler.go create mode 100644 src/pkg/services/m365/api/channels.go create mode 100644 src/pkg/services/m365/api/channels_pager.go diff --git a/src/internal/m365/collection/groups/handler.go b/src/internal/m365/collection/groups/handler.go new file mode 100644 index 000000000..d4a382149 --- /dev/null +++ b/src/internal/m365/collection/groups/handler.go @@ -0,0 +1,18 @@ +package groups + +import ( + "context" + + "github.com/microsoft/kiota-abstractions-go/serialization" + "github.com/microsoftgraph/msgraph-sdk-go/models" + + "github.com/alcionai/corso/src/pkg/services/m365/api" +) + +type BackupMessagesHandler interface { + GetMessageByID(ctx context.Context, teamID, channelID, itemID string) (models.ChatMessageable, error) + NewMessagePager(teamID, channelID string) api.MessageItemDeltaEnumerator + GetChannelByID(ctx context.Context, teamID, channelID string) (models.Channelable, error) + NewChannelPager(teamID, channelID string) api.ChannelItemDeltaEnumerator + GetReplyByID(ctx context.Context, teamID, channelID, messageID string) (serialization.Parsable, error) +} diff --git a/src/pkg/services/m365/api/channels.go b/src/pkg/services/m365/api/channels.go new file mode 100644 index 000000000..778f64ec1 --- /dev/null +++ b/src/pkg/services/m365/api/channels.go @@ -0,0 +1 @@ +package api diff --git a/src/pkg/services/m365/api/channels_pager.go b/src/pkg/services/m365/api/channels_pager.go new file mode 100644 index 000000000..599c09649 --- /dev/null +++ b/src/pkg/services/m365/api/channels_pager.go @@ -0,0 +1,39 @@ +package api + +import ( + "context" +) + +// --------------------------------------------------------------------------- +// item pager +// --------------------------------------------------------------------------- + +type MessageItemDeltaEnumerator interface { + GetPage(context.Context) (DeltaPageLinker, error) +} + +// TODO: implement +// var _ MessageItemDeltaEnumerator = &messagePageCtrl{} + +// type messagePageCtrl struct { +// gs graph.Servicer +// builder *teams.ItemChannelsItemMessagesRequestBuilder +// options *teams.ItemChannelsItemMessagesRequestBuilderGetRequestConfiguration +// } + +// --------------------------------------------------------------------------- +// channel pager +// --------------------------------------------------------------------------- + +type ChannelItemDeltaEnumerator interface { + GetPage(context.Context) (DeltaPageLinker, error) +} + +// TODO: implement +// var _ ChannelsItemDeltaEnumerator = &channelsPageCtrl{} + +// type channelsPageCtrl struct { +// gs graph.Servicer +// builder *teams.ItemChannelsChannelItemRequestBuilder +// options *teams.ItemChannelsChannelItemRequestBuilderGetRequestConfiguration +// } From 06862c3b8c4eb80d2a3e92da2f58d5141b98dd51 Mon Sep 17 00:00:00 2001 From: Keepers Date: Tue, 22 Aug 2023 11:25:06 -0600 Subject: [PATCH 4/6] add boilerplate groups backup collection (#4082) Adds the boilerplate for groups backup collection processing. Not necessarily functional at this time, due to missing dependencies and consts that aren't yet in the branch. Thus the lack of tests. 
It's just good enough to keep progress rolling forward. --- #### Does this PR need a docs update or release note? - [x] :no_entry: No #### Type of change - [x] :sunflower: Feature #### Issue(s) * #3989 #### Test Plan --- .../m365/collection/exchange/backup.go | 2 - .../m365/collection/exchange/collection.go | 3 +- src/internal/m365/collection/groups/backup.go | 318 ++++++++++++++++++ .../m365/collection/groups/collection.go | 180 ++++++++++ .../m365/collection/groups/handler.go | 18 - .../m365/collection/groups/handlers.go | 33 ++ src/pkg/services/m365/api/channels_pager.go | 18 +- src/pkg/services/m365/api/item_pager.go | 19 +- 8 files changed, 561 insertions(+), 30 deletions(-) create mode 100644 src/internal/m365/collection/groups/backup.go create mode 100644 src/internal/m365/collection/groups/collection.go delete mode 100644 src/internal/m365/collection/groups/handler.go create mode 100644 src/internal/m365/collection/groups/handlers.go diff --git a/src/internal/m365/collection/exchange/backup.go b/src/internal/m365/collection/exchange/backup.go index 359701629..f5ebd1783 100644 --- a/src/internal/m365/collection/exchange/backup.go +++ b/src/internal/m365/collection/exchange/backup.go @@ -75,8 +75,6 @@ func CreateCollections( return nil, clues.Wrap(err, "filling collections") } - foldersComplete <- struct{}{} - for _, coll := range collections { allCollections = append(allCollections, coll) } diff --git a/src/internal/m365/collection/exchange/collection.go b/src/internal/m365/collection/exchange/collection.go index 8e0c0f897..ba421763c 100644 --- a/src/internal/m365/collection/exchange/collection.go +++ b/src/internal/m365/collection/exchange/collection.go @@ -39,8 +39,7 @@ const ( // Collection implements the interface from data.Collection // Structure holds data for an Exchange application for a single user type Collection struct { - // M365 user - user string // M365 user + user string data chan data.Item // added is a list of existing item IDs that were added to a container diff --git a/src/internal/m365/collection/groups/backup.go b/src/internal/m365/collection/groups/backup.go new file mode 100644 index 000000000..9b31126a1 --- /dev/null +++ b/src/internal/m365/collection/groups/backup.go @@ -0,0 +1,318 @@ +package groups + +import ( + "context" + + "github.com/alcionai/clues" + "github.com/microsoftgraph/msgraph-sdk-go/models" + + "github.com/alcionai/corso/src/internal/common/ptr" + "github.com/alcionai/corso/src/internal/data" + "github.com/alcionai/corso/src/internal/m365/graph" + "github.com/alcionai/corso/src/internal/m365/support" + "github.com/alcionai/corso/src/internal/observe" + "github.com/alcionai/corso/src/internal/operations/inject" + "github.com/alcionai/corso/src/pkg/control" + "github.com/alcionai/corso/src/pkg/fault" + "github.com/alcionai/corso/src/pkg/logger" + "github.com/alcionai/corso/src/pkg/path" + "github.com/alcionai/corso/src/pkg/selectors" + "github.com/alcionai/corso/src/pkg/services/m365/api" +) + +// TODO: incremental support +// multiple lines in this file are commented out so that +// we can focus on v0 backups and re-integrate them later +// for v1 incrementals. +// since these lines represent otherwise standard boilerplate, +// it's simpler to comment them for tracking than to delete +// and re-discover them later. 
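+
+// CreateCollections builds one data.BackupCollection per channel in the
+// group that matches the provided scope. (Channel enumeration is still a
+// TODO, so the channel set is empty for now.)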
+ +func CreateCollections( + ctx context.Context, + bpc inject.BackupProducerConfig, + handler BackupHandler, + tenantID string, + scope selectors.GroupsScope, + // dps DeltaPaths, + su support.StatusUpdater, + errs *fault.Bus, +) ([]data.BackupCollection, error) { + ctx = clues.Add(ctx, "category", scope.Category().PathType()) + + var ( + allCollections = make([]data.BackupCollection, 0) + category = scope.Category().PathType() + qp = graph.QueryParams{ + Category: category, + ProtectedResource: bpc.ProtectedResource, + TenantID: tenantID, + } + ) + + catProgress := observe.MessageWithCompletion( + ctx, + observe.Bulletf("%s", qp.Category)) + defer close(catProgress) + + // TODO(keepers): probably shouldn't call out channels here specifically. + // This should be a generic container handler. But we don't need + // to worry about that until if/when we use this code to get email + // conversations as well. + // Also, this should be produced by the Handler. + // chanPager := handler.NewChannelsPager(qp.ProtectedResource.ID()) + // TODO(neha): enumerate channels + channels := []graph.Displayable{} + + collections, err := populateCollections( + ctx, + qp, + handler, + su, + channels, + scope, + // dps, + bpc.Options, + errs) + if err != nil { + return nil, clues.Wrap(err, "filling collections") + } + + for _, coll := range collections { + allCollections = append(allCollections, coll) + } + + return allCollections, nil +} + +func populateCollections( + ctx context.Context, + qp graph.QueryParams, + bh BackupHandler, + statusUpdater support.StatusUpdater, + channels []graph.Displayable, + scope selectors.GroupsScope, + // dps DeltaPaths, + ctrlOpts control.Options, + errs *fault.Bus, +) (map[string]data.BackupCollection, error) { + // channel ID -> BackupCollection. + channelCollections := map[string]data.BackupCollection{} + + // channel ID -> delta url or folder path lookups + // TODO(neha/keepers): figure out if deltas are stored per channel, or per group. + // deltaURLs = map[string]string{} + // currPaths = map[string]string{} + // copy of previousPaths. every channel present in the slice param + // gets removed from this map; the remaining channels at the end of + // the process have been deleted. + // tombstones = makeTombstones(dps) + + logger.Ctx(ctx).Infow("filling collections") + // , "len_deltapaths", len(dps)) + + el := errs.Local() + + for _, c := range channels { + if el.Failure() != nil { + return nil, el.Failure() + } + + cID := ptr.Val(c.GetId()) + // delete(tombstones, cID) + + var ( + err error + // dp = dps[cID] + // prevDelta = dp.Delta + // prevPathStr = dp.Path // do not log: pii; log prevPath instead + // prevPath path.Path + ictx = clues.Add( + ctx, + "channel_id", cID) + // "previous_delta", pii.SafeURL{ + // URL: prevDelta, + // SafePathElems: graph.SafeURLPathParams, + // SafeQueryKeys: graph.SafeURLQueryParams, + // }) + ) + + // currPath, locPath + // TODO(rkeepers): the handler should provide this functionality. + // Only create a collection if the path matches the scope. + if !includeContainer(ictx, qp, c, scope, qp.Category) { + continue + } + + // if len(prevPathStr) > 0 { + // if prevPath, err = pathFromPrevString(prevPathStr); err != nil { + // logger.CtxErr(ictx, err).Error("parsing prev path") + // // if the previous path is unusable, then the delta must be, too. + // prevDelta = "" + // } + // } + + // ictx = clues.Add(ictx, "previous_path", prevPath) + + // TODO: the handler should provide this implementation. 
+ items, err := collectItems( + ctx, + bh.NewMessagePager(qp.ProtectedResource.ID(), ptr.Val(c.GetId()))) + if err != nil { + el.AddRecoverable(ctx, clues.Stack(err)) + continue + } + + // if len(newDelta.URL) > 0 { + // deltaURLs[cID] = newDelta.URL + // } else if !newDelta.Reset { + // logger.Ctx(ictx).Info("missing delta url") + // } + + var prevPath path.Path + + // TODO: retrieve from handler + currPath, err := path.Builder{}. + Append(ptr.Val(c.GetId())). + ToDataLayerPath( + qp.TenantID, + qp.ProtectedResource.ID(), + path.GroupsService, + qp.Category, + true) + if err != nil { + el.AddRecoverable(ctx, clues.Stack(err)) + continue + } + + edc := NewCollection( + qp.ProtectedResource.ID(), + currPath, + prevPath, + path.Builder{}.Append(ptr.Val(c.GetDisplayName())), + qp.Category, + statusUpdater, + ctrlOpts) + + channelCollections[cID] = &edc + + // TODO: handle deleted items for v1 backup. + // // Remove any deleted IDs from the set of added IDs because items that are + // // deleted and then restored will have a different ID than they did + // // originally. + // for _, remove := range removed { + // delete(edc.added, remove) + // edc.removed[remove] = struct{}{} + // } + + // // add the current path for the container ID to be used in the next backup + // // as the "previous path", for reference in case of a rename or relocation. + // currPaths[cID] = currPath.String() + + // FIXME: normally this goes before removal, but linters + for _, item := range items { + edc.added[ptr.Val(item.GetId())] = struct{}{} + } + } + + // TODO: handle tombstones here + + logger.Ctx(ctx).Infow( + "adding metadata collection entries", + // "num_deltas_entries", len(deltaURLs), + "num_paths_entries", len(channelCollections)) + + // col, err := graph.MakeMetadataCollection( + // qp.TenantID, + // qp.ProtectedResource.ID(), + // path.ExchangeService, + // qp.Category, + // []graph.MetadataCollectionEntry{ + // graph.NewMetadataEntry(graph.PreviousPathFileName, currPaths), + // graph.NewMetadataEntry(graph.DeltaURLsFileName, deltaURLs), + // }, + // statusUpdater) + // if err != nil { + // return nil, clues.Wrap(err, "making metadata collection") + // } + + // channelCollections["metadata"] = col + + return channelCollections, el.Failure() +} + +func collectItems( + ctx context.Context, + pager api.ChannelMessageDeltaEnumerator, +) ([]models.ChatMessageable, error) { + items := []models.ChatMessageable{} + + for { + // assume delta urls here, which allows single-token consumption + page, err := pager.GetPage(graph.ConsumeNTokens(ctx, graph.SingleGetOrDeltaLC)) + if err != nil { + return nil, graph.Wrap(ctx, err, "getting page") + } + + // if graph.IsErrInvalidDelta(err) { + // logger.Ctx(ctx).Infow("Invalid previous delta link", "link", prevDelta) + + // invalidPrevDelta = true + // newPaths = map[string]string{} + + // pager.Reset() + + // continue + // } + + vals, err := pager.ValuesIn(page) + if err != nil { + return nil, graph.Wrap(ctx, err, "getting items in page") + } + + items = append(items, vals...) + + nextLink, _ := api.NextAndDeltaLink(page) + + // if len(deltaLink) > 0 { + // newDeltaURL = deltaLink + // } + + // Check if there are more items + if len(nextLink) == 0 { + break + } + + logger.Ctx(ctx).Debugw("found nextLink", "next_link", nextLink) + pager.SetNext(nextLink) + } + + return items, nil +} + +// Returns true if the container passes the scope comparison and should be included. +// Returns: +// - the path representing the directory as it should be stored in the repository. 
+// - the human-readable path using display names. +// - true if the path passes the scope comparison. +func includeContainer( + ctx context.Context, + qp graph.QueryParams, + gd graph.Displayable, + scope selectors.GroupsScope, + category path.CategoryType, +) bool { + // assume a single-level hierarchy + directory := ptr.Val(gd.GetDisplayName()) + + // TODO(keepers): awaiting parent branch to update to main + ok := scope.Matches(selectors.GroupsCategoryUnknown, directory) + + logger.Ctx(ctx).With( + "included", ok, + "scope", scope, + "match_target", directory, + ).Debug("backup folder selection filter") + + return ok +} diff --git a/src/internal/m365/collection/groups/collection.go b/src/internal/m365/collection/groups/collection.go new file mode 100644 index 000000000..c1e6a4042 --- /dev/null +++ b/src/internal/m365/collection/groups/collection.go @@ -0,0 +1,180 @@ +package groups + +import ( + "bytes" + "context" + "io" + "time" + + "github.com/alcionai/corso/src/internal/data" + "github.com/alcionai/corso/src/internal/m365/support" + "github.com/alcionai/corso/src/pkg/backup/details" + "github.com/alcionai/corso/src/pkg/control" + "github.com/alcionai/corso/src/pkg/fault" + "github.com/alcionai/corso/src/pkg/path" +) + +var ( + _ data.BackupCollection = &Collection{} + _ data.Item = &Item{} + _ data.ItemInfo = &Item{} + _ data.ItemModTime = &Item{} +) + +const ( + collectionChannelBufferSize = 1000 + numberOfRetries = 4 +) + +type Collection struct { + protectedResource string + items chan data.Item + + // added is a list of existing item IDs that were added to a container + added map[string]struct{} + // removed is a list of item IDs that were deleted from, or moved out, of a container + removed map[string]struct{} + + // items itemGetterSerializer + + category path.CategoryType + statusUpdater support.StatusUpdater + ctrl control.Options + + // FullPath is the current hierarchical path used by this collection. + fullPath path.Path + + // PrevPath is the previous hierarchical path used by this collection. + // It may be the same as fullPath, if the folder was not renamed or + // moved. It will be empty on its first retrieval. + prevPath path.Path + + // LocationPath contains the path with human-readable display names. + // IE: "/Inbox/Important" instead of "/abcdxyz123/algha=lgkhal=t" + locationPath *path.Builder + + state data.CollectionState + + // doNotMergeItems should only be true if the old delta token expired. + // doNotMergeItems bool +} + +// NewExchangeDataCollection creates an ExchangeDataCollection. +// State of the collection is set as an observation of the current +// and previous paths. If the curr path is nil, the state is assumed +// to be deleted. If the prev path is nil, it is assumed newly created. +// If both are populated, then state is either moved (if they differ), +// or notMoved (if they match). 
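+// For example: data.StateOf(nil, curr) yields a new-collection state,
+// data.StateOf(prev, nil) yields a deleted state, and two differing
+// populated paths yield a moved state.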
+func NewCollection( + protectedResource string, + curr, prev path.Path, + location *path.Builder, + category path.CategoryType, + statusUpdater support.StatusUpdater, + ctrlOpts control.Options, + // doNotMergeItems bool, +) Collection { + collection := Collection{ + added: make(map[string]struct{}, 0), + category: category, + ctrl: ctrlOpts, + items: make(chan data.Item, collectionChannelBufferSize), + // doNotMergeItems: doNotMergeItems, + fullPath: curr, + locationPath: location, + prevPath: prev, + removed: make(map[string]struct{}, 0), + state: data.StateOf(prev, curr), + statusUpdater: statusUpdater, + protectedResource: protectedResource, + } + + return collection +} + +// Items utility function to asynchronously execute process to fill data channel with +// M365 exchange objects and returns the data channel +func (col *Collection) Items(ctx context.Context, errs *fault.Bus) <-chan data.Item { + // go col.streamItems(ctx, errs) + return col.items +} + +// FullPath returns the Collection's fullPath []string +func (col *Collection) FullPath() path.Path { + return col.fullPath +} + +// LocationPath produces the Collection's full path, but with display names +// instead of IDs in the folders. Only populated for Calendars. +func (col *Collection) LocationPath() *path.Builder { + return col.locationPath +} + +// TODO(ashmrtn): Fill in with previous path once the Controller compares old +// and new folder hierarchies. +func (col Collection) PreviousPath() path.Path { + return col.prevPath +} + +func (col Collection) State() data.CollectionState { + return col.state +} + +func (col Collection) DoNotMergeItems() bool { + // TODO: depends on whether or not deltas are valid + return true +} + +// --------------------------------------------------------------------------- +// items +// --------------------------------------------------------------------------- + +// Item represents a single item retrieved from exchange +type Item struct { + id string + // TODO: We may need this to be a "oneOf" of `message`, `contact`, etc. + // going forward. Using []byte for now but I assume we'll have + // some structured type in here (serialization to []byte can be done in `Read`) + message []byte + info *details.ExchangeInfo // temporary change to bring populate function into directory + // TODO(ashmrtn): Can probably eventually be sourced from info as there's a + // request to provide modtime in ItemInfo structs. + modTime time.Time + + // true if the item was marked by graph as deleted. 
+ deleted bool +} + +func (i *Item) ID() string { + return i.id +} + +func (i *Item) ToReader() io.ReadCloser { + return io.NopCloser(bytes.NewReader(i.message)) +} + +func (i Item) Deleted() bool { + return i.deleted +} + +func (i *Item) Info() details.ItemInfo { + return details.ItemInfo{Exchange: i.info} +} + +func (i *Item) ModTime() time.Time { + return i.modTime +} + +func NewItem( + identifier string, + dataBytes []byte, + detail details.ExchangeInfo, + modTime time.Time, +) Item { + return Item{ + id: identifier, + message: dataBytes, + info: &detail, + modTime: modTime, + } +} diff --git a/src/internal/m365/collection/groups/handler.go b/src/internal/m365/collection/groups/handler.go deleted file mode 100644 index d4a382149..000000000 --- a/src/internal/m365/collection/groups/handler.go +++ /dev/null @@ -1,18 +0,0 @@ -package groups - -import ( - "context" - - "github.com/microsoft/kiota-abstractions-go/serialization" - "github.com/microsoftgraph/msgraph-sdk-go/models" - - "github.com/alcionai/corso/src/pkg/services/m365/api" -) - -type BackupMessagesHandler interface { - GetMessageByID(ctx context.Context, teamID, channelID, itemID string) (models.ChatMessageable, error) - NewMessagePager(teamID, channelID string) api.MessageItemDeltaEnumerator - GetChannelByID(ctx context.Context, teamID, channelID string) (models.Channelable, error) - NewChannelPager(teamID, channelID string) api.ChannelItemDeltaEnumerator - GetReplyByID(ctx context.Context, teamID, channelID, messageID string) (serialization.Parsable, error) -} diff --git a/src/internal/m365/collection/groups/handlers.go b/src/internal/m365/collection/groups/handlers.go new file mode 100644 index 000000000..f5a28fd28 --- /dev/null +++ b/src/internal/m365/collection/groups/handlers.go @@ -0,0 +1,33 @@ +package groups + +import ( + "context" + + "github.com/microsoft/kiota-abstractions-go/serialization" + "github.com/microsoftgraph/msgraph-sdk-go/models" + + "github.com/alcionai/corso/src/pkg/services/m365/api" +) + +type BackupHandler interface { + GetChannelByID( + ctx context.Context, + teamID, channelID string, + ) (models.Channelable, error) + NewChannelsPager( + teamID string, + ) api.ChannelDeltaEnumerator + + GetMessageByID( + ctx context.Context, + teamID, channelID, itemID string, + ) (models.ChatMessageable, error) + NewMessagePager( + teamID, channelID string, + ) api.ChannelMessageDeltaEnumerator + + GetMessageReplies( + ctx context.Context, + teamID, channelID, messageID string, + ) (serialization.Parsable, error) +} diff --git a/src/pkg/services/m365/api/channels_pager.go b/src/pkg/services/m365/api/channels_pager.go index 599c09649..58aecaf6c 100644 --- a/src/pkg/services/m365/api/channels_pager.go +++ b/src/pkg/services/m365/api/channels_pager.go @@ -1,19 +1,21 @@ package api import ( - "context" + "github.com/microsoftgraph/msgraph-sdk-go/models" ) // --------------------------------------------------------------------------- // item pager // --------------------------------------------------------------------------- -type MessageItemDeltaEnumerator interface { - GetPage(context.Context) (DeltaPageLinker, error) +type ChannelMessageDeltaEnumerator interface { + DeltaGetPager + ValuesInPageLinker[models.ChatMessageable] + SetNextLinker } // TODO: implement -// var _ MessageItemDeltaEnumerator = &messagePageCtrl{} +// var _ ChannelMessageDeltaEnumerator = &messagePageCtrl{} // type messagePageCtrl struct { // gs graph.Servicer @@ -25,12 +27,14 @@ type MessageItemDeltaEnumerator interface { // channel pager // 
--------------------------------------------------------------------------- -type ChannelItemDeltaEnumerator interface { - GetPage(context.Context) (DeltaPageLinker, error) +type ChannelDeltaEnumerator interface { + DeltaGetPager + ValuesInPageLinker[models.Channelable] + SetNextLinker } // TODO: implement -// var _ ChannelsItemDeltaEnumerator = &channelsPageCtrl{} +// var _ ChannelDeltaEnumerator = &channelsPageCtrl{} // type channelsPageCtrl struct { // gs graph.Servicer diff --git a/src/pkg/services/m365/api/item_pager.go b/src/pkg/services/m365/api/item_pager.go index ef54b1a3d..4cb272d51 100644 --- a/src/pkg/services/m365/api/item_pager.go +++ b/src/pkg/services/m365/api/item_pager.go @@ -13,9 +13,18 @@ import ( ) // --------------------------------------------------------------------------- -// common interfaces and funcs +// common interfaces // --------------------------------------------------------------------------- +// TODO(keepers): replace all matching uses of GetPage with this. +type DeltaGetPager interface { + GetPage(context.Context) (DeltaPageLinker, error) +} + +type ValuesInPageLinker[T any] interface { + ValuesIn(PageLinker) ([]T, error) +} + type PageLinker interface { GetOdataNextLink() *string } @@ -25,6 +34,14 @@ type DeltaPageLinker interface { GetOdataDeltaLink() *string } +type SetNextLinker interface { + SetNext(nextLink string) +} + +// --------------------------------------------------------------------------- +// common funcs +// --------------------------------------------------------------------------- + // IsNextLinkValid separate check to investigate whether error is func IsNextLinkValid(next string) bool { return !strings.Contains(next, `users//`) From 74b92adbc35c7c0ffbc0fecad8b75f137da09491 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 23 Aug 2023 06:01:16 +0000 Subject: [PATCH 5/6] =?UTF-8?q?=E2=AC=86=EF=B8=8F=20Bump=20github.com/aws/?= =?UTF-8?q?aws-sdk-go=20from=201.44.328=20to=201.44.329=20in=20/src=20(#40?= =?UTF-8?q?94)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.44.328 to 1.44.329.
Release notes

Sourced from github.com/aws/aws-sdk-go's releases:

Release v1.44.329 (2023-08-22)

Service Client Updates

- service/ce: Updates service API and documentation
- service/globalaccelerator: Updates service documentation
- service/rds: Updates service API, documentation, waiters, paginators, and examples
  - Adding parameters to CreateCustomDbEngineVersion reserved for future use.
- service/verifiedpermissions: Updates service API and documentation
[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/aws/aws-sdk-go&package-manager=go_modules&previous-version=1.44.328&new-version=1.44.329)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)

You can trigger a rebase of this PR by commenting `@dependabot rebase`.

[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)

---
Dependabot commands and options

You can trigger Dependabot actions by commenting on this PR:

- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- src/go.mod | 2 +- src/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/go.mod b/src/go.mod index 8438cb70f..eca537743 100644 --- a/src/go.mod +++ b/src/go.mod @@ -8,7 +8,7 @@ require ( github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.1 github.com/alcionai/clues v0.0.0-20230728164842-7dc4795a43e4 github.com/armon/go-metrics v0.4.1 - github.com/aws/aws-sdk-go v1.44.328 + github.com/aws/aws-sdk-go v1.44.329 github.com/aws/aws-xray-sdk-go v1.8.1 github.com/cenkalti/backoff/v4 v4.2.1 github.com/google/uuid v1.3.1 diff --git a/src/go.sum b/src/go.sum index b88a52f28..9bd8f2480 100644 --- a/src/go.sum +++ b/src/go.sum @@ -66,8 +66,8 @@ github.com/andybalholm/brotli v1.0.5 h1:8uQZIdzKmjc/iuPu7O2ioW48L81FgatrcpfFmiq/ github.com/andybalholm/brotli v1.0.5/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= -github.com/aws/aws-sdk-go v1.44.328 h1:WBwlf8ym9SDQ/GTIBO9eXyvwappKJyOetWJKl4mT7ZU= -github.com/aws/aws-sdk-go v1.44.328/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go v1.44.329 h1:Rqy+wYI8h+iq+FphR59KKTsHR1Lz7YiwRqFzWa7xoYU= +github.com/aws/aws-sdk-go v1.44.329/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/aws/aws-xray-sdk-go v1.8.1 h1:O4pXV+hnCskaamGsZnFpzHyAmgPGusBMN6i7nnsy0Fo= github.com/aws/aws-xray-sdk-go v1.8.1/go.mod h1:wMmVYzej3sykAttNBkXQHK/+clAPWTOrPiajEk7Cp3A= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= From 7b6c6026ad56f5bba6729199c2e54fcde37261c9 Mon Sep 17 00:00:00 2001 From: Abin Simon Date: Wed, 23 Aug 2023 13:34:32 +0530 Subject: [PATCH 6/6] Group CLI (#4043) CLI changes for groups. --- #### Does this PR need a docs update or release note? 
- [ ] :white_check_mark: Yes, it's included - [x] :clock1: Yes, but in a later PR - [ ] :no_entry: No #### Type of change - [x] :sunflower: Feature - [ ] :bug: Bugfix - [ ] :world_map: Documentation - [ ] :robot: Supportability/Tests - [ ] :computer: CI/Deployment - [ ] :broom: Tech Debt/Cleanup #### Issue(s) * https://github.com/alcionai/corso/issues/3990 * #### Test Plan - [ ] :muscle: Manual - [x] :zap: Unit test - [ ] :green_heart: E2E --- src/cli/backup/backup.go | 1 + src/cli/backup/groups.go | 147 +++++++++++++++- src/cli/utils/groups.go | 57 +++++++ src/cli/utils/groups_test.go | 161 ++++++++++++++++++ src/internal/m365/backup.go | 24 ++- .../m365/collection/drive/group_handler.go | 23 +-- src/pkg/backup/details/groups.go | 56 +++++- src/pkg/backup/details/iteminfo.go | 2 +- src/pkg/selectors/groups.go | 1 - src/pkg/services/m365/groups.go | 23 ++- src/pkg/services/m365/groups_test.go | 25 +++ 11 files changed, 486 insertions(+), 34 deletions(-) create mode 100644 src/cli/utils/groups_test.go diff --git a/src/cli/backup/backup.go b/src/cli/backup/backup.go index 56b5c5ef4..c21f5cbb3 100644 --- a/src/cli/backup/backup.go +++ b/src/cli/backup/backup.go @@ -39,6 +39,7 @@ var serviceCommands = []func(cmd *cobra.Command) *cobra.Command{ addExchangeCommands, addOneDriveCommands, addSharePointCommands, + addGroupsCommands, addTeamsCommands, } diff --git a/src/cli/backup/groups.go b/src/cli/backup/groups.go index 1dc490ae7..f4cc101f0 100644 --- a/src/cli/backup/groups.go +++ b/src/cli/backup/groups.go @@ -1,14 +1,27 @@ package backup import ( + "context" + "errors" + "github.com/alcionai/clues" "github.com/spf13/cobra" "github.com/spf13/pflag" + "golang.org/x/exp/slices" "github.com/alcionai/corso/src/cli/flags" . "github.com/alcionai/corso/src/cli/print" + "github.com/alcionai/corso/src/cli/repo" "github.com/alcionai/corso/src/cli/utils" + "github.com/alcionai/corso/src/internal/common/idname" + "github.com/alcionai/corso/src/internal/data" + "github.com/alcionai/corso/src/pkg/backup/details" + "github.com/alcionai/corso/src/pkg/fault" + "github.com/alcionai/corso/src/pkg/filters" "github.com/alcionai/corso/src/pkg/path" + "github.com/alcionai/corso/src/pkg/repository" + "github.com/alcionai/corso/src/pkg/selectors" + "github.com/alcionai/corso/src/pkg/services/m365" ) // ------------------------------------------------------------------------------------------------ @@ -134,7 +147,38 @@ func createGroupsCmd(cmd *cobra.Command, args []string) error { return nil } - return Only(ctx, utils.ErrNotYetImplemented) + if err := validateGroupsBackupCreateFlags(flags.GroupFV, flags.CategoryDataFV); err != nil { + return err + } + + r, acct, err := utils.AccountConnectAndWriteRepoConfig(ctx, path.GroupsService, repo.S3Overrides(cmd)) + if err != nil { + return Only(ctx, err) + } + + defer utils.CloseRepo(ctx, r) + + // TODO: log/print recoverable errors + errs := fault.New(false) + + ins, err := m365.GroupsMap(ctx, *acct, errs) + if err != nil { + return Only(ctx, clues.Wrap(err, "Failed to retrieve M365 groups")) + } + + sel := groupsBackupCreateSelectors(ctx, ins, flags.GroupFV, flags.CategoryDataFV) + selectorSet := []selectors.Selector{} + + for _, discSel := range sel.SplitByResourceOwner(ins.IDs()) { + selectorSet = append(selectorSet, discSel.Selector) + } + + return runBackups( + ctx, + r, + "Group", "group", + selectorSet, + ins) } // ------------------------------------------------------------------------------------------------ @@ -172,17 +216,71 @@ func groupsDetailsCmd() *cobra.Command 
{ // processes a groups service backup. func detailsGroupsCmd(cmd *cobra.Command, args []string) error { - ctx := cmd.Context() - if utils.HasNoFlagsAndShownHelp(cmd) { return nil } - if err := validateGroupBackupCreateFlags(flags.GroupFV); err != nil { + ctx := cmd.Context() + opts := utils.MakeGroupsOpts(cmd) + + r, _, _, ctrlOpts, err := utils.GetAccountAndConnect(ctx, path.GroupsService, repo.S3Overrides(cmd)) + if err != nil { return Only(ctx, err) } - return Only(ctx, utils.ErrNotYetImplemented) + defer utils.CloseRepo(ctx, r) + + ds, err := runDetailsGroupsCmd(ctx, r, flags.BackupIDFV, opts, ctrlOpts.SkipReduce) + if err != nil { + return Only(ctx, err) + } + + if len(ds.Entries) == 0 { + Info(ctx, selectors.ErrorNoMatchingItems) + return nil + } + + ds.PrintEntries(ctx) + + return nil +} + +// runDetailsGroupsCmd actually performs the lookup in backup details. +// the fault.Errors return is always non-nil. Callers should check if +// errs.Failure() == nil. +func runDetailsGroupsCmd( + ctx context.Context, + r repository.BackupGetter, + backupID string, + opts utils.GroupsOpts, + skipReduce bool, +) (*details.Details, error) { + if err := utils.ValidateGroupsRestoreFlags(backupID, opts); err != nil { + return nil, err + } + + ctx = clues.Add(ctx, "backup_id", backupID) + + d, _, errs := r.GetBackupDetails(ctx, backupID) + // TODO: log/track recoverable errors + if errs.Failure() != nil { + if errors.Is(errs.Failure(), data.ErrNotFound) { + return nil, clues.New("no backup exists with the id " + backupID) + } + + return nil, clues.Wrap(errs.Failure(), "Failed to get backup details in the repository") + } + + ctx = clues.Add(ctx, "details_entries", len(d.Entries)) + + if !skipReduce { + sel := utils.IncludeGroupsRestoreDataSelectors(ctx, opts) + sel.Configure(selectors.Config{OnlyMatchItemNames: true}) + utils.FilterGroupsRestoreInfoSelectors(sel, opts) + d = sel.Reduce(ctx, d, errs) + } + + return d, nil } // ------------------------------------------------------------------------------------------------ @@ -208,7 +306,7 @@ func deleteGroupsCmd(cmd *cobra.Command, args []string) error { // helpers // --------------------------------------------------------------------------- -func validateGroupBackupCreateFlags(groups []string) error { +func validateGroupsBackupCreateFlags(groups, cats []string) error { if len(groups) == 0 { return clues.New( "requires one or more --" + @@ -228,3 +326,40 @@ func validateGroupBackupCreateFlags(groups []string) error { return nil } + +// TODO: users might specify a data type, this only supports AllData(). 
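+// groupsBackupCreateSelectors builds the backup selector set; a wildcard
+// in the group flag expands to every group ID known to the tenant.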
+func groupsBackupCreateSelectors( + ctx context.Context, + ins idname.Cacher, + group, cats []string, +) *selectors.GroupsBackup { + if filters.PathContains(group).Compare(flags.Wildcard) { + return includeAllGroupWithCategories(ins, cats) + } + + sel := selectors.NewGroupsBackup(slices.Clone(group)) + + return addGroupsCategories(sel, cats) +} + +func includeAllGroupWithCategories(ins idname.Cacher, categories []string) *selectors.GroupsBackup { + return addGroupsCategories(selectors.NewGroupsBackup(ins.IDs()), categories) +} + +func addGroupsCategories(sel *selectors.GroupsBackup, cats []string) *selectors.GroupsBackup { + if len(cats) == 0 { + sel.Include(sel.AllData()) + } + + // TODO(meain): handle filtering + // for _, d := range cats { + // switch d { + // case dataLibraries: + // sel.Include(sel.LibraryFolders(selectors.Any())) + // case dataPages: + // sel.Include(sel.Pages(selectors.Any())) + // } + // } + + return sel +} diff --git a/src/cli/utils/groups.go b/src/cli/utils/groups.go index 9b0827d46..cabc9f3c6 100644 --- a/src/cli/utils/groups.go +++ b/src/cli/utils/groups.go @@ -1,9 +1,13 @@ package utils import ( + "context" + + "github.com/alcionai/clues" "github.com/spf13/cobra" "github.com/alcionai/corso/src/cli/flags" + "github.com/alcionai/corso/src/pkg/selectors" ) type GroupsOpts struct { @@ -28,3 +32,56 @@ func MakeGroupsOpts(cmd *cobra.Command) GroupsOpts { Populated: flags.GetPopulatedFlags(cmd), } } + +// ValidateGroupsRestoreFlags checks common flags for correctness and interdependencies +func ValidateGroupsRestoreFlags(backupID string, opts GroupsOpts) error { + if len(backupID) == 0 { + return clues.New("a backup ID is required") + } + + // TODO(meain): selectors (refer sharepoint) + + return validateRestoreConfigFlags(flags.CollisionsFV, opts.RestoreCfg) +} + +// AddGroupInfo adds the scope of the provided values to the selector's +// filter set +func AddGroupInfo( + sel *selectors.GroupsRestore, + v string, + f func(string) []selectors.GroupsScope, +) { + if len(v) == 0 { + return + } + + sel.Filter(f(v)) +} + +// IncludeGroupsRestoreDataSelectors builds the common data-selector +// inclusions for Group commands. +func IncludeGroupsRestoreDataSelectors(ctx context.Context, opts GroupsOpts) *selectors.GroupsRestore { + groups := opts.Groups + + ls := len(opts.Groups) + + if ls == 0 { + groups = selectors.Any() + } + + sel := selectors.NewGroupsRestore(groups) + + // TODO(meain): add selectors + sel.Include(sel.AllData()) + + return sel +} + +// FilterGroupsRestoreInfoSelectors builds the common info-selector filters. 
+func FilterGroupsRestoreInfoSelectors( + sel *selectors.GroupsRestore, + opts GroupsOpts, +) { + // TODO(meain) + // AddGroupInfo(sel, opts.GroupID, sel.Library) +} diff --git a/src/cli/utils/groups_test.go b/src/cli/utils/groups_test.go new file mode 100644 index 000000000..e2a48faf0 --- /dev/null +++ b/src/cli/utils/groups_test.go @@ -0,0 +1,161 @@ +package utils_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" + + "github.com/alcionai/corso/src/cli/utils" + "github.com/alcionai/corso/src/internal/tester" +) + +type GroupsUtilsSuite struct { + tester.Suite +} + +func TestGroupsUtilsSuite(t *testing.T) { + suite.Run(t, &GroupsUtilsSuite{Suite: tester.NewUnitSuite(t)}) +} + +// Tests selector build for Groups properly +// differentiates between the 3 categories: Pages, Libraries and Lists CLI +func (suite *GroupsUtilsSuite) TestIncludeGroupsRestoreDataSelectors() { + var ( + empty = []string{} + single = []string{"single"} + multi = []string{"more", "than", "one"} + ) + + table := []struct { + name string + opts utils.GroupsOpts + expectIncludeLen int + }{ + { + name: "no inputs", + opts: utils.GroupsOpts{}, + expectIncludeLen: 2, + }, + { + name: "empty", + opts: utils.GroupsOpts{ + Groups: empty, + }, + expectIncludeLen: 2, + }, + { + name: "single inputs", + opts: utils.GroupsOpts{ + Groups: single, + }, + expectIncludeLen: 2, + }, + { + name: "multi inputs", + opts: utils.GroupsOpts{ + Groups: multi, + }, + expectIncludeLen: 2, + }, + // TODO Add library specific tests once we have filters based + // on library folders + } + for _, test := range table { + suite.Run(test.name, func() { + t := suite.T() + + ctx, flush := tester.NewContext(t) + defer flush() + + sel := utils.IncludeGroupsRestoreDataSelectors(ctx, test.opts) + assert.Len(suite.T(), sel.Includes, test.expectIncludeLen) + }) + } +} + +func (suite *GroupsUtilsSuite) TestValidateGroupsRestoreFlags() { + table := []struct { + name string + backupID string + opts utils.GroupsOpts + expect assert.ErrorAssertionFunc + }{ + { + name: "no opts", + backupID: "id", + opts: utils.GroupsOpts{}, + expect: assert.NoError, + }, + { + name: "no backupID", + backupID: "", + opts: utils.GroupsOpts{}, + expect: assert.Error, + }, + // TODO: Add tests for selectors once we have them + // { + // name: "all valid", + // backupID: "id", + // opts: utils.GroupsOpts{ + // Populated: flags.PopulatedFlags{ + // flags.FileCreatedAfterFN: struct{}{}, + // flags.FileCreatedBeforeFN: struct{}{}, + // flags.FileModifiedAfterFN: struct{}{}, + // flags.FileModifiedBeforeFN: struct{}{}, + // }, + // }, + // expect: assert.NoError, + // }, + // { + // name: "invalid file created after", + // backupID: "id", + // opts: utils.GroupsOpts{ + // FileCreatedAfter: "1235", + // Populated: flags.PopulatedFlags{ + // flags.FileCreatedAfterFN: struct{}{}, + // }, + // }, + // expect: assert.Error, + // }, + // { + // name: "invalid file created before", + // backupID: "id", + // opts: utils.GroupsOpts{ + // FileCreatedBefore: "1235", + // Populated: flags.PopulatedFlags{ + // flags.FileCreatedBeforeFN: struct{}{}, + // }, + // }, + // expect: assert.Error, + // }, + // { + // name: "invalid file modified after", + // backupID: "id", + // opts: utils.GroupsOpts{ + // FileModifiedAfter: "1235", + // Populated: flags.PopulatedFlags{ + // flags.FileModifiedAfterFN: struct{}{}, + // }, + // }, + // expect: assert.Error, + // }, + // { + // name: "invalid file modified before", + // backupID: "id", + // opts: 
utils.GroupsOpts{ + // FileModifiedBefore: "1235", + // Populated: flags.PopulatedFlags{ + // flags.FileModifiedBeforeFN: struct{}{}, + // }, + // }, + // expect: assert.Error, + // }, + } + for _, test := range table { + suite.Run(test.name, func() { + t := suite.T() + test.expect(t, utils.ValidateGroupsRestoreFlags(test.backupID, test.opts)) + }) + } +} diff --git a/src/internal/m365/backup.go b/src/internal/m365/backup.go index 805dcebd1..55c4c7fdb 100644 --- a/src/internal/m365/backup.go +++ b/src/internal/m365/backup.go @@ -156,6 +156,17 @@ func (ctrl *Controller) IsBackupRunnable( service path.ServiceType, resourceOwner string, ) (bool, error) { + if service == path.GroupsService { + _, err := ctrl.AC.Groups().GetByID(ctx, resourceOwner) + if err != nil { + // TODO(meain): check for error message in case groups are + // not enabled at all similar to sharepoint + return false, err + } + + return true, nil + } + if service == path.SharePointService { _, err := ctrl.AC.Sites().GetRoot(ctx) if err != nil { @@ -181,7 +192,7 @@ func (ctrl *Controller) IsBackupRunnable( return true, nil } -func verifyBackupInputs(sels selectors.Selector, siteIDs []string) error { +func verifyBackupInputs(sels selectors.Selector, cachedIDs []string) error { var ids []string switch sels.Service { @@ -189,16 +200,13 @@ func verifyBackupInputs(sels selectors.Selector, siteIDs []string) error { // Exchange and OneDrive user existence now checked in checkServiceEnabled. return nil - case selectors.ServiceGroups: - // TODO(meain): check for group existence. - return nil - - case selectors.ServiceSharePoint: - ids = siteIDs + case selectors.ServiceSharePoint, selectors.ServiceGroups: + ids = cachedIDs } if !filters.Contains(ids).Compare(sels.ID()) { - return clues.Stack(graph.ErrResourceOwnerNotFound).With("missing_protected_resource", sels.DiscreteOwner) + return clues.Stack(graph.ErrResourceOwnerNotFound). + With("selector_protected_resource", sels.DiscreteOwner) } return nil diff --git a/src/internal/m365/collection/drive/group_handler.go b/src/internal/m365/collection/drive/group_handler.go index 81bbf36af..136d61b2d 100644 --- a/src/internal/m365/collection/drive/group_handler.go +++ b/src/internal/m365/collection/drive/group_handler.go @@ -100,6 +100,7 @@ func (h groupBackupHandler) NewLocationIDer( driveID string, elems ...string, ) details.LocationIDer { + // TODO(meain): path fixes return details.NewSharePointLocationIDer(driveID, elems...) 
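+	// Note (an inference from this patch alone, not a confirmed design
+	// call): group drives are SharePoint document libraries under the
+	// hood, so the SharePoint LocationIDer is reused until the
+	// groups-specific path fixes called out above land.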
 }

@@ -124,7 +125,6 @@ func (h groupBackupHandler) IsAllPass() bool {

 func (h groupBackupHandler) IncludesDir(dir string) bool {
 	// TODO(meain)
-	// return h.scope.Matches(selectors.SharePointGroupFolder, dir)
 	return true
 }

@@ -138,7 +138,7 @@ func augmentGroupItemInfo(
 	size int64,
 	parentPath *path.Builder,
 ) details.ItemInfo {
-	var driveName, driveID, creatorEmail string
+	var driveName, driveID, creatorEmail, siteID, weburl string

 	// TODO: we rely on this info for details/restore lookups,
 	// so if it's nil we have an issue, and will need an alternative
@@ -159,15 +159,15 @@ func augmentGroupItemInfo(
 		}
 	}

-	// gsi := item.GetSharepointIds()
-	// if gsi != nil {
-	// 	siteID = ptr.Val(gsi.GetSiteId())
-	// 	weburl = ptr.Val(gsi.GetSiteUrl())
+	gsi := item.GetSharepointIds()
+	if gsi != nil {
+		siteID = ptr.Val(gsi.GetSiteId())
+		weburl = ptr.Val(gsi.GetSiteUrl())

-	// 	if len(weburl) == 0 {
-	// 		weburl = constructWebURL(item.GetAdditionalData())
-	// 	}
-	// }
+		if len(weburl) == 0 {
+			weburl = constructWebURL(item.GetAdditionalData())
+		}
+	}

 	if item.GetParentReference() != nil {
 		driveID = ptr.Val(item.GetParentReference().GetDriveId())
@@ -179,6 +179,7 @@ func augmentGroupItemInfo(
 		pps = parentPath.String()
 	}

+	// TODO: Add channel name and ID
 	dii.Groups = &details.GroupsInfo{
 		Created:    ptr.Val(item.GetCreatedDateTime()),
 		DriveID:    driveID,
@@ -189,6 +190,8 @@ func augmentGroupItemInfo(
 		Owner:      creatorEmail,
 		ParentPath: pps,
 		Size:       size,
+		SiteID:     siteID,
+		WebURL:     weburl,
 	}

 	dii.Extension = &details.ExtensionData{}
diff --git a/src/pkg/backup/details/groups.go b/src/pkg/backup/details/groups.go
index 398d8f529..9065d6bbb 100644
--- a/src/pkg/backup/details/groups.go
+++ b/src/pkg/backup/details/groups.go
@@ -11,24 +11,48 @@ import (

 // NewGroupsLocationIDer builds a LocationIDer for the groups.
 func NewGroupsLocationIDer(
+	category path.CategoryType,
 	driveID string,
 	escapedFolders ...string,
-) uniqueLoc {
-	// TODO: implement
-	return uniqueLoc{}
+) (uniqueLoc, error) {
+	// TODO(meain): path fixes
+	if err := path.ValidateServiceAndCategory(path.GroupsService, category); err != nil {
+		return uniqueLoc{}, clues.Wrap(err, "making groups LocationIDer")
+	}
+
+	pb := path.Builder{}.Append(category.String())
+	prefixElems := 1
+
+	if driveID != "" { // non-SharePoint paths don't have a driveID
+		pb = pb.Append(driveID)
+
+		prefixElems = 2
+	}
+
+	pb = pb.Append(escapedFolders...)
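+
+	// Note: path.Builder is immutable, so each Append above reassigns its
+	// result. Worked example (an illustration assumed from this function
+	// alone, not taken from path package tests): category "libraries",
+	// driveID "d1", and folders ["a", "b"] yield the builder
+	// "libraries/d1/a/b" with prefixElems = 2, i.e. "libraries/d1" is the
+	// unique prefix and "a/b" the folder path beneath it.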
+
+	return uniqueLoc{pb, prefixElems}, nil
 }

 // GroupsInfo describes a groups item
 type GroupsInfo struct {
 	Created   time.Time `json:"created,omitempty"`
-	DriveName string    `json:"driveName,omitempty"`
-	DriveID   string    `json:"driveID,omitempty"`
 	ItemName  string    `json:"itemName,omitempty"`
 	ItemType  ItemType  `json:"itemType,omitempty"`
 	Modified  time.Time `json:"modified,omitempty"`
 	Owner     string    `json:"owner,omitempty"`
 	ParentPath string   `json:"parentPath,omitempty"`
 	Size      int64     `json:"size,omitempty"`
+
+	// Channels specific
+	ChannelName string `json:"channelName,omitempty"`
+	ChannelID   string `json:"channelID,omitempty"`
+
+	// SharePoint specific
+	DriveName string `json:"driveName,omitempty"`
+	DriveID   string `json:"driveID,omitempty"`
+	SiteID    string `json:"siteID,omitempty"`
+	WebURL    string `json:"webURL,omitempty"`
 }

 // Headers returns the human-readable names of properties in a SharePointInfo
@@ -51,9 +75,27 @@ func (i *GroupsInfo) UpdateParentPath(newLocPath *path.Builder) {
 }

 func (i *GroupsInfo) uniqueLocation(baseLoc *path.Builder) (*uniqueLoc, error) {
-	return nil, clues.New("not yet implemented")
+	var category path.CategoryType
+
+	switch i.ItemType {
+	case SharePointLibrary:
+		category = path.LibrariesCategory
+
+		if len(i.DriveID) == 0 {
+			return nil, clues.New("empty drive ID")
+		}
+	}
+
+	loc, err := NewGroupsLocationIDer(category, i.DriveID, baseLoc.Elements()...)
+
+	return &loc, err
 }

 func (i *GroupsInfo) updateFolder(f *FolderInfo) error {
-	return clues.New("not yet implemented")
+	// TODO(meain): path updates if any
+	if i.ItemType == SharePointLibrary {
+		return updateFolderWithinDrive(SharePointLibrary, i.DriveName, i.DriveID, f)
+	}
+
+	return clues.New("unsupported ItemType for GroupsInfo").With("item_type", i.ItemType)
 }
diff --git a/src/pkg/backup/details/iteminfo.go b/src/pkg/backup/details/iteminfo.go
index fbd6a92cd..a8ba23100 100644
--- a/src/pkg/backup/details/iteminfo.go
+++ b/src/pkg/backup/details/iteminfo.go
@@ -28,7 +28,7 @@ const (
 	ExchangeMail ItemType = 3

 	// SharePoint (10x)
-	SharePointLibrary ItemType = 101
+	SharePointLibrary ItemType = 101 // also used for groups
 	SharePointList    ItemType = 102
 	SharePointPage    ItemType = 103
diff --git a/src/pkg/selectors/groups.go b/src/pkg/selectors/groups.go
index 50aa3db74..6f1bd1d74 100644
--- a/src/pkg/selectors/groups.go
+++ b/src/pkg/selectors/groups.go
@@ -425,7 +425,6 @@ func (c groupsCategory) pathValues(
 		folderCat, itemCat = GroupsLibraryFolder, GroupsLibraryItem
 		rFld = ent.Groups.ParentPath
-
 	default:
 		return nil, clues.New("unrecognized groupsCategory").With("category", c)
 	}
diff --git a/src/pkg/services/m365/groups.go b/src/pkg/services/m365/groups.go
index f4924be22..a32195c1c 100644
--- a/src/pkg/services/m365/groups.go
+++ b/src/pkg/services/m365/groups.go
@@ -6,6 +6,7 @@ import (
 	"github.com/alcionai/clues"
 	"github.com/microsoftgraph/msgraph-sdk-go/models"

+	"github.com/alcionai/corso/src/internal/common/idname"
 	"github.com/alcionai/corso/src/internal/common/ptr"
 	"github.com/alcionai/corso/src/pkg/account"
 	"github.com/alcionai/corso/src/pkg/fault"
@@ -80,7 +81,7 @@ func getAllGroups(
 // helpers
 // ---------------------------------------------------------------------------

-// parseUser extracts information from `models.Groupable` we care about
+// parseGroup extracts information from `models.Groupable` we care about
 func parseGroup(ctx context.Context, mg models.Groupable) (*Group, error) {
 	if mg.GetDisplayName() == nil {
 		return nil, clues.New("group missing display name").
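
The hunk below adds `GroupsMap`. As a minimal sketch of how that id-name cache could back the group-existence check `verifyBackupInputs` now performs, under the assumption that the caller already holds an `account.Account` (the `groupExists` helper, its package, and its wiring are hypothetical; `GroupsMap`, `idname.Cacher`, and the `filters` comparison come from this patch series):

```go
package example

import (
	"context"

	"github.com/alcionai/corso/src/pkg/account"
	"github.com/alcionai/corso/src/pkg/fault"
	"github.com/alcionai/corso/src/pkg/filters"
	"github.com/alcionai/corso/src/pkg/services/m365"
)

// groupExists builds the tenant's group id-name cache once via GroupsMap,
// then checks membership the same way verifyBackupInputs compares cached
// IDs against the selector's discrete owner.
func groupExists(ctx context.Context, acct account.Account, groupID string) (bool, error) {
	ins, err := m365.GroupsMap(ctx, acct, fault.New(true))
	if err != nil {
		return false, err
	}

	return filters.Contains(ins.IDs()).Compare(groupID), nil
}
```

Note that `IsBackupRunnable` takes the direct route instead, querying Graph via `ctrl.AC.Groups().GetByID`; the cached-ID route sketched above mirrors how the SharePoint site list is handled.
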
@@ -95,3 +96,23 @@ func parseGroup(ctx context.Context, mg models.Groupable) (*Group, error) { return u, nil } + +// GroupsMap retrieves an id-name cache of all groups in the tenant. +func GroupsMap( + ctx context.Context, + acct account.Account, + errs *fault.Bus, +) (idname.Cacher, error) { + groups, err := Groups(ctx, acct, errs) + if err != nil { + return idname.NewCache(nil), err + } + + itn := make(map[string]string, len(groups)) + + for _, s := range groups { + itn[s.ID] = s.DisplayName + } + + return idname.NewCache(itn), nil +} diff --git a/src/pkg/services/m365/groups_test.go b/src/pkg/services/m365/groups_test.go index 8fa650a98..9219209f0 100644 --- a/src/pkg/services/m365/groups_test.go +++ b/src/pkg/services/m365/groups_test.go @@ -68,6 +68,31 @@ func (suite *GroupsIntgSuite) TestGroups() { } } +func (suite *GroupsIntgSuite) TestGroupsMap() { + t := suite.T() + + ctx, flush := tester.NewContext(t) + defer flush() + + graph.InitializeConcurrencyLimiter(ctx, true, 4) + + gm, err := m365.GroupsMap(ctx, suite.acct, fault.New(true)) + assert.NoError(t, err, clues.ToCore(err)) + assert.NotEmpty(t, gm) + + for _, gid := range gm.IDs() { + suite.Run("group_"+gid, func() { + t := suite.T() + + assert.NotEmpty(t, gid) + + name, ok := gm.NameOf(gid) + assert.True(t, ok) + assert.NotEmpty(t, name) + }) + } +} + func (suite *GroupsIntgSuite) TestGroups_InvalidCredentials() { table := []struct { name string