From 8d3fdeeb8dbd8dc68284c1d33b0b431d309b4f5b Mon Sep 17 00:00:00 2001 From: ashmrtn <3891298+ashmrtn@users.noreply.github.com> Date: Fri, 18 Aug 2023 09:09:08 -0700 Subject: [PATCH 01/32] Remove call to PITR backup check (#4067) Currently failing due to minor upstream bugs. Disable until we can get upstream fixes in. Revert this merge once upstream issues are fixed --- #### Does this PR need a docs update or release note? - [ ] :white_check_mark: Yes, it's included - [ ] :clock1: Yes, but in a later PR - [x] :no_entry: No #### Type of change - [ ] :sunflower: Feature - [ ] :bug: Bugfix - [ ] :world_map: Documentation - [ ] :robot: Supportability/Tests - [x] :computer: CI/Deployment - [ ] :broom: Tech Debt/Cleanup #### Issue(s) * #4031 #### Test Plan - [x] :muscle: Manual - [ ] :zap: Unit test - [ ] :green_heart: E2E --- src/cmd/longevity_test/longevity.go | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/src/cmd/longevity_test/longevity.go b/src/cmd/longevity_test/longevity.go index b3d6f865d..efe3bd352 100644 --- a/src/cmd/longevity_test/longevity.go +++ b/src/cmd/longevity_test/longevity.go @@ -67,6 +67,9 @@ func deleteBackups( // pitrListBackups connects to the repository at the given point in time and // lists the backups for service. It then checks the list of backups contains // the backups in backupIDs. +// +//nolint:unused +//lint:ignore U1000 Waiting for upstream fix tracked by 4031 func pitrListBackups( ctx context.Context, service path.ServiceType, @@ -156,16 +159,10 @@ func main() { fatal(ctx, "invalid number of days provided", nil) } - beforeDel := time.Now() - - backups, err := deleteBackups(ctx, service, days) + _, err = deleteBackups(ctx, service, days) if err != nil { fatal(ctx, "deleting backups", clues.Stack(err)) } - - if err := pitrListBackups(ctx, service, beforeDel, backups); err != nil { - fatal(ctx, "listing backups from point in time", clues.Stack(err)) - } } func fatal(ctx context.Context, msg string, err error) { From 20675dbcf7086f24ff086b593b3836519779b1b6 Mon Sep 17 00:00:00 2001 From: Keepers Date: Fri, 18 Aug 2023 14:10:56 -0600 Subject: [PATCH 02/32] add the groups resources service addition (#4053) Adds groups to the m365 services api. Also adds a bit of touchups/cleanups on the side. --- #### Does this PR need a docs update or release note? 
- [x] :no_entry: No #### Type of change - [x] :sunflower: Feature #### Issue(s) * #3989 #### Test Plan - [x] :zap: Unit test - [x] :green_heart: E2E --- .../tester/tconfig/protected_resources.go | 4 +- src/pkg/services/m365/api/groups.go | 106 ++++++----------- src/pkg/services/m365/api/groups_test.go | 93 +-------------- src/pkg/services/m365/api/helper_test.go | 13 ++- src/pkg/services/m365/groups.go | 97 ++++++++++++++++ src/pkg/services/m365/groups_test.go | 108 ++++++++++++++++++ src/pkg/services/m365/m365.go | 15 ++- src/pkg/services/m365/m365_test.go | 18 +-- 8 files changed, 272 insertions(+), 182 deletions(-) create mode 100644 src/pkg/services/m365/groups.go create mode 100644 src/pkg/services/m365/groups_test.go diff --git a/src/internal/tester/tconfig/protected_resources.go b/src/internal/tester/tconfig/protected_resources.go index caac0c586..26c0187ac 100644 --- a/src/internal/tester/tconfig/protected_resources.go +++ b/src/internal/tester/tconfig/protected_resources.go @@ -223,11 +223,11 @@ func UnlicensedM365UserID(t *testing.T) string { // Teams -// M365TeamsID returns a teamID string representing the m365TeamsID described +// M365TeamID returns a teamID string representing the m365TeamsID described // by either the env var CORSO_M365_TEST_TEAM_ID, the corso_test.toml config // file or the default value (in that order of priority). The default is a // last-attempt fallback that will only work on alcion's testing org. -func M365TeamsID(t *testing.T) string { +func M365TeamID(t *testing.T) string { cfg, err := ReadTestConfig() require.NoError(t, err, "retrieving m365 team id from test configuration: %+v", clues.ToCore(err)) diff --git a/src/pkg/services/m365/api/groups.go b/src/pkg/services/m365/api/groups.go index c2a27dad3..3d036e610 100644 --- a/src/pkg/services/m365/api/groups.go +++ b/src/pkg/services/m365/api/groups.go @@ -49,24 +49,6 @@ func (c Groups) GetAll( return getGroups(ctx, errs, service) } -// GetTeams retrieves all Teams. -func (c Groups) GetTeams( - ctx context.Context, - errs *fault.Bus, -) ([]models.Groupable, error) { - service, err := c.Service() - if err != nil { - return nil, err - } - - groups, err := getGroups(ctx, errs, service) - if err != nil { - return nil, err - } - - return OnlyTeams(ctx, groups), nil -} - // GetAll retrieves all groups. func getGroups( ctx context.Context, @@ -113,31 +95,6 @@ func getGroups( return groups, el.Failure() } -func OnlyTeams(ctx context.Context, groups []models.Groupable) []models.Groupable { - log := logger.Ctx(ctx) - - var teams []models.Groupable - - for _, g := range groups { - if g.GetAdditionalData()[ResourceProvisioningOptions] != nil { - val, _ := tform.AnyValueToT[[]any](ResourceProvisioningOptions, g.GetAdditionalData()) - for _, v := range val { - s, err := str.AnyToString(v) - if err != nil { - log.Debug("could not be converted to string value: ", ResourceProvisioningOptions) - continue - } - - if s == teamsAdditionalDataLabel { - teams = append(teams, g) - } - } - } - } - - return teams -} - // GetID retrieves group by groupID. func (c Groups) GetByID( ctx context.Context, @@ -158,34 +115,6 @@ func (c Groups) GetByID( return resp, graph.Stack(ctx, err).OrNil() } -// GetTeamByID retrieves group by groupID. 
-func (c Groups) GetTeamByID( - ctx context.Context, - identifier string, -) (models.Groupable, error) { - service, err := c.Service() - if err != nil { - return nil, err - } - - resp, err := service.Client().Groups().ByGroupId(identifier).Get(ctx, nil) - if err != nil { - err := graph.Wrap(ctx, err, "getting group by id") - - return nil, err - } - - groups := []models.Groupable{resp} - - if len(OnlyTeams(ctx, groups)) == 0 { - err := clues.New("given teamID is not related to any team") - - return nil, err - } - - return resp, graph.Stack(ctx, err).OrNil() -} - // --------------------------------------------------------------------------- // helpers // --------------------------------------------------------------------------- @@ -203,3 +132,38 @@ func ValidateGroup(item models.Groupable) error { return nil } + +func OnlyTeams(ctx context.Context, groups []models.Groupable) []models.Groupable { + var teams []models.Groupable + + for _, g := range groups { + if IsTeam(ctx, g) { + teams = append(teams, g) + } + } + + return teams +} + +func IsTeam(ctx context.Context, mg models.Groupable) bool { + log := logger.Ctx(ctx) + + if mg.GetAdditionalData()[ResourceProvisioningOptions] == nil { + return false + } + + val, _ := tform.AnyValueToT[[]any](ResourceProvisioningOptions, mg.GetAdditionalData()) + for _, v := range val { + s, err := str.AnyToString(v) + if err != nil { + log.Debug("could not be converted to string value: ", ResourceProvisioningOptions) + continue + } + + if s == teamsAdditionalDataLabel { + return true + } + } + + return false +} diff --git a/src/pkg/services/m365/api/groups_test.go b/src/pkg/services/m365/api/groups_test.go index 8ce0f8f6b..ae435168a 100644 --- a/src/pkg/services/m365/api/groups_test.go +++ b/src/pkg/services/m365/api/groups_test.go @@ -97,7 +97,7 @@ func (suite *GroupsIntgSuite) SetupSuite() { suite.its = newIntegrationTesterSetup(suite.T()) } -func (suite *GroupsIntgSuite) TestGetAllGroups() { +func (suite *GroupsIntgSuite) TestGetAll() { t := suite.T() ctx, flush := tester.NewContext(t) @@ -107,100 +107,15 @@ func (suite *GroupsIntgSuite) TestGetAllGroups() { Groups(). GetAll(ctx, fault.New(true)) require.NoError(t, err) - require.NotZero(t, len(groups), "must have at least one group") -} - -func (suite *GroupsIntgSuite) TestGetAllTeams() { - t := suite.T() - - ctx, flush := tester.NewContext(t) - defer flush() - - teams, err := suite.its.ac. - Groups(). - GetTeams(ctx, fault.New(true)) - require.NoError(t, err) - require.NotZero(t, len(teams), "must have at least one teams") - - groups, err := suite.its.ac. - Groups(). 
- GetAll(ctx, fault.New(true)) - require.NoError(t, err) - require.NotZero(t, len(groups), "must have at least one group") - - var isTeam bool - - if len(groups) > len(teams) { - isTeam = true - } - - assert.True(t, isTeam, "must only return teams") -} - -func (suite *GroupsIntgSuite) TestTeams_GetByID() { - var ( - t = suite.T() - teamID = tconfig.M365TeamsID(t) - ) - - teamsAPI := suite.its.ac.Groups() - - table := []struct { - name string - id string - expectErr func(*testing.T, error) - }{ - { - name: "3 part id", - id: teamID, - expectErr: func(t *testing.T, err error) { - assert.NoError(t, err, clues.ToCore(err)) - }, - }, - { - name: "malformed id", - id: uuid.NewString(), - expectErr: func(t *testing.T, err error) { - assert.Error(t, err, clues.ToCore(err)) - }, - }, - { - name: "random id", - id: uuid.NewString() + "," + uuid.NewString(), - expectErr: func(t *testing.T, err error) { - assert.Error(t, err, clues.ToCore(err)) - }, - }, - - { - name: "malformed url", - id: "barunihlda", - expectErr: func(t *testing.T, err error) { - assert.Error(t, err, clues.ToCore(err)) - }, - }, - } - for _, test := range table { - suite.Run(test.name, func() { - t := suite.T() - - ctx, flush := tester.NewContext(t) - defer flush() - - _, err := teamsAPI.GetTeamByID(ctx, test.id) - test.expectErr(t, err) - }) - } + require.NotZero(t, len(groups), "must find at least one group") } func (suite *GroupsIntgSuite) TestGroups_GetByID() { var ( - t = suite.T() - groupID = tconfig.M365GroupID(t) + groupID = suite.its.groupID + groupsAPI = suite.its.ac.Groups() ) - groupsAPI := suite.its.ac.Groups() - table := []struct { name string id string diff --git a/src/pkg/services/m365/api/helper_test.go b/src/pkg/services/m365/api/helper_test.go index a9c12324f..8e8c760c0 100644 --- a/src/pkg/services/m365/api/helper_test.go +++ b/src/pkg/services/m365/api/helper_test.go @@ -83,7 +83,7 @@ type intgTesterSetup struct { siteID string siteDriveID string siteDriveRootFolderID string - teamID string + groupID string } func newIntegrationTesterSetup(t *testing.T) intgTesterSetup { @@ -132,13 +132,16 @@ func newIntegrationTesterSetup(t *testing.T) intgTesterSetup { its.siteDriveRootFolderID = ptr.Val(siteDriveRootFolder.GetId()) - // teams - its.teamID = tconfig.M365TeamsID(t) + // group - team, err := its.ac.Groups().GetTeamByID(ctx, its.teamID) + // use of the TeamID is intentional here, so that we are assured + // the group has full usage of the teams api. + its.groupID = tconfig.M365TeamID(t) + + team, err := its.ac.Groups().GetByID(ctx, its.groupID) require.NoError(t, err, clues.ToCore(err)) - its.teamID = ptr.Val(team.GetId()) + its.groupID = ptr.Val(team.GetId()) return its } diff --git a/src/pkg/services/m365/groups.go b/src/pkg/services/m365/groups.go new file mode 100644 index 000000000..f4924be22 --- /dev/null +++ b/src/pkg/services/m365/groups.go @@ -0,0 +1,97 @@ +package m365 + +import ( + "context" + + "github.com/alcionai/clues" + "github.com/microsoftgraph/msgraph-sdk-go/models" + + "github.com/alcionai/corso/src/internal/common/ptr" + "github.com/alcionai/corso/src/pkg/account" + "github.com/alcionai/corso/src/pkg/fault" + "github.com/alcionai/corso/src/pkg/path" + "github.com/alcionai/corso/src/pkg/services/m365/api" +) + +// Group is the minimal information required to identify and display a M365 Group. +type Group struct { + ID string + + // DisplayName is the human-readable name of the group. 
Normally the plaintext name that the
+	// user provided when they created the group, or the updated name if it was changed.
+	// Ex: displayName: "My Group"
+	DisplayName string
+
+	// IsTeam is true if the group qualifies as a Teams resource and can back up
+	// and restore teams data.
+	IsTeam bool
+}
+
+// GroupsCompat returns a list of groups in the specified M365 tenant.
+func GroupsCompat(ctx context.Context, acct account.Account) ([]*Group, error) {
+	errs := fault.New(true)
+
+	groups, err := Groups(ctx, acct, errs)
+	if err != nil {
+		return nil, err
+	}
+
+	return groups, errs.Failure()
+}
+
+// Groups returns a list of groups in the specified M365 tenant.
+func Groups(
+	ctx context.Context,
+	acct account.Account,
+	errs *fault.Bus,
+) ([]*Group, error) {
+	ac, err := makeAC(ctx, acct, path.GroupsService)
+	if err != nil {
+		return nil, clues.Stack(err).WithClues(ctx)
+	}
+
+	return getAllGroups(ctx, ac.Groups())
+}
+
+func getAllGroups(
+	ctx context.Context,
+	ga getAller[models.Groupable],
+) ([]*Group, error) {
+	groups, err := ga.GetAll(ctx, fault.New(true))
+	if err != nil {
+		return nil, clues.Wrap(err, "retrieving groups")
+	}
+
+	ret := make([]*Group, 0, len(groups))
+
+	for _, g := range groups {
+		t, err := parseGroup(ctx, g)
+		if err != nil {
+			return nil, clues.Wrap(err, "parsing groups")
+		}
+
+		ret = append(ret, t)
+	}
+
+	return ret, nil
+}
+
+// ---------------------------------------------------------------------------
+// helpers
+// ---------------------------------------------------------------------------
+
+// parseGroup extracts the information from `models.Groupable` that we care about.
+func parseGroup(ctx context.Context, mg models.Groupable) (*Group, error) {
+	if mg.GetDisplayName() == nil {
+		return nil, clues.New("group missing display name").
+ With("group_id", ptr.Val(mg.GetId())) + } + + u := &Group{ + ID: ptr.Val(mg.GetId()), + DisplayName: ptr.Val(mg.GetDisplayName()), + IsTeam: api.IsTeam(ctx, mg), + } + + return u, nil +} diff --git a/src/pkg/services/m365/groups_test.go b/src/pkg/services/m365/groups_test.go new file mode 100644 index 000000000..8fa650a98 --- /dev/null +++ b/src/pkg/services/m365/groups_test.go @@ -0,0 +1,108 @@ +package m365_test + +import ( + "testing" + + "github.com/alcionai/clues" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + "github.com/alcionai/corso/src/internal/m365/graph" + "github.com/alcionai/corso/src/internal/tester" + "github.com/alcionai/corso/src/internal/tester/tconfig" + "github.com/alcionai/corso/src/pkg/account" + "github.com/alcionai/corso/src/pkg/credentials" + "github.com/alcionai/corso/src/pkg/fault" + "github.com/alcionai/corso/src/pkg/services/m365" +) + +type GroupsIntgSuite struct { + tester.Suite + acct account.Account +} + +func TestGroupsIntgSuite(t *testing.T) { + suite.Run(t, &GroupsIntgSuite{ + Suite: tester.NewIntegrationSuite( + t, + [][]string{tconfig.M365AcctCredEnvs}), + }) +} + +func (suite *GroupsIntgSuite) SetupSuite() { + t := suite.T() + + ctx, flush := tester.NewContext(t) + defer flush() + + graph.InitializeConcurrencyLimiter(ctx, true, 4) + + suite.acct = tconfig.NewM365Account(t) +} + +func (suite *GroupsIntgSuite) TestGroups() { + t := suite.T() + + ctx, flush := tester.NewContext(t) + defer flush() + + graph.InitializeConcurrencyLimiter(ctx, true, 4) + + groups, err := m365.Groups(ctx, suite.acct, fault.New(true)) + assert.NoError(t, err, clues.ToCore(err)) + assert.NotEmpty(t, groups) + + for _, group := range groups { + suite.Run("group_"+group.ID, func() { + t := suite.T() + + assert.NotEmpty(t, group.ID) + assert.NotEmpty(t, group.DisplayName) + + // at least one known group should be a team + if group.ID == tconfig.M365TeamID(t) { + assert.True(t, group.IsTeam) + } + }) + } +} + +func (suite *GroupsIntgSuite) TestGroups_InvalidCredentials() { + table := []struct { + name string + acct func(t *testing.T) account.Account + }{ + { + name: "Invalid Credentials", + acct: func(t *testing.T) account.Account { + a, err := account.NewAccount( + account.ProviderM365, + account.M365Config{ + M365: credentials.M365{ + AzureClientID: "Test", + AzureClientSecret: "without", + }, + AzureTenantID: "data", + }, + ) + require.NoError(t, err, clues.ToCore(err)) + + return a + }, + }, + } + + for _, test := range table { + suite.Run(test.name, func() { + t := suite.T() + + ctx, flush := tester.NewContext(t) + defer flush() + + groups, err := m365.Groups(ctx, test.acct(t), fault.New(true)) + assert.Empty(t, groups, "returned no groups") + assert.NotNil(t, err) + }) + } +} diff --git a/src/pkg/services/m365/m365.go b/src/pkg/services/m365/m365.go index 5b61885e5..469f4d08f 100644 --- a/src/pkg/services/m365/m365.go +++ b/src/pkg/services/m365/m365.go @@ -24,6 +24,10 @@ type getDefaultDriver interface { GetDefaultDrive(ctx context.Context, userID string) (models.Driveable, error) } +type getAller[T any] interface { + GetAll(ctx context.Context, errs *fault.Bus) ([]T, error) +} + // --------------------------------------------------------------------------- // Users // --------------------------------------------------------------------------- @@ -253,12 +257,11 @@ func Sites(ctx context.Context, acct account.Account, errs *fault.Bus) ([]*Site, return getAllSites(ctx, ac.Sites()) } -type 
getAllSiteser interface { - GetAll(ctx context.Context, errs *fault.Bus) ([]models.Siteable, error) -} - -func getAllSites(ctx context.Context, gas getAllSiteser) ([]*Site, error) { - sites, err := gas.GetAll(ctx, fault.New(true)) +func getAllSites( + ctx context.Context, + ga getAller[models.Siteable], +) ([]*Site, error) { + sites, err := ga.GetAll(ctx, fault.New(true)) if err != nil { if clues.HasLabel(err, graph.LabelsNoSharePointLicense) { return nil, clues.Stack(graph.ErrServiceNotEnabled, err) diff --git a/src/pkg/services/m365/m365_test.go b/src/pkg/services/m365/m365_test.go index 1eafa67f2..0124f13f2 100644 --- a/src/pkg/services/m365/m365_test.go +++ b/src/pkg/services/m365/m365_test.go @@ -276,25 +276,25 @@ func (suite *m365UnitSuite) TestCheckUserHasDrives() { } } -type mockGAS struct { +type mockGASites struct { response []models.Siteable err error } -func (m mockGAS) GetAll(context.Context, *fault.Bus) ([]models.Siteable, error) { +func (m mockGASites) GetAll(context.Context, *fault.Bus) ([]models.Siteable, error) { return m.response, m.err } func (suite *m365UnitSuite) TestGetAllSites() { table := []struct { name string - mock func(context.Context) getAllSiteser + mock func(context.Context) getAller[models.Siteable] expectErr func(*testing.T, error) }{ { name: "ok", - mock: func(ctx context.Context) getAllSiteser { - return mockGAS{[]models.Siteable{}, nil} + mock: func(ctx context.Context) getAller[models.Siteable] { + return mockGASites{[]models.Siteable{}, nil} }, expectErr: func(t *testing.T, err error) { assert.NoError(t, err, clues.ToCore(err)) @@ -302,14 +302,14 @@ func (suite *m365UnitSuite) TestGetAllSites() { }, { name: "no sharepoint license", - mock: func(ctx context.Context) getAllSiteser { + mock: func(ctx context.Context) getAller[models.Siteable] { odErr := odataerrors.NewODataError() merr := odataerrors.NewMainError() merr.SetCode(ptr.To("code")) merr.SetMessage(ptr.To(string(graph.NoSPLicense))) odErr.SetErrorEscaped(merr) - return mockGAS{nil, graph.Stack(ctx, odErr)} + return mockGASites{nil, graph.Stack(ctx, odErr)} }, expectErr: func(t *testing.T, err error) { assert.ErrorIs(t, err, graph.ErrServiceNotEnabled, clues.ToCore(err)) @@ -317,14 +317,14 @@ func (suite *m365UnitSuite) TestGetAllSites() { }, { name: "arbitrary error", - mock: func(ctx context.Context) getAllSiteser { + mock: func(ctx context.Context) getAller[models.Siteable] { odErr := odataerrors.NewODataError() merr := odataerrors.NewMainError() merr.SetCode(ptr.To("code")) merr.SetMessage(ptr.To("message")) odErr.SetErrorEscaped(merr) - return mockGAS{nil, graph.Stack(ctx, odErr)} + return mockGASites{nil, graph.Stack(ctx, odErr)} }, expectErr: func(t *testing.T, err error) { assert.Error(t, err, clues.ToCore(err)) From 2c00ca40ac76e5135c4fd53cda70c6ec0d72b57a Mon Sep 17 00:00:00 2001 From: Keepers Date: Fri, 18 Aug 2023 15:35:22 -0600 Subject: [PATCH 03/32] Updated teams cli addition (#4054) #### Does this PR need a docs update or release note? 
- [x] :no_entry: No #### Type of change - [x] :sunflower: Feature #### Issue(s) * #3989 #### Test Plan - [x] :muscle: Manual - [x] :zap: Unit test --- src/cli/backup/backup.go | 1 + src/cli/backup/groups.go | 230 +++++++++++++++++++++++++++++++++ src/cli/backup/groups_test.go | 98 ++++++++++++++ src/cli/backup/teams.go | 230 +++++++++++++++++++++++++++++++++ src/cli/backup/teams_test.go | 98 ++++++++++++++ src/cli/flags/groups.go | 28 ++++ src/cli/flags/teams.go | 28 ++++ src/cli/restore/groups.go | 81 ++++++++++++ src/cli/restore/groups_test.go | 108 ++++++++++++++++ src/cli/restore/teams.go | 81 ++++++++++++ src/cli/restore/teams_test.go | 108 ++++++++++++++++ src/cli/utils/groups.go | 30 +++++ src/cli/utils/teams.go | 30 +++++ src/cli/utils/utils.go | 2 + src/pkg/path/service_type.go | 2 + 15 files changed, 1155 insertions(+) create mode 100644 src/cli/backup/groups.go create mode 100644 src/cli/backup/groups_test.go create mode 100644 src/cli/backup/teams.go create mode 100644 src/cli/backup/teams_test.go create mode 100644 src/cli/flags/groups.go create mode 100644 src/cli/flags/teams.go create mode 100644 src/cli/restore/groups.go create mode 100644 src/cli/restore/groups_test.go create mode 100644 src/cli/restore/teams.go create mode 100644 src/cli/restore/teams_test.go create mode 100644 src/cli/utils/groups.go create mode 100644 src/cli/utils/teams.go diff --git a/src/cli/backup/backup.go b/src/cli/backup/backup.go index c8df39902..56b5c5ef4 100644 --- a/src/cli/backup/backup.go +++ b/src/cli/backup/backup.go @@ -39,6 +39,7 @@ var serviceCommands = []func(cmd *cobra.Command) *cobra.Command{ addExchangeCommands, addOneDriveCommands, addSharePointCommands, + addTeamsCommands, } // AddCommands attaches all `corso backup * *` commands to the parent. diff --git a/src/cli/backup/groups.go b/src/cli/backup/groups.go new file mode 100644 index 000000000..3f1f83eb7 --- /dev/null +++ b/src/cli/backup/groups.go @@ -0,0 +1,230 @@ +package backup + +import ( + "github.com/alcionai/clues" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + + "github.com/alcionai/corso/src/cli/flags" + . "github.com/alcionai/corso/src/cli/print" + "github.com/alcionai/corso/src/cli/utils" + "github.com/alcionai/corso/src/pkg/path" +) + +// ------------------------------------------------------------------------------------------------ +// setup and globals +// ------------------------------------------------------------------------------------------------ + +const ( + groupsServiceCommand = "groups" + groupsServiceCommandCreateUseSuffix = "--group | '" + flags.Wildcard + "'" + groupsServiceCommandDeleteUseSuffix = "--backup " + groupsServiceCommandDetailsUseSuffix = "--backup " +) + +// TODO: correct examples +const ( + groupsServiceCommandCreateExamples = `# Backup all Groups data for Alice +corso backup create groups --group alice@example.com + +# Backup only Groups contacts for Alice and Bob +corso backup create groups --group engineering,sales --data contacts + +# Backup all Groups data for all M365 users +corso backup create groups --group '*'` + + groupsServiceCommandDeleteExamples = `# Delete Groups backup with ID 1234abcd-12ab-cd34-56de-1234abcd +corso backup delete groups --backup 1234abcd-12ab-cd34-56de-1234abcd` + + groupsServiceCommandDetailsExamples = `# Explore items in Alice's latest backup (1234abcd...) 
+corso backup details groups --backup 1234abcd-12ab-cd34-56de-1234abcd + +# Explore calendar events occurring after start of 2022 +corso backup details groups --backup 1234abcd-12ab-cd34-56de-1234abcd \ + --event-starts-after 2022-01-01T00:00:00` +) + +// called by backup.go to map subcommands to provider-specific handling. +func addGroupsCommands(cmd *cobra.Command) *cobra.Command { + var ( + c *cobra.Command + fs *pflag.FlagSet + ) + + switch cmd.Use { + case createCommand: + c, fs = utils.AddCommand(cmd, groupsCreateCmd(), utils.HideCommand()) + fs.SortFlags = false + + c.Use = c.Use + " " + groupsServiceCommandCreateUseSuffix + c.Example = groupsServiceCommandCreateExamples + + // Flags addition ordering should follow the order we want them to appear in help and docs: + flags.AddGroupFlag(c) + flags.AddDataFlag(c, []string{dataLibraries}, false) + flags.AddCorsoPassphaseFlags(c) + flags.AddAWSCredsFlags(c) + flags.AddAzureCredsFlags(c) + flags.AddFetchParallelismFlag(c) + flags.AddFailFastFlag(c) + + case listCommand: + c, fs = utils.AddCommand(cmd, groupsListCmd(), utils.HideCommand()) + fs.SortFlags = false + + flags.AddBackupIDFlag(c, false) + flags.AddCorsoPassphaseFlags(c) + flags.AddAWSCredsFlags(c) + flags.AddAzureCredsFlags(c) + addFailedItemsFN(c) + addSkippedItemsFN(c) + addRecoveredErrorsFN(c) + + case detailsCommand: + c, fs = utils.AddCommand(cmd, groupsDetailsCmd(), utils.HideCommand()) + fs.SortFlags = false + + c.Use = c.Use + " " + groupsServiceCommandDetailsUseSuffix + c.Example = groupsServiceCommandDetailsExamples + + flags.AddSkipReduceFlag(c) + + // Flags addition ordering should follow the order we want them to appear in help and docs: + // More generic (ex: --user) and more frequently used flags take precedence. + flags.AddBackupIDFlag(c, true) + flags.AddCorsoPassphaseFlags(c) + flags.AddAWSCredsFlags(c) + flags.AddAzureCredsFlags(c) + + case deleteCommand: + c, fs = utils.AddCommand(cmd, groupsDeleteCmd(), utils.HideCommand()) + fs.SortFlags = false + + c.Use = c.Use + " " + groupsServiceCommandDeleteUseSuffix + c.Example = groupsServiceCommandDeleteExamples + + flags.AddBackupIDFlag(c, true) + flags.AddCorsoPassphaseFlags(c) + flags.AddAWSCredsFlags(c) + flags.AddAzureCredsFlags(c) + } + + return c +} + +// ------------------------------------------------------------------------------------------------ +// backup create +// ------------------------------------------------------------------------------------------------ + +// `corso backup create groups [...]` +func groupsCreateCmd() *cobra.Command { + return &cobra.Command{ + Use: groupsServiceCommand, + Short: "Backup M365 Group service data", + RunE: createGroupsCmd, + Args: cobra.NoArgs, + } +} + +// processes a groups service backup. 
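+// Currently a placeholder: after flag validation it returns
+// ErrNotYetImplemented, since the groups backup operation is not wired up yet.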
+func createGroupsCmd(cmd *cobra.Command, args []string) error {
+	ctx := cmd.Context()
+
+	if utils.HasNoFlagsAndShownHelp(cmd) {
+		return nil
+	}
+
+	if err := validateGroupBackupCreateFlags(flags.GroupFV); err != nil {
+		return Only(ctx, err)
+	}
+
+	return Only(ctx, utils.ErrNotYetImplemented)
+}
+
+// ------------------------------------------------------------------------------------------------
+// backup list
+// ------------------------------------------------------------------------------------------------
+
+// `corso backup list groups [...]`
+func groupsListCmd() *cobra.Command {
+	return &cobra.Command{
+		Use:   groupsServiceCommand,
+		Short: "List the history of M365 Groups service backups",
+		RunE:  listGroupsCmd,
+		Args:  cobra.NoArgs,
+	}
+}
+
+// lists the history of backup operations
+func listGroupsCmd(cmd *cobra.Command, args []string) error {
+	return genericListCommand(cmd, flags.BackupIDFV, path.GroupsService, args)
+}
+
+// ------------------------------------------------------------------------------------------------
+// backup details
+// ------------------------------------------------------------------------------------------------
+
+// `corso backup details groups [...]`
+func groupsDetailsCmd() *cobra.Command {
+	return &cobra.Command{
+		Use:   groupsServiceCommand,
+		Short: "Shows the details of an M365 Groups service backup",
+		RunE:  detailsGroupsCmd,
+		Args:  cobra.NoArgs,
+	}
+}
+
+// shows the details of a groups service backup.
+func detailsGroupsCmd(cmd *cobra.Command, args []string) error {
+	ctx := cmd.Context()
+
+	if utils.HasNoFlagsAndShownHelp(cmd) {
+		return nil
+	}
+
+	return Only(ctx, utils.ErrNotYetImplemented)
+}
+
+// ------------------------------------------------------------------------------------------------
+// backup delete
+// ------------------------------------------------------------------------------------------------
+
+// `corso backup delete groups [...]`
+func groupsDeleteCmd() *cobra.Command {
+	return &cobra.Command{
+		Use:   groupsServiceCommand,
+		Short: "Delete backed-up M365 Groups service data",
+		RunE:  deleteGroupsCmd,
+		Args:  cobra.NoArgs,
+	}
+}
+
+// deletes a groups service backup.
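+// deletion itself is delegated to the shared genericDeleteCommand helper.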
+func deleteGroupsCmd(cmd *cobra.Command, args []string) error { + return genericDeleteCommand(cmd, path.GroupsService, flags.BackupIDFV, "Groups", args) +} + +// --------------------------------------------------------------------------- +// helpers +// --------------------------------------------------------------------------- + +func validateGroupBackupCreateFlags(groups []string) error { + if len(groups) == 0 { + return clues.New( + "requires one or more --" + + flags.GroupFN + " ids, or the wildcard --" + + flags.GroupFN + " *", + ) + } + + // TODO(meain) + // for _, d := range cats { + // if d != dataLibraries { + // return clues.New( + // d + " is an unrecognized data type; only " + dataLibraries + " is supported" + // ) + // } + // } + + return nil +} diff --git a/src/cli/backup/groups_test.go b/src/cli/backup/groups_test.go new file mode 100644 index 000000000..04a131b59 --- /dev/null +++ b/src/cli/backup/groups_test.go @@ -0,0 +1,98 @@ +package backup + +import ( + "testing" + + "github.com/spf13/cobra" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + "github.com/alcionai/corso/src/cli/flags" + "github.com/alcionai/corso/src/internal/tester" +) + +type GroupsUnitSuite struct { + tester.Suite +} + +func TestGroupsUnitSuite(t *testing.T) { + suite.Run(t, &GroupsUnitSuite{Suite: tester.NewUnitSuite(t)}) +} + +func (suite *GroupsUnitSuite) TestAddGroupsCommands() { + expectUse := groupsServiceCommand + + table := []struct { + name string + use string + expectUse string + expectShort string + flags []string + expectRunE func(*cobra.Command, []string) error + }{ + { + "create groups", + createCommand, + expectUse + " " + groupsServiceCommandCreateUseSuffix, + groupsCreateCmd().Short, + []string{ + flags.CategoryDataFN, + flags.FailFastFN, + flags.FetchParallelismFN, + flags.SkipReduceFN, + flags.NoStatsFN, + }, + createGroupsCmd, + }, + { + "list groups", + listCommand, + expectUse, + groupsListCmd().Short, + []string{ + flags.BackupFN, + flags.FailedItemsFN, + flags.SkippedItemsFN, + flags.RecoveredErrorsFN, + }, + listGroupsCmd, + }, + { + "details groups", + detailsCommand, + expectUse + " " + groupsServiceCommandDetailsUseSuffix, + groupsDetailsCmd().Short, + []string{ + flags.BackupFN, + }, + detailsGroupsCmd, + }, + { + "delete groups", + deleteCommand, + expectUse + " " + groupsServiceCommandDeleteUseSuffix, + groupsDeleteCmd().Short, + []string{flags.BackupFN}, + deleteGroupsCmd, + }, + } + for _, test := range table { + suite.Run(test.name, func() { + t := suite.T() + + cmd := &cobra.Command{Use: test.use} + + c := addGroupsCommands(cmd) + require.NotNil(t, c) + + cmds := cmd.Commands() + require.Len(t, cmds, 1) + + child := cmds[0] + assert.Equal(t, test.expectUse, child.Use) + assert.Equal(t, test.expectShort, child.Short) + tester.AreSameFunc(t, test.expectRunE, child.RunE) + }) + } +} diff --git a/src/cli/backup/teams.go b/src/cli/backup/teams.go new file mode 100644 index 000000000..fcac3394d --- /dev/null +++ b/src/cli/backup/teams.go @@ -0,0 +1,230 @@ +package backup + +import ( + "github.com/alcionai/clues" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + + "github.com/alcionai/corso/src/cli/flags" + . 
"github.com/alcionai/corso/src/cli/print" + "github.com/alcionai/corso/src/cli/utils" + "github.com/alcionai/corso/src/pkg/path" +) + +// ------------------------------------------------------------------------------------------------ +// setup and globals +// ------------------------------------------------------------------------------------------------ + +const ( + teamsServiceCommand = "teams" + teamsServiceCommandCreateUseSuffix = "--team | '" + flags.Wildcard + "'" + teamsServiceCommandDeleteUseSuffix = "--backup " + teamsServiceCommandDetailsUseSuffix = "--backup " +) + +// TODO: correct examples +const ( + teamsServiceCommandCreateExamples = `# Backup all Teams data for Alice +corso backup create teams --team alice@example.com + +# Backup only Teams contacts for Alice and Bob +corso backup create teams --team engineering,sales --data contacts + +# Backup all Teams data for all M365 users +corso backup create teams --team '*'` + + teamsServiceCommandDeleteExamples = `# Delete Teams backup with ID 1234abcd-12ab-cd34-56de-1234abcd +corso backup delete teams --backup 1234abcd-12ab-cd34-56de-1234abcd` + + teamsServiceCommandDetailsExamples = `# Explore items in Alice's latest backup (1234abcd...) +corso backup details teams --backup 1234abcd-12ab-cd34-56de-1234abcd + +# Explore calendar events occurring after start of 2022 +corso backup details teams --backup 1234abcd-12ab-cd34-56de-1234abcd \ + --event-starts-after 2022-01-01T00:00:00` +) + +// called by backup.go to map subcommands to provider-specific handling. +func addTeamsCommands(cmd *cobra.Command) *cobra.Command { + var ( + c *cobra.Command + fs *pflag.FlagSet + ) + + switch cmd.Use { + case createCommand: + c, fs = utils.AddCommand(cmd, teamsCreateCmd(), utils.HideCommand()) + fs.SortFlags = false + + c.Use = c.Use + " " + teamsServiceCommandCreateUseSuffix + c.Example = teamsServiceCommandCreateExamples + + // Flags addition ordering should follow the order we want them to appear in help and docs: + flags.AddTeamFlag(c) + flags.AddDataFlag(c, []string{dataEmail, dataContacts, dataEvents}, false) + flags.AddCorsoPassphaseFlags(c) + flags.AddAWSCredsFlags(c) + flags.AddAzureCredsFlags(c) + flags.AddFetchParallelismFlag(c) + flags.AddFailFastFlag(c) + + case listCommand: + c, fs = utils.AddCommand(cmd, teamsListCmd(), utils.HideCommand()) + fs.SortFlags = false + + flags.AddBackupIDFlag(c, false) + flags.AddCorsoPassphaseFlags(c) + flags.AddAWSCredsFlags(c) + flags.AddAzureCredsFlags(c) + addFailedItemsFN(c) + addSkippedItemsFN(c) + addRecoveredErrorsFN(c) + + case detailsCommand: + c, fs = utils.AddCommand(cmd, teamsDetailsCmd(), utils.HideCommand()) + fs.SortFlags = false + + c.Use = c.Use + " " + teamsServiceCommandDetailsUseSuffix + c.Example = teamsServiceCommandDetailsExamples + + flags.AddSkipReduceFlag(c) + + // Flags addition ordering should follow the order we want them to appear in help and docs: + // More generic (ex: --user) and more frequently used flags take precedence. 
+		flags.AddBackupIDFlag(c, true)
+		flags.AddCorsoPassphaseFlags(c)
+		flags.AddAWSCredsFlags(c)
+		flags.AddAzureCredsFlags(c)
+
+	case deleteCommand:
+		c, fs = utils.AddCommand(cmd, teamsDeleteCmd(), utils.HideCommand())
+		fs.SortFlags = false
+
+		c.Use = c.Use + " " + teamsServiceCommandDeleteUseSuffix
+		c.Example = teamsServiceCommandDeleteExamples
+
+		flags.AddBackupIDFlag(c, true)
+		flags.AddCorsoPassphaseFlags(c)
+		flags.AddAWSCredsFlags(c)
+		flags.AddAzureCredsFlags(c)
+	}
+
+	return c
+}
+
+// ------------------------------------------------------------------------------------------------
+// backup create
+// ------------------------------------------------------------------------------------------------
+
+// `corso backup create teams [...]`
+func teamsCreateCmd() *cobra.Command {
+	return &cobra.Command{
+		Use:   teamsServiceCommand,
+		Short: "Backup M365 Team service data",
+		RunE:  createTeamsCmd,
+		Args:  cobra.NoArgs,
+	}
+}
+
+// processes a teams service backup.
+func createTeamsCmd(cmd *cobra.Command, args []string) error {
+	ctx := cmd.Context()
+
+	if utils.HasNoFlagsAndShownHelp(cmd) {
+		return nil
+	}
+
+	if err := validateTeamBackupCreateFlags(flags.TeamFV); err != nil {
+		return Only(ctx, err)
+	}
+
+	return Only(ctx, utils.ErrNotYetImplemented)
+}
+
+// ------------------------------------------------------------------------------------------------
+// backup list
+// ------------------------------------------------------------------------------------------------
+
+// `corso backup list teams [...]`
+func teamsListCmd() *cobra.Command {
+	return &cobra.Command{
+		Use:   teamsServiceCommand,
+		Short: "List the history of M365 Teams service backups",
+		RunE:  listTeamsCmd,
+		Args:  cobra.NoArgs,
+	}
+}
+
+// lists the history of backup operations
+func listTeamsCmd(cmd *cobra.Command, args []string) error {
+	return genericListCommand(cmd, flags.BackupIDFV, path.TeamsService, args)
+}
+
+// ------------------------------------------------------------------------------------------------
+// backup details
+// ------------------------------------------------------------------------------------------------
+
+// `corso backup details teams [...]`
+func teamsDetailsCmd() *cobra.Command {
+	return &cobra.Command{
+		Use:   teamsServiceCommand,
+		Short: "Shows the details of an M365 Teams service backup",
+		RunE:  detailsTeamsCmd,
+		Args:  cobra.NoArgs,
+	}
+}
+
+// shows the details of a teams service backup.
+func detailsTeamsCmd(cmd *cobra.Command, args []string) error {
+	ctx := cmd.Context()
+
+	if utils.HasNoFlagsAndShownHelp(cmd) {
+		return nil
+	}
+
+	return Only(ctx, utils.ErrNotYetImplemented)
+}
+
+// ------------------------------------------------------------------------------------------------
+// backup delete
+// ------------------------------------------------------------------------------------------------
+
+// `corso backup delete teams [...]`
+func teamsDeleteCmd() *cobra.Command {
+	return &cobra.Command{
+		Use:   teamsServiceCommand,
+		Short: "Delete backed-up M365 Teams service data",
+		RunE:  deleteTeamsCmd,
+		Args:  cobra.NoArgs,
+	}
+}
+
+// deletes a teams service backup.
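+// as with groups, deletion is delegated to the shared genericDeleteCommand helper.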
+func deleteTeamsCmd(cmd *cobra.Command, args []string) error { + return genericDeleteCommand(cmd, path.TeamsService, flags.BackupIDFV, "Teams", args) +} + +// --------------------------------------------------------------------------- +// helpers +// --------------------------------------------------------------------------- + +func validateTeamBackupCreateFlags(teams []string) error { + if len(teams) == 0 { + return clues.New( + "requires one or more --" + + flags.TeamFN + " ids, or the wildcard --" + + flags.TeamFN + " *", + ) + } + + // TODO(meain) + // for _, d := range cats { + // if d != dataLibraries { + // return clues.New( + // d + " is an unrecognized data type; only " + dataLibraries + " is supported" + // ) + // } + // } + + return nil +} diff --git a/src/cli/backup/teams_test.go b/src/cli/backup/teams_test.go new file mode 100644 index 000000000..966830f82 --- /dev/null +++ b/src/cli/backup/teams_test.go @@ -0,0 +1,98 @@ +package backup + +import ( + "testing" + + "github.com/spf13/cobra" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + "github.com/alcionai/corso/src/cli/flags" + "github.com/alcionai/corso/src/internal/tester" +) + +type TeamsUnitSuite struct { + tester.Suite +} + +func TestTeamsUnitSuite(t *testing.T) { + suite.Run(t, &TeamsUnitSuite{Suite: tester.NewUnitSuite(t)}) +} + +func (suite *TeamsUnitSuite) TestAddTeamsCommands() { + expectUse := teamsServiceCommand + + table := []struct { + name string + use string + expectUse string + expectShort string + flags []string + expectRunE func(*cobra.Command, []string) error + }{ + { + "create teams", + createCommand, + expectUse + " " + teamsServiceCommandCreateUseSuffix, + teamsCreateCmd().Short, + []string{ + flags.CategoryDataFN, + flags.FailFastFN, + flags.FetchParallelismFN, + flags.SkipReduceFN, + flags.NoStatsFN, + }, + createTeamsCmd, + }, + { + "list teams", + listCommand, + expectUse, + teamsListCmd().Short, + []string{ + flags.BackupFN, + flags.FailedItemsFN, + flags.SkippedItemsFN, + flags.RecoveredErrorsFN, + }, + listTeamsCmd, + }, + { + "details teams", + detailsCommand, + expectUse + " " + teamsServiceCommandDetailsUseSuffix, + teamsDetailsCmd().Short, + []string{ + flags.BackupFN, + }, + detailsTeamsCmd, + }, + { + "delete teams", + deleteCommand, + expectUse + " " + teamsServiceCommandDeleteUseSuffix, + teamsDeleteCmd().Short, + []string{flags.BackupFN}, + deleteTeamsCmd, + }, + } + for _, test := range table { + suite.Run(test.name, func() { + t := suite.T() + + cmd := &cobra.Command{Use: test.use} + + c := addTeamsCommands(cmd) + require.NotNil(t, c) + + cmds := cmd.Commands() + require.Len(t, cmds, 1) + + child := cmds[0] + assert.Equal(t, test.expectUse, child.Use) + assert.Equal(t, test.expectShort, child.Short) + tester.AreSameFunc(t, test.expectRunE, child.RunE) + }) + } +} diff --git a/src/cli/flags/groups.go b/src/cli/flags/groups.go new file mode 100644 index 000000000..8aa6792ad --- /dev/null +++ b/src/cli/flags/groups.go @@ -0,0 +1,28 @@ +package flags + +import ( + "github.com/spf13/cobra" +) + +const ( + GroupFN = "group" +) + +var GroupFV []string + +func AddGroupDetailsAndRestoreFlags(cmd *cobra.Command) { + // TODO: implement flags +} + +// AddGroupFlag adds the --group flag, which accepts id or name values. +// TODO: need to decide what the appropriate "name" to accept here is. 
+// keepers thinks it's either DisplayName or MailNickname or Mail.
+// Mail is most accurate, MailNickname is accurate and shorter, but the end user
+// may not see either one visibly.
+// https://learn.microsoft.com/en-us/graph/api/group-list?view=graph-rest-1.0&tabs=http
+func AddGroupFlag(cmd *cobra.Command) {
+	cmd.Flags().StringSliceVar(
+		&GroupFV,
+		GroupFN, nil,
+		"Backup data by group; accepts '"+Wildcard+"' to select all groups.")
+}
diff --git a/src/cli/flags/teams.go b/src/cli/flags/teams.go
new file mode 100644
index 000000000..a3ca73e62
--- /dev/null
+++ b/src/cli/flags/teams.go
@@ -0,0 +1,28 @@
+package flags
+
+import (
+	"github.com/spf13/cobra"
+)
+
+const (
+	TeamFN = "team"
+)
+
+var TeamFV []string
+
+func AddTeamDetailsAndRestoreFlags(cmd *cobra.Command) {
+	// TODO: implement flags
+}
+
+// AddTeamFlag adds the --team flag, which accepts id or name values.
+// TODO: need to decide what the appropriate "name" to accept here is.
+// keepers thinks it's either DisplayName or MailNickname or Mail.
+// Mail is most accurate, MailNickname is accurate and shorter, but the end user
+// may not see either one visibly.
+// https://learn.microsoft.com/en-us/graph/api/team-list?view=graph-rest-1.0&tabs=http
+func AddTeamFlag(cmd *cobra.Command) {
+	cmd.Flags().StringSliceVar(
+		&TeamFV,
+		TeamFN, nil,
+		"Backup data by team; accepts '"+Wildcard+"' to select all teams.")
+}
diff --git a/src/cli/restore/groups.go b/src/cli/restore/groups.go
new file mode 100644
index 000000000..a98c9d088
--- /dev/null
+++ b/src/cli/restore/groups.go
@@ -0,0 +1,81 @@
+package restore
+
+import (
+	"github.com/spf13/cobra"
+	"github.com/spf13/pflag"
+
+	"github.com/alcionai/corso/src/cli/flags"
+	. "github.com/alcionai/corso/src/cli/print"
+	"github.com/alcionai/corso/src/cli/utils"
+)
+
+// called by restore.go to map subcommands to provider-specific handling.
+func addGroupsCommands(cmd *cobra.Command) *cobra.Command {
+	var (
+		c  *cobra.Command
+		fs *pflag.FlagSet
+	)
+
+	switch cmd.Use {
+	case restoreCommand:
+		c, fs = utils.AddCommand(cmd, groupsRestoreCmd(), utils.HideCommand())
+
+		c.Use = c.Use + " " + groupsServiceCommandUseSuffix
+
+		// Flags addition ordering should follow the order we want them to appear in help and docs:
+		// More generic (ex: --user) and more frequently used flags take precedence.
+		fs.SortFlags = false
+
+		flags.AddBackupIDFlag(c, true)
+		flags.AddRestorePermissionsFlag(c)
+		flags.AddRestoreConfigFlags(c)
+		flags.AddFailFastFlag(c)
+		flags.AddCorsoPassphaseFlags(c)
+		flags.AddAWSCredsFlags(c)
+		flags.AddAzureCredsFlags(c)
+	}
+
+	return c
+}
+
+// TODO: correct examples
+const (
+	groupsServiceCommand          = "groups"
+	groupsServiceCommandUseSuffix = "--backup <backupId>"
+
+	groupsServiceCommandRestoreExamples = `# Restore file with ID 98765abcdef in Bob's last backup (1234abcd...)
+corso restore groups --backup 1234abcd-12ab-cd34-56de-1234abcd --file 98765abcdef
+
+# Restore the file with ID 98765abcdef along with its associated permissions
+corso restore groups --backup 1234abcd-12ab-cd34-56de-1234abcd --file 98765abcdef --restore-permissions
+
+# Restore files named "FY2021 Planning.xlsx" in "Documents/Finance Reports"
+corso restore groups --backup 1234abcd-12ab-cd34-56de-1234abcd \
+    --file "FY2021 Planning.xlsx" --folder "Documents/Finance Reports"
+
+# Restore all files and folders in folder "Documents/Finance Reports" that were created before 2020
+corso restore groups --backup 1234abcd-12ab-cd34-56de-1234abcd \
+    --folder "Documents/Finance Reports" --file-created-before 2020-01-01T00:00:00`
+)
+
+// `corso restore groups [...]`
+func groupsRestoreCmd() *cobra.Command {
+	return &cobra.Command{
+		Use:     groupsServiceCommand,
+		Short:   "Restore M365 Groups service data",
+		RunE:    restoreGroupsCmd,
+		Args:    cobra.NoArgs,
+		Example: groupsServiceCommandRestoreExamples,
+	}
+}
+
+// processes a groups service restore.
+func restoreGroupsCmd(cmd *cobra.Command, args []string) error {
+	ctx := cmd.Context()
+
+	if utils.HasNoFlagsAndShownHelp(cmd) {
+		return nil
+	}
+
+	return Only(ctx, utils.ErrNotYetImplemented)
+}
diff --git a/src/cli/restore/groups_test.go b/src/cli/restore/groups_test.go
new file mode 100644
index 000000000..4ea7a7d19
--- /dev/null
+++ b/src/cli/restore/groups_test.go
@@ -0,0 +1,108 @@
+package restore
+
+import (
+	"bytes"
+	"testing"
+
+	"github.com/alcionai/clues"
+	"github.com/spf13/cobra"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"github.com/stretchr/testify/suite"
+
+	"github.com/alcionai/corso/src/cli/flags"
+	"github.com/alcionai/corso/src/cli/utils"
+	"github.com/alcionai/corso/src/cli/utils/testdata"
+	"github.com/alcionai/corso/src/internal/tester"
+)
+
+type GroupsUnitSuite struct {
+	tester.Suite
+}
+
+func TestGroupsUnitSuite(t *testing.T) {
+	suite.Run(t, &GroupsUnitSuite{Suite: tester.NewUnitSuite(t)})
+}
+
+func (suite *GroupsUnitSuite) TestAddGroupsCommands() {
+	expectUse := groupsServiceCommand + " " + groupsServiceCommandUseSuffix
+
+	table := []struct {
+		name        string
+		use         string
+		expectUse   string
+		expectShort string
+		expectRunE  func(*cobra.Command, []string) error
+	}{
+		{"restore groups", restoreCommand, expectUse, groupsRestoreCmd().Short, restoreGroupsCmd},
+	}
+	for _, test := range table {
+		suite.Run(test.name, func() {
+			t := suite.T()
+
+			cmd := &cobra.Command{Use: test.use}
+
+			// normally a persistent flag from the root.
+			// required to ensure a dry run.
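+			// (with --run-mode flag-test set in the args below, Execute() should
+			// stop short of any real repository access)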
+ flags.AddRunModeFlag(cmd, true) + + c := addGroupsCommands(cmd) + require.NotNil(t, c) + + cmds := cmd.Commands() + require.Len(t, cmds, 1) + + child := cmds[0] + assert.Equal(t, test.expectUse, child.Use) + assert.Equal(t, test.expectShort, child.Short) + tester.AreSameFunc(t, test.expectRunE, child.RunE) + + cmd.SetArgs([]string{ + "groups", + "--" + flags.RunModeFN, flags.RunModeFlagTest, + "--" + flags.BackupFN, testdata.BackupInput, + + "--" + flags.CollisionsFN, testdata.Collisions, + "--" + flags.DestinationFN, testdata.Destination, + "--" + flags.ToResourceFN, testdata.ToResource, + + "--" + flags.AWSAccessKeyFN, testdata.AWSAccessKeyID, + "--" + flags.AWSSecretAccessKeyFN, testdata.AWSSecretAccessKey, + "--" + flags.AWSSessionTokenFN, testdata.AWSSessionToken, + + "--" + flags.AzureClientIDFN, testdata.AzureClientID, + "--" + flags.AzureClientTenantFN, testdata.AzureTenantID, + "--" + flags.AzureClientSecretFN, testdata.AzureClientSecret, + + "--" + flags.CorsoPassphraseFN, testdata.CorsoPassphrase, + + // bool flags + "--" + flags.RestorePermissionsFN, + }) + + cmd.SetOut(new(bytes.Buffer)) // drop output + cmd.SetErr(new(bytes.Buffer)) // drop output + err := cmd.Execute() + // assert.NoError(t, err, clues.ToCore(err)) + assert.ErrorIs(t, err, utils.ErrNotYetImplemented, clues.ToCore(err)) + + opts := utils.MakeGroupsOpts(cmd) + assert.Equal(t, testdata.BackupInput, flags.BackupIDFV) + + assert.Equal(t, testdata.Collisions, opts.RestoreCfg.Collisions) + assert.Equal(t, testdata.Destination, opts.RestoreCfg.Destination) + assert.Equal(t, testdata.ToResource, opts.RestoreCfg.ProtectedResource) + + assert.Equal(t, testdata.AWSAccessKeyID, flags.AWSAccessKeyFV) + assert.Equal(t, testdata.AWSSecretAccessKey, flags.AWSSecretAccessKeyFV) + assert.Equal(t, testdata.AWSSessionToken, flags.AWSSessionTokenFV) + + assert.Equal(t, testdata.AzureClientID, flags.AzureClientIDFV) + assert.Equal(t, testdata.AzureTenantID, flags.AzureClientTenantFV) + assert.Equal(t, testdata.AzureClientSecret, flags.AzureClientSecretFV) + + assert.Equal(t, testdata.CorsoPassphrase, flags.CorsoPassphraseFV) + assert.True(t, flags.RestorePermissionsFV) + }) + } +} diff --git a/src/cli/restore/teams.go b/src/cli/restore/teams.go new file mode 100644 index 000000000..59623024a --- /dev/null +++ b/src/cli/restore/teams.go @@ -0,0 +1,81 @@ +package restore + +import ( + "github.com/spf13/cobra" + "github.com/spf13/pflag" + + "github.com/alcionai/corso/src/cli/flags" + . "github.com/alcionai/corso/src/cli/print" + "github.com/alcionai/corso/src/cli/utils" +) + +// called by restore.go to map subcommands to provider-specific handling. +func addTeamsCommands(cmd *cobra.Command) *cobra.Command { + var ( + c *cobra.Command + fs *pflag.FlagSet + ) + + switch cmd.Use { + case restoreCommand: + c, fs = utils.AddCommand(cmd, teamsRestoreCmd(), utils.HideCommand()) + + c.Use = c.Use + " " + teamsServiceCommandUseSuffix + + // Flags addition ordering should follow the order we want them to appear in help and docs: + // More generic (ex: --user) and more frequently used flags take precedence. 
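+		// SortFlags stays false so help output keeps this declaration order
+		// instead of sorting alphabetically.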
+		fs.SortFlags = false
+
+		flags.AddBackupIDFlag(c, true)
+		flags.AddRestorePermissionsFlag(c)
+		flags.AddRestoreConfigFlags(c)
+		flags.AddFailFastFlag(c)
+		flags.AddCorsoPassphaseFlags(c)
+		flags.AddAWSCredsFlags(c)
+		flags.AddAzureCredsFlags(c)
+	}
+
+	return c
+}
+
+// TODO: correct examples
+const (
+	teamsServiceCommand          = "teams"
+	teamsServiceCommandUseSuffix = "--backup <backupId>"
+
+	teamsServiceCommandRestoreExamples = `# Restore file with ID 98765abcdef in Bob's last backup (1234abcd...)
+corso restore teams --backup 1234abcd-12ab-cd34-56de-1234abcd --file 98765abcdef
+
+# Restore the file with ID 98765abcdef along with its associated permissions
+corso restore teams --backup 1234abcd-12ab-cd34-56de-1234abcd --file 98765abcdef --restore-permissions
+
+# Restore files named "FY2021 Planning.xlsx" in "Documents/Finance Reports"
+corso restore teams --backup 1234abcd-12ab-cd34-56de-1234abcd \
+    --file "FY2021 Planning.xlsx" --folder "Documents/Finance Reports"
+
+# Restore all files and folders in folder "Documents/Finance Reports" that were created before 2020
+corso restore teams --backup 1234abcd-12ab-cd34-56de-1234abcd \
+    --folder "Documents/Finance Reports" --file-created-before 2020-01-01T00:00:00`
+)
+
+// `corso restore teams [...]`
+func teamsRestoreCmd() *cobra.Command {
+	return &cobra.Command{
+		Use:     teamsServiceCommand,
+		Short:   "Restore M365 Teams service data",
+		RunE:    restoreTeamsCmd,
+		Args:    cobra.NoArgs,
+		Example: teamsServiceCommandRestoreExamples,
+	}
+}
+
+// processes a teams service restore.
+func restoreTeamsCmd(cmd *cobra.Command, args []string) error {
+	ctx := cmd.Context()
+
+	if utils.HasNoFlagsAndShownHelp(cmd) {
+		return nil
+	}
+
+	return Only(ctx, utils.ErrNotYetImplemented)
+}
diff --git a/src/cli/restore/teams_test.go b/src/cli/restore/teams_test.go
new file mode 100644
index 000000000..ac502e950
--- /dev/null
+++ b/src/cli/restore/teams_test.go
@@ -0,0 +1,108 @@
+package restore
+
+import (
+	"bytes"
+	"testing"
+
+	"github.com/alcionai/clues"
+	"github.com/spf13/cobra"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"github.com/stretchr/testify/suite"
+
+	"github.com/alcionai/corso/src/cli/flags"
+	"github.com/alcionai/corso/src/cli/utils"
+	"github.com/alcionai/corso/src/cli/utils/testdata"
+	"github.com/alcionai/corso/src/internal/tester"
+)
+
+type TeamsUnitSuite struct {
+	tester.Suite
+}
+
+func TestTeamsUnitSuite(t *testing.T) {
+	suite.Run(t, &TeamsUnitSuite{Suite: tester.NewUnitSuite(t)})
+}
+
+func (suite *TeamsUnitSuite) TestAddTeamsCommands() {
+	expectUse := teamsServiceCommand + " " + teamsServiceCommandUseSuffix
+
+	table := []struct {
+		name        string
+		use         string
+		expectUse   string
+		expectShort string
+		expectRunE  func(*cobra.Command, []string) error
+	}{
+		{"restore teams", restoreCommand, expectUse, teamsRestoreCmd().Short, restoreTeamsCmd},
+	}
+	for _, test := range table {
+		suite.Run(test.name, func() {
+			t := suite.T()
+
+			cmd := &cobra.Command{Use: test.use}
+
+			// normally a persistent flag from the root.
+			// required to ensure a dry run.
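+			// (--run-mode flag-test, passed below, keeps Execute() from
+			// connecting to a real repository)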
+			flags.AddRunModeFlag(cmd, true)
+
+			c := addTeamsCommands(cmd)
+			require.NotNil(t, c)
+
+			cmds := cmd.Commands()
+			require.Len(t, cmds, 1)
+
+			child := cmds[0]
+			assert.Equal(t, test.expectUse, child.Use)
+			assert.Equal(t, test.expectShort, child.Short)
+			tester.AreSameFunc(t, test.expectRunE, child.RunE)
+
+			cmd.SetArgs([]string{
+				"teams",
+				"--" + flags.RunModeFN, flags.RunModeFlagTest,
+				"--" + flags.BackupFN, testdata.BackupInput,
+
+				"--" + flags.CollisionsFN, testdata.Collisions,
+				"--" + flags.DestinationFN, testdata.Destination,
+				"--" + flags.ToResourceFN, testdata.ToResource,
+
+				"--" + flags.AWSAccessKeyFN, testdata.AWSAccessKeyID,
+				"--" + flags.AWSSecretAccessKeyFN, testdata.AWSSecretAccessKey,
+				"--" + flags.AWSSessionTokenFN, testdata.AWSSessionToken,
+
+				"--" + flags.AzureClientIDFN, testdata.AzureClientID,
+				"--" + flags.AzureClientTenantFN, testdata.AzureTenantID,
+				"--" + flags.AzureClientSecretFN, testdata.AzureClientSecret,
+
+				"--" + flags.CorsoPassphraseFN, testdata.CorsoPassphrase,
+
+				// bool flags
+				"--" + flags.RestorePermissionsFN,
+			})
+
+			cmd.SetOut(new(bytes.Buffer)) // drop output
+			cmd.SetErr(new(bytes.Buffer)) // drop output
+			err := cmd.Execute()
+			// assert.NoError(t, err, clues.ToCore(err))
+			assert.ErrorIs(t, err, utils.ErrNotYetImplemented, clues.ToCore(err))
+
+			opts := utils.MakeTeamsOpts(cmd)
+			assert.Equal(t, testdata.BackupInput, flags.BackupIDFV)
+
+			assert.Equal(t, testdata.Collisions, opts.RestoreCfg.Collisions)
+			assert.Equal(t, testdata.Destination, opts.RestoreCfg.Destination)
+			assert.Equal(t, testdata.ToResource, opts.RestoreCfg.ProtectedResource)
+
+			assert.Equal(t, testdata.AWSAccessKeyID, flags.AWSAccessKeyFV)
+			assert.Equal(t, testdata.AWSSecretAccessKey, flags.AWSSecretAccessKeyFV)
+			assert.Equal(t, testdata.AWSSessionToken, flags.AWSSessionTokenFV)
+
+			assert.Equal(t, testdata.AzureClientID, flags.AzureClientIDFV)
+			assert.Equal(t, testdata.AzureTenantID, flags.AzureClientTenantFV)
+			assert.Equal(t, testdata.AzureClientSecret, flags.AzureClientSecretFV)
+
+			assert.Equal(t, testdata.CorsoPassphrase, flags.CorsoPassphraseFV)
+			assert.True(t, flags.RestorePermissionsFV)
+		})
+	}
+}
diff --git a/src/cli/utils/groups.go b/src/cli/utils/groups.go
new file mode 100644
index 000000000..9b0827d46
--- /dev/null
+++ b/src/cli/utils/groups.go
@@ -0,0 +1,30 @@
+package utils
+
+import (
+	"github.com/spf13/cobra"
+
+	"github.com/alcionai/corso/src/cli/flags"
+)
+
+type GroupsOpts struct {
+	Groups []string
+
+	RestoreCfg RestoreCfgOpts
+	ExportCfg  ExportCfgOpts
+
+	Populated flags.PopulatedFlags
+}
+
+func MakeGroupsOpts(cmd *cobra.Command) GroupsOpts {
+	return GroupsOpts{
+		Groups: flags.GroupFV,
+
+		RestoreCfg: makeRestoreCfgOpts(cmd),
+		ExportCfg:  makeExportCfgOpts(cmd),
+
+		// populated contains the list of flags that appear in the
+		// command, according to pflags. Use this to differentiate
+		// between an "empty" and a "missing" value.
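+		// e.g. passing `--backup ""` yields a populated-but-empty value, while
+		// omitting the flag leaves it out of the populated set entirely.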
+		Populated: flags.GetPopulatedFlags(cmd),
+	}
+}
diff --git a/src/cli/utils/teams.go b/src/cli/utils/teams.go
new file mode 100644
index 000000000..365e7971e
--- /dev/null
+++ b/src/cli/utils/teams.go
@@ -0,0 +1,30 @@
+package utils
+
+import (
+	"github.com/spf13/cobra"
+
+	"github.com/alcionai/corso/src/cli/flags"
+)
+
+type TeamsOpts struct {
+	Teams []string
+
+	RestoreCfg RestoreCfgOpts
+	ExportCfg  ExportCfgOpts
+
+	Populated flags.PopulatedFlags
+}
+
+func MakeTeamsOpts(cmd *cobra.Command) TeamsOpts {
+	return TeamsOpts{
+		Teams: flags.TeamFV,
+
+		RestoreCfg: makeRestoreCfgOpts(cmd),
+		ExportCfg:  makeExportCfgOpts(cmd),
+
+		// populated contains the list of flags that appear in the
+		// command, according to pflags. Use this to differentiate
+		// between an "empty" and a "missing" value.
+		Populated: flags.GetPopulatedFlags(cmd),
+	}
+}
diff --git a/src/cli/utils/utils.go b/src/cli/utils/utils.go
index a542d55f3..5a639474a 100644
--- a/src/cli/utils/utils.go
+++ b/src/cli/utils/utils.go
@@ -19,6 +19,8 @@ import (
 	"github.com/alcionai/corso/src/pkg/storage"
 )
 
+var ErrNotYetImplemented = clues.New("not yet implemented")
+
 func GetAccountAndConnect(
 	ctx context.Context,
 	pst path.ServiceType,
diff --git a/src/pkg/path/service_type.go b/src/pkg/path/service_type.go
index 0028bca4b..343117857 100644
--- a/src/pkg/path/service_type.go
+++ b/src/pkg/path/service_type.go
@@ -31,6 +31,8 @@ const (
 	SharePointMetadataService // sharepointMetadata
 	GroupsService             // groups
 	GroupsMetadataService     // groupsMetadata
+	TeamsService              // teams
+	TeamsMetadataService      // teamsMetadata
 )
 
 func toServiceType(service string) ServiceType {

From 9abd9d4f96312a2dc5314d60a740ee6670ebb282 Mon Sep 17 00:00:00 2001
From: Keepers
Date: Fri, 18 Aug 2023 16:45:21 -0600
Subject: [PATCH 04/32] remove all uses of iota (#4046)

I've had to catch gotchas caused by contributors adding a value in the
middle of an iota list, and have dealt with prior bugs introduced the
same way, too many times to feel safe about its continued use.

This PR removes the use of iota from all const declarations. The intent
is to disallow the use of iota within the codebase.

---

#### Does this PR need a docs update or release note?
- [x] :no_entry: No #### Type of change - [x] :broom: Tech Debt/Cleanup #### Issue(s) * #3993 #### Test Plan - [x] :zap: Unit test - [x] :green_heart: E2E --- src/internal/data/implementations.go | 8 ++--- .../m365/collection/drive/collections.go | 6 ++-- .../collection/drive/metadata/permissions.go | 4 +-- src/internal/m365/collection/site/backup.go | 6 ++-- .../m365/collection/site/backup_test.go | 4 +++ .../m365/collection/site/collection.go | 26 +++++++++------- .../m365/collection/site/collection_test.go | 11 ++++--- .../collection/site/datacategory_string.go | 27 ----------------- .../m365/service/sharepoint/backup.go | 2 ++ src/internal/m365/support/status.go | 8 ++--- src/internal/model/model.go | 14 ++++----- src/internal/operations/operation.go | 10 +++---- src/pkg/account/account.go | 4 +-- src/pkg/backup/details/iteminfo.go | 15 +++++----- src/pkg/control/repository/repo.go | 12 +++----- src/pkg/path/category_type.go | 18 +++++------ src/pkg/path/service_type.go | 30 ++++++++++++------- src/pkg/path/servicetype_string.go | 6 ++-- src/pkg/selectors/selectors.go | 10 +++---- src/pkg/storage/storage.go | 4 +-- 20 files changed, 110 insertions(+), 115 deletions(-) delete mode 100644 src/internal/m365/collection/site/datacategory_string.go diff --git a/src/internal/data/implementations.go b/src/internal/data/implementations.go index 15b7dffb3..d75bd93b6 100644 --- a/src/internal/data/implementations.go +++ b/src/internal/data/implementations.go @@ -13,10 +13,10 @@ var ErrNotFound = clues.New("not found") type CollectionState int const ( - NewState = CollectionState(iota) - NotMovedState - MovedState - DeletedState + NewState CollectionState = 0 + NotMovedState CollectionState = 1 + MovedState CollectionState = 2 + DeletedState CollectionState = 3 ) type FetchRestoreCollection struct { diff --git a/src/internal/m365/collection/drive/collections.go b/src/internal/m365/collection/drive/collections.go index a2161f779..6964774b8 100644 --- a/src/internal/m365/collection/drive/collections.go +++ b/src/internal/m365/collection/drive/collections.go @@ -31,13 +31,13 @@ type collectionScope int const ( // CollectionScopeUnknown is used when we don't know and don't need // to know the kind, like in the case of deletes - CollectionScopeUnknown collectionScope = iota + CollectionScopeUnknown collectionScope = 0 // CollectionScopeFolder is used for regular folder collections - CollectionScopeFolder + CollectionScopeFolder collectionScope = 1 // CollectionScopePackage is used to represent OneNote items - CollectionScopePackage + CollectionScopePackage collectionScope = 2 ) const restrictedDirectory = "Site Pages" diff --git a/src/internal/m365/collection/drive/metadata/permissions.go b/src/internal/m365/collection/drive/metadata/permissions.go index ec0cc22f0..53f549110 100644 --- a/src/internal/m365/collection/drive/metadata/permissions.go +++ b/src/internal/m365/collection/drive/metadata/permissions.go @@ -14,8 +14,8 @@ import ( type SharingMode int const ( - SharingModeCustom = SharingMode(iota) - SharingModeInherited + SharingModeCustom SharingMode = 0 + SharingModeInherited SharingMode = 1 ) type GV2Type string diff --git a/src/internal/m365/collection/site/backup.go b/src/internal/m365/collection/site/backup.go index 14f1333be..8357d9512 100644 --- a/src/internal/m365/collection/site/backup.go +++ b/src/internal/m365/collection/site/backup.go @@ -59,6 +59,7 @@ func CollectPages( bpc inject.BackupProducerConfig, creds account.M365Config, ac api.Client, + scope selectors.SharePointScope, su 
support.StatusUpdater, errs *fault.Bus, ) ([]data.BackupCollection, error) { @@ -105,7 +106,7 @@ func CollectPages( collection := NewCollection( dir, ac, - Pages, + scope, su, bpc.Options) collection.SetBetaService(betaService) @@ -122,6 +123,7 @@ func CollectLists( bpc inject.BackupProducerConfig, ac api.Client, tenantID string, + scope selectors.SharePointScope, su support.StatusUpdater, errs *fault.Bus, ) ([]data.BackupCollection, error) { @@ -156,7 +158,7 @@ func CollectLists( collection := NewCollection( dir, ac, - List, + scope, su, bpc.Options) collection.AddJob(tuple.ID) diff --git a/src/internal/m365/collection/site/backup_test.go b/src/internal/m365/collection/site/backup_test.go index de0d91c50..46dff1a97 100644 --- a/src/internal/m365/collection/site/backup_test.go +++ b/src/internal/m365/collection/site/backup_test.go @@ -16,6 +16,7 @@ import ( "github.com/alcionai/corso/src/internal/version" "github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/fault" + "github.com/alcionai/corso/src/pkg/selectors" "github.com/alcionai/corso/src/pkg/services/m365/api" ) @@ -61,11 +62,14 @@ func (suite *SharePointPagesSuite) TestCollectPages() { ProtectedResource: mock.NewProvider(siteID, siteID), } + sel := selectors.NewSharePointBackup([]string{siteID}) + col, err := CollectPages( ctx, bpc, creds, ac, + sel.Lists(selectors.Any())[0], (&MockGraphService{}).UpdateStatus, fault.New(true)) assert.NoError(t, err, clues.ToCore(err)) diff --git a/src/internal/m365/collection/site/collection.go b/src/internal/m365/collection/site/collection.go index a293e40a0..a6196a4ed 100644 --- a/src/internal/m365/collection/site/collection.go +++ b/src/internal/m365/collection/site/collection.go @@ -21,19 +21,23 @@ import ( "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/logger" "github.com/alcionai/corso/src/pkg/path" + "github.com/alcionai/corso/src/pkg/selectors" "github.com/alcionai/corso/src/pkg/services/m365/api" ) type DataCategory int +// channel sizes +const ( + collectionChannelBufferSize = 50 + fetchChannelSize = 5 +) + //go:generate stringer -type=DataCategory const ( - collectionChannelBufferSize = 50 - fetchChannelSize = 5 - Unknown DataCategory = iota - List - Drive - Pages + Unknown DataCategory = 0 + List DataCategory = 1 + Pages DataCategory = 2 ) var ( @@ -53,7 +57,7 @@ type Collection struct { // jobs contain the SharePoint.Site.ListIDs for the associated list(s). 
jobs []string // M365 IDs of the items of this collection - category DataCategory + category path.CategoryType client api.Sites ctrl control.Options betaService *betaAPI.BetaService @@ -64,7 +68,7 @@ type Collection struct { func NewCollection( folderPath path.Path, ac api.Client, - category DataCategory, + scope selectors.SharePointScope, statusUpdater support.StatusUpdater, ctrlOpts control.Options, ) *Collection { @@ -74,7 +78,7 @@ func NewCollection( data: make(chan data.Item, collectionChannelBufferSize), client: ac.Sites(), statusUpdater: statusUpdater, - category: category, + category: scope.Category().PathType(), ctrl: ctrlOpts, } @@ -198,9 +202,9 @@ func (sc *Collection) runPopulate( // Switch retrieval function based on category switch sc.category { - case List: + case path.ListsCategory: metrics, err = sc.retrieveLists(ctx, writer, colProgress, errs) - case Pages: + case path.PagesCategory: metrics, err = sc.retrievePages(ctx, sc.client, writer, colProgress, errs) } diff --git a/src/internal/m365/collection/site/collection_test.go b/src/internal/m365/collection/site/collection_test.go index f3f19c7e4..390d5cd14 100644 --- a/src/internal/m365/collection/site/collection_test.go +++ b/src/internal/m365/collection/site/collection_test.go @@ -23,6 +23,7 @@ import ( "github.com/alcionai/corso/src/pkg/control/testdata" "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/path" + "github.com/alcionai/corso/src/pkg/selectors" "github.com/alcionai/corso/src/pkg/services/m365/api" ) @@ -82,16 +83,18 @@ func (suite *SharePointCollectionSuite) TestCollection_Items() { dirRoot = "directory" ) + sel := selectors.NewSharePointBackup([]string{"site"}) + tables := []struct { name, itemName string - category DataCategory + scope selectors.SharePointScope getDir func(t *testing.T) path.Path getItem func(t *testing.T, itemName string) *Item }{ { name: "List", itemName: "MockListing", - category: List, + scope: sel.Lists(selectors.Any())[0], getDir: func(t *testing.T) path.Path { dir, err := path.Build( tenant, @@ -127,7 +130,7 @@ func (suite *SharePointCollectionSuite) TestCollection_Items() { { name: "Pages", itemName: "MockPages", - category: Pages, + scope: sel.Pages(selectors.Any())[0], getDir: func(t *testing.T) path.Path { dir, err := path.Build( tenant, @@ -166,7 +169,7 @@ func (suite *SharePointCollectionSuite) TestCollection_Items() { col := NewCollection( test.getDir(t), suite.ac, - test.category, + test.scope, nil, control.DefaultOptions()) col.data <- test.getItem(t, test.itemName) diff --git a/src/internal/m365/collection/site/datacategory_string.go b/src/internal/m365/collection/site/datacategory_string.go deleted file mode 100644 index eac0006cc..000000000 --- a/src/internal/m365/collection/site/datacategory_string.go +++ /dev/null @@ -1,27 +0,0 @@ -// Code generated by "stringer -type=DataCategory"; DO NOT EDIT. - -package site - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[Unknown-2] - _ = x[List-3] - _ = x[Drive-4] - _ = x[Pages-5] -} - -const _DataCategory_name = "UnknownListDrivePages" - -var _DataCategory_index = [...]uint8{0, 7, 11, 16, 21} - -func (i DataCategory) String() string { - i -= 2 - if i < 0 || i >= DataCategory(len(_DataCategory_index)-1) { - return "DataCategory(" + strconv.FormatInt(int64(i+2), 10) + ")" - } - return _DataCategory_name[_DataCategory_index[i]:_DataCategory_index[i+1]] -} diff --git a/src/internal/m365/service/sharepoint/backup.go b/src/internal/m365/service/sharepoint/backup.go index 479d4ac24..c4604e609 100644 --- a/src/internal/m365/service/sharepoint/backup.go +++ b/src/internal/m365/service/sharepoint/backup.go @@ -63,6 +63,7 @@ func ProduceBackupCollections( bpc, ac, creds.AzureTenantID, + scope, su, errs) if err != nil { @@ -95,6 +96,7 @@ func ProduceBackupCollections( bpc, creds, ac, + scope, su, errs) if err != nil { diff --git a/src/internal/m365/support/status.go b/src/internal/m365/support/status.go index b1a7d2449..5e85857eb 100644 --- a/src/internal/m365/support/status.go +++ b/src/internal/m365/support/status.go @@ -37,10 +37,10 @@ type Operation int //go:generate stringer -type=Operation const ( - OpUnknown Operation = iota - Backup - Restore - Export + OpUnknown Operation = 0 + Backup Operation = 1 + Restore Operation = 2 + Export Operation = 3 ) // Constructor for ConnectorOperationStatus. If the counts do not agree, an error is returned. diff --git a/src/internal/model/model.go b/src/internal/model/model.go index dcf0dce51..a3f25c820 100644 --- a/src/internal/model/model.go +++ b/src/internal/model/model.go @@ -22,12 +22,12 @@ func (id StableID) String() string { // //go:generate go run golang.org/x/tools/cmd/stringer -type=Schema const ( - UnknownSchema = Schema(iota) - BackupOpSchema - RestoreOpSchema - BackupSchema - BackupDetailsSchema - RepositorySchema + UnknownSchema Schema = 0 + BackupOpSchema Schema = 1 + RestoreOpSchema Schema = 2 + BackupSchema Schema = 3 + BackupDetailsSchema Schema = 4 + RepositorySchema Schema = 5 ) // common tags for filtering @@ -38,7 +38,7 @@ const ( MergeBackup = "merge-backup" ) -// Valid returns true if the ModelType value fits within the iota range. +// Valid returns true if the ModelType value fits within the const range. 
func (mt Schema) Valid() bool { return mt > 0 && mt < RepositorySchema+1 } diff --git a/src/internal/operations/operation.go b/src/internal/operations/operation.go index 35bf9fb19..c400e52cd 100644 --- a/src/internal/operations/operation.go +++ b/src/internal/operations/operation.go @@ -33,11 +33,11 @@ type OpStatus int //go:generate stringer -type=OpStatus -linecomment const ( - Unknown OpStatus = iota // Status Unknown - InProgress // In Progress - Completed // Completed - Failed // Failed - NoData // No Data + Unknown OpStatus = 0 // Status Unknown + InProgress OpStatus = 1 // In Progress + Completed OpStatus = 2 // Completed + Failed OpStatus = 3 // Failed + NoData OpStatus = 4 // No Data ) // -------------------------------------------------------------------------------- diff --git a/src/pkg/account/account.go b/src/pkg/account/account.go index 12b8d679c..4c1591818 100644 --- a/src/pkg/account/account.go +++ b/src/pkg/account/account.go @@ -10,8 +10,8 @@ type accountProvider int //go:generate stringer -type=accountProvider -linecomment const ( - ProviderUnknown accountProvider = iota // Unknown Provider - ProviderM365 // M365 + ProviderUnknown accountProvider = 0 // Unknown Provider + ProviderM365 accountProvider = 1 // M365 ) // storage parsing errors diff --git a/src/pkg/backup/details/iteminfo.go b/src/pkg/backup/details/iteminfo.go index 9912fb6d2..fbd6a92cd 100644 --- a/src/pkg/backup/details/iteminfo.go +++ b/src/pkg/backup/details/iteminfo.go @@ -20,16 +20,17 @@ type ItemType int // Additionally, any itemType directly assigned a number should not be altered. // This applies to OneDriveItem and FolderItem const ( - UnknownType ItemType = iota // 0, global unknown value + UnknownType ItemType = 0 // Exchange (00x) - ExchangeContact - ExchangeEvent - ExchangeMail + ExchangeContact ItemType = 1 + ExchangeEvent ItemType = 2 + ExchangeMail ItemType = 3 + // SharePoint (10x) - SharePointLibrary ItemType = iota + 97 // 100 - SharePointList // 101... - SharePointPage + SharePointLibrary ItemType = 101 + SharePointList ItemType = 102 + SharePointPage ItemType = 103 // OneDrive (20x) OneDriveItem ItemType = 205 diff --git a/src/pkg/control/repository/repo.go b/src/pkg/control/repository/repo.go index 0d80a1fda..6d1869f91 100644 --- a/src/pkg/control/repository/repo.go +++ b/src/pkg/control/repository/repo.go @@ -25,12 +25,10 @@ type Maintenance struct { type MaintenanceType int -// Can't be reordered as we rely on iota for numbering. -// //go:generate stringer -type=MaintenanceType -linecomment const ( - CompleteMaintenance MaintenanceType = iota // complete - MetadataMaintenance // metadata + CompleteMaintenance MaintenanceType = 0 // complete + MetadataMaintenance MaintenanceType = 1 // metadata ) var StringToMaintenanceType = map[string]MaintenanceType{ @@ -40,16 +38,14 @@ var StringToMaintenanceType = map[string]MaintenanceType{ type MaintenanceSafety int -// Can't be reordered as we rely on iota for numbering. -// //go:generate stringer -type=MaintenanceSafety -linecomment const ( - FullMaintenanceSafety MaintenanceSafety = iota + FullMaintenanceSafety MaintenanceSafety = 0 //nolint:lll // Use only if there's no other kopia instances accessing the repo and the // storage backend is strongly consistent. 
// https://github.com/kopia/kopia/blob/f9de453efc198b6e993af8922f953a7e5322dc5f/repo/maintenance/maintenance_safety.go#L42 - NoMaintenanceSafety + NoMaintenanceSafety MaintenanceSafety = 1 ) type RetentionMode int diff --git a/src/pkg/path/category_type.go b/src/pkg/path/category_type.go index 4a992176f..5f8009e5d 100644 --- a/src/pkg/path/category_type.go +++ b/src/pkg/path/category_type.go @@ -17,15 +17,15 @@ type CategoryType int //go:generate stringer -type=CategoryType -linecomment const ( - UnknownCategory CategoryType = iota - EmailCategory // email - ContactsCategory // contacts - EventsCategory // events - FilesCategory // files - ListsCategory // lists - LibrariesCategory // libraries - PagesCategory // pages - DetailsCategory // details + UnknownCategory CategoryType = 0 + EmailCategory CategoryType = 1 // email + ContactsCategory CategoryType = 2 // contacts + EventsCategory CategoryType = 3 // events + FilesCategory CategoryType = 4 // files + ListsCategory CategoryType = 5 // lists + LibrariesCategory CategoryType = 6 // libraries + PagesCategory CategoryType = 7 // pages + DetailsCategory CategoryType = 8 // details ) func ToCategoryType(category string) CategoryType { diff --git a/src/pkg/path/service_type.go b/src/pkg/path/service_type.go index 343117857..a4a99ec6c 100644 --- a/src/pkg/path/service_type.go +++ b/src/pkg/path/service_type.go @@ -22,17 +22,17 @@ type ServiceType int //go:generate stringer -type=ServiceType -linecomment const ( - UnknownService ServiceType = iota - ExchangeService // exchange - OneDriveService // onedrive - SharePointService // sharepoint - ExchangeMetadataService // exchangeMetadata - OneDriveMetadataService // onedriveMetadata - SharePointMetadataService // sharepointMetadata - GroupsService // groups - GroupsMetadataService // groupsMetadata - TeamsService // teams - TeamsMetadataService // teamsMetadata + UnknownService ServiceType = 0 + ExchangeService ServiceType = 1 // exchange + OneDriveService ServiceType = 2 // onedrive + SharePointService ServiceType = 3 // sharepoint + ExchangeMetadataService ServiceType = 4 // exchangeMetadata + OneDriveMetadataService ServiceType = 5 // onedriveMetadata + SharePointMetadataService ServiceType = 6 // sharepointMetadata + GroupsService ServiceType = 7 // groups + GroupsMetadataService ServiceType = 8 // groupsMetadata + TeamsService ServiceType = 9 // teams + TeamsMetadataService ServiceType = 10 // teamsMetadata ) func toServiceType(service string) ServiceType { @@ -45,12 +45,20 @@ func toServiceType(service string) ServiceType { return OneDriveService case strings.ToLower(SharePointService.String()): return SharePointService + case strings.ToLower(GroupsService.String()): + return GroupsService + case strings.ToLower(TeamsService.String()): + return TeamsService case strings.ToLower(ExchangeMetadataService.String()): return ExchangeMetadataService case strings.ToLower(OneDriveMetadataService.String()): return OneDriveMetadataService case strings.ToLower(SharePointMetadataService.String()): return SharePointMetadataService + case strings.ToLower(GroupsMetadataService.String()): + return GroupsMetadataService + case strings.ToLower(TeamsMetadataService.String()): + return TeamsMetadataService default: return UnknownService } diff --git a/src/pkg/path/servicetype_string.go b/src/pkg/path/servicetype_string.go index 6fa499364..4b9ab16ec 100644 --- a/src/pkg/path/servicetype_string.go +++ b/src/pkg/path/servicetype_string.go @@ -17,11 +17,13 @@ func _() { _ = x[SharePointMetadataService-6] _ = 
x[GroupsService-7] _ = x[GroupsMetadataService-8] + _ = x[TeamsService-9] + _ = x[TeamsMetadataService-10] } -const _ServiceType_name = "UnknownServiceexchangeonedrivesharepointexchangeMetadataonedriveMetadatasharepointMetadatagroupsgroupsMetadata" +const _ServiceType_name = "UnknownServiceexchangeonedrivesharepointexchangeMetadataonedriveMetadatasharepointMetadatagroupsgroupsMetadatateamsteamsMetadata" -var _ServiceType_index = [...]uint8{0, 14, 22, 30, 40, 56, 72, 90, 96, 110} +var _ServiceType_index = [...]uint8{0, 14, 22, 30, 40, 56, 72, 90, 96, 110, 115, 128} func (i ServiceType) String() string { if i < 0 || i >= ServiceType(len(_ServiceType_index)-1) { diff --git a/src/pkg/selectors/selectors.go b/src/pkg/selectors/selectors.go index ac85f75c3..3a18c2bd0 100644 --- a/src/pkg/selectors/selectors.go +++ b/src/pkg/selectors/selectors.go @@ -20,11 +20,11 @@ type service int //go:generate stringer -type=service -linecomment const ( - ServiceUnknown service = iota // Unknown Service - ServiceExchange // Exchange - ServiceOneDrive // OneDrive - ServiceSharePoint // SharePoint - ServiceGroups // Groups + ServiceUnknown service = 0 // Unknown Service + ServiceExchange service = 1 // Exchange + ServiceOneDrive service = 2 // OneDrive + ServiceSharePoint service = 3 // SharePoint + ServiceGroups service = 4 // Groups ) var serviceToPathType = map[service]path.ServiceType{ diff --git a/src/pkg/storage/storage.go b/src/pkg/storage/storage.go index 673503587..e197f4081 100644 --- a/src/pkg/storage/storage.go +++ b/src/pkg/storage/storage.go @@ -12,8 +12,8 @@ type storageProvider int //go:generate stringer -type=storageProvider -linecomment const ( - ProviderUnknown storageProvider = iota // Unknown Provider - ProviderS3 // S3 + ProviderUnknown storageProvider = 0 // Unknown Provider + ProviderS3 storageProvider = 1 // S3 ) // storage parsing errors From 2ba349797f50bf1afdb24843e4395d31ce21a186 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 21 Aug 2023 05:54:57 +0000 Subject: [PATCH 05/32] =?UTF-8?q?=E2=AC=86=EF=B8=8F=20Bump=20sass=20from?= =?UTF-8?q?=201.65.1=20to=201.66.1=20in=20/website=20(#4072)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [sass](https://github.com/sass/dart-sass) from 1.65.1 to 1.66.1.
Release notes

Sourced from sass's releases.

Dart Sass 1.66.1

To install Sass 1.66.1, download one of the packages below and add it to your PATH, or see the Sass website for full installation instructions.

Changes

JS API

  • Fix a bug where Sass compilation could crash in strict mode if passed a callback that threw a string, boolean, number, symbol, or bignum.

See the full changelog for changes in earlier releases.

Dart Sass 1.66.0

To install Sass 1.66.0, download one of the packages below and add it to your PATH, or see the Sass website for full installation instructions.

Changes

  • Breaking change: Drop support for the additional CSS calculations defined in CSS Values and Units 4. Custom Sass functions whose names overlapped with these new CSS functions were being parsed as CSS calculations instead, causing an unintentional breaking change outside our normal compatibility policy for CSS compatibility changes.

    Support will be added again in a future version, but only after Sass has emitted a deprecation warning for all functions that will break for at least three months prior to the breakage.

See the full changelog for changes in earlier releases.

Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=sass&package-manager=npm_and_yarn&previous-version=1.65.1&new-version=1.66.1)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) You can trigger a rebase of this PR by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- website/package-lock.json | 14 +++++++------- website/package.json | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/website/package-lock.json b/website/package-lock.json index ef8a2cc4f..581e44381 100644 --- a/website/package-lock.json +++ b/website/package-lock.json @@ -24,7 +24,7 @@ "prism-react-renderer": "^1.3.5", "react": "^17.0.2", "react-dom": "^17.0.2", - "sass": "^1.65.1", + "sass": "^1.66.1", "tiny-slider": "^2.9.4", "tw-elements": "^1.0.0-alpha13", "wow.js": "^1.2.2" @@ -12639,9 +12639,9 @@ "license": "MIT" }, "node_modules/sass": { - "version": "1.65.1", - "resolved": "https://registry.npmjs.org/sass/-/sass-1.65.1.tgz", - "integrity": "sha512-9DINwtHmA41SEd36eVPQ9BJKpn7eKDQmUHmpI0y5Zv2Rcorrh0zS+cFrt050hdNbmmCNKTW3hV5mWfuegNRsEA==", + "version": "1.66.1", + "resolved": "https://registry.npmjs.org/sass/-/sass-1.66.1.tgz", + "integrity": "sha512-50c+zTsZOJVgFfTgwwEzkjA3/QACgdNsKueWPyAR0mRINIvLAStVQBbPg14iuqEQ74NPDbXzJARJ/O4SI1zftA==", "dependencies": { "chokidar": ">=3.0.0 <4.0.0", "immutable": "^4.0.0", @@ -23932,9 +23932,9 @@ "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==" }, "sass": { - "version": "1.65.1", - "resolved": "https://registry.npmjs.org/sass/-/sass-1.65.1.tgz", - "integrity": "sha512-9DINwtHmA41SEd36eVPQ9BJKpn7eKDQmUHmpI0y5Zv2Rcorrh0zS+cFrt050hdNbmmCNKTW3hV5mWfuegNRsEA==", + "version": "1.66.1", + "resolved": "https://registry.npmjs.org/sass/-/sass-1.66.1.tgz", + "integrity": "sha512-50c+zTsZOJVgFfTgwwEzkjA3/QACgdNsKueWPyAR0mRINIvLAStVQBbPg14iuqEQ74NPDbXzJARJ/O4SI1zftA==", "requires": { "chokidar": ">=3.0.0 <4.0.0", "immutable": "^4.0.0", diff --git a/website/package.json b/website/package.json index 5cbecd8c2..7528e4759 100644 --- a/website/package.json +++ b/website/package.json @@ -30,7 +30,7 @@ "prism-react-renderer": "^1.3.5", "react": "^17.0.2", "react-dom": "^17.0.2", - "sass": "^1.65.1", + "sass": "^1.66.1", "tiny-slider": "^2.9.4", "tw-elements": "^1.0.0-alpha13", "wow.js": "^1.2.2" From b37ee8aced0066a4cc1bd5d71c9068250f64f2e7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 21 Aug 2023 06:51:17 +0000 Subject: [PATCH 06/32] =?UTF-8?q?=E2=AC=86=EF=B8=8F=20Bump=20github.com/aw?= =?UTF-8?q?s/aws-sdk-go=20from=201.44.326=20to=201.44.327=20in=20/src=20(#?= =?UTF-8?q?4073)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.44.326 to 1.44.327.
Release notes

Sourced from github.com/aws/aws-sdk-go's releases.

Release v1.44.327 (2023-08-18)

Service Client Updates

  • service/codecommit: Updates service API, documentation, and paginators
    • Add new ListFileCommitHistory operation to retrieve commits which introduced changes to a specific file (see the sketch after this list).
  • service/securityhub: Updates service API and documentation
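
The ListFileCommitHistory addition is the notable API change in this bump. Below is a hedged sketch of calling it through the generated v1 client; the repository name and file path are placeholders, and the input shape is assumed to follow the SDK's usual codegen conventions rather than being confirmed here:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/codecommit"
)

func main() {
	sess := session.Must(session.NewSession())
	cc := codecommit.New(sess)

	// List the commits that introduced changes to a single file.
	out, err := cc.ListFileCommitHistory(&codecommit.ListFileCommitHistoryInput{
		RepositoryName: aws.String("my-repo"),   // placeholder
		FilePath:       aws.String("README.md"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(out)
}
```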

SDK Bugs

  • aws/credentials/ssocreds: Modify sso token provider logic to handle possible nil val returned by CreateToken.
Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/aws/aws-sdk-go&package-manager=go_modules&previous-version=1.44.326&new-version=1.44.327)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) You can trigger a rebase of this PR by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
--- src/go.mod | 2 +- src/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/go.mod b/src/go.mod index 7dc1f418a..af8b05608 100644 --- a/src/go.mod +++ b/src/go.mod @@ -8,7 +8,7 @@ require ( github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.1 github.com/alcionai/clues v0.0.0-20230728164842-7dc4795a43e4 github.com/armon/go-metrics v0.4.1 - github.com/aws/aws-sdk-go v1.44.326 + github.com/aws/aws-sdk-go v1.44.327 github.com/aws/aws-xray-sdk-go v1.8.1 github.com/cenkalti/backoff/v4 v4.2.1 github.com/google/uuid v1.3.0 diff --git a/src/go.sum b/src/go.sum index fd1a66ad1..4122060fb 100644 --- a/src/go.sum +++ b/src/go.sum @@ -66,8 +66,8 @@ github.com/andybalholm/brotli v1.0.5 h1:8uQZIdzKmjc/iuPu7O2ioW48L81FgatrcpfFmiq/ github.com/andybalholm/brotli v1.0.5/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= -github.com/aws/aws-sdk-go v1.44.326 h1:/6xD/9mKZ2RMTDfbhh9qCxw+CaTbJRvfHJ/NHPFbI38= -github.com/aws/aws-sdk-go v1.44.326/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go v1.44.327 h1:ZS8oO4+7MOBLhkdwIhgtVeDzCeWOlTfKJS7EgggbIEY= +github.com/aws/aws-sdk-go v1.44.327/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/aws/aws-xray-sdk-go v1.8.1 h1:O4pXV+hnCskaamGsZnFpzHyAmgPGusBMN6i7nnsy0Fo= github.com/aws/aws-xray-sdk-go v1.8.1/go.mod h1:wMmVYzej3sykAttNBkXQHK/+clAPWTOrPiajEk7Cp3A= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= From 13e0d82735464ffb3e6fd9b4f45e74f87d2ac7e8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 21 Aug 2023 16:22:17 +0000 Subject: [PATCH 07/32] =?UTF-8?q?=E2=AC=86=EF=B8=8F=20Bump=20github.com/mi?= =?UTF-8?q?crosoftgraph/msgraph-sdk-go=20from=201.14.0=20to=201.15.0=20in?= =?UTF-8?q?=20/src=20(#4075)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [github.com/microsoftgraph/msgraph-sdk-go](https://github.com/microsoftgraph/msgraph-sdk-go) from 1.14.0 to 1.15.0.
Changelog

Sourced from github.com/microsoftgraph/msgraph-sdk-go's changelog.

[1.15.0] - 2023-08-21

Changed

  • Weekly generation.
Commits
  • 4b0d8c6 Generated models and request builders (#554)
  • 344b8dd Merge pull request #553 from microsoftgraph/dependabot/go_modules/github.com/...
  • e3bb680 Bump github.com/Azure/azure-sdk-for-go/sdk/azcore from 1.7.0 to 1.7.1
  • 82bbc80 Merge pull request #549 from microsoftgraph/dependabot/go_modules/github.com/...
  • 1f12cf9 Bump github.com/microsoft/kiota-abstractions-go from 1.1.0 to 1.2.0
  • See full diff in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/microsoftgraph/msgraph-sdk-go&package-manager=go_modules&previous-version=1.14.0&new-version=1.15.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) You can trigger a rebase of this PR by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
--- src/go.mod | 3 ++- src/go.sum | 6 ++++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/src/go.mod b/src/go.mod index af8b05608..00231cdfc 100644 --- a/src/go.mod +++ b/src/go.mod @@ -19,7 +19,7 @@ require ( github.com/microsoft/kiota-http-go v1.1.0 github.com/microsoft/kiota-serialization-form-go v1.0.0 github.com/microsoft/kiota-serialization-json-go v1.0.4 - github.com/microsoftgraph/msgraph-sdk-go v1.14.0 + github.com/microsoftgraph/msgraph-sdk-go v1.15.0 github.com/microsoftgraph/msgraph-sdk-go-core v1.0.0 github.com/pkg/errors v0.9.1 github.com/puzpuzpuz/xsync/v2 v2.4.1 @@ -49,6 +49,7 @@ require ( github.com/hashicorp/go-immutable-radix v1.3.1 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/magiconair/properties v1.8.7 // indirect + github.com/microsoft/kiota-serialization-multipart-go v1.0.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/pelletier/go-toml/v2 v2.0.9 // indirect github.com/spf13/afero v1.9.5 // indirect diff --git a/src/go.sum b/src/go.sum index 4122060fb..f5263b371 100644 --- a/src/go.sum +++ b/src/go.sum @@ -281,10 +281,12 @@ github.com/microsoft/kiota-serialization-form-go v1.0.0 h1:UNdrkMnLFqUCccQZerKjb github.com/microsoft/kiota-serialization-form-go v1.0.0/go.mod h1:h4mQOO6KVTNciMF6azi1J9QB19ujSw3ULKcSNyXXOMA= github.com/microsoft/kiota-serialization-json-go v1.0.4 h1:5TaISWwd2Me8clrK7SqNATo0tv9seOq59y4I5953egQ= github.com/microsoft/kiota-serialization-json-go v1.0.4/go.mod h1:rM4+FsAY+9AEpBsBzkFFis+b/LZLlNKKewuLwK9Q6Mg= +github.com/microsoft/kiota-serialization-multipart-go v1.0.0 h1:3O5sb5Zj+moLBiJympbXNaeV07K0d46IfuEd5v9+pBs= +github.com/microsoft/kiota-serialization-multipart-go v1.0.0/go.mod h1:yauLeBTpANk4L03XD985akNysG24SnRJGaveZf+p4so= github.com/microsoft/kiota-serialization-text-go v1.0.0 h1:XOaRhAXy+g8ZVpcq7x7a0jlETWnWrEum0RhmbYrTFnA= github.com/microsoft/kiota-serialization-text-go v1.0.0/go.mod h1:sM1/C6ecnQ7IquQOGUrUldaO5wj+9+v7G2W3sQ3fy6M= -github.com/microsoftgraph/msgraph-sdk-go v1.14.0 h1:YdhMvzu8bXcfIQGRur6NkXnv4cPOsMBJ44XjfWLOt9Y= -github.com/microsoftgraph/msgraph-sdk-go v1.14.0/go.mod h1:ccLv84FJFtwdSzYWM/HlTes5FLzkzzBsYh9kg93/WS8= +github.com/microsoftgraph/msgraph-sdk-go v1.15.0 h1:cdz6Bs0T0Hl/NTdUAZq8TRJwidTmX741X2SnVIsn5l4= +github.com/microsoftgraph/msgraph-sdk-go v1.15.0/go.mod h1:YfKdWdUwQWuS6E+Qg6+SZnHxJ/kvG2nYQutwzGa5NZs= github.com/microsoftgraph/msgraph-sdk-go-core v1.0.0 h1:7NWTfyXvOjoizW7PmxNp3+8wCKPgpODs/D1cUZ3fkAY= github.com/microsoftgraph/msgraph-sdk-go-core v1.0.0/go.mod h1:tQb4q3YMIj2dWhhXhQSJ4ELpol931ANKzHSYK5kX1qE= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= From 734b1021c90940aab2e14553d03924380cad4c24 Mon Sep 17 00:00:00 2001 From: ashmrtn <3891298+ashmrtn@users.noreply.github.com> Date: Mon, 21 Aug 2023 09:58:48 -0700 Subject: [PATCH 08/32] Fix log output and don't fail fast (#4076) #### Does this PR need a docs update or release note? 
- [ ] :white_check_mark: Yes, it's included - [ ] :clock1: Yes, but in a later PR - [x] :no_entry: No #### Type of change - [ ] :sunflower: Feature - [ ] :bug: Bugfix - [ ] :world_map: Documentation - [ ] :robot: Supportability/Tests - [x] :computer: CI/Deployment - [ ] :broom: Tech Debt/Cleanup #### Test Plan - [x] :muscle: Manual - [ ] :zap: Unit test - [ ] :green_heart: E2E --- .github/workflows/nightly_test.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/nightly_test.yml b/.github/workflows/nightly_test.yml index f6338a4c1..a676a5bac 100644 --- a/.github/workflows/nightly_test.yml +++ b/.github/workflows/nightly_test.yml @@ -92,7 +92,7 @@ jobs: CORSO_M365_TEST_USER_ID: ${{ vars.CORSO_M365_TEST_USER_ID }} CORSO_SECONDARY_M365_TEST_USER_ID: ${{ vars.CORSO_SECONDARY_M365_TEST_USER_ID }} CORSO_PASSPHRASE: ${{ secrets.INTEGRATION_TEST_CORSO_PASSPHRASE }} - CORSO_LOG_FILE: ${{ github.workspace }}/testlog/run-nightly.log + CORSO_LOG_FILE: ${{ github.workspace }}/src/testlog/run-nightly.log LOG_GRAPH_REQUESTS: true S3_BUCKET: ${{ secrets.CI_TESTS_S3_BUCKET }} run: | @@ -101,7 +101,6 @@ jobs: -tags testing \ -json \ -v \ - -failfast \ -p 1 \ -timeout 1h \ ./... 2>&1 | tee ./testlog/gotest-nightly.log | gotestfmt -hide successful-tests From 1468c0881aa1d963e540b67705ab5ac420315493 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 21 Aug 2023 18:14:57 +0000 Subject: [PATCH 09/32] =?UTF-8?q?=E2=AC=86=EF=B8=8F=20Bump=20github.com/pu?= =?UTF-8?q?zpuzpuz/xsync/v2=20from=202.4.1=20to=202.5.0=20in=20/src=20(#40?= =?UTF-8?q?77)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [github.com/puzpuzpuz/xsync/v2](https://github.com/puzpuzpuz/xsync) from 2.4.1 to 2.5.0.
Release notes

Sourced from github.com/puzpuzpuz/xsync/v2's releases.

v2.5.0

  • Add concurrent queue with generics support (MPMCQueueOf) (#104)
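
For context, a minimal sketch of the new generic queue, assuming the v2.5.0 surface (a `NewMPMCQueueOf` constructor with blocking `Enqueue`/`Dequeue`); the capacity and item type here are arbitrary:

```go
package main

import (
	"fmt"

	"github.com/puzpuzpuz/xsync/v2"
)

func main() {
	// Bounded, concurrency-safe MPMC queue of ints; capacity is fixed at creation.
	q := xsync.NewMPMCQueueOf[int](64)

	go func() {
		for i := 0; i < 10; i++ {
			q.Enqueue(i) // blocks while the queue is full
		}
	}()

	for i := 0; i < 10; i++ {
		fmt.Println(q.Dequeue()) // blocks while the queue is empty
	}
}
```

Compared to the pre-generics MPMCQueue, this avoids the interface{} round-trip on every item.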
Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/puzpuzpuz/xsync/v2&package-manager=go_modules&previous-version=2.4.1&new-version=2.5.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) You can trigger a rebase of this PR by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
--- src/go.mod | 2 +- src/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/go.mod b/src/go.mod index 00231cdfc..e34158eb9 100644 --- a/src/go.mod +++ b/src/go.mod @@ -22,7 +22,7 @@ require ( github.com/microsoftgraph/msgraph-sdk-go v1.15.0 github.com/microsoftgraph/msgraph-sdk-go-core v1.0.0 github.com/pkg/errors v0.9.1 - github.com/puzpuzpuz/xsync/v2 v2.4.1 + github.com/puzpuzpuz/xsync/v2 v2.5.0 github.com/rudderlabs/analytics-go v3.3.3+incompatible github.com/spatialcurrent/go-lazy v0.0.0-20211115014721-47315cc003d1 github.com/spf13/cobra v1.7.0 diff --git a/src/go.sum b/src/go.sum index f5263b371..aadbdc6f3 100644 --- a/src/go.sum +++ b/src/go.sum @@ -344,8 +344,8 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= -github.com/puzpuzpuz/xsync/v2 v2.4.1 h1:aGdE1C/HaR/QC6YAFdtZXi60Df8/qBIrs8PKrzkItcM= -github.com/puzpuzpuz/xsync/v2 v2.4.1/go.mod h1:gD2H2krq/w52MfPLE+Uy64TzJDVY7lP2znR9qmR35kU= +github.com/puzpuzpuz/xsync/v2 v2.5.0 h1:2k4qrO/orvmEXZ3hmtHqIy9XaQtPTwzMZk1+iErpE8c= +github.com/puzpuzpuz/xsync/v2 v2.5.0/go.mod h1:gD2H2krq/w52MfPLE+Uy64TzJDVY7lP2znR9qmR35kU= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= From 0a875907699b82428aeb4f281f851c1282370fc6 Mon Sep 17 00:00:00 2001 From: Keepers Date: Mon, 21 Aug 2023 12:49:04 -0600 Subject: [PATCH 10/32] Teams groups export cli (#4069) #### Does this PR need a docs update or release note? 
- [x] :no_entry: No #### Type of change - [x] :sunflower: Feature #### Issue(s) * #3989 #### Test Plan - [x] :muscle: Manual - [x] :zap: Unit test --- src/cli/backup/groups.go | 8 +-- src/cli/backup/teams.go | 8 +-- src/cli/export/export.go | 2 + src/cli/export/groups.go | 84 +++++++++++++++++++++++++++++++ src/cli/export/groups_test.go | 94 +++++++++++++++++++++++++++++++++++ src/cli/export/onedrive.go | 4 +- src/cli/export/sharepoint.go | 4 +- src/cli/export/teams.go | 84 +++++++++++++++++++++++++++++++ src/cli/export/teams_test.go | 94 +++++++++++++++++++++++++++++++++++ src/cli/restore/groups.go | 2 +- src/cli/restore/teams.go | 2 +- 11 files changed, 372 insertions(+), 14 deletions(-) create mode 100644 src/cli/export/groups.go create mode 100644 src/cli/export/groups_test.go create mode 100644 src/cli/export/teams.go create mode 100644 src/cli/export/teams_test.go diff --git a/src/cli/backup/groups.go b/src/cli/backup/groups.go index 3f1f83eb7..1dc490ae7 100644 --- a/src/cli/backup/groups.go +++ b/src/cli/backup/groups.go @@ -53,7 +53,7 @@ func addGroupsCommands(cmd *cobra.Command) *cobra.Command { switch cmd.Use { case createCommand: - c, fs = utils.AddCommand(cmd, groupsCreateCmd(), utils.HideCommand()) + c, fs = utils.AddCommand(cmd, groupsCreateCmd(), utils.MarkPreReleaseCommand()) fs.SortFlags = false c.Use = c.Use + " " + groupsServiceCommandCreateUseSuffix @@ -69,7 +69,7 @@ func addGroupsCommands(cmd *cobra.Command) *cobra.Command { flags.AddFailFastFlag(c) case listCommand: - c, fs = utils.AddCommand(cmd, groupsListCmd(), utils.HideCommand()) + c, fs = utils.AddCommand(cmd, groupsListCmd(), utils.MarkPreReleaseCommand()) fs.SortFlags = false flags.AddBackupIDFlag(c, false) @@ -81,7 +81,7 @@ func addGroupsCommands(cmd *cobra.Command) *cobra.Command { addRecoveredErrorsFN(c) case detailsCommand: - c, fs = utils.AddCommand(cmd, groupsDetailsCmd(), utils.HideCommand()) + c, fs = utils.AddCommand(cmd, groupsDetailsCmd(), utils.MarkPreReleaseCommand()) fs.SortFlags = false c.Use = c.Use + " " + groupsServiceCommandDetailsUseSuffix @@ -97,7 +97,7 @@ func addGroupsCommands(cmd *cobra.Command) *cobra.Command { flags.AddAzureCredsFlags(c) case deleteCommand: - c, fs = utils.AddCommand(cmd, groupsDeleteCmd(), utils.HideCommand()) + c, fs = utils.AddCommand(cmd, groupsDeleteCmd(), utils.MarkPreReleaseCommand()) fs.SortFlags = false c.Use = c.Use + " " + groupsServiceCommandDeleteUseSuffix diff --git a/src/cli/backup/teams.go b/src/cli/backup/teams.go index fcac3394d..97e314cfd 100644 --- a/src/cli/backup/teams.go +++ b/src/cli/backup/teams.go @@ -53,7 +53,7 @@ func addTeamsCommands(cmd *cobra.Command) *cobra.Command { switch cmd.Use { case createCommand: - c, fs = utils.AddCommand(cmd, teamsCreateCmd(), utils.HideCommand()) + c, fs = utils.AddCommand(cmd, teamsCreateCmd(), utils.MarkPreReleaseCommand()) fs.SortFlags = false c.Use = c.Use + " " + teamsServiceCommandCreateUseSuffix @@ -69,7 +69,7 @@ func addTeamsCommands(cmd *cobra.Command) *cobra.Command { flags.AddFailFastFlag(c) case listCommand: - c, fs = utils.AddCommand(cmd, teamsListCmd(), utils.HideCommand()) + c, fs = utils.AddCommand(cmd, teamsListCmd(), utils.MarkPreReleaseCommand()) fs.SortFlags = false flags.AddBackupIDFlag(c, false) @@ -81,7 +81,7 @@ func addTeamsCommands(cmd *cobra.Command) *cobra.Command { addRecoveredErrorsFN(c) case detailsCommand: - c, fs = utils.AddCommand(cmd, teamsDetailsCmd(), utils.HideCommand()) + c, fs = utils.AddCommand(cmd, teamsDetailsCmd(), utils.MarkPreReleaseCommand()) fs.SortFlags = false 
c.Use = c.Use + " " + teamsServiceCommandDetailsUseSuffix @@ -97,7 +97,7 @@ func addTeamsCommands(cmd *cobra.Command) *cobra.Command { flags.AddAzureCredsFlags(c) case deleteCommand: - c, fs = utils.AddCommand(cmd, teamsDeleteCmd(), utils.HideCommand()) + c, fs = utils.AddCommand(cmd, teamsDeleteCmd(), utils.MarkPreReleaseCommand()) fs.SortFlags = false c.Use = c.Use + " " + teamsServiceCommandDeleteUseSuffix diff --git a/src/cli/export/export.go b/src/cli/export/export.go index e0deed014..5f63895c0 100644 --- a/src/cli/export/export.go +++ b/src/cli/export/export.go @@ -21,6 +21,8 @@ import ( var exportCommands = []func(cmd *cobra.Command) *cobra.Command{ addOneDriveCommands, addSharePointCommands, + addGroupsCommands, + addTeamsCommands, } // AddCommands attaches all `corso export * *` commands to the parent. diff --git a/src/cli/export/groups.go b/src/cli/export/groups.go new file mode 100644 index 000000000..36b56e60f --- /dev/null +++ b/src/cli/export/groups.go @@ -0,0 +1,84 @@ +package export + +import ( + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + + "github.com/alcionai/corso/src/cli/flags" + . "github.com/alcionai/corso/src/cli/print" + "github.com/alcionai/corso/src/cli/utils" +) + +// called by export.go to map subcommands to provider-specific handling. +func addGroupsCommands(cmd *cobra.Command) *cobra.Command { + var ( + c *cobra.Command + fs *pflag.FlagSet + ) + + switch cmd.Use { + case exportCommand: + c, fs = utils.AddCommand(cmd, groupsExportCmd(), utils.MarkPreReleaseCommand()) + + c.Use = c.Use + " " + groupsServiceCommandUseSuffix + + // Flags addition ordering should follow the order we want them to appear in help and docs: + // More generic (ex: --user) and more frequently used flags take precedence. + fs.SortFlags = false + + flags.AddBackupIDFlag(c, true) + flags.AddExportConfigFlags(c) + flags.AddFailFastFlag(c) + flags.AddCorsoPassphaseFlags(c) + flags.AddAWSCredsFlags(c) + } + + return c +} + +// TODO: correct examples +const ( + groupsServiceCommand = "groups" + groupsServiceCommandUseSuffix = " --backup " + + //nolint:lll + groupsServiceCommandExportExamples = `# Export file with ID 98765abcdef in Bob's last backup (1234abcd...) to my-exports directory +corso export groups my-exports --backup 1234abcd-12ab-cd34-56de-1234abcd --file 98765abcdef + +# Export files named "FY2021 Planning.xlsx" in "Documents/Finance Reports" to current directory +corso export groups . --backup 1234abcd-12ab-cd34-56de-1234abcd \ + --file "FY2021 Planning.xlsx" --folder "Documents/Finance Reports" + +# Export all files and folders in folder "Documents/Finance Reports" that were created before 2020 to my-exports +corso export groups my-exports --backup 1234abcd-12ab-cd34-56de-1234abcd + --folder "Documents/Finance Reports" --file-created-before 2020-01-01T00:00:00` +) + +// `corso export groups [...] ` +func groupsExportCmd() *cobra.Command { + return &cobra.Command{ + Use: groupsServiceCommand, + Short: "Export M365 Groups service data", + RunE: exportGroupsCmd, + Args: func(cmd *cobra.Command, args []string) error { + if len(args) != 1 { + return errors.New("missing export destination") + } + + return nil + }, + Example: groupsServiceCommandExportExamples, + } +} + +// processes an groups service export. 
+func exportGroupsCmd(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + + if utils.HasNoFlagsAndShownHelp(cmd) { + return nil + } + + return Only(ctx, utils.ErrNotYetImplemented) +} diff --git a/src/cli/export/groups_test.go b/src/cli/export/groups_test.go new file mode 100644 index 000000000..d2a091e79 --- /dev/null +++ b/src/cli/export/groups_test.go @@ -0,0 +1,94 @@ +package export + +import ( + "bytes" + "testing" + + "github.com/alcionai/clues" + "github.com/spf13/cobra" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + "github.com/alcionai/corso/src/cli/flags" + "github.com/alcionai/corso/src/cli/utils" + "github.com/alcionai/corso/src/cli/utils/testdata" + "github.com/alcionai/corso/src/internal/tester" +) + +type GroupsUnitSuite struct { + tester.Suite +} + +func TestGroupsUnitSuite(t *testing.T) { + suite.Run(t, &GroupsUnitSuite{Suite: tester.NewUnitSuite(t)}) +} + +func (suite *GroupsUnitSuite) TestAddGroupsCommands() { + expectUse := groupsServiceCommand + " " + groupsServiceCommandUseSuffix + + table := []struct { + name string + use string + expectUse string + expectShort string + expectRunE func(*cobra.Command, []string) error + }{ + {"export groups", exportCommand, expectUse, groupsExportCmd().Short, exportGroupsCmd}, + } + for _, test := range table { + suite.Run(test.name, func() { + t := suite.T() + + cmd := &cobra.Command{Use: test.use} + + // normally a persistent flag from the root. + // required to ensure a dry run. + flags.AddRunModeFlag(cmd, true) + + c := addGroupsCommands(cmd) + require.NotNil(t, c) + + cmds := cmd.Commands() + require.Len(t, cmds, 1) + + child := cmds[0] + assert.Equal(t, test.expectUse, child.Use) + assert.Equal(t, test.expectShort, child.Short) + tester.AreSameFunc(t, test.expectRunE, child.RunE) + + cmd.SetArgs([]string{ + "groups", + testdata.RestoreDestination, + "--" + flags.RunModeFN, flags.RunModeFlagTest, + "--" + flags.BackupFN, testdata.BackupInput, + + "--" + flags.AWSAccessKeyFN, testdata.AWSAccessKeyID, + "--" + flags.AWSSecretAccessKeyFN, testdata.AWSSecretAccessKey, + "--" + flags.AWSSessionTokenFN, testdata.AWSSessionToken, + + "--" + flags.CorsoPassphraseFN, testdata.CorsoPassphrase, + + // bool flags + "--" + flags.ArchiveFN, + }) + + cmd.SetOut(new(bytes.Buffer)) // drop output + cmd.SetErr(new(bytes.Buffer)) // drop output + err := cmd.Execute() + // assert.NoError(t, err, clues.ToCore(err)) + assert.ErrorIs(t, err, utils.ErrNotYetImplemented, clues.ToCore(err)) + + opts := utils.MakeGroupsOpts(cmd) + assert.Equal(t, testdata.BackupInput, flags.BackupIDFV) + + assert.Equal(t, testdata.Archive, opts.ExportCfg.Archive) + + assert.Equal(t, testdata.AWSAccessKeyID, flags.AWSAccessKeyFV) + assert.Equal(t, testdata.AWSSecretAccessKey, flags.AWSSecretAccessKeyFV) + assert.Equal(t, testdata.AWSSessionToken, flags.AWSSessionTokenFV) + + assert.Equal(t, testdata.CorsoPassphrase, flags.CorsoPassphraseFV) + }) + } +} diff --git a/src/cli/export/onedrive.go b/src/cli/export/onedrive.go index 593149bd9..ea6537dc2 100644 --- a/src/cli/export/onedrive.go +++ b/src/cli/export/onedrive.go @@ -39,7 +39,7 @@ func addOneDriveCommands(cmd *cobra.Command) *cobra.Command { const ( oneDriveServiceCommand = "onedrive" - oneDriveServiceCommandUseSuffix = "--backup " + oneDriveServiceCommandUseSuffix = " --backup " //nolint:lll oneDriveServiceCommandExportExamples = `# Export file with ID 98765abcdef in Bob's last backup (1234abcd...) 
to my-exports directory @@ -62,7 +62,7 @@ func oneDriveExportCmd() *cobra.Command { RunE: exportOneDriveCmd, Args: func(cmd *cobra.Command, args []string) error { if len(args) != 1 { - return errors.New("missing restore destination") + return errors.New("missing export destination") } return nil diff --git a/src/cli/export/sharepoint.go b/src/cli/export/sharepoint.go index ec71a5f2b..7293a02f9 100644 --- a/src/cli/export/sharepoint.go +++ b/src/cli/export/sharepoint.go @@ -39,7 +39,7 @@ func addSharePointCommands(cmd *cobra.Command) *cobra.Command { const ( sharePointServiceCommand = "sharepoint" - sharePointServiceCommandUseSuffix = "--backup " + sharePointServiceCommandUseSuffix = " --backup " //nolint:lll sharePointServiceCommandExportExamples = `# Export file with ID 98765abcdef in Bob's latest backup (1234abcd...) to my-exports directory @@ -66,7 +66,7 @@ func sharePointExportCmd() *cobra.Command { RunE: exportSharePointCmd, Args: func(cmd *cobra.Command, args []string) error { if len(args) != 1 { - return errors.New("missing restore destination") + return errors.New("missing export destination") } return nil diff --git a/src/cli/export/teams.go b/src/cli/export/teams.go new file mode 100644 index 000000000..7e680c28d --- /dev/null +++ b/src/cli/export/teams.go @@ -0,0 +1,84 @@ +package export + +import ( + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + + "github.com/alcionai/corso/src/cli/flags" + . "github.com/alcionai/corso/src/cli/print" + "github.com/alcionai/corso/src/cli/utils" +) + +// called by export.go to map subcommands to provider-specific handling. +func addTeamsCommands(cmd *cobra.Command) *cobra.Command { + var ( + c *cobra.Command + fs *pflag.FlagSet + ) + + switch cmd.Use { + case exportCommand: + c, fs = utils.AddCommand(cmd, teamsExportCmd(), utils.MarkPreReleaseCommand()) + + c.Use = c.Use + " " + teamsServiceCommandUseSuffix + + // Flags addition ordering should follow the order we want them to appear in help and docs: + // More generic (ex: --user) and more frequently used flags take precedence. + fs.SortFlags = false + + flags.AddBackupIDFlag(c, true) + flags.AddExportConfigFlags(c) + flags.AddFailFastFlag(c) + flags.AddCorsoPassphaseFlags(c) + flags.AddAWSCredsFlags(c) + } + + return c +} + +// TODO: correct examples +const ( + teamsServiceCommand = "teams" + teamsServiceCommandUseSuffix = " --backup " + + //nolint:lll + teamsServiceCommandExportExamples = `# Export file with ID 98765abcdef in Bob's last backup (1234abcd...) to my-exports directory +corso export teams my-exports --backup 1234abcd-12ab-cd34-56de-1234abcd --file 98765abcdef + +# Export files named "FY2021 Planning.xlsx" in "Documents/Finance Reports" to current directory +corso export teams . --backup 1234abcd-12ab-cd34-56de-1234abcd \ + --file "FY2021 Planning.xlsx" --folder "Documents/Finance Reports" + +# Export all files and folders in folder "Documents/Finance Reports" that were created before 2020 to my-exports +corso export teams my-exports --backup 1234abcd-12ab-cd34-56de-1234abcd + --folder "Documents/Finance Reports" --file-created-before 2020-01-01T00:00:00` +) + +// `corso export teams [...] 
` +func teamsExportCmd() *cobra.Command { + return &cobra.Command{ + Use: teamsServiceCommand, + Short: "Export M365 Teams service data", + RunE: exportTeamsCmd, + Args: func(cmd *cobra.Command, args []string) error { + if len(args) != 1 { + return errors.New("missing export destination") + } + + return nil + }, + Example: teamsServiceCommandExportExamples, + } +} + +// processes an teams service export. +func exportTeamsCmd(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + + if utils.HasNoFlagsAndShownHelp(cmd) { + return nil + } + + return Only(ctx, utils.ErrNotYetImplemented) +} diff --git a/src/cli/export/teams_test.go b/src/cli/export/teams_test.go new file mode 100644 index 000000000..d431359d6 --- /dev/null +++ b/src/cli/export/teams_test.go @@ -0,0 +1,94 @@ +package export + +import ( + "bytes" + "testing" + + "github.com/alcionai/clues" + "github.com/spf13/cobra" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + "github.com/alcionai/corso/src/cli/flags" + "github.com/alcionai/corso/src/cli/utils" + "github.com/alcionai/corso/src/cli/utils/testdata" + "github.com/alcionai/corso/src/internal/tester" +) + +type TeamsUnitSuite struct { + tester.Suite +} + +func TestTeamsUnitSuite(t *testing.T) { + suite.Run(t, &TeamsUnitSuite{Suite: tester.NewUnitSuite(t)}) +} + +func (suite *TeamsUnitSuite) TestAddTeamsCommands() { + expectUse := teamsServiceCommand + " " + teamsServiceCommandUseSuffix + + table := []struct { + name string + use string + expectUse string + expectShort string + expectRunE func(*cobra.Command, []string) error + }{ + {"export teams", exportCommand, expectUse, teamsExportCmd().Short, exportTeamsCmd}, + } + for _, test := range table { + suite.Run(test.name, func() { + t := suite.T() + + cmd := &cobra.Command{Use: test.use} + + // normally a persistent flag from the root. + // required to ensure a dry run. 
+ flags.AddRunModeFlag(cmd, true) + + c := addTeamsCommands(cmd) + require.NotNil(t, c) + + cmds := cmd.Commands() + require.Len(t, cmds, 1) + + child := cmds[0] + assert.Equal(t, test.expectUse, child.Use) + assert.Equal(t, test.expectShort, child.Short) + tester.AreSameFunc(t, test.expectRunE, child.RunE) + + cmd.SetArgs([]string{ + "teams", + testdata.RestoreDestination, + "--" + flags.RunModeFN, flags.RunModeFlagTest, + "--" + flags.BackupFN, testdata.BackupInput, + + "--" + flags.AWSAccessKeyFN, testdata.AWSAccessKeyID, + "--" + flags.AWSSecretAccessKeyFN, testdata.AWSSecretAccessKey, + "--" + flags.AWSSessionTokenFN, testdata.AWSSessionToken, + + "--" + flags.CorsoPassphraseFN, testdata.CorsoPassphrase, + + // bool flags + "--" + flags.ArchiveFN, + }) + + cmd.SetOut(new(bytes.Buffer)) // drop output + cmd.SetErr(new(bytes.Buffer)) // drop output + err := cmd.Execute() + // assert.NoError(t, err, clues.ToCore(err)) + assert.ErrorIs(t, err, utils.ErrNotYetImplemented, clues.ToCore(err)) + + opts := utils.MakeTeamsOpts(cmd) + assert.Equal(t, testdata.BackupInput, flags.BackupIDFV) + + assert.Equal(t, testdata.Archive, opts.ExportCfg.Archive) + + assert.Equal(t, testdata.AWSAccessKeyID, flags.AWSAccessKeyFV) + assert.Equal(t, testdata.AWSSecretAccessKey, flags.AWSSecretAccessKeyFV) + assert.Equal(t, testdata.AWSSessionToken, flags.AWSSessionTokenFV) + + assert.Equal(t, testdata.CorsoPassphrase, flags.CorsoPassphraseFV) + }) + } +} diff --git a/src/cli/restore/groups.go b/src/cli/restore/groups.go index a98c9d088..3907b17d0 100644 --- a/src/cli/restore/groups.go +++ b/src/cli/restore/groups.go @@ -18,7 +18,7 @@ func addGroupsCommands(cmd *cobra.Command) *cobra.Command { switch cmd.Use { case restoreCommand: - c, fs = utils.AddCommand(cmd, groupsRestoreCmd(), utils.HideCommand()) + c, fs = utils.AddCommand(cmd, groupsRestoreCmd(), utils.MarkPreReleaseCommand()) c.Use = c.Use + " " + groupsServiceCommandUseSuffix diff --git a/src/cli/restore/teams.go b/src/cli/restore/teams.go index 59623024a..059c2182a 100644 --- a/src/cli/restore/teams.go +++ b/src/cli/restore/teams.go @@ -18,7 +18,7 @@ func addTeamsCommands(cmd *cobra.Command) *cobra.Command { switch cmd.Use { case restoreCommand: - c, fs = utils.AddCommand(cmd, teamsRestoreCmd(), utils.HideCommand()) + c, fs = utils.AddCommand(cmd, teamsRestoreCmd(), utils.MarkPreReleaseCommand()) c.Use = c.Use + " " + teamsServiceCommandUseSuffix From 90ac62ab140be153616723d0055c7bd8526714b9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 21 Aug 2023 20:06:53 +0000 Subject: [PATCH 11/32] =?UTF-8?q?=E2=AC=86=EF=B8=8F=20Bump=20github.com/go?= =?UTF-8?q?ogle/uuid=20from=201.3.0=20to=201.3.1=20in=20/src=20(#4078)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [github.com/google/uuid](https://github.com/google/uuid) from 1.3.0 to 1.3.1.
Release notes

Sourced from github.com/google/uuid's releases.

v1.3.1

1.3.1 (2023-08-18)

Bug Fixes

  • Use .EqualFold() to parse urn prefixed UUIDs (#118) (574e687)
Changelog

Sourced from github.com/google/uuid's changelog.

1.3.1 (2023-08-18)

Bug Fixes

  • Use .EqualFold() to parse urn prefixed UUIDs (#118) (574e687)
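For context on the fix quoted above: `uuid.Parse` already accepted `urn:uuid:`-prefixed strings, and v1.3.1 switches the prefix comparison to `strings.EqualFold` so the prefix casing no longer matters. A minimal sketch of the changed behavior (the sample UUID value is arbitrary):

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// Before v1.3.1 the urn prefix check was case-sensitive, so an
	// uppercase prefix like this failed to parse. With the .EqualFold()
	// fix (#118), both casings are accepted.
	id, err := uuid.Parse("URN:UUID:f47ac10b-58cc-4372-a567-0e02b2c3d479")
	if err != nil {
		fmt.Println("parse failed:", err)
		return
	}

	fmt.Println(id) // f47ac10b-58cc-4372-a567-0e02b2c3d479
}
```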

--- src/go.mod | 2 +- src/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/go.mod b/src/go.mod index e34158eb9..88e1190fb 100644 --- a/src/go.mod +++ b/src/go.mod @@ -11,7 +11,7 @@ require ( github.com/aws/aws-sdk-go v1.44.327 github.com/aws/aws-xray-sdk-go v1.8.1 github.com/cenkalti/backoff/v4 v4.2.1 - github.com/google/uuid v1.3.0 + github.com/google/uuid v1.3.1 github.com/h2non/gock v1.2.0 github.com/kopia/kopia v0.13.0 github.com/microsoft/kiota-abstractions-go v1.2.0 diff --git a/src/go.sum b/src/go.sum index aadbdc6f3..0b2b5786e 100644 --- a/src/go.sum +++ b/src/go.sum @@ -192,8 +192,8 @@ github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= +github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= From 6a83d23ff245040864e9a3bbd2e3a51caa02e916 Mon Sep 17 00:00:00 2001 From: Keepers Date: Mon, 21 Aug 2023 14:44:24 -0600 Subject: [PATCH 12/32] add groups selectors for channels and messages (#4071) #### Does this PR need a docs update or release note? 
- [x] :no_entry: No #### Type of change - [x] :sunflower: Feature #### Issue(s) * #3989 #### Test Plan - [x] :zap: Unit test --- src/pkg/path/category_type.go | 27 +- src/pkg/path/categorytype_string.go | 5 +- src/pkg/selectors/groups.go | 83 +++--- src/pkg/selectors/groups_test.go | 421 ++++++++++++++++++++++++++++ 4 files changed, 489 insertions(+), 47 deletions(-) create mode 100644 src/pkg/selectors/groups_test.go diff --git a/src/pkg/path/category_type.go b/src/pkg/path/category_type.go index 5f8009e5d..40f511692 100644 --- a/src/pkg/path/category_type.go +++ b/src/pkg/path/category_type.go @@ -17,15 +17,16 @@ type CategoryType int //go:generate stringer -type=CategoryType -linecomment const ( - UnknownCategory CategoryType = 0 - EmailCategory CategoryType = 1 // email - ContactsCategory CategoryType = 2 // contacts - EventsCategory CategoryType = 3 // events - FilesCategory CategoryType = 4 // files - ListsCategory CategoryType = 5 // lists - LibrariesCategory CategoryType = 6 // libraries - PagesCategory CategoryType = 7 // pages - DetailsCategory CategoryType = 8 // details + UnknownCategory CategoryType = 0 + EmailCategory CategoryType = 1 // email + ContactsCategory CategoryType = 2 // contacts + EventsCategory CategoryType = 3 // events + FilesCategory CategoryType = 4 // files + ListsCategory CategoryType = 5 // lists + LibrariesCategory CategoryType = 6 // libraries + PagesCategory CategoryType = 7 // pages + DetailsCategory CategoryType = 8 // details + ChannelMessagesCategory CategoryType = 9 // channel messages ) func ToCategoryType(category string) CategoryType { @@ -48,6 +49,8 @@ func ToCategoryType(category string) CategoryType { return PagesCategory case strings.ToLower(DetailsCategory.String()): return DetailsCategory + case strings.ToLower(ChannelMessagesCategory.String()): + return ChannelMessagesCategory default: return UnknownCategory } @@ -73,6 +76,12 @@ var serviceCategories = map[ServiceType]map[CategoryType]struct{}{ ListsCategory: {}, PagesCategory: {}, }, + GroupsService: { + ChannelMessagesCategory: {}, + }, + TeamsService: { + ChannelMessagesCategory: {}, + }, } func validateServiceAndCategoryStrings(s, c string) (ServiceType, CategoryType, error) { diff --git a/src/pkg/path/categorytype_string.go b/src/pkg/path/categorytype_string.go index 626cc4e31..7b548d25a 100644 --- a/src/pkg/path/categorytype_string.go +++ b/src/pkg/path/categorytype_string.go @@ -17,11 +17,12 @@ func _() { _ = x[LibrariesCategory-6] _ = x[PagesCategory-7] _ = x[DetailsCategory-8] + _ = x[ChannelMessagesCategory-9] } -const _CategoryType_name = "UnknownCategoryemailcontactseventsfileslistslibrariespagesdetails" +const _CategoryType_name = "UnknownCategoryemailcontactseventsfileslistslibrariespagesdetailschannel messages" -var _CategoryType_index = [...]uint8{0, 15, 20, 28, 34, 39, 44, 53, 58, 65} +var _CategoryType_index = [...]uint8{0, 15, 20, 28, 34, 39, 44, 53, 58, 65, 81} func (i CategoryType) String() string { if i < 0 || i >= CategoryType(len(_CategoryType_index)-1) { diff --git a/src/pkg/selectors/groups.go b/src/pkg/selectors/groups.go index 7adf5398c..30d93698c 100644 --- a/src/pkg/selectors/groups.go +++ b/src/pkg/selectors/groups.go @@ -9,6 +9,7 @@ import ( "github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/backup/identity" "github.com/alcionai/corso/src/pkg/fault" + "github.com/alcionai/corso/src/pkg/filters" "github.com/alcionai/corso/src/pkg/path" ) @@ -214,38 +215,42 @@ func (s *groups) AllData() []GroupsScope { scopes = append( 
scopes, - makeScope[GroupsScope](GroupsTODOContainer, Any())) + makeScope[GroupsScope](GroupsChannel, Any())) return scopes } -// TODO produces one or more Groups TODO scopes. +// Channel produces one or more Groups channel scopes, where the channel +// matches a given channel by ID or Name. In order to ensure channel selection +// this should always be embedded within the Filter() set; include(channel()) will +// select all items in the channel without further filtering. // If any slice contains selectors.Any, that slice is reduced to [selectors.Any] // If any slice contains selectors.None, that slice is reduced to [selectors.None] -// Any empty slice defaults to [selectors.None] -func (s *groups) TODO(lists []string, opts ...option) []GroupsScope { +// If any slice is empty, it defaults to [selectors.None] +func (s *groups) Channel(channel string) []GroupsScope { + return []GroupsScope{ + makeInfoScope[GroupsScope]( + GroupsChannel, + GroupsInfoChannel, + []string{channel}, + filters.Equal), + } +} + +// ChannelMessages produces one or more Groups channel message scopes. +// If any slice contains selectors.Any, that slice is reduced to [selectors.Any] +// If any slice contains selectors.None, that slice is reduced to [selectors.None] +// If any slice is empty, it defaults to [selectors.None] +func (s *groups) ChannelMessages(channels, messages []string, opts ...option) []GroupsScope { var ( scopes = []GroupsScope{} os = append([]option{pathComparator()}, opts...) ) - scopes = append(scopes, makeScope[GroupsScope](GroupsTODOContainer, lists, os...)) - - return scopes -} - -// ListTODOItemsItems produces one or more Groups TODO item scopes. -// If any slice contains selectors.Any, that slice is reduced to [selectors.Any] -// If any slice contains selectors.None, that slice is reduced to [selectors.None] -// If any slice is empty, it defaults to [selectors.None] -// options are only applied to the list scopes. -func (s *groups) TODOItems(lists, items []string, opts ...option) []GroupsScope { - scopes := []GroupsScope{} - scopes = append( scopes, - makeScope[GroupsScope](GroupsTODOItem, items, defaultItemOptions(s.Cfg)...). - set(GroupsTODOContainer, lists, opts...)) + makeScope[GroupsScope](GroupsChannelMessage, messages, os...).
+ set(GroupsChannel, channels, opts...)) return scopes } @@ -270,21 +275,22 @@ const ( GroupsCategoryUnknown groupsCategory = "" // types of data in Groups - GroupsGroup groupsCategory = "GroupsGroup" - GroupsTODOContainer groupsCategory = "GroupsTODOContainer" - GroupsTODOItem groupsCategory = "GroupsTODOItem" + GroupsGroup groupsCategory = "GroupsGroup" + GroupsChannel groupsCategory = "GroupsChannel" + GroupsChannelMessage groupsCategory = "GroupsChannelMessage" // details.itemInfo comparables - // library drive selection + // channel drive selection GroupsInfoSiteLibraryDrive groupsCategory = "GroupsInfoSiteLibraryDrive" + GroupsInfoChannel groupsCategory = "GroupsInfoChannel" ) // groupsLeafProperties describes common metadata of the leaf categories var groupsLeafProperties = map[categorizer]leafProperty{ - GroupsTODOItem: { // the root category must be represented, even though it isn't a leaf - pathKeys: []categorizer{GroupsTODOContainer, GroupsTODOItem}, - pathType: path.UnknownCategory, + GroupsChannelMessage: { // the root category must be represented, even though it isn't a leaf + pathKeys: []categorizer{GroupsChannel, GroupsChannelMessage}, + pathType: path.ChannelMessagesCategory, }, GroupsGroup: { // the root category must be represented, even though it isn't a leaf pathKeys: []categorizer{GroupsGroup}, @@ -303,8 +309,10 @@ func (c groupsCategory) String() string { // Ex: ServiceUser.leafCat() => ServiceUser func (c groupsCategory) leafCat() categorizer { switch c { - case GroupsTODOContainer, GroupsInfoSiteLibraryDrive: - return GroupsTODOItem + // TODO: if channels ever contain more than one type of item, + // we'll need to fix this up. + case GroupsChannel, GroupsChannelMessage, GroupsInfoSiteLibraryDrive: + return GroupsChannelMessage } return c @@ -348,12 +356,12 @@ func (c groupsCategory) pathValues( ) switch c { - case GroupsTODOContainer, GroupsTODOItem: + case GroupsChannel, GroupsChannelMessage: if ent.Groups == nil { return nil, clues.New("no Groups ItemInfo in details") } - folderCat, itemCat = GroupsTODOContainer, GroupsTODOItem + folderCat, itemCat = GroupsChannel, GroupsChannelMessage rFld = ent.Groups.ParentPath default: @@ -451,7 +459,7 @@ func (s GroupsScope) set(cat groupsCategory, v []string, opts ...option) GroupsS os := []option{} switch cat { - case GroupsTODOContainer: + case GroupsChannel: os = append(os, pathComparator()) } @@ -462,10 +470,10 @@ func (s GroupsScope) set(cat groupsCategory, v []string, opts ...option) GroupsS func (s GroupsScope) setDefaults() { switch s.Category() { case GroupsGroup: - s[GroupsTODOContainer.String()] = passAny - s[GroupsTODOItem.String()] = passAny - case GroupsTODOContainer: - s[GroupsTODOItem.String()] = passAny + s[GroupsChannel.String()] = passAny + s[GroupsChannelMessage.String()] = passAny + case GroupsChannel: + s[GroupsChannelMessage.String()] = passAny } } @@ -485,7 +493,7 @@ func (s groups) Reduce( deets, s.Selector, map[path.CategoryType]groupsCategory{ - path.UnknownCategory: GroupsTODOItem, + path.ChannelMessagesCategory: GroupsChannelMessage, }, errs) } @@ -516,6 +524,9 @@ func (s GroupsScope) matchesInfo(dii details.ItemInfo) bool { } return matchesAny(s, GroupsInfoSiteLibraryDrive, ds) + case GroupsInfoChannel: + ds := Any() + return matchesAny(s, GroupsInfoChannel, ds) } return s.Matches(infoCat, i) diff --git a/src/pkg/selectors/groups_test.go b/src/pkg/selectors/groups_test.go new file mode 100644 index 000000000..a0912a144 --- /dev/null +++ b/src/pkg/selectors/groups_test.go @@ -0,0 +1,421 @@ 
+package selectors + +import ( + "testing" + + "github.com/alcionai/clues" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + "github.com/alcionai/corso/src/internal/tester" + "github.com/alcionai/corso/src/pkg/backup/details" + "github.com/alcionai/corso/src/pkg/path" +) + +type GroupsSelectorSuite struct { + tester.Suite +} + +func TestGroupsSelectorSuite(t *testing.T) { + suite.Run(t, &GroupsSelectorSuite{Suite: tester.NewUnitSuite(t)}) +} + +func (suite *GroupsSelectorSuite) TestNewGroupsBackup() { + t := suite.T() + ob := NewGroupsBackup(nil) + assert.Equal(t, ob.Service, ServiceGroups) + assert.NotZero(t, ob.Scopes()) +} + +func (suite *GroupsSelectorSuite) TestToGroupsBackup() { + t := suite.T() + ob := NewGroupsBackup(nil) + s := ob.Selector + ob, err := s.ToGroupsBackup() + require.NoError(t, err, clues.ToCore(err)) + assert.Equal(t, ob.Service, ServiceGroups) + assert.NotZero(t, ob.Scopes()) +} + +func (suite *GroupsSelectorSuite) TestNewGroupsRestore() { + t := suite.T() + or := NewGroupsRestore(nil) + assert.Equal(t, or.Service, ServiceGroups) + assert.NotZero(t, or.Scopes()) +} + +func (suite *GroupsSelectorSuite) TestToGroupsRestore() { + t := suite.T() + eb := NewGroupsRestore(nil) + s := eb.Selector + or, err := s.ToGroupsRestore() + require.NoError(t, err, clues.ToCore(err)) + assert.Equal(t, or.Service, ServiceGroups) + assert.NotZero(t, or.Scopes()) +} + +// TODO(rkeepers): implement +// func (suite *GroupsSelectorSuite) TestGroupsRestore_Reduce() { +// toRR := func(cat path.CategoryType, siteID string, folders []string, item string) string { +// folderElems := make([]string, 0, len(folders)) + +// for _, f := range folders { +// folderElems = append(folderElems, f+".d") +// } + +// return stubRepoRef( +// path.GroupsService, +// cat, +// siteID, +// strings.Join(folderElems, "/"), +// item) +// } + +// var ( +// prefixElems = []string{ +// odConsts.DrivesPathDir, +// "drive!id", +// odConsts.RootPathDir, +// } +// itemElems1 = []string{"folderA", "folderB"} +// itemElems2 = []string{"folderA", "folderC"} +// itemElems3 = []string{"folderD", "folderE"} +// pairAC = "folderA/folderC" +// pairGH = "folderG/folderH" +// item = toRR( +// path.LibrariesCategory, +// "sid", +// append(slices.Clone(prefixElems), itemElems1...), +// "item") +// item2 = toRR( +// path.LibrariesCategory, +// "sid", +// append(slices.Clone(prefixElems), itemElems2...), +// "item2") +// item3 = toRR( +// path.LibrariesCategory, +// "sid", +// append(slices.Clone(prefixElems), itemElems3...), +// "item3") +// item4 = stubRepoRef(path.GroupsService, path.PagesCategory, "sid", pairGH, "item4") +// item5 = stubRepoRef(path.GroupsService, path.PagesCategory, "sid", pairGH, "item5") +// ) + +// deets := &details.Details{ +// DetailsModel: details.DetailsModel{ +// Entries: []details.Entry{ +// { +// RepoRef: item, +// ItemRef: "item", +// LocationRef: strings.Join(append([]string{odConsts.RootPathDir}, itemElems1...), "/"), +// ItemInfo: details.ItemInfo{ +// Groups: &details.GroupsInfo{ +// ItemType: details.GroupsLibrary, +// ItemName: "itemName", +// ParentPath: strings.Join(itemElems1, "/"), +// }, +// }, +// }, +// { +// RepoRef: item2, +// LocationRef: strings.Join(append([]string{odConsts.RootPathDir}, itemElems2...), "/"), +// // ItemRef intentionally blank to test fallback case +// ItemInfo: details.ItemInfo{ +// Groups: &details.GroupsInfo{ +// ItemType: details.GroupsLibrary, +// ItemName: "itemName2", +// ParentPath: 
strings.Join(itemElems2, "/"), +// }, +// }, +// }, +// { +// RepoRef: item3, +// ItemRef: "item3", +// LocationRef: strings.Join(append([]string{odConsts.RootPathDir}, itemElems3...), "/"), +// ItemInfo: details.ItemInfo{ +// Groups: &details.GroupsInfo{ +// ItemType: details.GroupsLibrary, +// ItemName: "itemName3", +// ParentPath: strings.Join(itemElems3, "/"), +// }, +// }, +// }, +// { +// RepoRef: item4, +// LocationRef: pairGH, +// ItemRef: "item4", +// ItemInfo: details.ItemInfo{ +// Groups: &details.GroupsInfo{ +// ItemType: details.GroupsPage, +// ItemName: "itemName4", +// ParentPath: pairGH, +// }, +// }, +// }, +// { +// RepoRef: item5, +// LocationRef: pairGH, +// // ItemRef intentionally blank to test fallback case +// ItemInfo: details.ItemInfo{ +// Groups: &details.GroupsInfo{ +// ItemType: details.GroupsPage, +// ItemName: "itemName5", +// ParentPath: pairGH, +// }, +// }, +// }, +// }, +// }, +// } + +// arr := func(s ...string) []string { +// return s +// } + +// table := []struct { +// name string +// makeSelector func() *GroupsRestore +// expect []string +// cfg Config +// }{ +// { +// name: "all", +// makeSelector: func() *GroupsRestore { +// odr := NewGroupsRestore(Any()) +// odr.Include(odr.AllData()) +// return odr +// }, +// expect: arr(item, item2, item3, item4, item5), +// }, +// { +// name: "only match item", +// makeSelector: func() *GroupsRestore { +// odr := NewGroupsRestore(Any()) +// odr.Include(odr.LibraryItems(Any(), []string{"item2"})) +// return odr +// }, +// expect: arr(item2), +// }, +// { +// name: "id doesn't match name", +// makeSelector: func() *GroupsRestore { +// odr := NewGroupsRestore(Any()) +// odr.Include(odr.LibraryItems(Any(), []string{"item2"})) +// return odr +// }, +// expect: []string{}, +// cfg: Config{OnlyMatchItemNames: true}, +// }, +// { +// name: "only match item name", +// makeSelector: func() *GroupsRestore { +// odr := NewGroupsRestore(Any()) +// odr.Include(odr.LibraryItems(Any(), []string{"itemName2"})) +// return odr +// }, +// expect: arr(item2), +// cfg: Config{OnlyMatchItemNames: true}, +// }, +// { +// name: "name doesn't match", +// makeSelector: func() *GroupsRestore { +// odr := NewGroupsRestore(Any()) +// odr.Include(odr.LibraryItems(Any(), []string{"itemName2"})) +// return odr +// }, +// expect: []string{}, +// }, +// { +// name: "only match folder", +// makeSelector: func() *GroupsRestore { +// odr := NewGroupsRestore([]string{"sid"}) +// odr.Include(odr.LibraryFolders([]string{"folderA/folderB", pairAC})) +// return odr +// }, +// expect: arr(item, item2), +// }, +// { +// name: "pages match folder", +// makeSelector: func() *GroupsRestore { +// odr := NewGroupsRestore([]string{"sid"}) +// odr.Include(odr.Pages([]string{pairGH, pairAC})) +// return odr +// }, +// expect: arr(item4, item5), +// }, +// } +// for _, test := range table { +// suite.Run(test.name, func() { +// t := suite.T() + +// ctx, flush := tester.NewContext(t) +// defer flush() + +// sel := test.makeSelector() +// sel.Configure(test.cfg) +// results := sel.Reduce(ctx, deets, fault.New(true)) +// paths := results.Paths() +// assert.Equal(t, test.expect, paths) +// }) +// } +// } + +func (suite *GroupsSelectorSuite) TestGroupsCategory_PathValues() { + var ( + itemName = "item" + itemID = "item-id" + shortRef = "short" + elems = []string{itemID} + ) + + table := []struct { + name string + sc groupsCategory + pathElems []string + locRef string + parentPath string + expected map[categorizer][]string + cfg Config + }{ + { + name: "Groups Channel 
Messages", + sc: GroupsChannelMessage, + pathElems: elems, + locRef: "", + expected: map[categorizer][]string{ + GroupsChannel: {""}, + GroupsChannelMessage: {itemID, shortRef}, + }, + cfg: Config{}, + }, + } + + for _, test := range table { + suite.Run(test.name, func() { + t := suite.T() + + itemPath, err := path.Build( + "tenant", + "site", + path.GroupsService, + test.sc.PathType(), + true, + test.pathElems...) + require.NoError(t, err, clues.ToCore(err)) + + ent := details.Entry{ + RepoRef: itemPath.String(), + ShortRef: shortRef, + ItemRef: itemPath.Item(), + LocationRef: test.locRef, + ItemInfo: details.ItemInfo{ + Groups: &details.GroupsInfo{ + ItemName: itemName, + ParentPath: test.parentPath, + }, + }, + } + + pv, err := test.sc.pathValues(itemPath, ent, test.cfg) + require.NoError(t, err) + assert.Equal(t, test.expected, pv) + }) + } +} + +// TODO(abin): implement +// func (suite *GroupsSelectorSuite) TestGroupsScope_MatchesInfo() { +// var ( +// sel = NewGroupsRestore(Any()) +// host = "www.website.com" +// pth = "/foo" +// url = host + pth +// epoch = time.Time{} +// now = time.Now() +// modification = now.Add(15 * time.Minute) +// future = now.Add(45 * time.Minute) +// ) + +// table := []struct { +// name string +// infoURL string +// scope []GroupsScope +// expect assert.BoolAssertionFunc +// }{ +// {"host match", host, sel.WebURL([]string{host}), assert.True}, +// {"url match", url, sel.WebURL([]string{url}), assert.True}, +// {"host suffixes host", host, sel.WebURL([]string{host}, SuffixMatch()), assert.True}, +// {"url does not suffix host", url, sel.WebURL([]string{host}, SuffixMatch()), assert.False}, +// {"url has path suffix", url, sel.WebURL([]string{pth}, SuffixMatch()), assert.True}, +// {"host does not contain substring", host, sel.WebURL([]string{"website"}), assert.False}, +// {"url does not suffix substring", url, sel.WebURL([]string{"oo"}, SuffixMatch()), assert.False}, +// {"host mismatch", host, sel.WebURL([]string{"www.google.com"}), assert.False}, +// {"file create after the epoch", host, sel.CreatedAfter(dttm.Format(epoch)), assert.True}, +// {"file create after now", host, sel.CreatedAfter(dttm.Format(now)), assert.False}, +// {"file create after later", url, sel.CreatedAfter(dttm.Format(future)), assert.False}, +// {"file create before future", host, sel.CreatedBefore(dttm.Format(future)), assert.True}, +// {"file create before now", host, sel.CreatedBefore(dttm.Format(now)), assert.False}, +// {"file create before modification", host, sel.CreatedBefore(dttm.Format(modification)), assert.True}, +// {"file create before epoch", host, sel.CreatedBefore(dttm.Format(now)), assert.False}, +// {"file modified after the epoch", host, sel.ModifiedAfter(dttm.Format(epoch)), assert.True}, +// {"file modified after now", host, sel.ModifiedAfter(dttm.Format(now)), assert.True}, +// {"file modified after later", host, sel.ModifiedAfter(dttm.Format(future)), assert.False}, +// {"file modified before future", host, sel.ModifiedBefore(dttm.Format(future)), assert.True}, +// {"file modified before now", host, sel.ModifiedBefore(dttm.Format(now)), assert.False}, +// {"file modified before epoch", host, sel.ModifiedBefore(dttm.Format(now)), assert.False}, +// {"in library", host, sel.Library("included-library"), assert.True}, +// {"not in library", host, sel.Library("not-included-library"), assert.False}, +// {"library id", host, sel.Library("1234"), assert.True}, +// {"not library id", host, sel.Library("abcd"), assert.False}, +// } +// for _, test := range table { +// 
suite.Run(test.name, func() { +// t := suite.T() + +// itemInfo := details.ItemInfo{ +// Groups: &details.GroupsInfo{ +// ItemType: details.GroupsPage, +// WebURL: test.infoURL, +// Created: now, +// Modified: modification, +// DriveName: "included-library", +// DriveID: "1234", +// }, +// } + +// scopes := setScopesToDefault(test.scope) +// for _, scope := range scopes { +// test.expect(t, scope.matchesInfo(itemInfo)) +// } +// }) +// } +// } + +func (suite *GroupsSelectorSuite) TestCategory_PathType() { + table := []struct { + cat groupsCategory + pathType path.CategoryType + }{ + { + cat: GroupsCategoryUnknown, + pathType: path.UnknownCategory, + }, + { + cat: GroupsChannel, + pathType: path.ChannelMessagesCategory, + }, + { + cat: GroupsChannelMessage, + pathType: path.ChannelMessagesCategory, + }, + } + for _, test := range table { + suite.Run(test.cat.String(), func() { + assert.Equal( + suite.T(), + test.pathType.String(), + test.cat.PathType().String()) + }) + } +} From 6963f63f4ff268e572505e072dbe361e444e9bd7 Mon Sep 17 00:00:00 2001 From: ashmrtn <3891298+ashmrtn@users.noreply.github.com> Date: Mon, 21 Aug 2023 15:40:47 -0700 Subject: [PATCH 13/32] Basic code for backup cleanup (#4051) Starting code for removing item data snapshots, backups, and backup details that have been orphaned. Data can become orphaned through either incomplete backup delete operations (older corso versions) or because backups didn't complete successfully This code doesn't cover all cases (see TODOs in PR) but gets a lot of the boiler-plate that will be required. Future PRs will build on what's in here to close the gaps This code is not wired into any corso operations so it cannot be run outside of unit tests --- #### Does this PR need a docs update or release note? 
- [ ] :white_check_mark: Yes, it's included - [x] :clock1: Yes, but in a later PR - [ ] :no_entry: No #### Type of change - [x] :sunflower: Feature - [ ] :bug: Bugfix - [ ] :world_map: Documentation - [ ] :robot: Supportability/Tests - [ ] :computer: CI/Deployment - [ ] :broom: Tech Debt/Cleanup #### Issue(s) * #3217 #### Test Plan - [x] :muscle: Manual - [ ] :zap: Unit test - [ ] :green_heart: E2E --- src/internal/kopia/base_finder.go | 8 - src/internal/kopia/cleanup_backups.go | 156 ++++++++ src/internal/kopia/cleanup_backups_test.go | 433 +++++++++++++++++++++ src/internal/kopia/conn.go | 23 +- 4 files changed, 609 insertions(+), 11 deletions(-) create mode 100644 src/internal/kopia/cleanup_backups.go create mode 100644 src/internal/kopia/cleanup_backups_test.go diff --git a/src/internal/kopia/base_finder.go b/src/internal/kopia/base_finder.go index 83f4009c4..81082ded6 100644 --- a/src/internal/kopia/base_finder.go +++ b/src/internal/kopia/base_finder.go @@ -115,14 +115,6 @@ func (me ManifestEntry) GetTag(key string) (string, bool) { return v, ok } -type snapshotManager interface { - FindManifests( - ctx context.Context, - tags map[string]string, - ) ([]*manifest.EntryMetadata, error) - LoadSnapshot(ctx context.Context, id manifest.ID) (*snapshot.Manifest, error) -} - func serviceCatString(s path.ServiceType, c path.CategoryType) string { return s.String() + c.String() } diff --git a/src/internal/kopia/cleanup_backups.go b/src/internal/kopia/cleanup_backups.go new file mode 100644 index 000000000..b431b7a91 --- /dev/null +++ b/src/internal/kopia/cleanup_backups.go @@ -0,0 +1,156 @@ +package kopia + +import ( + "context" + "errors" + + "github.com/alcionai/clues" + "github.com/kopia/kopia/repo/manifest" + "github.com/kopia/kopia/snapshot" + "golang.org/x/exp/maps" + + "github.com/alcionai/corso/src/internal/data" + "github.com/alcionai/corso/src/internal/model" + "github.com/alcionai/corso/src/pkg/backup" + "github.com/alcionai/corso/src/pkg/logger" + "github.com/alcionai/corso/src/pkg/store" +) + +func cleanupOrphanedData( + ctx context.Context, + bs store.Storer, + mf manifestFinder, +) error { + // Get all snapshot manifests. + snaps, err := mf.FindManifests( + ctx, + map[string]string{ + manifest.TypeLabelKey: snapshot.ManifestType, + }) + if err != nil { + return clues.Wrap(err, "getting snapshots") + } + + var ( + // deets is a hash set of the ModelStoreID or snapshot IDs for backup + // details. It contains the IDs for both legacy details stored in the model + // store and newer details stored as a snapshot because it doesn't matter + // what the storage format is. We only need to know the ID so we can: + // 1. check if there's a corresponding backup for them + // 2. delete the details if they're orphaned + deets = map[manifest.ID]struct{}{} + // dataSnaps is a hash set of the snapshot IDs for item data snapshots. + dataSnaps = map[manifest.ID]struct{}{} + ) + + // TODO(ashmrtn): Exclude all snapshots and details younger than X . + // Doing so adds some buffer so that even if this is run concurrently with a + // backup it's not likely to delete models just being created. For example, + // running this when another corso instance has created an item data snapshot + // but hasn't yet created the details snapshot or the backup model would + // result in this instance of corso marking the newly created item data + // snapshot for deletion because it appears orphaned. 
+ // + // Excluding only snapshots and details models works for now since the backup + // model is the last thing persisted out of them. If we switch the order of + // persistence then this will need updated as well. + // + // The buffer duration should be longer than the time it would take to do + // details merging and backup model creation. We don't have hard numbers on + // that, but it should be faster than creating the snapshot itself and + // probably happens O(minutes) or O(hours) instead of O(days). Of course, that + // assumes a non-adversarial setup where things such as machine hiberation, + // process freezing (i.e. paused at the OS level), etc. don't occur. + + // Sort all the snapshots as either details snapshots or item data snapshots. + for _, snap := range snaps { + k, _ := makeTagKV(TagBackupCategory) + if _, ok := snap.Labels[k]; ok { + dataSnaps[snap.ID] = struct{}{} + continue + } + + deets[snap.ID] = struct{}{} + } + + // Get all legacy backup details models. The initial version of backup delete + // didn't seem to delete them so they may also be orphaned if the repo is old + // enough. + deetsModels, err := bs.GetIDsForType(ctx, model.BackupDetailsSchema, nil) + if err != nil { + return clues.Wrap(err, "getting legacy backup details") + } + + for _, d := range deetsModels { + deets[d.ModelStoreID] = struct{}{} + } + + // Get all backup models. + bups, err := bs.GetIDsForType(ctx, model.BackupSchema, nil) + if err != nil { + return clues.Wrap(err, "getting all backup models") + } + + toDelete := maps.Clone(deets) + maps.Copy(toDelete, dataSnaps) + + for _, bup := range bups { + toDelete[manifest.ID(bup.ModelStoreID)] = struct{}{} + + bm := backup.Backup{} + + if err := bs.GetWithModelStoreID( + ctx, + model.BackupSchema, + bup.ModelStoreID, + &bm, + ); err != nil { + if !errors.Is(err, data.ErrNotFound) { + return clues.Wrap(err, "getting backup model"). + With("search_backup_id", bup.ID) + } + + // TODO(ashmrtn): This actually needs revised, see above TODO. Leaving it + // here for the moment to get the basic logic in. + // + // Safe to continue if the model wasn't found because that means that the + // possible item data and details for the backup are now orphaned. They'll + // be deleted since we won't remove them from the delete set. + // + // This isn't expected to really pop up, but it's possible if this + // function is run concurrently with either a backup delete or another + // instance of this function. + logger.Ctx(ctx).Debugw( + "backup model not found", + "search_backup_id", bup.ModelStoreID) + + continue + } + + ssid := bm.StreamStoreID + if len(ssid) == 0 { + ssid = bm.DetailsID + } + + _, dataOK := dataSnaps[manifest.ID(bm.SnapshotID)] + _, deetsOK := deets[manifest.ID(ssid)] + + // All data is present, we shouldn't garbage collect this backup. + if deetsOK && dataOK { + delete(toDelete, bup.ModelStoreID) + delete(toDelete, manifest.ID(bm.SnapshotID)) + delete(toDelete, manifest.ID(ssid)) + } + } + + // Use single atomic batch delete operation to cleanup to keep from making a + // bunch of manifest content blobs. + if err := bs.DeleteWithModelStoreIDs(ctx, maps.Keys(toDelete)...); err != nil { + return clues.Wrap(err, "deleting orphaned data") + } + + // TODO(ashmrtn): Do some pruning of assist backup models so we don't keep + // them around forever. 
+ + return nil +} diff --git a/src/internal/kopia/cleanup_backups_test.go b/src/internal/kopia/cleanup_backups_test.go new file mode 100644 index 000000000..78bc6a164 --- /dev/null +++ b/src/internal/kopia/cleanup_backups_test.go @@ -0,0 +1,433 @@ +package kopia + +import ( + "context" + "fmt" + "testing" + + "github.com/alcionai/clues" + "github.com/kopia/kopia/repo/manifest" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" + + "github.com/alcionai/corso/src/internal/data" + "github.com/alcionai/corso/src/internal/model" + "github.com/alcionai/corso/src/internal/tester" + "github.com/alcionai/corso/src/pkg/backup" +) + +type BackupCleanupUnitSuite struct { + tester.Suite +} + +func TestBackupCleanupUnitSuite(t *testing.T) { + suite.Run(t, &BackupCleanupUnitSuite{Suite: tester.NewUnitSuite(t)}) +} + +type mockManifestFinder struct { + t *testing.T + manifests []*manifest.EntryMetadata + err error +} + +func (mmf mockManifestFinder) FindManifests( + ctx context.Context, + tags map[string]string, +) ([]*manifest.EntryMetadata, error) { + assert.Equal( + mmf.t, + map[string]string{"type": "snapshot"}, + tags, + "snapshot search tags") + + return mmf.manifests, clues.Stack(mmf.err).OrNil() +} + +type mockStorer struct { + t *testing.T + + details []*model.BaseModel + detailsErr error + + backups []backupRes + backupListErr error + + expectDeleteIDs []manifest.ID + deleteErr error +} + +func (ms mockStorer) Delete(context.Context, model.Schema, model.StableID) error { + return clues.New("not implemented") +} + +func (ms mockStorer) Get(context.Context, model.Schema, model.StableID, model.Model) error { + return clues.New("not implemented") +} + +func (ms mockStorer) Put(context.Context, model.Schema, model.Model) error { + return clues.New("not implemented") +} + +func (ms mockStorer) Update(context.Context, model.Schema, model.Model) error { + return clues.New("not implemented") +} + +func (ms mockStorer) GetIDsForType( + _ context.Context, + s model.Schema, + tags map[string]string, +) ([]*model.BaseModel, error) { + assert.Empty(ms.t, tags, "model search tags") + + switch s { + case model.BackupDetailsSchema: + return ms.details, clues.Stack(ms.detailsErr).OrNil() + + case model.BackupSchema: + var bases []*model.BaseModel + + for _, b := range ms.backups { + bases = append(bases, &b.bup.BaseModel) + } + + return bases, clues.Stack(ms.backupListErr).OrNil() + } + + return nil, clues.New(fmt.Sprintf("unknown type: %s", s.String())) +} + +func (ms mockStorer) GetWithModelStoreID( + _ context.Context, + s model.Schema, + id manifest.ID, + m model.Model, +) error { + assert.Equal(ms.t, model.BackupSchema, s, "model get schema") + + d := m.(*backup.Backup) + + for _, b := range ms.backups { + if id == b.bup.ModelStoreID { + *d = *b.bup + return clues.Stack(b.err).OrNil() + } + } + + return clues.Stack(data.ErrNotFound) +} + +func (ms mockStorer) DeleteWithModelStoreIDs( + _ context.Context, + ids ...manifest.ID, +) error { + assert.ElementsMatch(ms.t, ms.expectDeleteIDs, ids, "model delete IDs") + return clues.Stack(ms.deleteErr).OrNil() +} + +// backupRes represents an individual return value for an item in GetIDsForType +// or the result of GetWithModelStoreID. err is used for GetWithModelStoreID +// only. +type backupRes struct { + bup *backup.Backup + err error +} + +func (suite *BackupCleanupUnitSuite) TestCleanupOrphanedData() { + backupTag, _ := makeTagKV(TagBackupCategory) + + // Current backup and snapshots. 
+ bupCurrent := &backup.Backup{ + BaseModel: model.BaseModel{ + ID: model.StableID("current-bup-id"), + ModelStoreID: manifest.ID("current-bup-msid"), + }, + SnapshotID: "current-snap-msid", + StreamStoreID: "current-deets-msid", + } + + snapCurrent := &manifest.EntryMetadata{ + ID: "current-snap-msid", + Labels: map[string]string{ + backupTag: "0", + }, + } + + deetsCurrent := &manifest.EntryMetadata{ + ID: "current-deets-msid", + } + + // Legacy backup with details in separate model. + bupLegacy := &backup.Backup{ + BaseModel: model.BaseModel{ + ID: model.StableID("legacy-bup-id"), + ModelStoreID: manifest.ID("legacy-bup-msid"), + }, + SnapshotID: "legacy-snap-msid", + DetailsID: "legacy-deets-msid", + } + + snapLegacy := &manifest.EntryMetadata{ + ID: "legacy-snap-msid", + Labels: map[string]string{ + backupTag: "0", + }, + } + + deetsLegacy := &model.BaseModel{ + ID: "legacy-deets-id", + ModelStoreID: "legacy-deets-msid", + } + + // Incomplete backup missing data snapshot. + bupNoSnapshot := &backup.Backup{ + BaseModel: model.BaseModel{ + ID: model.StableID("ns-bup-id"), + ModelStoreID: manifest.ID("ns-bup-id-msid"), + }, + StreamStoreID: "ns-deets-msid", + } + + deetsNoSnapshot := &manifest.EntryMetadata{ + ID: "ns-deets-msid", + } + + // Legacy incomplete backup missing data snapshot. + bupLegacyNoSnapshot := &backup.Backup{ + BaseModel: model.BaseModel{ + ID: model.StableID("ns-legacy-bup-id"), + ModelStoreID: manifest.ID("ns-legacy-bup-id-msid"), + }, + DetailsID: "ns-legacy-deets-msid", + } + + deetsLegacyNoSnapshot := &model.BaseModel{ + ID: "ns-legacy-deets-id", + ModelStoreID: "ns-legacy-deets-msid", + } + + // Incomplete backup missing details. + bupNoDetails := &backup.Backup{ + BaseModel: model.BaseModel{ + ID: model.StableID("nssid-bup-id"), + ModelStoreID: manifest.ID("nssid-bup-msid"), + }, + SnapshotID: "nssid-snap-msid", + } + + snapNoDetails := &manifest.EntryMetadata{ + ID: "nssid-snap-msid", + Labels: map[string]string{ + backupTag: "0", + }, + } + + table := []struct { + name string + snapshots []*manifest.EntryMetadata + snapshotFetchErr error + // only need BaseModel here since we never look inside the details items. 
+ detailsModels []*model.BaseModel + detailsModelListErr error + backups []backupRes + backupListErr error + deleteErr error + + expectDeleteIDs []manifest.ID + expectErr assert.ErrorAssertionFunc + }{ + { + name: "EmptyRepo", + expectErr: assert.NoError, + }, + { + name: "OnlyCompleteBackups Noops", + snapshots: []*manifest.EntryMetadata{ + snapCurrent, + deetsCurrent, + snapLegacy, + }, + detailsModels: []*model.BaseModel{ + deetsLegacy, + }, + backups: []backupRes{ + {bup: bupCurrent}, + {bup: bupLegacy}, + }, + expectErr: assert.NoError, + }, + { + name: "MissingFieldsInBackup CausesCleanup", + snapshots: []*manifest.EntryMetadata{ + snapNoDetails, + deetsNoSnapshot, + }, + detailsModels: []*model.BaseModel{ + deetsLegacyNoSnapshot, + }, + backups: []backupRes{ + {bup: bupNoSnapshot}, + {bup: bupLegacyNoSnapshot}, + {bup: bupNoDetails}, + }, + expectDeleteIDs: []manifest.ID{ + manifest.ID(bupNoSnapshot.ModelStoreID), + manifest.ID(bupLegacyNoSnapshot.ModelStoreID), + manifest.ID(bupNoDetails.ModelStoreID), + manifest.ID(deetsLegacyNoSnapshot.ModelStoreID), + snapNoDetails.ID, + deetsNoSnapshot.ID, + }, + expectErr: assert.NoError, + }, + { + name: "MissingSnapshot CausesCleanup", + snapshots: []*manifest.EntryMetadata{ + deetsCurrent, + }, + detailsModels: []*model.BaseModel{ + deetsLegacy, + }, + backups: []backupRes{ + {bup: bupCurrent}, + {bup: bupLegacy}, + }, + expectDeleteIDs: []manifest.ID{ + manifest.ID(bupCurrent.ModelStoreID), + deetsCurrent.ID, + manifest.ID(bupLegacy.ModelStoreID), + manifest.ID(deetsLegacy.ModelStoreID), + }, + expectErr: assert.NoError, + }, + { + name: "MissingDetails CausesCleanup", + snapshots: []*manifest.EntryMetadata{ + snapCurrent, + snapLegacy, + }, + backups: []backupRes{ + {bup: bupCurrent}, + {bup: bupLegacy}, + }, + expectDeleteIDs: []manifest.ID{ + manifest.ID(bupCurrent.ModelStoreID), + manifest.ID(bupLegacy.ModelStoreID), + snapCurrent.ID, + snapLegacy.ID, + }, + expectErr: assert.NoError, + }, + { + name: "SnapshotsListError Fails", + snapshotFetchErr: assert.AnError, + backups: []backupRes{ + {bup: bupCurrent}, + }, + expectErr: assert.Error, + }, + { + name: "LegacyDetailsListError Fails", + snapshots: []*manifest.EntryMetadata{ + snapCurrent, + }, + detailsModelListErr: assert.AnError, + backups: []backupRes{ + {bup: bupCurrent}, + }, + expectErr: assert.Error, + }, + { + name: "BackupIDsListError Fails", + snapshots: []*manifest.EntryMetadata{ + snapCurrent, + deetsCurrent, + }, + backupListErr: assert.AnError, + expectErr: assert.Error, + }, + { + name: "BackupModelGetErrorNotFound CausesCleanup", + snapshots: []*manifest.EntryMetadata{ + snapCurrent, + deetsCurrent, + snapLegacy, + snapNoDetails, + }, + detailsModels: []*model.BaseModel{ + deetsLegacy, + }, + backups: []backupRes{ + {bup: bupCurrent}, + { + bup: bupLegacy, + err: data.ErrNotFound, + }, + { + bup: bupNoDetails, + err: data.ErrNotFound, + }, + }, + // Backup IDs are still included in here because they're added to the + // deletion set prior to attempting to fetch models. The model store + // delete operation should ignore missing models though so there's no + // issue. 
+ expectDeleteIDs: []manifest.ID{ + snapLegacy.ID, + manifest.ID(deetsLegacy.ModelStoreID), + manifest.ID(bupLegacy.ModelStoreID), + snapNoDetails.ID, + manifest.ID(bupNoDetails.ModelStoreID), + }, + expectErr: assert.NoError, + }, + { + name: "BackupModelGetError Fails", + snapshots: []*manifest.EntryMetadata{ + snapCurrent, + deetsCurrent, + snapLegacy, + snapNoDetails, + }, + detailsModels: []*model.BaseModel{ + deetsLegacy, + }, + backups: []backupRes{ + {bup: bupCurrent}, + { + bup: bupLegacy, + err: assert.AnError, + }, + {bup: bupNoDetails}, + }, + expectErr: assert.Error, + }, + } + + for _, test := range table { + suite.Run(test.name, func() { + t := suite.T() + + ctx, flush := tester.NewContext(t) + defer flush() + + mbs := mockStorer{ + t: t, + details: test.detailsModels, + detailsErr: test.detailsModelListErr, + backups: test.backups, + backupListErr: test.backupListErr, + expectDeleteIDs: test.expectDeleteIDs, + deleteErr: test.deleteErr, + } + + mmf := mockManifestFinder{ + t: t, + manifests: test.snapshots, + err: test.snapshotFetchErr, + } + + err := cleanupOrphanedData(ctx, mbs, mmf) + test.expectErr(t, err, clues.ToCore(err)) + }) + } +} diff --git a/src/internal/kopia/conn.go b/src/internal/kopia/conn.go index 7eac9df5c..1703b466d 100644 --- a/src/internal/kopia/conn.go +++ b/src/internal/kopia/conn.go @@ -52,9 +52,26 @@ var ( } ) -type snapshotLoader interface { - SnapshotRoot(man *snapshot.Manifest) (fs.Entry, error) -} +type ( + manifestFinder interface { + FindManifests( + ctx context.Context, + tags map[string]string, + ) ([]*manifest.EntryMetadata, error) + } + + snapshotManager interface { + manifestFinder + LoadSnapshot( + ctx context.Context, + id manifest.ID, + ) (*snapshot.Manifest, error) + } + + snapshotLoader interface { + SnapshotRoot(man *snapshot.Manifest) (fs.Entry, error) + } +) var ( _ snapshotManager = &conn{} From 5808797fc64385a38059c8b07adbbbcf9f115021 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 21 Aug 2023 23:11:02 +0000 Subject: [PATCH 14/32] =?UTF-8?q?=E2=AC=86=EF=B8=8F=20Bump=20github.com/aw?= =?UTF-8?q?s/aws-sdk-go=20from=201.44.327=20to=201.44.328=20in=20/src=20(#?= =?UTF-8?q?4079)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.44.327 to 1.44.328.
Release notes

Sourced from github.com/aws/aws-sdk-go's releases.

Release v1.44.328 (2023-08-21)

Service Client Updates

  • service/cloud9: Adds new service
    • Doc only update to add Ubuntu 22.04 as an Image ID option for Cloud9
  • service/ec2: Updates service API and documentation
  • The DeleteKeyPair API has been updated to return the keyPairId when an existing key pair is deleted (see the sketch after this list).
  • service/finspace: Updates service API and documentation
  • service/rds: Updates service API, documentation, waiters, paginators, and examples
    • Adding support for RDS Aurora Global Database Unplanned Failover
  • service/route53domains: Updates service documentation
    • Fixed typos in description fields
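Among the entries above, the ec2 change is the one most visible to callers. A minimal sketch of reading the newly returned ID, assuming the `KeyPairId` field described in the note is now present on `ec2.DeleteKeyPairOutput` (the key name is a placeholder, and credentials/region come from the environment):

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	// Session picks up credentials and region from the environment.
	sess := session.Must(session.NewSession())
	svc := ec2.New(sess)

	// As of v1.44.328 the output reportedly includes the ID of the
	// deleted key pair.
	out, err := svc.DeleteKeyPair(&ec2.DeleteKeyPairInput{
		KeyName: aws.String("example-key"), // placeholder name
	})
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println("deleted key pair:", aws.StringValue(out.KeyPairId))
}
```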
--- src/go.mod | 2 +- src/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/go.mod b/src/go.mod index 88e1190fb..8438cb70f 100644 --- a/src/go.mod +++ b/src/go.mod @@ -8,7 +8,7 @@ require ( github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.1 github.com/alcionai/clues v0.0.0-20230728164842-7dc4795a43e4 github.com/armon/go-metrics v0.4.1 - github.com/aws/aws-sdk-go v1.44.327 + github.com/aws/aws-sdk-go v1.44.328 github.com/aws/aws-xray-sdk-go v1.8.1 github.com/cenkalti/backoff/v4 v4.2.1 github.com/google/uuid v1.3.1 diff --git a/src/go.sum b/src/go.sum index 0b2b5786e..b88a52f28 100644 --- a/src/go.sum +++ b/src/go.sum @@ -66,8 +66,8 @@ github.com/andybalholm/brotli v1.0.5 h1:8uQZIdzKmjc/iuPu7O2ioW48L81FgatrcpfFmiq/ github.com/andybalholm/brotli v1.0.5/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= -github.com/aws/aws-sdk-go v1.44.327 h1:ZS8oO4+7MOBLhkdwIhgtVeDzCeWOlTfKJS7EgggbIEY= -github.com/aws/aws-sdk-go v1.44.327/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go v1.44.328 h1:WBwlf8ym9SDQ/GTIBO9eXyvwappKJyOetWJKl4mT7ZU= +github.com/aws/aws-sdk-go v1.44.328/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/aws/aws-xray-sdk-go v1.8.1 h1:O4pXV+hnCskaamGsZnFpzHyAmgPGusBMN6i7nnsy0Fo= github.com/aws/aws-xray-sdk-go v1.8.1/go.mod h1:wMmVYzej3sykAttNBkXQHK/+clAPWTOrPiajEk7Cp3A= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= From 99edf7d5b0a3393dd2824b28ca24118d52d6f8ed Mon Sep 17 00:00:00 2001 From: ashmrtn <3891298+ashmrtn@users.noreply.github.com> Date: Mon, 21 Aug 2023 16:48:57 -0700 Subject: [PATCH 15/32] Add and populate mod time for BaseModel (#4065) Get the last time a model was modified and return it in BaseModel. This will help with discovering what items can be garbage collected during incomplete backup cleanup as we don't want to accidentally delete in-flight backups. --- #### Does this PR need a docs update or release note? 
- [ ] :white_check_mark: Yes, it's included - [ ] :clock1: Yes, but in a later PR - [x] :no_entry: No #### Type of change - [x] :sunflower: Feature - [ ] :bug: Bugfix - [ ] :world_map: Documentation - [ ] :robot: Supportability/Tests - [ ] :computer: CI/Deployment - [ ] :broom: Tech Debt/Cleanup #### Issue(s) * #3217 #### Test Plan - [ ] :muscle: Manual - [x] :zap: Unit test - [ ] :green_heart: E2E --- src/internal/kopia/model_store.go | 1 + src/internal/kopia/model_store_test.go | 48 +++++++++++++++++++++----- src/internal/model/model.go | 5 ++- 3 files changed, 44 insertions(+), 10 deletions(-) diff --git a/src/internal/kopia/model_store.go b/src/internal/kopia/model_store.go index 93ef6c182..b5e572844 100644 --- a/src/internal/kopia/model_store.go +++ b/src/internal/kopia/model_store.go @@ -210,6 +210,7 @@ func (ms ModelStore) populateBaseModelFromMetadata( base.ID = model.StableID(id) base.ModelVersion = v base.Tags = m.Labels + base.ModTime = m.ModTime stripHiddenTags(base.Tags) diff --git a/src/internal/kopia/model_store_test.go b/src/internal/kopia/model_store_test.go index 048817a54..0afc72a7c 100644 --- a/src/internal/kopia/model_store_test.go +++ b/src/internal/kopia/model_store_test.go @@ -4,6 +4,7 @@ import ( "context" "sync" "testing" + "time" "github.com/alcionai/clues" "github.com/google/uuid" @@ -34,6 +35,18 @@ func getModelStore(t *testing.T, ctx context.Context) *ModelStore { return &ModelStore{c: c, modelVersion: globalModelVersion} } +func assertEqualNoModTime(t *testing.T, expected, got *fooModel) { + t.Helper() + + expectedClean := *expected + gotClean := *got + + expectedClean.ModTime = time.Time{} + gotClean.ModTime = time.Time{} + + assert.Equal(t, expectedClean, gotClean) +} + // --------------- // unit tests // --------------- @@ -259,6 +272,8 @@ func (suite *ModelStoreIntegrationSuite) TestPutGet() { // Avoid some silly test errors from comparing nil to empty map. 
foo.Tags = map[string]string{} + startTime := time.Now() + err := suite.m.Put(suite.ctx, test.s, foo) test.check(t, err, clues.ToCore(err)) @@ -273,11 +288,17 @@ func (suite *ModelStoreIntegrationSuite) TestPutGet() { returned := &fooModel{} err = suite.m.Get(suite.ctx, test.s, foo.ID, returned) require.NoError(t, err, clues.ToCore(err)) - assert.Equal(t, foo, returned) + + assertEqualNoModTime(t, foo, returned) + assert.WithinDuration(t, startTime, returned.ModTime, 5*time.Second) + + returned = &fooModel{} err = suite.m.GetWithModelStoreID(suite.ctx, test.s, foo.ModelStoreID, returned) require.NoError(t, err, clues.ToCore(err)) - assert.Equal(t, foo, returned) + + assertEqualNoModTime(t, foo, returned) + assert.WithinDuration(t, startTime, returned.ModTime, 5*time.Second) }) } } @@ -324,11 +345,11 @@ func (suite *ModelStoreIntegrationSuite) TestPutGet_PreSetID() { err = suite.m.Get(suite.ctx, mdl, foo.ID, returned) require.NoError(t, err, clues.ToCore(err)) - assert.Equal(t, foo, returned) + assertEqualNoModTime(t, foo, returned) err = suite.m.GetWithModelStoreID(suite.ctx, mdl, foo.ModelStoreID, returned) require.NoError(t, err, clues.ToCore(err)) - assert.Equal(t, foo, returned) + assertEqualNoModTime(t, foo, returned) }) } } @@ -350,11 +371,11 @@ func (suite *ModelStoreIntegrationSuite) TestPutGet_WithTags() { returned := &fooModel{} err = suite.m.Get(suite.ctx, theModelType, foo.ID, returned) require.NoError(t, err, clues.ToCore(err)) - assert.Equal(t, foo, returned) + assertEqualNoModTime(t, foo, returned) err = suite.m.GetWithModelStoreID(suite.ctx, theModelType, foo.ModelStoreID, returned) require.NoError(t, err, clues.ToCore(err)) - assert.Equal(t, foo, returned) + assertEqualNoModTime(t, foo, returned) } func (suite *ModelStoreIntegrationSuite) TestGet_NotFoundErrors() { @@ -559,7 +580,16 @@ func (suite *ModelStoreIntegrationSuite) TestGetOfTypeWithTags() { ids, err := suite.m.GetIDsForType(suite.ctx, test.s, test.tags) require.NoError(t, err, clues.ToCore(err)) - assert.ElementsMatch(t, expected, ids) + cleanIDs := make([]*model.BaseModel, 0, len(ids)) + + for _, id := range ids { + id2 := *id + id2.ModTime = time.Time{} + + cleanIDs = append(cleanIDs, &id2) + } + + assert.ElementsMatch(t, expected, cleanIDs) }) } } @@ -627,7 +657,7 @@ func (suite *ModelStoreIntegrationSuite) TestPutUpdate() { err = m.GetWithModelStoreID(ctx, theModelType, foo.ModelStoreID, returned) require.NoError(t, err, clues.ToCore(err)) - assert.Equal(t, foo, returned) + assertEqualNoModTime(t, foo, returned) ids, err := m.GetIDsForType(ctx, theModelType, nil) require.NoError(t, err, clues.ToCore(err)) @@ -822,7 +852,7 @@ func (suite *ModelStoreRegressionSuite) TestFailDuringWriteSessionHasNoVisibleEf err = m.GetWithModelStoreID(ctx, theModelType, foo.ModelStoreID, returned) require.NoError(t, err, clues.ToCore(err)) - assert.Equal(t, foo, returned) + assertEqualNoModTime(t, foo, returned) } func openConnAndModelStore( diff --git a/src/internal/model/model.go b/src/internal/model/model.go index a3f25c820..fb72e3613 100644 --- a/src/internal/model/model.go +++ b/src/internal/model/model.go @@ -1,6 +1,8 @@ package model import ( + "time" + "github.com/kopia/kopia/repo/manifest" ) @@ -68,7 +70,8 @@ type BaseModel struct { // Tags associated with this model in the store to facilitate lookup. Tags in // the struct are not serialized directly into the stored model, but are part // of the metadata for the model. 
- Tags map[string]string `json:"-"` + Tags map[string]string `json:"-"` + ModTime time.Time `json:"-"` } func (bm *BaseModel) Base() *BaseModel { return bm } From 11253bf8164025bf32783b9a412f1515f42eed2f Mon Sep 17 00:00:00 2001 From: ashmrtn <3891298+ashmrtn@users.noreply.github.com> Date: Mon, 21 Aug 2023 17:39:24 -0700 Subject: [PATCH 16/32] Exclude recently created models from garbage collection (#4066) Exclude models that have been created within the buffer period from garbage collection/orphaned checks so that we don't accidentally delete models for backups that are running concurrently with the garbage collection task. --- #### Does this PR need a docs update or release note? - [ ] :white_check_mark: Yes, it's included - [x] :clock1: Yes, but in a later PR - [ ] :no_entry: No #### Type of change - [x] :sunflower: Feature - [ ] :bug: Bugfix - [ ] :world_map: Documentation - [ ] :robot: Supportability/Tests - [ ] :computer: CI/Deployment - [ ] :broom: Tech Debt/Cleanup #### Issue(s) * #3217 #### Test Plan - [ ] :muscle: Manual - [x] :zap: Unit test - [ ] :green_heart: E2E --- src/internal/kopia/cleanup_backups.go | 81 ++++++++++----- src/internal/kopia/cleanup_backups_test.go | 109 ++++++++++++++++++++- 2 files changed, 166 insertions(+), 24 deletions(-) diff --git a/src/internal/kopia/cleanup_backups.go b/src/internal/kopia/cleanup_backups.go index b431b7a91..82ae04dc4 100644 --- a/src/internal/kopia/cleanup_backups.go +++ b/src/internal/kopia/cleanup_backups.go @@ -3,6 +3,7 @@ package kopia import ( "context" "errors" + "time" "github.com/alcionai/clues" "github.com/kopia/kopia/repo/manifest" @@ -16,10 +17,37 @@ import ( "github.com/alcionai/corso/src/pkg/store" ) +// cleanupOrphanedData uses bs and mf to look up all models/snapshots for backups +// and deletes items that are older than nowFunc() - gcBuffer (the cutoff) and that are +// not "complete" backups with: +// - a backup model +// - an item data snapshot +// - a details snapshot or details model +// +// We exclude all items younger than the cutoff to add some buffer so that even +// if this is run concurrently with a backup it's not likely to delete models +// just being created. For example, if there was no buffer period and this is +// run when another corso instance has created an item data snapshot but hasn't +// yet created the details snapshot or the backup model it would result in this +// instance of corso marking the newly created item data snapshot for deletion +// because it appears orphaned. +// +// The buffer duration should be longer than the difference in creation times +// between the first item data snapshot/details/backup model made during a +// backup operation and the last. +// +// We don't have hard numbers on the time right now, but if the order of +// persistence is (item data snapshot, details snapshot, backup model) it should +// be faster than creating the snapshot itself and probably happens O(minutes) +// or O(hours) instead of O(days). Of course, that assumes a non-adversarial +// setup where things such as machine hibernation, process freezing (i.e. paused +// at the OS level), etc. don't occur. func cleanupOrphanedData( ctx context.Context, bs store.Storer, mf manifestFinder, + gcBuffer time.Duration, + nowFunc func() time.Time, ) error { // Get all snapshot manifests. snaps, err := mf.FindManifests( @@ -43,27 +71,16 @@ func cleanupOrphanedData( dataSnaps = map[manifest.ID]struct{}{} ) - // TODO(ashmrtn): Exclude all snapshots and details younger than X .
- // Doing so adds some buffer so that even if this is run concurrently with a - // backup it's not likely to delete models just being created. For example, - // running this when another corso instance has created an item data snapshot - // but hasn't yet created the details snapshot or the backup model would - // result in this instance of corso marking the newly created item data - // snapshot for deletion because it appears orphaned. - // - // Excluding only snapshots and details models works for now since the backup - // model is the last thing persisted out of them. If we switch the order of - // persistence then this will need updated as well. - // - // The buffer duration should be longer than the time it would take to do - // details merging and backup model creation. We don't have hard numbers on - // that, but it should be faster than creating the snapshot itself and - // probably happens O(minutes) or O(hours) instead of O(days). Of course, that - // assumes a non-adversarial setup where things such as machine hiberation, - // process freezing (i.e. paused at the OS level), etc. don't occur. + cutoff := nowFunc().Add(-gcBuffer) // Sort all the snapshots as either details snapshots or item data snapshots. for _, snap := range snaps { + // Don't even try to see if this needs to be garbage collected because it's not + // old enough and may correspond to an in-progress operation. + if !cutoff.After(snap.ModTime) { + continue + } + k, _ := makeTagKV(TagBackupCategory) if _, ok := snap.Labels[k]; ok { dataSnaps[snap.ID] = struct{}{} @@ -82,6 +99,12 @@ } for _, d := range deetsModels { + // Don't even try to see if this needs to be garbage collected because it's not + // old enough and may correspond to an in-progress operation. + if !cutoff.After(d.ModTime) { + continue + } + deets[d.ModelStoreID] = struct{}{} } @@ -95,6 +118,12 @@ maps.Copy(toDelete, dataSnaps) for _, bup := range bups { + // Don't even try to see if this needs to be garbage collected because it's not + // old enough and may correspond to an in-progress operation. + if !cutoff.After(bup.ModTime) { + continue + } + toDelete[manifest.ID(bup.ModelStoreID)] = struct{}{} bm := backup.Backup{} @@ -110,12 +139,13 @@ With("search_backup_id", bup.ID) } - // TODO(ashmrtn): This actually needs revised, see above TODO. Leaving it - // here for the moment to get the basic logic in. + // Probably safe to continue if the model wasn't found because that means + // that the possible item data and details for the backup are now + // orphaned. They'll be deleted since we won't remove them from the delete + // set. // - // Safe to continue if the model wasn't found because that means that the - // possible item data and details for the backup are now orphaned. They'll - // be deleted since we won't remove them from the delete set. + // The fact that we exclude all items younger than the cutoff should + // already exclude items that are from concurrent corso backup operations. // // This isn't expected to really pop up, but it's possible if this // function is run concurrently with either a backup delete or another @@ -143,6 +173,11 @@ } } + logger.Ctx(ctx).Infow( + "garbage collecting orphaned items", + "num_items", len(toDelete), + "kopia_ids", maps.Keys(toDelete)) + // Use single atomic batch delete operation to cleanup to keep from making a // bunch of manifest content blobs.
if err := bs.DeleteWithModelStoreIDs(ctx, maps.Keys(toDelete)...); err != nil { diff --git a/src/internal/kopia/cleanup_backups_test.go b/src/internal/kopia/cleanup_backups_test.go index 78bc6a164..895d9226e 100644 --- a/src/internal/kopia/cleanup_backups_test.go +++ b/src/internal/kopia/cleanup_backups_test.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "testing" + "time" "github.com/alcionai/clues" "github.com/kopia/kopia/repo/manifest" @@ -221,6 +222,28 @@ func (suite *BackupCleanupUnitSuite) TestCleanupOrphanedData() { }, } + // Get some stable time so that we can do everything relative to this in the + // tests. Mostly just makes reasoning/viewing times easier because the only + // differences will be the changes we make. + baseTime := time.Now() + + manifestWithTime := func( + mt time.Time, + m *manifest.EntryMetadata, + ) *manifest.EntryMetadata { + res := *m + res.ModTime = mt + + return &res + } + + backupWithTime := func(mt time.Time, b *backup.Backup) *backup.Backup { + res := *b + res.ModTime = mt + + return &res + } + table := []struct { name string snapshots []*manifest.EntryMetadata @@ -231,12 +254,15 @@ func (suite *BackupCleanupUnitSuite) TestCleanupOrphanedData() { backups []backupRes backupListErr error deleteErr error + time time.Time + buffer time.Duration expectDeleteIDs []manifest.ID expectErr assert.ErrorAssertionFunc }{ { name: "EmptyRepo", + time: baseTime, expectErr: assert.NoError, }, { @@ -253,6 +279,7 @@ func (suite *BackupCleanupUnitSuite) TestCleanupOrphanedData() { {bup: bupCurrent}, {bup: bupLegacy}, }, + time: baseTime, expectErr: assert.NoError, }, { @@ -277,6 +304,7 @@ func (suite *BackupCleanupUnitSuite) TestCleanupOrphanedData() { snapNoDetails.ID, deetsNoSnapshot.ID, }, + time: baseTime, expectErr: assert.NoError, }, { @@ -297,6 +325,7 @@ func (suite *BackupCleanupUnitSuite) TestCleanupOrphanedData() { manifest.ID(bupLegacy.ModelStoreID), manifest.ID(deetsLegacy.ModelStoreID), }, + time: baseTime, expectErr: assert.NoError, }, { @@ -315,6 +344,7 @@ func (suite *BackupCleanupUnitSuite) TestCleanupOrphanedData() { snapCurrent.ID, snapLegacy.ID, }, + time: baseTime, expectErr: assert.NoError, }, { @@ -334,6 +364,7 @@ func (suite *BackupCleanupUnitSuite) TestCleanupOrphanedData() { backups: []backupRes{ {bup: bupCurrent}, }, + time: baseTime, expectErr: assert.Error, }, { @@ -343,6 +374,7 @@ func (suite *BackupCleanupUnitSuite) TestCleanupOrphanedData() { deetsCurrent, }, backupListErr: assert.AnError, + time: baseTime, expectErr: assert.Error, }, { @@ -378,6 +410,7 @@ func (suite *BackupCleanupUnitSuite) TestCleanupOrphanedData() { snapNoDetails.ID, manifest.ID(bupNoDetails.ModelStoreID), }, + time: baseTime, expectErr: assert.NoError, }, { @@ -399,8 +432,77 @@ func (suite *BackupCleanupUnitSuite) TestCleanupOrphanedData() { }, {bup: bupNoDetails}, }, + time: baseTime, expectErr: assert.Error, }, + { + name: "DeleteError Fails", + snapshots: []*manifest.EntryMetadata{ + snapCurrent, + deetsCurrent, + snapLegacy, + snapNoDetails, + }, + detailsModels: []*model.BaseModel{ + deetsLegacy, + }, + backups: []backupRes{ + {bup: bupCurrent}, + {bup: bupLegacy}, + {bup: bupNoDetails}, + }, + expectDeleteIDs: []manifest.ID{ + snapNoDetails.ID, + manifest.ID(bupNoDetails.ModelStoreID), + }, + deleteErr: assert.AnError, + time: baseTime, + expectErr: assert.Error, + }, + { + name: "MissingSnapshot BarelyTooYoungForCleanup Noops", + snapshots: []*manifest.EntryMetadata{ + manifestWithTime(baseTime, deetsCurrent), + }, + backups: []backupRes{ + {bup: 
backupWithTime(baseTime, bupCurrent)}, }, time: baseTime.Add(24 * time.Hour), buffer: 24 * time.Hour, expectErr: assert.NoError, }, { name: "MissingSnapshot BarelyOldEnough CausesCleanup", snapshots: []*manifest.EntryMetadata{ manifestWithTime(baseTime, deetsCurrent), }, backups: []backupRes{ {bup: backupWithTime(baseTime, bupCurrent)}, }, expectDeleteIDs: []manifest.ID{ deetsCurrent.ID, manifest.ID(bupCurrent.ModelStoreID), }, time: baseTime.Add((24 * time.Hour) + time.Second), buffer: 24 * time.Hour, expectErr: assert.NoError, }, { name: "BackupGetErrorNotFound TooYoung Noops", snapshots: []*manifest.EntryMetadata{ manifestWithTime(baseTime, snapCurrent), manifestWithTime(baseTime, deetsCurrent), }, backups: []backupRes{ { bup: backupWithTime(baseTime, bupCurrent), err: data.ErrNotFound, }, }, time: baseTime, buffer: 24 * time.Hour, expectErr: assert.NoError, }, } for _, test := range table { suite.Run(test.name, func() { @@ -426,7 +528,12 @@ func (suite *BackupCleanupUnitSuite) TestCleanupOrphanedData() { err: test.snapshotFetchErr, } - err := cleanupOrphanedData(ctx, mbs, mmf) + err := cleanupOrphanedData( + ctx, + mbs, + mmf, + test.buffer, + func() time.Time { return test.time }) test.expectErr(t, err, clues.ToCore(err)) }) } From f45aecd5db59e6b4665e3effc067840577fc551e Mon Sep 17 00:00:00 2001 From: Abhishek Pandey Date: Tue, 22 Aug 2023 11:00:55 +0530 Subject: [PATCH 17/32] Reduce $select parameters for URL cache delta queries (#4074) This PR optimizes memory usage & cache refresh time for the URL cache. The cache only makes use of a small subset of drive item properties, namely ID, deleted, file, folder, and content download URL. We have found that reducing the number of query properties has a sizable impact on Corso memory usage. This is especially relevant for large scale backups. See the graph below for a comparison between the original and modified delta queries. Note that this uses Corso instrumentation to show both side by side in the same run. - Reading this graph: we run 3 original delta queries followed immediately by 3 modified ones. Vertical lines are delta query spans. Originally, this investigation was done to improve memory usage for scale backups, but we also found that URL cache delta query time drops by 22% with this PR. This is because we are now transferring & processing fewer bytes. ![image](https://github.com/alcionai/corso/assets/4962258/be4461db-f86c-42d4-bca1-2819aff078ce)
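To make the shape of the optimization concrete, here is a small, self-contained sketch (the default property list here is illustrative only; the canonical lists are `DriveItemSelectDefault` and the new `DriveItemSelectURLCache` in the diff below):

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

func main() {
	// Illustrative stand-in for the full default projection; the canonical
	// list lives in DriveItemSelectDefault in api/config.go.
	defaultProps := []string{
		"id", "content.downloadUrl", "createdBy", "createdDateTime", "file",
		"folder", "lastModifiedDateTime", "name", "package", "parentReference",
		"root", "sharepointIds", "size", "deleted", "malware", "shared",
	}
	// The reduced projection added by this PR (DriveItemSelectURLCache).
	urlCacheProps := []string{"id", "content.downloadUrl", "deleted", "file", "folder"}

	// Graph returns only the selected properties for every item on every
	// delta page, so a smaller $select means fewer bytes to transfer,
	// deserialize, and hold in memory.
	for _, props := range [][]string{defaultProps, urlCacheProps} {
		q := url.Values{"$select": {strings.Join(props, ",")}}
		fmt.Printf("%2d props -> ?%s\n", len(props), q.Encode())
	}
}
```

--- #### Does this PR need a docs update or release note?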
- [ ] :white_check_mark: Yes, it's included - [ ] :clock1: Yes, but in a later PR - [x] :no_entry: No #### Type of change - [ ] :sunflower: Feature - [x] :bug: Bugfix - [ ] :world_map: Documentation - [ ] :robot: Supportability/Tests - [ ] :computer: CI/Deployment - [ ] :broom: Tech Debt/Cleanup #### Issue(s) * # #### Test Plan - [ ] :muscle: Manual - [x] :zap: Unit test - [x] :green_heart: E2E --- .../m365/collection/drive/collections.go | 2 +- .../m365/collection/drive/url_cache_test.go | 23 +++++++++++++------ src/pkg/services/m365/api/config.go | 9 ++++++++ 3 files changed, 26 insertions(+), 8 deletions(-) diff --git a/src/internal/m365/collection/drive/collections.go b/src/internal/m365/collection/drive/collections.go index 6964774b8..b88de4aaa 100644 --- a/src/internal/m365/collection/drive/collections.go +++ b/src/internal/m365/collection/drive/collections.go @@ -471,7 +471,7 @@ func (c *Collections) addURLCacheToDriveCollections( driveID, prevDelta, urlCacheRefreshInterval, - c.handler.NewItemPager(driveID, "", api.DriveItemSelectDefault()), + c.handler.NewItemPager(driveID, "", api.DriveItemSelectURLCache()), errs) if err != nil { return err diff --git a/src/internal/m365/collection/drive/url_cache_test.go b/src/internal/m365/collection/drive/url_cache_test.go index f2fd257b8..68b5b8a8b 100644 --- a/src/internal/m365/collection/drive/url_cache_test.go +++ b/src/internal/m365/collection/drive/url_cache_test.go @@ -3,6 +3,7 @@ package drive import ( "context" "errors" + "io" "math/rand" "net/http" "sync" @@ -87,6 +88,7 @@ func (suite *URLCacheIntegrationSuite) TestURLCacheBasic() { newItem(newFolderName, true), control.Copy) require.NoError(t, err, clues.ToCore(err)) + require.NotNil(t, newFolder.GetId()) nfid := ptr.Val(newFolder.GetId()) @@ -109,7 +111,7 @@ func (suite *URLCacheIntegrationSuite) TestURLCacheBasic() { // Get the previous delta to feed into url cache prevDelta, _, _, err := collectItems( ctx, - suite.ac.Drives().NewDriveItemDeltaPager(driveID, "", api.DriveItemSelectDefault()), + suite.ac.Drives().NewDriveItemDeltaPager(driveID, "", api.DriveItemSelectURLCache()), suite.driveID, "drive-name", collectorFunc, @@ -131,10 +133,7 @@ func (suite *URLCacheIntegrationSuite) TestURLCacheBasic() { nfid, newItem(newItemName, false), control.Copy) - if err != nil { - // Something bad happened, skip this item - continue - } + require.NoError(t, err, clues.ToCore(err)) items = append(items, item) } @@ -176,13 +175,23 @@ func (suite *URLCacheIntegrationSuite) TestURLCacheBasic() { nil, nil) require.NoError(t, err, clues.ToCore(err)) + + require.NotNil(t, resp) + require.NotNil(t, resp.Body) + + defer func(rc io.ReadCloser) { + if rc != nil { + rc.Close() + } + }(resp.Body) + require.Equal(t, http.StatusOK, resp.StatusCode) }(i) } wg.Wait() - // Validate that <= 1 delta queries were made by url cache - require.LessOrEqual(t, uc.deltaQueryCount, 1) + // Validate that exactly 1 delta query was made by url cache + require.Equal(t, 1, uc.deltaQueryCount) } type URLCacheUnitSuite struct { diff --git a/src/pkg/services/m365/api/config.go b/src/pkg/services/m365/api/config.go index 9e2247279..a1c752686 100644 --- a/src/pkg/services/m365/api/config.go +++ b/src/pkg/services/m365/api/config.go @@ -112,3 +112,12 @@ func DriveItemSelectDefault() []string { "malware", "shared") } + +// URL cache only needs a subset of item properties +func DriveItemSelectURLCache() []string { + return idAnd( + "content.downloadUrl", + "deleted", + "file", + "folder") +} From 
0e6ef90e413555efcb6e4e114b58dab886cbf9ee Mon Sep 17 00:00:00 2001 From: Abin Simon Date: Tue, 22 Aug 2023 13:28:03 +0530 Subject: [PATCH 18/32] Create backup collections for Group's default SharePoint site (#4030) This commit has the initial rough set of changes needed to create collections from the group's default SharePoint site. This still does not have all the functionality that we need, but the idea was that we could get this in and iterate over time. --- #### Does this PR need a docs update or release note? - [ ] :white_check_mark: Yes, it's included - [x] :clock1: Yes, but in a later PR - [ ] :no_entry: No #### Type of change - [x] :sunflower: Feature - [ ] :bug: Bugfix - [ ] :world_map: Documentation - [ ] :robot: Supportability/Tests - [ ] :computer: CI/Deployment - [ ] :broom: Tech Debt/Cleanup #### Issue(s) * https://github.com/alcionai/corso/issues/3990 #### Test Plan - [ ] :muscle: Manual - [x] :zap: Unit test - [x] :green_heart: E2E --- src/internal/m365/backup.go | 21 +- src/internal/m365/backup_test.go | 82 ++++++++ .../m365/collection/drive/group_handler.go | 197 ++++++++++++++++++ src/internal/m365/collection/site/backup.go | 8 +- src/internal/m365/controller.go | 2 + src/internal/m365/resource/resource.go | 1 + src/internal/m365/service/groups/backup.go | 36 +++- .../m365/service/sharepoint/backup.go | 4 +- src/internal/tester/tconfig/config.go | 1 + src/pkg/path/builder.go | 4 + src/pkg/path/category_type.go | 2 + src/pkg/path/elements.go | 2 + src/pkg/path/resource_path_test.go | 19 +- src/pkg/selectors/groups.go | 79 ++++++- src/pkg/selectors/selectors.go | 1 + src/pkg/services/m365/api/groups.go | 38 +++- src/pkg/services/m365/api/groups_test.go | 21 +- 17 files changed, 478 insertions(+), 40 deletions(-) create mode 100644 src/internal/m365/collection/drive/group_handler.go diff --git a/src/internal/m365/backup.go b/src/internal/m365/backup.go index 9e7194511..805dcebd1 100644 --- a/src/internal/m365/backup.go +++ b/src/internal/m365/backup.go @@ -10,6 +10,7 @@ import ( "github.com/alcionai/corso/src/internal/diagnostics" "github.com/alcionai/corso/src/internal/m365/graph" "github.com/alcionai/corso/src/internal/m365/service/exchange" + "github.com/alcionai/corso/src/internal/m365/service/groups" "github.com/alcionai/corso/src/internal/m365/service/onedrive" "github.com/alcionai/corso/src/internal/m365/service/sharepoint" "github.com/alcionai/corso/src/internal/operations/inject" @@ -116,6 +117,18 @@ func (ctrl *Controller) ProduceBackupCollections( return nil, nil, false, err } + case path.GroupsService: + colls, ssmb, canUsePreviousBackup, err = groups.ProduceBackupCollections( + ctx, + bpc, + ctrl.AC, + ctrl.credentials, + ctrl.UpdateStatus, + errs) + if err != nil { + return nil, nil, false, err + } + default: return nil, nil, false, clues.Wrap(clues.New(service.String()), "service not supported").WithClues(ctx) } @@ -176,6 +189,10 @@ func verifyBackupInputs(sels selectors.Selector, siteIDs []string) error { // Exchange and OneDrive user existence now checked in checkServiceEnabled. return nil + case selectors.ServiceGroups: + // TODO(meain): check for group existence. + return nil + case selectors.ServiceSharePoint: ids = siteIDs } @@ -197,8 +214,8 @@ func checkServiceEnabled( service path.ServiceType, resource string, ) (bool, bool, error) { - if service == path.SharePointService { - // No "enabled" check required for sharepoint + if service == path.SharePointService || service == path.GroupsService { + // No "enabled" check required for sharepoint or groups. 
return true, true, nil } diff --git a/src/internal/m365/backup_test.go b/src/internal/m365/backup_test.go index 5c19a182c..c2938a36b 100644 --- a/src/internal/m365/backup_test.go +++ b/src/internal/m365/backup_test.go @@ -465,3 +465,85 @@ func (suite *SPCollectionIntgSuite) TestCreateSharePointCollection_Lists() { } } } + +// --------------------------------------------------------------------------- +// CreateGroupsCollection tests +// --------------------------------------------------------------------------- + +type GroupsCollectionIntgSuite struct { + tester.Suite + connector *Controller + user string +} + +func TestGroupsCollectionIntgSuite(t *testing.T) { + suite.Run(t, &GroupsCollectionIntgSuite{ + Suite: tester.NewIntegrationSuite( + t, + [][]string{tconfig.M365AcctCredEnvs}), + }) +} + +func (suite *GroupsCollectionIntgSuite) SetupSuite() { + ctx, flush := tester.NewContext(suite.T()) + defer flush() + + suite.connector = newController(ctx, suite.T(), resource.Sites, path.GroupsService) + suite.user = tconfig.M365UserID(suite.T()) + + tester.LogTimeOfTest(suite.T()) +} + +func (suite *GroupsCollectionIntgSuite) TestCreateGroupsCollection_SharePoint() { + t := suite.T() + + ctx, flush := tester.NewContext(t) + defer flush() + + var ( + groupID = tconfig.M365GroupID(t) + ctrl = newController(ctx, t, resource.Groups, path.GroupsService) + groupIDs = []string{groupID} + ) + + id, name, err := ctrl.PopulateProtectedResourceIDAndName(ctx, groupID, nil) + require.NoError(t, err, clues.ToCore(err)) + + sel := selectors.NewGroupsBackup(groupIDs) + // TODO(meain): make use of selectors + sel.Include(sel.LibraryFolders([]string{"test"}, selectors.PrefixMatch())) + + sel.SetDiscreteOwnerIDName(id, name) + + bpc := inject.BackupProducerConfig{ + LastBackupVersion: version.NoBackup, + Options: control.DefaultOptions(), + ProtectedResource: inMock.NewProvider(id, name), + Selector: sel.Selector, + } + + collections, excludes, canUsePreviousBackup, err := ctrl.ProduceBackupCollections( + ctx, + bpc, + fault.New(true)) + require.NoError(t, err, clues.ToCore(err)) + assert.True(t, canUsePreviousBackup, "can use previous backup") + // No excludes yet as this isn't an incremental backup. + assert.True(t, excludes.Empty()) + + // we don't know an exact count of drives this will produce, + // but it should be more than one. 
+ assert.Greater(t, len(collections), 1) + + for _, coll := range collections { + for object := range coll.Items(ctx, fault.New(true)) { + buf := &bytes.Buffer{} + _, err := buf.ReadFrom(object.ToReader()) + assert.NoError(t, err, "reading item", clues.ToCore(err)) + } + } + + status := ctrl.Wait() + assert.NotZero(t, status.Successes) + t.Log(status.String()) +} diff --git a/src/internal/m365/collection/drive/group_handler.go b/src/internal/m365/collection/drive/group_handler.go new file mode 100644 index 000000000..81bbf36af --- /dev/null +++ b/src/internal/m365/collection/drive/group_handler.go @@ -0,0 +1,197 @@ +package drive + +import ( + "context" + "net/http" + "strings" + + "github.com/microsoftgraph/msgraph-sdk-go/models" + + "github.com/alcionai/corso/src/internal/common/ptr" + odConsts "github.com/alcionai/corso/src/internal/m365/service/onedrive/consts" + "github.com/alcionai/corso/src/pkg/backup/details" + "github.com/alcionai/corso/src/pkg/path" + "github.com/alcionai/corso/src/pkg/selectors" + "github.com/alcionai/corso/src/pkg/services/m365/api" +) + +var _ BackupHandler = &groupBackupHandler{} + +type groupBackupHandler struct { + groupID string + ac api.Drives + scope selectors.GroupsScope +} + +func NewGroupBackupHandler(groupID string, ac api.Drives, scope selectors.GroupsScope) groupBackupHandler { + return groupBackupHandler{groupID, ac, scope} +} + +func (h groupBackupHandler) Get( + ctx context.Context, + url string, + headers map[string]string, +) (*http.Response, error) { + return h.ac.Get(ctx, url, headers) +} + +func (h groupBackupHandler) PathPrefix( + tenantID, resourceOwner, driveID string, +) (path.Path, error) { + return path.Build( + tenantID, + resourceOwner, + path.GroupsService, + path.LibrariesCategory, // TODO(meain) + false, + odConsts.DrivesPathDir, + driveID, + odConsts.RootPathDir) +} + +func (h groupBackupHandler) CanonicalPath( + folders *path.Builder, + tenantID, resourceOwner string, +) (path.Path, error) { + // TODO(meain): path fixes: sharepoint site ids should be in the path + return folders.ToDataLayerPath( + tenantID, + h.groupID, + path.GroupsService, + path.LibrariesCategory, + false) +} + +func (h groupBackupHandler) ServiceCat() (path.ServiceType, path.CategoryType) { + return path.GroupsService, path.LibrariesCategory +} + +func (h groupBackupHandler) NewDrivePager( + resourceOwner string, + fields []string, +) api.DrivePager { + return h.ac.NewSiteDrivePager(resourceOwner, fields) +} + +func (h groupBackupHandler) NewItemPager( + driveID, link string, + fields []string, +) api.DriveItemDeltaEnumerator { + return h.ac.NewDriveItemDeltaPager(driveID, link, fields) +} + +func (h groupBackupHandler) AugmentItemInfo( + dii details.ItemInfo, + item models.DriveItemable, + size int64, + parentPath *path.Builder, +) details.ItemInfo { + return augmentGroupItemInfo(dii, item, size, parentPath) +} + +func (h groupBackupHandler) FormatDisplayPath( + driveName string, + pb *path.Builder, +) string { + return "/" + driveName + "/" + pb.String() +} + +func (h groupBackupHandler) NewLocationIDer( + driveID string, + elems ...string, +) details.LocationIDer { + return details.NewSharePointLocationIDer(driveID, elems...) 
+} + +func (h groupBackupHandler) GetItemPermission( + ctx context.Context, + driveID, itemID string, +) (models.PermissionCollectionResponseable, error) { + return h.ac.GetItemPermission(ctx, driveID, itemID) +} + +func (h groupBackupHandler) GetItem( + ctx context.Context, + driveID, itemID string, +) (models.DriveItemable, error) { + return h.ac.GetItem(ctx, driveID, itemID) +} + +func (h groupBackupHandler) IsAllPass() bool { + // TODO(meain) + return true +} + +func (h groupBackupHandler) IncludesDir(dir string) bool { + // TODO(meain) + // return h.scope.Matches(selectors.SharePointGroupFolder, dir) + return true +} + +// --------------------------------------------------------------------------- +// Common +// --------------------------------------------------------------------------- + +func augmentGroupItemInfo( + dii details.ItemInfo, + item models.DriveItemable, + size int64, + parentPath *path.Builder, +) details.ItemInfo { + var driveName, driveID, creatorEmail string + + // TODO: we rely on this info for details/restore lookups, + // so if it's nil we have an issue, and will need an alternative + // way to source the data. + + if item.GetCreatedBy() != nil && item.GetCreatedBy().GetUser() != nil { + // User is sometimes not available when created via some + // external applications (like backup/restore solutions) + additionalData := item.GetCreatedBy().GetUser().GetAdditionalData() + + ed, ok := additionalData["email"] + if !ok { + ed = additionalData["displayName"] + } + + if ed != nil { + creatorEmail = *ed.(*string) + } + } + + // gsi := item.GetSharepointIds() + // if gsi != nil { + // siteID = ptr.Val(gsi.GetSiteId()) + // weburl = ptr.Val(gsi.GetSiteUrl()) + + // if len(weburl) == 0 { + // weburl = constructWebURL(item.GetAdditionalData()) + // } + // } + + if item.GetParentReference() != nil { + driveID = ptr.Val(item.GetParentReference().GetDriveId()) + driveName = strings.TrimSpace(ptr.Val(item.GetParentReference().GetName())) + } + + var pps string + if parentPath != nil { + pps = parentPath.String() + } + + dii.Groups = &details.GroupsInfo{ + Created: ptr.Val(item.GetCreatedDateTime()), + DriveID: driveID, + DriveName: driveName, + ItemName: ptr.Val(item.GetName()), + ItemType: details.SharePointLibrary, + Modified: ptr.Val(item.GetLastModifiedDateTime()), + Owner: creatorEmail, + ParentPath: pps, + Size: size, + } + + dii.Extension = &details.ExtensionData{} + + return dii +} diff --git a/src/internal/m365/collection/site/backup.go b/src/internal/m365/collection/site/backup.go index 8357d9512..f574ee4b5 100644 --- a/src/internal/m365/collection/site/backup.go +++ b/src/internal/m365/collection/site/backup.go @@ -25,10 +25,9 @@ import ( func CollectLibraries( ctx context.Context, bpc inject.BackupProducerConfig, - ad api.Drives, + bh drive.BackupHandler, tenantID string, ssmb *prefixmatcher.StringSetMatchBuilder, - scope selectors.SharePointScope, su support.StatusUpdater, errs *fault.Bus, ) ([]data.BackupCollection, bool, error) { @@ -37,13 +36,16 @@ func CollectLibraries( var ( collections = []data.BackupCollection{} colls = drive.NewCollections( - drive.NewLibraryBackupHandler(ad, scope), + bh, tenantID, bpc.ProtectedResource.ID(), su, bpc.Options) ) + // TODO(meain): backup resource owner should be group id in case + // of group sharepoint site backup. As of now, we always use + // sharepoint site ids. 
odcs, canUsePreviousBackup, err := colls.Get(ctx, bpc.MetadataCollections, ssmb, errs) if err != nil { return nil, false, graph.Wrap(ctx, err, "getting library") diff --git a/src/internal/m365/controller.go b/src/internal/m365/controller.go index 174148a76..0b8854be2 100644 --- a/src/internal/m365/controller.go +++ b/src/internal/m365/controller.go @@ -170,6 +170,8 @@ func getResourceClient(rc resource.Category, ac api.Client) (*resourceClient, er return &resourceClient{enum: rc, getter: ac.Users()}, nil case resource.Sites: return &resourceClient{enum: rc, getter: ac.Sites()}, nil + case resource.Groups: + return &resourceClient{enum: rc, getter: ac.Groups()}, nil default: return nil, clues.New("unrecognized owner resource enum").With("resource_enum", rc) } diff --git a/src/internal/m365/resource/resource.go b/src/internal/m365/resource/resource.go index f91a853a6..6aca21924 100644 --- a/src/internal/m365/resource/resource.go +++ b/src/internal/m365/resource/resource.go @@ -6,4 +6,5 @@ const ( UnknownResource Category = "" Users Category = "users" Sites Category = "sites" + Groups Category = "groups" ) diff --git a/src/internal/m365/service/groups/backup.go b/src/internal/m365/service/groups/backup.go index 3bb779507..b74b5fde0 100644 --- a/src/internal/m365/service/groups/backup.go +++ b/src/internal/m365/service/groups/backup.go @@ -5,8 +5,12 @@ import ( "github.com/alcionai/clues" + "github.com/alcionai/corso/src/internal/common/idname" "github.com/alcionai/corso/src/internal/common/prefixmatcher" + "github.com/alcionai/corso/src/internal/common/ptr" "github.com/alcionai/corso/src/internal/data" + "github.com/alcionai/corso/src/internal/m365/collection/drive" + "github.com/alcionai/corso/src/internal/m365/collection/site" "github.com/alcionai/corso/src/internal/m365/graph" "github.com/alcionai/corso/src/internal/m365/support" "github.com/alcionai/corso/src/internal/observe" @@ -56,7 +60,35 @@ func ProduceBackupCollections( var dbcs []data.BackupCollection switch scope.Category().PathType() { - case path.LibrariesCategory: // TODO + case path.LibrariesCategory: + // TODO(meain): Private channels get a separate SharePoint + // site. We should also back those up and not just the + // default one. + resp, err := ac.Groups().GetRootSite(ctx, bpc.ProtectedResource.ID()) + if err != nil { + return nil, nil, false, err + } + + pr := idname.NewProvider(ptr.Val(resp.GetId()), ptr.Val(resp.GetName())) + sbpc := inject.BackupProducerConfig{ + LastBackupVersion: bpc.LastBackupVersion, + Options: bpc.Options, + ProtectedResource: pr, + Selector: bpc.Selector, + } + + dbcs, canUsePreviousBackup, err = site.CollectLibraries( + ctx, + sbpc, + drive.NewGroupBackupHandler(bpc.ProtectedResource.ID(), ac.Drives(), scope), + creds.AzureTenantID, + ssmb, + su, + errs) + if err != nil { + el.AddRecoverable(ctx, err) + continue + } } collections = append(collections, dbcs...) 
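In effect, the hunks above are an interface-injection refactor: `site.CollectLibraries` now receives a `drive.BackupHandler` rather than talking to `api.Drives` directly, so the same library collector serves both plain SharePoint sites and a group's root site. A toy sketch of that shape, with heavily simplified stand-in types (the real interface also covers pagers, path building, and item-info augmentation):

```go
package main

import "fmt"

// backupHandler is a simplified stand-in for drive.BackupHandler.
type backupHandler interface {
	ServiceCat() string
}

// libraryBackupHandler backs up libraries for plain SharePoint sites.
type libraryBackupHandler struct{}

func (libraryBackupHandler) ServiceCat() string { return "sharepoint/libraries" }

// groupBackupHandler backs up the same libraries, but owned by a group.
type groupBackupHandler struct{ groupID string }

func (h groupBackupHandler) ServiceCat() string { return "groups/libraries" }

// collectLibraries is service-agnostic: the injected handler decides how
// paths, pagers, and item details are shaped.
func collectLibraries(h backupHandler) {
	fmt.Println("collecting under", h.ServiceCat())
}

func main() {
	collectLibraries(libraryBackupHandler{})
	collectLibraries(groupBackupHandler{groupID: "group-id"})
}
```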
@@ -70,7 +102,7 @@ func ProduceBackupCollections( collections, creds.AzureTenantID, bpc.ProtectedResource.ID(), - path.UnknownService, // path.GroupsService + path.GroupsService, categories, su, errs) diff --git a/src/internal/m365/service/sharepoint/backup.go b/src/internal/m365/service/sharepoint/backup.go index c4604e609..ce7789b64 100644 --- a/src/internal/m365/service/sharepoint/backup.go +++ b/src/internal/m365/service/sharepoint/backup.go @@ -7,6 +7,7 @@ import ( "github.com/alcionai/corso/src/internal/common/prefixmatcher" "github.com/alcionai/corso/src/internal/data" + "github.com/alcionai/corso/src/internal/m365/collection/drive" "github.com/alcionai/corso/src/internal/m365/collection/site" "github.com/alcionai/corso/src/internal/m365/graph" "github.com/alcionai/corso/src/internal/m365/support" @@ -79,10 +80,9 @@ func ProduceBackupCollections( spcs, canUsePreviousBackup, err = site.CollectLibraries( ctx, bpc, - ac.Drives(), + drive.NewLibraryBackupHandler(ac.Drives(), scope), creds.AzureTenantID, ssmb, - scope, su, errs) if err != nil { diff --git a/src/internal/tester/tconfig/config.go b/src/internal/tester/tconfig/config.go index a900f26f2..d92918dbb 100644 --- a/src/internal/tester/tconfig/config.go +++ b/src/internal/tester/tconfig/config.go @@ -30,6 +30,7 @@ const ( TestCfgGroupID = "m365groupid" TestCfgUserID = "m365userid" TestCfgSecondaryUserID = "secondarym365userid" + TestCfgSecondaryGroupID = "secondarym365groupid" TestCfgTertiaryUserID = "tertiarym365userid" TestCfgLoadTestUserID = "loadtestm365userid" TestCfgLoadTestOrgUsers = "loadtestm365orgusers" diff --git a/src/pkg/path/builder.go b/src/pkg/path/builder.go index 1cf502079..ec1f71ee3 100644 --- a/src/pkg/path/builder.go +++ b/src/pkg/path/builder.go @@ -241,6 +241,8 @@ func (pb Builder) ToStreamStorePath( metadataService = OneDriveMetadataService case SharePointService: metadataService = SharePointMetadataService + case GroupsService: + metadataService = GroupsMetadataService } return &dataLayerResourcePath{ @@ -282,6 +284,8 @@ func (pb Builder) ToServiceCategoryMetadataPath( metadataService = OneDriveMetadataService case SharePointService: metadataService = SharePointMetadataService + case GroupsService: + metadataService = GroupsMetadataService } return &dataLayerResourcePath{ diff --git a/src/pkg/path/category_type.go b/src/pkg/path/category_type.go index 40f511692..918435b70 100644 --- a/src/pkg/path/category_type.go +++ b/src/pkg/path/category_type.go @@ -78,9 +78,11 @@ var serviceCategories = map[ServiceType]map[CategoryType]struct{}{ }, GroupsService: { ChannelMessagesCategory: {}, + LibrariesCategory: {}, }, TeamsService: { ChannelMessagesCategory: {}, + LibrariesCategory: {}, }, } diff --git a/src/pkg/path/elements.go b/src/pkg/path/elements.go index 838cea114..e2f3f493e 100644 --- a/src/pkg/path/elements.go +++ b/src/pkg/path/elements.go @@ -13,10 +13,12 @@ var piiSafePathElems = pii.MapWithPlurals( UnknownService.String(), ExchangeService.String(), OneDriveService.String(), + GroupsService.String(), SharePointService.String(), ExchangeMetadataService.String(), OneDriveMetadataService.String(), SharePointMetadataService.String(), + GroupsMetadataService.String(), // categories UnknownCategory.String(), diff --git a/src/pkg/path/resource_path_test.go b/src/pkg/path/resource_path_test.go index e49f797e2..492dcb970 100644 --- a/src/pkg/path/resource_path_test.go +++ b/src/pkg/path/resource_path_test.go @@ -287,47 +287,54 @@ func (suite *DataLayerResourcePath) TestToServiceCategoryMetadataPath() { check: 
assert.Error, }, { - name: "Passes", + name: "Exchange Contacts", service: path.ExchangeService, category: path.ContactsCategory, expectedService: path.ExchangeMetadataService, check: assert.NoError, }, { - name: "Passes", + name: "Exchange Events", service: path.ExchangeService, category: path.EventsCategory, expectedService: path.ExchangeMetadataService, check: assert.NoError, }, { - name: "Passes", + name: "OneDrive Files", service: path.OneDriveService, category: path.FilesCategory, expectedService: path.OneDriveMetadataService, check: assert.NoError, }, { - name: "Passes", + name: "SharePoint Libraries", service: path.SharePointService, category: path.LibrariesCategory, expectedService: path.SharePointMetadataService, check: assert.NoError, }, { - name: "Passes", + name: "SharePoint Lists", service: path.SharePointService, category: path.ListsCategory, expectedService: path.SharePointMetadataService, check: assert.NoError, }, { - name: "Passes", + name: "SharePoint Pages", service: path.SharePointService, category: path.PagesCategory, expectedService: path.SharePointMetadataService, check: assert.NoError, }, + { + name: "Groups Libraries", + service: path.GroupsService, + category: path.LibrariesCategory, + expectedService: path.GroupsMetadataService, + check: assert.NoError, + }, } for _, test := range table { diff --git a/src/pkg/selectors/groups.go b/src/pkg/selectors/groups.go index 30d93698c..50aa3db74 100644 --- a/src/pkg/selectors/groups.go +++ b/src/pkg/selectors/groups.go @@ -205,8 +205,8 @@ func (s *groups) Scopes() []GroupsScope { // ------------------- // Scope Factories -// Produces one or more Groups site scopes. -// One scope is created per site entry. +// Produces one or more Groups scopes. +// One scope is created per group entry. // If any slice contains selectors.Any, that slice is reduced to [selectors.Any] // If any slice contains selectors.None, that slice is reduced to [selectors.None] // If any slice is empty, it defaults to [selectors.None] @@ -215,6 +215,7 @@ func (s *groups) AllData() []GroupsScope { scopes = append( scopes, + makeScope[GroupsScope](GroupsLibraryFolder, Any()), makeScope[GroupsScope](GroupsChannel, Any())) return scopes @@ -255,6 +256,56 @@ func (s *sharePoint) ChannelMessages(channels, messages []string, opts ...option return scopes } +// Library produces one or more Group library scopes, where the library +// matches upon a given drive by ID or Name. In order to ensure library selection +// this should always be embedded within the Filter() set; include(Library()) will +// select all items in the library without further filtering. +// If any slice contains selectors.Any, that slice is reduced to [selectors.Any] +// If any slice contains selectors.None, that slice is reduced to [selectors.None] +// If any slice is empty, it defaults to [selectors.None] +func (s *groups) Library(library string) []GroupsScope { + return []GroupsScope{ + makeInfoScope[GroupsScope]( + GroupsLibraryItem, + GroupsInfoSiteLibraryDrive, + []string{library}, + filters.Equal), + } +} + +// LibraryFolders produces one or more SharePoint libraryFolder scopes. +// If any slice contains selectors.Any, that slice is reduced to [selectors.Any] +// If any slice contains selectors.None, that slice is reduced to [selectors.None] +// If any slice is empty, it defaults to [selectors.None] +func (s *groups) LibraryFolders(libraryFolders []string, opts ...option) []GroupsScope { + var ( + scopes = []GroupsScope{} + os = append([]option{pathComparator()}, opts...) 
+ ) + + scopes = append( + scopes, + makeScope[GroupsScope](GroupsLibraryFolder, libraryFolders, os...)) + + return scopes +} + +// LibraryItems produces one or more Groups library item scopes. +// If any slice contains selectors.Any, that slice is reduced to [selectors.Any] +// If any slice contains selectors.None, that slice is reduced to [selectors.None] +// If any slice is empty, it defaults to [selectors.None] +// options are only applied to the library scopes. +func (s *groups) LibraryItems(libraries, items []string, opts ...option) []GroupsScope { + scopes := []GroupsScope{} + + scopes = append( + scopes, + makeScope[GroupsScope](GroupsLibraryItem, items, defaultItemOptions(s.Cfg)...). + set(GroupsLibraryFolder, libraries, opts...)) + + return scopes +} + // ------------------- // ItemInfo Factories @@ -278,6 +329,8 @@ const ( GroupsGroup groupsCategory = "GroupsGroup" GroupsChannel groupsCategory = "GroupsChannel" GroupsChannelMessage groupsCategory = "GroupsChannelMessage" + GroupsLibraryFolder groupsCategory = "GroupsLibraryFolder" + GroupsLibraryItem groupsCategory = "GroupsLibraryItem" // details.itemInfo comparables @@ -292,6 +345,10 @@ var groupsLeafProperties = map[categorizer]leafProperty{ pathKeys: []categorizer{GroupsChannel, GroupsChannelMessage}, pathType: path.ChannelMessagesCategory, }, + GroupsLibraryItem: { + pathKeys: []categorizer{GroupsLibraryFolder, GroupsLibraryItem}, + pathType: path.LibrariesCategory, + }, GroupsGroup: { // the root category must be represented, even though it isn't a leaf pathKeys: []categorizer{GroupsGroup}, pathType: path.UnknownCategory, @@ -311,8 +368,10 @@ func (c groupsCategory) leafCat() categorizer { switch c { // TODO: if channels ever contain more than one type of item, // we'll need to fix this up. - case GroupsChannel, GroupsChannelMessage, GroupsInfoSiteLibraryDrive: + case GroupsChannel, GroupsChannelMessage: return GroupsChannelMessage + case GroupsLibraryFolder, GroupsLibraryItem, GroupsInfoSiteLibraryDrive: + return GroupsLibraryItem } return c @@ -342,7 +401,7 @@ func (c groupsCategory) isLeaf() bool { // pathValues transforms the two paths to maps of identified properties. 
// // Example: -// [tenantID, service, siteID, category, folder, itemID] +// [tenantID, service, groupID, site, siteID, category, folder, itemID] // => {spFolder: folder, spItemID: itemID} func (c groupsCategory) pathValues( repo path.Path, @@ -357,11 +416,14 @@ func (c groupsCategory) pathValues( switch c { case GroupsChannel, GroupsChannelMessage: + folderCat, itemCat = GroupsChannel, GroupsChannelMessage + rFld = ent.Groups.ParentPath + case GroupsLibraryFolder, GroupsLibraryItem: if ent.Groups == nil { return nil, clues.New("no Groups ItemInfo in details") } - folderCat, itemCat = GroupsChannel, GroupsChannelMessage + folderCat, itemCat = GroupsLibraryFolder, GroupsLibraryItem rFld = ent.Groups.ParentPath default: @@ -459,7 +521,7 @@ func (s GroupsScope) set(cat groupsCategory, v []string, opts ...option) GroupsS os := []option{} switch cat { - case GroupsChannel: + case GroupsChannel, GroupsLibraryFolder: os = append(os, pathComparator()) } @@ -472,8 +534,12 @@ func (s GroupsScope) setDefaults() { case GroupsGroup: s[GroupsChannel.String()] = passAny s[GroupsChannelMessage.String()] = passAny + s[GroupsLibraryFolder.String()] = passAny + s[GroupsLibraryItem.String()] = passAny case GroupsChannel: s[GroupsChannelMessage.String()] = passAny + case GroupsLibraryFolder: + s[GroupsLibraryItem.String()] = passAny } } @@ -494,6 +560,7 @@ func (s groups) Reduce( s.Selector, map[path.CategoryType]groupsCategory{ path.ChannelMessagesCategory: GroupsChannelMessage, + path.LibrariesCategory: GroupsLibraryItem, }, errs) } diff --git a/src/pkg/selectors/selectors.go b/src/pkg/selectors/selectors.go index 3a18c2bd0..860fa5572 100644 --- a/src/pkg/selectors/selectors.go +++ b/src/pkg/selectors/selectors.go @@ -32,6 +32,7 @@ var serviceToPathType = map[service]path.ServiceType{ ServiceExchange: path.ExchangeService, ServiceOneDrive: path.OneDriveService, ServiceSharePoint: path.SharePointService, + ServiceGroups: path.GroupsService, } var ( diff --git a/src/pkg/services/m365/api/groups.go b/src/pkg/services/m365/api/groups.go index 3d036e610..7a3a134f7 100644 --- a/src/pkg/services/m365/api/groups.go +++ b/src/pkg/services/m365/api/groups.go @@ -7,6 +7,7 @@ import ( msgraphgocore "github.com/microsoftgraph/msgraph-sdk-go-core" "github.com/microsoftgraph/msgraph-sdk-go/models" + "github.com/alcionai/corso/src/internal/common/ptr" "github.com/alcionai/corso/src/internal/common/str" "github.com/alcionai/corso/src/internal/common/tform" "github.com/alcionai/corso/src/internal/m365/graph" @@ -27,7 +28,7 @@ func (c Client) Groups() Groups { return Groups{c} } -// On creation of each Teams team a corrsponding group gets created. +// On creation of each Teams team a corresponding group gets created. // The group acts as the protected resource, and all teams data like events, // drive and mail messages are owned by that group. @@ -115,6 +116,30 @@ func (c Groups) GetByID( return resp, graph.Stack(ctx, err).OrNil() } +// GetRootSite retrieves the root site for the group. +func (c Groups) GetRootSite( + ctx context.Context, + identifier string, +) (models.Siteable, error) { + service, err := c.Service() + if err != nil { + return nil, err + } + + resp, err := service. + Client(). + Groups(). + ByGroupId(identifier). + Sites(). + BySiteId("root"). 
+ Get(ctx, nil) + if err != nil { + return nil, clues.Wrap(err, "getting root site for group") + } + + return resp, graph.Stack(ctx, err).OrNil() +} + // --------------------------------------------------------------------------- // helpers // --------------------------------------------------------------------------- @@ -167,3 +192,14 @@ func IsTeam(ctx context.Context, mg models.Groupable) bool { return false } + +// GetIDAndName looks up the group matching the given ID, and returns +// its canonical ID and the name. +func (c Groups) GetIDAndName(ctx context.Context, groupID string) (string, string, error) { + s, err := c.GetByID(ctx, groupID) + if err != nil { + return "", "", err + } + + return ptr.Val(s.GetId()), ptr.Val(s.GetDisplayName()), nil +} diff --git a/src/pkg/services/m365/api/groups_test.go b/src/pkg/services/m365/api/groups_test.go index ae435168a..6a0434196 100644 --- a/src/pkg/services/m365/api/groups_test.go +++ b/src/pkg/services/m365/api/groups_test.go @@ -107,7 +107,7 @@ func (suite *GroupsIntgSuite) TestGetAll() { Groups(). GetAll(ctx, fault.New(true)) require.NoError(t, err) - require.NotZero(t, len(groups), "must find at least one group") + require.NotZero(t, len(groups), "must have at least one group") } func (suite *GroupsIntgSuite) TestGroups_GetByID() { @@ -122,34 +122,19 @@ func (suite *GroupsIntgSuite) TestGroups_GetByID() { expectErr func(*testing.T, error) }{ { - name: "3 part id", + name: "valid id", id: groupID, expectErr: func(t *testing.T, err error) { assert.NoError(t, err, clues.ToCore(err)) }, }, { - name: "malformed id", + name: "invalid id", id: uuid.NewString(), expectErr: func(t *testing.T, err error) { assert.Error(t, err, clues.ToCore(err)) }, }, - { - name: "random id", - id: uuid.NewString() + "," + uuid.NewString(), - expectErr: func(t *testing.T, err error) { - assert.Error(t, err, clues.ToCore(err)) - }, - }, - - { - name: "malformed url", - id: "barunihlda", - expectErr: func(t *testing.T, err error) { - assert.Error(t, err, clues.ToCore(err)) - }, - }, } for _, test := range table { suite.Run(test.name, func() { From 4ace4bee761ccfcee1f2feb759d40dece1b52811 Mon Sep 17 00:00:00 2001 From: Abhishek Pandey Date: Tue, 22 Aug 2023 18:25:08 +0530 Subject: [PATCH 19/32] Remove duplicate mocks from kopia wrapper tests (#4083) No logic changes. Only removing duplicate test code. --- #### Does this PR need a docs update or release note? 
- [ ] :white_check_mark: Yes, it's included - [ ] :clock1: Yes, but in a later PR - [x] :no_entry: No #### Type of change - [ ] :sunflower: Feature - [ ] :bug: Bugfix - [ ] :world_map: Documentation - [ ] :robot: Supportability/Tests - [ ] :computer: CI/Deployment - [x] :broom: Tech Debt/Cleanup #### Issue(s) * # #### Test Plan - [ ] :muscle: Manual - [x] :zap: Unit test - [ ] :green_heart: E2E --- src/internal/kopia/wrapper_test.go | 83 ++++++++---------------------- 1 file changed, 21 insertions(+), 62 deletions(-) diff --git a/src/internal/kopia/wrapper_test.go b/src/internal/kopia/wrapper_test.go index a21b954a9..8c511a6f0 100644 --- a/src/internal/kopia/wrapper_test.go +++ b/src/internal/kopia/wrapper_test.go @@ -25,6 +25,7 @@ import ( "github.com/alcionai/corso/src/internal/data" dataMock "github.com/alcionai/corso/src/internal/data/mock" "github.com/alcionai/corso/src/internal/m365/collection/drive/metadata" + m365Mock "github.com/alcionai/corso/src/internal/m365/mock" exchMock "github.com/alcionai/corso/src/internal/m365/service/exchange/mock" "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/pkg/backup/details" @@ -1128,10 +1129,10 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_NoDetailsForMeta() { streams = append(streams, ms) } - mc := &mockBackupCollection{ - path: storePath, - loc: locPath, - streams: streams, + mc := &m365Mock.BackupCollection{ + Path: storePath, + Loc: locPath, + Streams: streams, } return []data.BackupCollection{mc} @@ -1155,11 +1156,11 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_NoDetailsForMeta() { ItemInfo: details.ItemInfo{OneDrive: &info}, } - mc := &mockBackupCollection{ - path: storePath, - loc: locPath, - streams: []data.Item{ms}, - state: data.NotMovedState, + mc := &m365Mock.BackupCollection{ + Path: storePath, + Loc: locPath, + Streams: []data.Item{ms}, + CState: data.NotMovedState, } return []data.BackupCollection{mc} @@ -1293,48 +1294,6 @@ func (suite *KopiaIntegrationSuite) TestRestoreAfterCompressionChange() { testForFiles(t, ctx, expected, result) } -// TODO(pandeyabs): Switch to m365/mock/BackupCollection. 
-type mockBackupCollection struct { - path path.Path - loc *path.Builder - streams []data.Item - state data.CollectionState -} - -func (c *mockBackupCollection) Items(context.Context, *fault.Bus) <-chan data.Item { - res := make(chan data.Item) - - go func() { - defer close(res) - - for _, s := range c.streams { - res <- s - } - }() - - return res -} - -func (c mockBackupCollection) FullPath() path.Path { - return c.path -} - -func (c mockBackupCollection) PreviousPath() path.Path { - return c.path -} - -func (c mockBackupCollection) LocationPath() *path.Builder { - return c.loc -} - -func (c mockBackupCollection) State() data.CollectionState { - return c.state -} - -func (c mockBackupCollection) DoNotMergeItems() bool { - return false -} - func (suite *KopiaIntegrationSuite) TestBackupCollections_ReaderError() { t := suite.T() @@ -1343,10 +1302,10 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_ReaderError() { r := NewReason(testTenant, testUser, path.ExchangeService, path.EmailCategory) collections := []data.BackupCollection{ - &mockBackupCollection{ - path: suite.storePath1, - loc: loc1, - streams: []data.Item{ + &m365Mock.BackupCollection{ + Path: suite.storePath1, + Loc: loc1, + Streams: []data.Item{ &dataMock.Item{ ItemID: testFileName, Reader: io.NopCloser(bytes.NewReader(testFileData)), @@ -1359,10 +1318,10 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_ReaderError() { }, }, }, - &mockBackupCollection{ - path: suite.storePath2, - loc: loc2, - streams: []data.Item{ + &m365Mock.BackupCollection{ + Path: suite.storePath2, + Loc: loc2, + Streams: []data.Item{ &dataMock.Item{ ItemID: testFileName3, Reader: io.NopCloser(bytes.NewReader(testFileData3)), @@ -1603,11 +1562,11 @@ func (suite *KopiaSimpleRepoIntegrationSuite) SetupTest() { for _, parent := range []path.Path{suite.testPath1, suite.testPath2} { loc := path.Builder{}.Append(parent.Folders()...) - collection := &mockBackupCollection{path: parent, loc: loc} + collection := &m365Mock.BackupCollection{Path: parent, Loc: loc} for _, item := range suite.files[parent.String()] { - collection.streams = append( - collection.streams, + collection.Streams = append( + collection.Streams, &dataMock.Item{ ItemID: item.itemPath.Item(), Reader: io.NopCloser(bytes.NewReader(item.data)), From 9255013d6f2c52944447ddb70607ab6a9b56cab7 Mon Sep 17 00:00:00 2001 From: ashmrtn <3891298+ashmrtn@users.noreply.github.com> Date: Tue, 22 Aug 2023 08:29:38 -0700 Subject: [PATCH 20/32] Refactor backup cleanup test code slightly (#4080) Switch to using functions that always return a new instance of the struct in question. Upcoming tests were having issues with state carrying over between individual tests.
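A minimal, self-contained illustration of the failure mode (using a hypothetical `bup` struct, not the real models): with a shared pointer fixture, a mutation made by one table-driven test leaks into the next, while a factory hands every test a fresh value:

```go
package main

import "fmt"

type bup struct{ labels map[string]string }

func main() {
	// Shared fixture: one instance reused by every table-driven test case.
	shared := &bup{labels: map[string]string{}}

	// Factory: each call returns a brand-new, independent instance.
	newBup := func() *bup {
		return &bup{labels: map[string]string{}}
	}

	shared.labels["state"] = "mutated by test A" // test A tweaks the fixture

	fmt.Println(len(shared.labels))   // 1: a later test reusing `shared` sees the mutation
	fmt.Println(len(newBup().labels)) // 0: each factory call starts clean
}
```

--- #### Does this PR need a docs update or release note?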
- [ ] :white_check_mark: Yes, it's included - [ ] :clock1: Yes, but in a later PR - [x] :no_entry: No #### Type of change - [ ] :sunflower: Feature - [ ] :bug: Bugfix - [ ] :world_map: Documentation - [x] :robot: Supportability/Tests - [ ] :computer: CI/Deployment - [x] :broom: Tech Debt/Cleanup #### Issue(s) * #3217 #### Test Plan - [x] :muscle: Manual - [ ] :zap: Unit test - [ ] :green_heart: E2E --- src/internal/kopia/cleanup_backups_test.go | 298 +++++++++++---------- 1 file changed, 162 insertions(+), 136 deletions(-) diff --git a/src/internal/kopia/cleanup_backups_test.go b/src/internal/kopia/cleanup_backups_test.go index 895d9226e..ecd36848d 100644 --- a/src/internal/kopia/cleanup_backups_test.go +++ b/src/internal/kopia/cleanup_backups_test.go @@ -137,89 +137,113 @@ func (suite *BackupCleanupUnitSuite) TestCleanupOrphanedData() { backupTag, _ := makeTagKV(TagBackupCategory) // Current backup and snapshots. - bupCurrent := &backup.Backup{ - BaseModel: model.BaseModel{ - ID: model.StableID("current-bup-id"), - ModelStoreID: manifest.ID("current-bup-msid"), - }, - SnapshotID: "current-snap-msid", - StreamStoreID: "current-deets-msid", + bupCurrent := func() *backup.Backup { + return &backup.Backup{ + BaseModel: model.BaseModel{ + ID: model.StableID("current-bup-id"), + ModelStoreID: manifest.ID("current-bup-msid"), + }, + SnapshotID: "current-snap-msid", + StreamStoreID: "current-deets-msid", + } } - snapCurrent := &manifest.EntryMetadata{ - ID: "current-snap-msid", - Labels: map[string]string{ - backupTag: "0", - }, + snapCurrent := func() *manifest.EntryMetadata { + return &manifest.EntryMetadata{ + ID: "current-snap-msid", + Labels: map[string]string{ + backupTag: "0", + }, + } } - deetsCurrent := &manifest.EntryMetadata{ - ID: "current-deets-msid", + deetsCurrent := func() *manifest.EntryMetadata { + return &manifest.EntryMetadata{ + ID: "current-deets-msid", + } } // Legacy backup with details in separate model. - bupLegacy := &backup.Backup{ - BaseModel: model.BaseModel{ - ID: model.StableID("legacy-bup-id"), - ModelStoreID: manifest.ID("legacy-bup-msid"), - }, - SnapshotID: "legacy-snap-msid", - DetailsID: "legacy-deets-msid", + bupLegacy := func() *backup.Backup { + return &backup.Backup{ + BaseModel: model.BaseModel{ + ID: model.StableID("legacy-bup-id"), + ModelStoreID: manifest.ID("legacy-bup-msid"), + }, + SnapshotID: "legacy-snap-msid", + DetailsID: "legacy-deets-msid", + } } - snapLegacy := &manifest.EntryMetadata{ - ID: "legacy-snap-msid", - Labels: map[string]string{ - backupTag: "0", - }, + snapLegacy := func() *manifest.EntryMetadata { + return &manifest.EntryMetadata{ + ID: "legacy-snap-msid", + Labels: map[string]string{ + backupTag: "0", + }, + } } - deetsLegacy := &model.BaseModel{ - ID: "legacy-deets-id", - ModelStoreID: "legacy-deets-msid", + deetsLegacy := func() *model.BaseModel { + return &model.BaseModel{ + ID: "legacy-deets-id", + ModelStoreID: "legacy-deets-msid", + } } // Incomplete backup missing data snapshot. 
- bupNoSnapshot := &backup.Backup{ - BaseModel: model.BaseModel{ - ID: model.StableID("ns-bup-id"), - ModelStoreID: manifest.ID("ns-bup-id-msid"), - }, - StreamStoreID: "ns-deets-msid", + bupNoSnapshot := func() *backup.Backup { + return &backup.Backup{ + BaseModel: model.BaseModel{ + ID: model.StableID("ns-bup-id"), + ModelStoreID: manifest.ID("ns-bup-id-msid"), + }, + StreamStoreID: "ns-deets-msid", + } } - deetsNoSnapshot := &manifest.EntryMetadata{ - ID: "ns-deets-msid", + deetsNoSnapshot := func() *manifest.EntryMetadata { + return &manifest.EntryMetadata{ + ID: "ns-deets-msid", + } } // Legacy incomplete backup missing data snapshot. - bupLegacyNoSnapshot := &backup.Backup{ - BaseModel: model.BaseModel{ - ID: model.StableID("ns-legacy-bup-id"), - ModelStoreID: manifest.ID("ns-legacy-bup-id-msid"), - }, - DetailsID: "ns-legacy-deets-msid", + bupLegacyNoSnapshot := func() *backup.Backup { + return &backup.Backup{ + BaseModel: model.BaseModel{ + ID: model.StableID("ns-legacy-bup-id"), + ModelStoreID: manifest.ID("ns-legacy-bup-id-msid"), + }, + DetailsID: "ns-legacy-deets-msid", + } } - deetsLegacyNoSnapshot := &model.BaseModel{ - ID: "ns-legacy-deets-id", - ModelStoreID: "ns-legacy-deets-msid", + deetsLegacyNoSnapshot := func() *model.BaseModel { + return &model.BaseModel{ + ID: "ns-legacy-deets-id", + ModelStoreID: "ns-legacy-deets-msid", + } } // Incomplete backup missing details. - bupNoDetails := &backup.Backup{ - BaseModel: model.BaseModel{ - ID: model.StableID("nssid-bup-id"), - ModelStoreID: manifest.ID("nssid-bup-msid"), - }, - SnapshotID: "nssid-snap-msid", + bupNoDetails := func() *backup.Backup { + return &backup.Backup{ + BaseModel: model.BaseModel{ + ID: model.StableID("nssid-bup-id"), + ModelStoreID: manifest.ID("nssid-bup-msid"), + }, + SnapshotID: "nssid-snap-msid", + } } - snapNoDetails := &manifest.EntryMetadata{ - ID: "nssid-snap-msid", - Labels: map[string]string{ - backupTag: "0", - }, + snapNoDetails := func() *manifest.EntryMetadata { + return &manifest.EntryMetadata{ + ID: "nssid-snap-msid", + Labels: map[string]string{ + backupTag: "0", + }, + } } // Get some stable time so that we can do everything relative to this in the @@ -268,16 +292,16 @@ func (suite *BackupCleanupUnitSuite) TestCleanupOrphanedData() { { name: "OnlyCompleteBackups Noops", snapshots: []*manifest.EntryMetadata{ - snapCurrent, - deetsCurrent, - snapLegacy, + snapCurrent(), + deetsCurrent(), + snapLegacy(), }, detailsModels: []*model.BaseModel{ - deetsLegacy, + deetsLegacy(), }, backups: []backupRes{ - {bup: bupCurrent}, - {bup: bupLegacy}, + {bup: bupCurrent()}, + {bup: bupLegacy()}, }, time: baseTime, expectErr: assert.NoError, @@ -285,24 +309,24 @@ func (suite *BackupCleanupUnitSuite) TestCleanupOrphanedData() { { name: "MissingFieldsInBackup CausesCleanup", snapshots: []*manifest.EntryMetadata{ - snapNoDetails, - deetsNoSnapshot, + snapNoDetails(), + deetsNoSnapshot(), }, detailsModels: []*model.BaseModel{ - deetsLegacyNoSnapshot, + deetsLegacyNoSnapshot(), }, backups: []backupRes{ - {bup: bupNoSnapshot}, - {bup: bupLegacyNoSnapshot}, - {bup: bupNoDetails}, + {bup: bupNoSnapshot()}, + {bup: bupLegacyNoSnapshot()}, + {bup: bupNoDetails()}, }, expectDeleteIDs: []manifest.ID{ - manifest.ID(bupNoSnapshot.ModelStoreID), - manifest.ID(bupLegacyNoSnapshot.ModelStoreID), - manifest.ID(bupNoDetails.ModelStoreID), - manifest.ID(deetsLegacyNoSnapshot.ModelStoreID), - snapNoDetails.ID, - deetsNoSnapshot.ID, + manifest.ID(bupNoSnapshot().ModelStoreID), + 
manifest.ID(bupLegacyNoSnapshot().ModelStoreID), + manifest.ID(bupNoDetails().ModelStoreID), + manifest.ID(deetsLegacyNoSnapshot().ModelStoreID), + snapNoDetails().ID, + deetsNoSnapshot().ID, }, time: baseTime, expectErr: assert.NoError, @@ -310,20 +334,20 @@ func (suite *BackupCleanupUnitSuite) TestCleanupOrphanedData() { { name: "MissingSnapshot CausesCleanup", snapshots: []*manifest.EntryMetadata{ - deetsCurrent, + deetsCurrent(), }, detailsModels: []*model.BaseModel{ - deetsLegacy, + deetsLegacy(), }, backups: []backupRes{ - {bup: bupCurrent}, - {bup: bupLegacy}, + {bup: bupCurrent()}, + {bup: bupLegacy()}, }, expectDeleteIDs: []manifest.ID{ - manifest.ID(bupCurrent.ModelStoreID), - deetsCurrent.ID, - manifest.ID(bupLegacy.ModelStoreID), - manifest.ID(deetsLegacy.ModelStoreID), + manifest.ID(bupCurrent().ModelStoreID), + deetsCurrent().ID, + manifest.ID(bupLegacy().ModelStoreID), + manifest.ID(deetsLegacy().ModelStoreID), }, time: baseTime, expectErr: assert.NoError, @@ -331,38 +355,39 @@ func (suite *BackupCleanupUnitSuite) TestCleanupOrphanedData() { { name: "MissingDetails CausesCleanup", snapshots: []*manifest.EntryMetadata{ - snapCurrent, - snapLegacy, + snapCurrent(), + snapLegacy(), }, backups: []backupRes{ - {bup: bupCurrent}, - {bup: bupLegacy}, + {bup: bupCurrent()}, + {bup: bupLegacy()}, }, expectDeleteIDs: []manifest.ID{ - manifest.ID(bupCurrent.ModelStoreID), - manifest.ID(bupLegacy.ModelStoreID), - snapCurrent.ID, - snapLegacy.ID, + manifest.ID(bupCurrent().ModelStoreID), + manifest.ID(bupLegacy().ModelStoreID), + snapCurrent().ID, + snapLegacy().ID, }, time: baseTime, expectErr: assert.NoError, }, + // Tests with various errors from Storer. { name: "SnapshotsListError Fails", snapshotFetchErr: assert.AnError, backups: []backupRes{ - {bup: bupCurrent}, + {bup: bupCurrent()}, }, expectErr: assert.Error, }, { name: "LegacyDetailsListError Fails", snapshots: []*manifest.EntryMetadata{ - snapCurrent, + snapCurrent(), }, detailsModelListErr: assert.AnError, backups: []backupRes{ - {bup: bupCurrent}, + {bup: bupCurrent()}, }, time: baseTime, expectErr: assert.Error, @@ -370,8 +395,8 @@ func (suite *BackupCleanupUnitSuite) TestCleanupOrphanedData() { { name: "BackupIDsListError Fails", snapshots: []*manifest.EntryMetadata{ - snapCurrent, - deetsCurrent, + snapCurrent(), + deetsCurrent(), }, backupListErr: assert.AnError, time: baseTime, @@ -380,22 +405,22 @@ func (suite *BackupCleanupUnitSuite) TestCleanupOrphanedData() { { name: "BackupModelGetErrorNotFound CausesCleanup", snapshots: []*manifest.EntryMetadata{ - snapCurrent, - deetsCurrent, - snapLegacy, - snapNoDetails, + snapCurrent(), + deetsCurrent(), + snapLegacy(), + snapNoDetails(), }, detailsModels: []*model.BaseModel{ - deetsLegacy, + deetsLegacy(), }, backups: []backupRes{ - {bup: bupCurrent}, + {bup: bupCurrent()}, { - bup: bupLegacy, + bup: bupLegacy(), err: data.ErrNotFound, }, { - bup: bupNoDetails, + bup: bupNoDetails(), err: data.ErrNotFound, }, }, @@ -404,11 +429,11 @@ func (suite *BackupCleanupUnitSuite) TestCleanupOrphanedData() { // delete operation should ignore missing models though so there's no // issue. 
expectDeleteIDs: []manifest.ID{ - snapLegacy.ID, - manifest.ID(deetsLegacy.ModelStoreID), - manifest.ID(bupLegacy.ModelStoreID), - snapNoDetails.ID, - manifest.ID(bupNoDetails.ModelStoreID), + snapLegacy().ID, + manifest.ID(deetsLegacy().ModelStoreID), + manifest.ID(bupLegacy().ModelStoreID), + snapNoDetails().ID, + manifest.ID(bupNoDetails().ModelStoreID), }, time: baseTime, expectErr: assert.NoError, @@ -416,21 +441,21 @@ func (suite *BackupCleanupUnitSuite) TestCleanupOrphanedData() { { name: "BackupModelGetError Fails", snapshots: []*manifest.EntryMetadata{ - snapCurrent, - deetsCurrent, - snapLegacy, - snapNoDetails, + snapCurrent(), + deetsCurrent(), + snapLegacy(), + snapNoDetails(), }, detailsModels: []*model.BaseModel{ - deetsLegacy, + deetsLegacy(), }, backups: []backupRes{ - {bup: bupCurrent}, + {bup: bupCurrent()}, { - bup: bupLegacy, + bup: bupLegacy(), err: assert.AnError, }, - {bup: bupNoDetails}, + {bup: bupNoDetails()}, }, time: baseTime, expectErr: assert.Error, @@ -438,34 +463,35 @@ func (suite *BackupCleanupUnitSuite) TestCleanupOrphanedData() { { name: "DeleteError Fails", snapshots: []*manifest.EntryMetadata{ - snapCurrent, - deetsCurrent, - snapLegacy, - snapNoDetails, + snapCurrent(), + deetsCurrent(), + snapLegacy(), + snapNoDetails(), }, detailsModels: []*model.BaseModel{ - deetsLegacy, + deetsLegacy(), }, backups: []backupRes{ - {bup: bupCurrent}, - {bup: bupLegacy}, - {bup: bupNoDetails}, + {bup: bupCurrent()}, + {bup: bupLegacy()}, + {bup: bupNoDetails()}, }, expectDeleteIDs: []manifest.ID{ - snapNoDetails.ID, - manifest.ID(bupNoDetails.ModelStoreID), + snapNoDetails().ID, + manifest.ID(bupNoDetails().ModelStoreID), }, deleteErr: assert.AnError, time: baseTime, expectErr: assert.Error, }, + // Tests dealing with buffer times. { name: "MissingSnapshot BarelyTooYoungForCleanup Noops", snapshots: []*manifest.EntryMetadata{ - manifestWithTime(baseTime, deetsCurrent), + manifestWithTime(baseTime, deetsCurrent()), }, backups: []backupRes{ - {bup: backupWithTime(baseTime, bupCurrent)}, + {bup: backupWithTime(baseTime, bupCurrent())}, }, time: baseTime.Add(24 * time.Hour), buffer: 24 * time.Hour, @@ -474,14 +500,14 @@ func (suite *BackupCleanupUnitSuite) TestCleanupOrphanedData() { { name: "MissingSnapshot BarelyOldEnough CausesCleanup", snapshots: []*manifest.EntryMetadata{ - manifestWithTime(baseTime, deetsCurrent), + manifestWithTime(baseTime, deetsCurrent()), }, backups: []backupRes{ - {bup: backupWithTime(baseTime, bupCurrent)}, + {bup: backupWithTime(baseTime, bupCurrent())}, }, expectDeleteIDs: []manifest.ID{ - deetsCurrent.ID, - manifest.ID(bupCurrent.ModelStoreID), + deetsCurrent().ID, + manifest.ID(bupCurrent().ModelStoreID), }, time: baseTime.Add((24 * time.Hour) + time.Second), buffer: 24 * time.Hour, @@ -490,12 +516,12 @@ func (suite *BackupCleanupUnitSuite) TestCleanupOrphanedData() { { name: "BackupGetErrorNotFound TooYoung Noops", snapshots: []*manifest.EntryMetadata{ - manifestWithTime(baseTime, snapCurrent), - manifestWithTime(baseTime, deetsCurrent), + manifestWithTime(baseTime, snapCurrent()), + manifestWithTime(baseTime, deetsCurrent()), }, backups: []backupRes{ { - bup: backupWithTime(baseTime, bupCurrent), + bup: backupWithTime(baseTime, bupCurrent()), err: data.ErrNotFound, }, }, From 9f9ce34add7075ee2a218f05753d57f76eeaa755 Mon Sep 17 00:00:00 2001 From: neha_gupta Date: Tue, 22 Aug 2023 22:03:59 +0530 Subject: [PATCH 21/32] add handlers for channels (#4050) add Handlers interface for Channels. 
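As a usage sketch, an interface like this lets backup code run against a test double instead of live Graph calls. The stub below is illustrative only (it is not part of this patch) and assumes the interface shape added in handler.go:

// stubMessagesHandler is a hypothetical test double that returns canned
// values instead of calling Graph.
type stubMessagesHandler struct {
	msg models.ChatMessageable
}

// Compile-time proof that the stub satisfies the interface.
var _ BackupMessagesHandler = stubMessagesHandler{}

func (s stubMessagesHandler) GetMessageByID(
	ctx context.Context, teamID, channelID, itemID string,
) (models.ChatMessageable, error) {
	return s.msg, nil
}

func (s stubMessagesHandler) NewMessagePager(
	teamID, channelID string,
) api.MessageItemDeltaEnumerator {
	return nil // a fuller stub would return a fixed-page enumerator
}

func (s stubMessagesHandler) GetChannelByID(
	ctx context.Context, teamID, channelID string,
) (models.Channelable, error) {
	return nil, nil
}

func (s stubMessagesHandler) NewChannelPager(
	teamID, channelID string,
) api.ChannelItemDeltaEnumerator {
	return nil
}

func (s stubMessagesHandler) GetReplyByID(
	ctx context.Context, teamID, channelID, messageID string,
) (serialization.Parsable, error) {
	return nil, nil
}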
#### Does this PR need a docs update or release note? - [ ] :no_entry: No #### Type of change - [ ] :sunflower: Feature #### Issue(s) * # #### Test Plan --- .../m365/collection/groups/handler.go | 18 +++++++++ src/pkg/services/m365/api/channels.go | 1 + src/pkg/services/m365/api/channels_pager.go | 39 +++++++++++++++++++ 3 files changed, 58 insertions(+) create mode 100644 src/internal/m365/collection/groups/handler.go create mode 100644 src/pkg/services/m365/api/channels.go create mode 100644 src/pkg/services/m365/api/channels_pager.go diff --git a/src/internal/m365/collection/groups/handler.go b/src/internal/m365/collection/groups/handler.go new file mode 100644 index 000000000..d4a382149 --- /dev/null +++ b/src/internal/m365/collection/groups/handler.go @@ -0,0 +1,18 @@ +package groups + +import ( + "context" + + "github.com/microsoft/kiota-abstractions-go/serialization" + "github.com/microsoftgraph/msgraph-sdk-go/models" + + "github.com/alcionai/corso/src/pkg/services/m365/api" +) + +type BackupMessagesHandler interface { + GetMessageByID(ctx context.Context, teamID, channelID, itemID string) (models.ChatMessageable, error) + NewMessagePager(teamID, channelID string) api.MessageItemDeltaEnumerator + GetChannelByID(ctx context.Context, teamID, channelID string) (models.Channelable, error) + NewChannelPager(teamID, channelID string) api.ChannelItemDeltaEnumerator + GetReplyByID(ctx context.Context, teamID, channelID, messageID string) (serialization.Parsable, error) +} diff --git a/src/pkg/services/m365/api/channels.go b/src/pkg/services/m365/api/channels.go new file mode 100644 index 000000000..778f64ec1 --- /dev/null +++ b/src/pkg/services/m365/api/channels.go @@ -0,0 +1 @@ +package api diff --git a/src/pkg/services/m365/api/channels_pager.go b/src/pkg/services/m365/api/channels_pager.go new file mode 100644 index 000000000..599c09649 --- /dev/null +++ b/src/pkg/services/m365/api/channels_pager.go @@ -0,0 +1,39 @@ +package api + +import ( + "context" +) + +// --------------------------------------------------------------------------- +// item pager +// --------------------------------------------------------------------------- + +type MessageItemDeltaEnumerator interface { + GetPage(context.Context) (DeltaPageLinker, error) +} + +// TODO: implement +// var _ MessageItemDeltaEnumerator = &messagePageCtrl{} + +// type messagePageCtrl struct { +// gs graph.Servicer +// builder *teams.ItemChannelsItemMessagesRequestBuilder +// options *teams.ItemChannelsItemMessagesRequestBuilderGetRequestConfiguration +// } + +// --------------------------------------------------------------------------- +// channel pager +// --------------------------------------------------------------------------- + +type ChannelItemDeltaEnumerator interface { + GetPage(context.Context) (DeltaPageLinker, error) +} + +// TODO: implement +// var _ ChannelsItemDeltaEnumerator = &channelsPageCtrl{} + +// type channelsPageCtrl struct { +// gs graph.Servicer +// builder *teams.ItemChannelsChannelItemRequestBuilder +// options *teams.ItemChannelsChannelItemRequestBuilderGetRequestConfiguration +// } From 06862c3b8c4eb80d2a3e92da2f58d5141b98dd51 Mon Sep 17 00:00:00 2001 From: Keepers Date: Tue, 22 Aug 2023 11:25:06 -0600 Subject: [PATCH 22/32] add boilerplate groups backup collection (#4082) Adds the boilerplate for groups backup collection processing. Not necessarily functional at this time, due to missing dependencies and consts that aren't yet in the branch. Thus the lack of tests. 
It's just good enough to keep progress rolling forward. --- #### Does this PR need a docs update or release note? - [x] :no_entry: No #### Type of change - [x] :sunflower: Feature #### Issue(s) * #3989 #### Test Plan --- .../m365/collection/exchange/backup.go | 2 - .../m365/collection/exchange/collection.go | 3 +- src/internal/m365/collection/groups/backup.go | 318 ++++++++++++++++++ .../m365/collection/groups/collection.go | 180 ++++++++++ .../m365/collection/groups/handler.go | 18 - .../m365/collection/groups/handlers.go | 33 ++ src/pkg/services/m365/api/channels_pager.go | 18 +- src/pkg/services/m365/api/item_pager.go | 19 +- 8 files changed, 561 insertions(+), 30 deletions(-) create mode 100644 src/internal/m365/collection/groups/backup.go create mode 100644 src/internal/m365/collection/groups/collection.go delete mode 100644 src/internal/m365/collection/groups/handler.go create mode 100644 src/internal/m365/collection/groups/handlers.go diff --git a/src/internal/m365/collection/exchange/backup.go b/src/internal/m365/collection/exchange/backup.go index 359701629..f5ebd1783 100644 --- a/src/internal/m365/collection/exchange/backup.go +++ b/src/internal/m365/collection/exchange/backup.go @@ -75,8 +75,6 @@ func CreateCollections( return nil, clues.Wrap(err, "filling collections") } - foldersComplete <- struct{}{} - for _, coll := range collections { allCollections = append(allCollections, coll) } diff --git a/src/internal/m365/collection/exchange/collection.go b/src/internal/m365/collection/exchange/collection.go index 8e0c0f897..ba421763c 100644 --- a/src/internal/m365/collection/exchange/collection.go +++ b/src/internal/m365/collection/exchange/collection.go @@ -39,8 +39,7 @@ const ( // Collection implements the interface from data.Collection // Structure holds data for an Exchange application for a single user type Collection struct { - // M365 user - user string // M365 user + user string data chan data.Item // added is a list of existing item IDs that were added to a container diff --git a/src/internal/m365/collection/groups/backup.go b/src/internal/m365/collection/groups/backup.go new file mode 100644 index 000000000..9b31126a1 --- /dev/null +++ b/src/internal/m365/collection/groups/backup.go @@ -0,0 +1,318 @@ +package groups + +import ( + "context" + + "github.com/alcionai/clues" + "github.com/microsoftgraph/msgraph-sdk-go/models" + + "github.com/alcionai/corso/src/internal/common/ptr" + "github.com/alcionai/corso/src/internal/data" + "github.com/alcionai/corso/src/internal/m365/graph" + "github.com/alcionai/corso/src/internal/m365/support" + "github.com/alcionai/corso/src/internal/observe" + "github.com/alcionai/corso/src/internal/operations/inject" + "github.com/alcionai/corso/src/pkg/control" + "github.com/alcionai/corso/src/pkg/fault" + "github.com/alcionai/corso/src/pkg/logger" + "github.com/alcionai/corso/src/pkg/path" + "github.com/alcionai/corso/src/pkg/selectors" + "github.com/alcionai/corso/src/pkg/services/m365/api" +) + +// TODO: incremental support +// multiple lines in this file are commented out so that +// we can focus on v0 backups and re-integrate them later +// for v1 incrementals. +// since these lines represent otherwise standard boilerplate, +// it's simpler to comment them for tracking than to delete +// and re-discover them later. 
+ +func CreateCollections( + ctx context.Context, + bpc inject.BackupProducerConfig, + handler BackupHandler, + tenantID string, + scope selectors.GroupsScope, + // dps DeltaPaths, + su support.StatusUpdater, + errs *fault.Bus, +) ([]data.BackupCollection, error) { + ctx = clues.Add(ctx, "category", scope.Category().PathType()) + + var ( + allCollections = make([]data.BackupCollection, 0) + category = scope.Category().PathType() + qp = graph.QueryParams{ + Category: category, + ProtectedResource: bpc.ProtectedResource, + TenantID: tenantID, + } + ) + + catProgress := observe.MessageWithCompletion( + ctx, + observe.Bulletf("%s", qp.Category)) + defer close(catProgress) + + // TODO(keepers): probably shouldn't call out channels here specifically. + // This should be a generic container handler. But we don't need + // to worry about that until if/when we use this code to get email + // conversations as well. + // Also, this should be produced by the Handler. + // chanPager := handler.NewChannelsPager(qp.ProtectedResource.ID()) + // TODO(neha): enumerate channels + channels := []graph.Displayable{} + + collections, err := populateCollections( + ctx, + qp, + handler, + su, + channels, + scope, + // dps, + bpc.Options, + errs) + if err != nil { + return nil, clues.Wrap(err, "filling collections") + } + + for _, coll := range collections { + allCollections = append(allCollections, coll) + } + + return allCollections, nil +} + +func populateCollections( + ctx context.Context, + qp graph.QueryParams, + bh BackupHandler, + statusUpdater support.StatusUpdater, + channels []graph.Displayable, + scope selectors.GroupsScope, + // dps DeltaPaths, + ctrlOpts control.Options, + errs *fault.Bus, +) (map[string]data.BackupCollection, error) { + // channel ID -> BackupCollection. + channelCollections := map[string]data.BackupCollection{} + + // channel ID -> delta url or folder path lookups + // TODO(neha/keepers): figure out if deltas are stored per channel, or per group. + // deltaURLs = map[string]string{} + // currPaths = map[string]string{} + // copy of previousPaths. every channel present in the slice param + // gets removed from this map; the remaining channels at the end of + // the process have been deleted. + // tombstones = makeTombstones(dps) + + logger.Ctx(ctx).Infow("filling collections") + // , "len_deltapaths", len(dps)) + + el := errs.Local() + + for _, c := range channels { + if el.Failure() != nil { + return nil, el.Failure() + } + + cID := ptr.Val(c.GetId()) + // delete(tombstones, cID) + + var ( + err error + // dp = dps[cID] + // prevDelta = dp.Delta + // prevPathStr = dp.Path // do not log: pii; log prevPath instead + // prevPath path.Path + ictx = clues.Add( + ctx, + "channel_id", cID) + // "previous_delta", pii.SafeURL{ + // URL: prevDelta, + // SafePathElems: graph.SafeURLPathParams, + // SafeQueryKeys: graph.SafeURLQueryParams, + // }) + ) + + // currPath, locPath + // TODO(rkeepers): the handler should provide this functionality. + // Only create a collection if the path matches the scope. + if !includeContainer(ictx, qp, c, scope, qp.Category) { + continue + } + + // if len(prevPathStr) > 0 { + // if prevPath, err = pathFromPrevString(prevPathStr); err != nil { + // logger.CtxErr(ictx, err).Error("parsing prev path") + // // if the previous path is unusable, then the delta must be, too. + // prevDelta = "" + // } + // } + + // ictx = clues.Add(ictx, "previous_path", prevPath) + + // TODO: the handler should provide this implementation. 
+ items, err := collectItems( + ctx, + bh.NewMessagePager(qp.ProtectedResource.ID(), ptr.Val(c.GetId()))) + if err != nil { + el.AddRecoverable(ctx, clues.Stack(err)) + continue + } + + // if len(newDelta.URL) > 0 { + // deltaURLs[cID] = newDelta.URL + // } else if !newDelta.Reset { + // logger.Ctx(ictx).Info("missing delta url") + // } + + var prevPath path.Path + + // TODO: retrieve from handler + currPath, err := path.Builder{}. + Append(ptr.Val(c.GetId())). + ToDataLayerPath( + qp.TenantID, + qp.ProtectedResource.ID(), + path.GroupsService, + qp.Category, + true) + if err != nil { + el.AddRecoverable(ctx, clues.Stack(err)) + continue + } + + edc := NewCollection( + qp.ProtectedResource.ID(), + currPath, + prevPath, + path.Builder{}.Append(ptr.Val(c.GetDisplayName())), + qp.Category, + statusUpdater, + ctrlOpts) + + channelCollections[cID] = &edc + + // TODO: handle deleted items for v1 backup. + // // Remove any deleted IDs from the set of added IDs because items that are + // // deleted and then restored will have a different ID than they did + // // originally. + // for _, remove := range removed { + // delete(edc.added, remove) + // edc.removed[remove] = struct{}{} + // } + + // // add the current path for the container ID to be used in the next backup + // // as the "previous path", for reference in case of a rename or relocation. + // currPaths[cID] = currPath.String() + + // FIXME: normally this goes before removal, but linters + for _, item := range items { + edc.added[ptr.Val(item.GetId())] = struct{}{} + } + } + + // TODO: handle tombstones here + + logger.Ctx(ctx).Infow( + "adding metadata collection entries", + // "num_deltas_entries", len(deltaURLs), + "num_paths_entries", len(channelCollections)) + + // col, err := graph.MakeMetadataCollection( + // qp.TenantID, + // qp.ProtectedResource.ID(), + // path.ExchangeService, + // qp.Category, + // []graph.MetadataCollectionEntry{ + // graph.NewMetadataEntry(graph.PreviousPathFileName, currPaths), + // graph.NewMetadataEntry(graph.DeltaURLsFileName, deltaURLs), + // }, + // statusUpdater) + // if err != nil { + // return nil, clues.Wrap(err, "making metadata collection") + // } + + // channelCollections["metadata"] = col + + return channelCollections, el.Failure() +} + +func collectItems( + ctx context.Context, + pager api.ChannelMessageDeltaEnumerator, +) ([]models.ChatMessageable, error) { + items := []models.ChatMessageable{} + + for { + // assume delta urls here, which allows single-token consumption + page, err := pager.GetPage(graph.ConsumeNTokens(ctx, graph.SingleGetOrDeltaLC)) + if err != nil { + return nil, graph.Wrap(ctx, err, "getting page") + } + + // if graph.IsErrInvalidDelta(err) { + // logger.Ctx(ctx).Infow("Invalid previous delta link", "link", prevDelta) + + // invalidPrevDelta = true + // newPaths = map[string]string{} + + // pager.Reset() + + // continue + // } + + vals, err := pager.ValuesIn(page) + if err != nil { + return nil, graph.Wrap(ctx, err, "getting items in page") + } + + items = append(items, vals...) + + nextLink, _ := api.NextAndDeltaLink(page) + + // if len(deltaLink) > 0 { + // newDeltaURL = deltaLink + // } + + // Check if there are more items + if len(nextLink) == 0 { + break + } + + logger.Ctx(ctx).Debugw("found nextLink", "next_link", nextLink) + pager.SetNext(nextLink) + } + + return items, nil +} + +// Returns true if the container passes the scope comparison and should be included. +// Returns: +// - the path representing the directory as it should be stored in the repository. 
+// - the human-readable path using display names. +// - true if the path passes the scope comparison. +func includeContainer( + ctx context.Context, + qp graph.QueryParams, + gd graph.Displayable, + scope selectors.GroupsScope, + category path.CategoryType, +) bool { + // assume a single-level hierarchy + directory := ptr.Val(gd.GetDisplayName()) + + // TODO(keepers): awaiting parent branch to update to main + ok := scope.Matches(selectors.GroupsCategoryUnknown, directory) + + logger.Ctx(ctx).With( + "included", ok, + "scope", scope, + "match_target", directory, + ).Debug("backup folder selection filter") + + return ok +} diff --git a/src/internal/m365/collection/groups/collection.go b/src/internal/m365/collection/groups/collection.go new file mode 100644 index 000000000..c1e6a4042 --- /dev/null +++ b/src/internal/m365/collection/groups/collection.go @@ -0,0 +1,180 @@ +package groups + +import ( + "bytes" + "context" + "io" + "time" + + "github.com/alcionai/corso/src/internal/data" + "github.com/alcionai/corso/src/internal/m365/support" + "github.com/alcionai/corso/src/pkg/backup/details" + "github.com/alcionai/corso/src/pkg/control" + "github.com/alcionai/corso/src/pkg/fault" + "github.com/alcionai/corso/src/pkg/path" +) + +var ( + _ data.BackupCollection = &Collection{} + _ data.Item = &Item{} + _ data.ItemInfo = &Item{} + _ data.ItemModTime = &Item{} +) + +const ( + collectionChannelBufferSize = 1000 + numberOfRetries = 4 +) + +type Collection struct { + protectedResource string + items chan data.Item + + // added is a list of existing item IDs that were added to a container + added map[string]struct{} + // removed is a list of item IDs that were deleted from, or moved out, of a container + removed map[string]struct{} + + // items itemGetterSerializer + + category path.CategoryType + statusUpdater support.StatusUpdater + ctrl control.Options + + // FullPath is the current hierarchical path used by this collection. + fullPath path.Path + + // PrevPath is the previous hierarchical path used by this collection. + // It may be the same as fullPath, if the folder was not renamed or + // moved. It will be empty on its first retrieval. + prevPath path.Path + + // LocationPath contains the path with human-readable display names. + // IE: "/Inbox/Important" instead of "/abcdxyz123/algha=lgkhal=t" + locationPath *path.Builder + + state data.CollectionState + + // doNotMergeItems should only be true if the old delta token expired. + // doNotMergeItems bool +} + +// NewExchangeDataCollection creates an ExchangeDataCollection. +// State of the collection is set as an observation of the current +// and previous paths. If the curr path is nil, the state is assumed +// to be deleted. If the prev path is nil, it is assumed newly created. +// If both are populated, then state is either moved (if they differ), +// or notMoved (if they match). 
+func NewCollection( + protectedResource string, + curr, prev path.Path, + location *path.Builder, + category path.CategoryType, + statusUpdater support.StatusUpdater, + ctrlOpts control.Options, + // doNotMergeItems bool, +) Collection { + collection := Collection{ + added: make(map[string]struct{}, 0), + category: category, + ctrl: ctrlOpts, + items: make(chan data.Item, collectionChannelBufferSize), + // doNotMergeItems: doNotMergeItems, + fullPath: curr, + locationPath: location, + prevPath: prev, + removed: make(map[string]struct{}, 0), + state: data.StateOf(prev, curr), + statusUpdater: statusUpdater, + protectedResource: protectedResource, + } + + return collection +} + +// Items utility function to asynchronously execute process to fill data channel with +// M365 exchange objects and returns the data channel +func (col *Collection) Items(ctx context.Context, errs *fault.Bus) <-chan data.Item { + // go col.streamItems(ctx, errs) + return col.items +} + +// FullPath returns the Collection's fullPath []string +func (col *Collection) FullPath() path.Path { + return col.fullPath +} + +// LocationPath produces the Collection's full path, but with display names +// instead of IDs in the folders. Only populated for Calendars. +func (col *Collection) LocationPath() *path.Builder { + return col.locationPath +} + +// TODO(ashmrtn): Fill in with previous path once the Controller compares old +// and new folder hierarchies. +func (col Collection) PreviousPath() path.Path { + return col.prevPath +} + +func (col Collection) State() data.CollectionState { + return col.state +} + +func (col Collection) DoNotMergeItems() bool { + // TODO: depends on whether or not deltas are valid + return true +} + +// --------------------------------------------------------------------------- +// items +// --------------------------------------------------------------------------- + +// Item represents a single item retrieved from exchange +type Item struct { + id string + // TODO: We may need this to be a "oneOf" of `message`, `contact`, etc. + // going forward. Using []byte for now but I assume we'll have + // some structured type in here (serialization to []byte can be done in `Read`) + message []byte + info *details.ExchangeInfo // temporary change to bring populate function into directory + // TODO(ashmrtn): Can probably eventually be sourced from info as there's a + // request to provide modtime in ItemInfo structs. + modTime time.Time + + // true if the item was marked by graph as deleted. 
+ deleted bool +} + +func (i *Item) ID() string { + return i.id +} + +func (i *Item) ToReader() io.ReadCloser { + return io.NopCloser(bytes.NewReader(i.message)) +} + +func (i Item) Deleted() bool { + return i.deleted +} + +func (i *Item) Info() details.ItemInfo { + return details.ItemInfo{Exchange: i.info} +} + +func (i *Item) ModTime() time.Time { + return i.modTime +} + +func NewItem( + identifier string, + dataBytes []byte, + detail details.ExchangeInfo, + modTime time.Time, +) Item { + return Item{ + id: identifier, + message: dataBytes, + info: &detail, + modTime: modTime, + } +} diff --git a/src/internal/m365/collection/groups/handler.go b/src/internal/m365/collection/groups/handler.go deleted file mode 100644 index d4a382149..000000000 --- a/src/internal/m365/collection/groups/handler.go +++ /dev/null @@ -1,18 +0,0 @@ -package groups - -import ( - "context" - - "github.com/microsoft/kiota-abstractions-go/serialization" - "github.com/microsoftgraph/msgraph-sdk-go/models" - - "github.com/alcionai/corso/src/pkg/services/m365/api" -) - -type BackupMessagesHandler interface { - GetMessageByID(ctx context.Context, teamID, channelID, itemID string) (models.ChatMessageable, error) - NewMessagePager(teamID, channelID string) api.MessageItemDeltaEnumerator - GetChannelByID(ctx context.Context, teamID, channelID string) (models.Channelable, error) - NewChannelPager(teamID, channelID string) api.ChannelItemDeltaEnumerator - GetReplyByID(ctx context.Context, teamID, channelID, messageID string) (serialization.Parsable, error) -} diff --git a/src/internal/m365/collection/groups/handlers.go b/src/internal/m365/collection/groups/handlers.go new file mode 100644 index 000000000..f5a28fd28 --- /dev/null +++ b/src/internal/m365/collection/groups/handlers.go @@ -0,0 +1,33 @@ +package groups + +import ( + "context" + + "github.com/microsoft/kiota-abstractions-go/serialization" + "github.com/microsoftgraph/msgraph-sdk-go/models" + + "github.com/alcionai/corso/src/pkg/services/m365/api" +) + +type BackupHandler interface { + GetChannelByID( + ctx context.Context, + teamID, channelID string, + ) (models.Channelable, error) + NewChannelsPager( + teamID string, + ) api.ChannelDeltaEnumerator + + GetMessageByID( + ctx context.Context, + teamID, channelID, itemID string, + ) (models.ChatMessageable, error) + NewMessagePager( + teamID, channelID string, + ) api.ChannelMessageDeltaEnumerator + + GetMessageReplies( + ctx context.Context, + teamID, channelID, messageID string, + ) (serialization.Parsable, error) +} diff --git a/src/pkg/services/m365/api/channels_pager.go b/src/pkg/services/m365/api/channels_pager.go index 599c09649..58aecaf6c 100644 --- a/src/pkg/services/m365/api/channels_pager.go +++ b/src/pkg/services/m365/api/channels_pager.go @@ -1,19 +1,21 @@ package api import ( - "context" + "github.com/microsoftgraph/msgraph-sdk-go/models" ) // --------------------------------------------------------------------------- // item pager // --------------------------------------------------------------------------- -type MessageItemDeltaEnumerator interface { - GetPage(context.Context) (DeltaPageLinker, error) +type ChannelMessageDeltaEnumerator interface { + DeltaGetPager + ValuesInPageLinker[models.ChatMessageable] + SetNextLinker } // TODO: implement -// var _ MessageItemDeltaEnumerator = &messagePageCtrl{} +// var _ ChannelMessageDeltaEnumerator = &messagePageCtrl{} // type messagePageCtrl struct { // gs graph.Servicer @@ -25,12 +27,14 @@ type MessageItemDeltaEnumerator interface { // channel pager // 
--------------------------------------------------------------------------- -type ChannelItemDeltaEnumerator interface { - GetPage(context.Context) (DeltaPageLinker, error) +type ChannelDeltaEnumerator interface { + DeltaGetPager + ValuesInPageLinker[models.Channelable] + SetNextLinker } // TODO: implement -// var _ ChannelsItemDeltaEnumerator = &channelsPageCtrl{} +// var _ ChannelDeltaEnumerator = &channelsPageCtrl{} // type channelsPageCtrl struct { // gs graph.Servicer diff --git a/src/pkg/services/m365/api/item_pager.go b/src/pkg/services/m365/api/item_pager.go index ef54b1a3d..4cb272d51 100644 --- a/src/pkg/services/m365/api/item_pager.go +++ b/src/pkg/services/m365/api/item_pager.go @@ -13,9 +13,18 @@ import ( ) // --------------------------------------------------------------------------- -// common interfaces and funcs +// common interfaces // --------------------------------------------------------------------------- +// TODO(keepers): replace all matching uses of GetPage with this. +type DeltaGetPager interface { + GetPage(context.Context) (DeltaPageLinker, error) +} + +type ValuesInPageLinker[T any] interface { + ValuesIn(PageLinker) ([]T, error) +} + type PageLinker interface { GetOdataNextLink() *string } @@ -25,6 +34,14 @@ type DeltaPageLinker interface { GetOdataDeltaLink() *string } +type SetNextLinker interface { + SetNext(nextLink string) +} + +// --------------------------------------------------------------------------- +// common funcs +// --------------------------------------------------------------------------- + // IsNextLinkValid separate check to investigate whether error is func IsNextLinkValid(next string) bool { return !strings.Contains(next, `users//`) From 74b92adbc35c7c0ffbc0fecad8b75f137da09491 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 23 Aug 2023 06:01:16 +0000 Subject: [PATCH 23/32] =?UTF-8?q?=E2=AC=86=EF=B8=8F=20Bump=20github.com/aw?= =?UTF-8?q?s/aws-sdk-go=20from=201.44.328=20to=201.44.329=20in=20/src=20(#?= =?UTF-8?q?4094)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.44.328 to 1.44.329.
Release notes

Sourced from github.com/aws/aws-sdk-go's releases.

Release v1.44.329 (2023-08-22)

Service Client Updates

  • service/ce: Updates service API and documentation
  • service/globalaccelerator: Updates service documentation
  • service/rds: Updates service API, documentation, waiters, paginators, and examples
    • Adding parameters to CreateCustomDbEngineVersion reserved for future use.
  • service/verifiedpermissions: Updates service API and documentation
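For reference, applying a bump like this by hand follows the standard Go module workflow (run from the src/ directory, since that is where this repo's go.mod lives):

    cd src
    go get github.com/aws/aws-sdk-go@v1.44.329
    go mod tidy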
--- src/go.mod | 2 +- src/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/go.mod b/src/go.mod index 8438cb70f..eca537743 100644 --- a/src/go.mod +++ b/src/go.mod @@ -8,7 +8,7 @@ require ( github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.1 github.com/alcionai/clues v0.0.0-20230728164842-7dc4795a43e4 github.com/armon/go-metrics v0.4.1 - github.com/aws/aws-sdk-go v1.44.328 + github.com/aws/aws-sdk-go v1.44.329 github.com/aws/aws-xray-sdk-go v1.8.1 github.com/cenkalti/backoff/v4 v4.2.1 github.com/google/uuid v1.3.1 diff --git a/src/go.sum b/src/go.sum index b88a52f28..9bd8f2480 100644 --- a/src/go.sum +++ b/src/go.sum @@ -66,8 +66,8 @@ github.com/andybalholm/brotli v1.0.5 h1:8uQZIdzKmjc/iuPu7O2ioW48L81FgatrcpfFmiq/ github.com/andybalholm/brotli v1.0.5/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= -github.com/aws/aws-sdk-go v1.44.328 h1:WBwlf8ym9SDQ/GTIBO9eXyvwappKJyOetWJKl4mT7ZU= -github.com/aws/aws-sdk-go v1.44.328/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go v1.44.329 h1:Rqy+wYI8h+iq+FphR59KKTsHR1Lz7YiwRqFzWa7xoYU= +github.com/aws/aws-sdk-go v1.44.329/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/aws/aws-xray-sdk-go v1.8.1 h1:O4pXV+hnCskaamGsZnFpzHyAmgPGusBMN6i7nnsy0Fo= github.com/aws/aws-xray-sdk-go v1.8.1/go.mod h1:wMmVYzej3sykAttNBkXQHK/+clAPWTOrPiajEk7Cp3A= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= From 7b6c6026ad56f5bba6729199c2e54fcde37261c9 Mon Sep 17 00:00:00 2001 From: Abin Simon Date: Wed, 23 Aug 2023 13:34:32 +0530 Subject: [PATCH 24/32] Group CLI (#4043) CLI changes for groups. --- #### Does this PR need a docs update or release note? 
- [ ] :white_check_mark: Yes, it's included - [x] :clock1: Yes, but in a later PR - [ ] :no_entry: No #### Type of change - [x] :sunflower: Feature - [ ] :bug: Bugfix - [ ] :world_map: Documentation - [ ] :robot: Supportability/Tests - [ ] :computer: CI/Deployment - [ ] :broom: Tech Debt/Cleanup #### Issue(s) * https://github.com/alcionai/corso/issues/3990 * #### Test Plan - [ ] :muscle: Manual - [x] :zap: Unit test - [ ] :green_heart: E2E --- src/cli/backup/backup.go | 1 + src/cli/backup/groups.go | 147 +++++++++++++++- src/cli/utils/groups.go | 57 +++++++ src/cli/utils/groups_test.go | 161 ++++++++++++++++++ src/internal/m365/backup.go | 24 ++- .../m365/collection/drive/group_handler.go | 23 +-- src/pkg/backup/details/groups.go | 56 +++++- src/pkg/backup/details/iteminfo.go | 2 +- src/pkg/selectors/groups.go | 1 - src/pkg/services/m365/groups.go | 23 ++- src/pkg/services/m365/groups_test.go | 25 +++ 11 files changed, 486 insertions(+), 34 deletions(-) create mode 100644 src/cli/utils/groups_test.go diff --git a/src/cli/backup/backup.go b/src/cli/backup/backup.go index 56b5c5ef4..c21f5cbb3 100644 --- a/src/cli/backup/backup.go +++ b/src/cli/backup/backup.go @@ -39,6 +39,7 @@ var serviceCommands = []func(cmd *cobra.Command) *cobra.Command{ addExchangeCommands, addOneDriveCommands, addSharePointCommands, + addGroupsCommands, addTeamsCommands, } diff --git a/src/cli/backup/groups.go b/src/cli/backup/groups.go index 1dc490ae7..f4cc101f0 100644 --- a/src/cli/backup/groups.go +++ b/src/cli/backup/groups.go @@ -1,14 +1,27 @@ package backup import ( + "context" + "errors" + "github.com/alcionai/clues" "github.com/spf13/cobra" "github.com/spf13/pflag" + "golang.org/x/exp/slices" "github.com/alcionai/corso/src/cli/flags" . "github.com/alcionai/corso/src/cli/print" + "github.com/alcionai/corso/src/cli/repo" "github.com/alcionai/corso/src/cli/utils" + "github.com/alcionai/corso/src/internal/common/idname" + "github.com/alcionai/corso/src/internal/data" + "github.com/alcionai/corso/src/pkg/backup/details" + "github.com/alcionai/corso/src/pkg/fault" + "github.com/alcionai/corso/src/pkg/filters" "github.com/alcionai/corso/src/pkg/path" + "github.com/alcionai/corso/src/pkg/repository" + "github.com/alcionai/corso/src/pkg/selectors" + "github.com/alcionai/corso/src/pkg/services/m365" ) // ------------------------------------------------------------------------------------------------ @@ -134,7 +147,38 @@ func createGroupsCmd(cmd *cobra.Command, args []string) error { return nil } - return Only(ctx, utils.ErrNotYetImplemented) + if err := validateGroupsBackupCreateFlags(flags.GroupFV, flags.CategoryDataFV); err != nil { + return err + } + + r, acct, err := utils.AccountConnectAndWriteRepoConfig(ctx, path.GroupsService, repo.S3Overrides(cmd)) + if err != nil { + return Only(ctx, err) + } + + defer utils.CloseRepo(ctx, r) + + // TODO: log/print recoverable errors + errs := fault.New(false) + + ins, err := m365.GroupsMap(ctx, *acct, errs) + if err != nil { + return Only(ctx, clues.Wrap(err, "Failed to retrieve M365 groups")) + } + + sel := groupsBackupCreateSelectors(ctx, ins, flags.GroupFV, flags.CategoryDataFV) + selectorSet := []selectors.Selector{} + + for _, discSel := range sel.SplitByResourceOwner(ins.IDs()) { + selectorSet = append(selectorSet, discSel.Selector) + } + + return runBackups( + ctx, + r, + "Group", "group", + selectorSet, + ins) } // ------------------------------------------------------------------------------------------------ @@ -172,17 +216,71 @@ func groupsDetailsCmd() *cobra.Command 
{ // processes a groups service backup. func detailsGroupsCmd(cmd *cobra.Command, args []string) error { - ctx := cmd.Context() - if utils.HasNoFlagsAndShownHelp(cmd) { return nil } - if err := validateGroupBackupCreateFlags(flags.GroupFV); err != nil { + ctx := cmd.Context() + opts := utils.MakeGroupsOpts(cmd) + + r, _, _, ctrlOpts, err := utils.GetAccountAndConnect(ctx, path.GroupsService, repo.S3Overrides(cmd)) + if err != nil { return Only(ctx, err) } - return Only(ctx, utils.ErrNotYetImplemented) + defer utils.CloseRepo(ctx, r) + + ds, err := runDetailsGroupsCmd(ctx, r, flags.BackupIDFV, opts, ctrlOpts.SkipReduce) + if err != nil { + return Only(ctx, err) + } + + if len(ds.Entries) == 0 { + Info(ctx, selectors.ErrorNoMatchingItems) + return nil + } + + ds.PrintEntries(ctx) + + return nil +} + +// runDetailsGroupsCmd actually performs the lookup in backup details. +// the fault.Errors return is always non-nil. Callers should check if +// errs.Failure() == nil. +func runDetailsGroupsCmd( + ctx context.Context, + r repository.BackupGetter, + backupID string, + opts utils.GroupsOpts, + skipReduce bool, +) (*details.Details, error) { + if err := utils.ValidateGroupsRestoreFlags(backupID, opts); err != nil { + return nil, err + } + + ctx = clues.Add(ctx, "backup_id", backupID) + + d, _, errs := r.GetBackupDetails(ctx, backupID) + // TODO: log/track recoverable errors + if errs.Failure() != nil { + if errors.Is(errs.Failure(), data.ErrNotFound) { + return nil, clues.New("no backup exists with the id " + backupID) + } + + return nil, clues.Wrap(errs.Failure(), "Failed to get backup details in the repository") + } + + ctx = clues.Add(ctx, "details_entries", len(d.Entries)) + + if !skipReduce { + sel := utils.IncludeGroupsRestoreDataSelectors(ctx, opts) + sel.Configure(selectors.Config{OnlyMatchItemNames: true}) + utils.FilterGroupsRestoreInfoSelectors(sel, opts) + d = sel.Reduce(ctx, d, errs) + } + + return d, nil } // ------------------------------------------------------------------------------------------------ @@ -208,7 +306,7 @@ func deleteGroupsCmd(cmd *cobra.Command, args []string) error { // helpers // --------------------------------------------------------------------------- -func validateGroupBackupCreateFlags(groups []string) error { +func validateGroupsBackupCreateFlags(groups, cats []string) error { if len(groups) == 0 { return clues.New( "requires one or more --" + @@ -228,3 +326,40 @@ func validateGroupBackupCreateFlags(groups []string) error { return nil } + +// TODO: users might specify a data type, this only supports AllData(). 
+func groupsBackupCreateSelectors( + ctx context.Context, + ins idname.Cacher, + group, cats []string, +) *selectors.GroupsBackup { + if filters.PathContains(group).Compare(flags.Wildcard) { + return includeAllGroupWithCategories(ins, cats) + } + + sel := selectors.NewGroupsBackup(slices.Clone(group)) + + return addGroupsCategories(sel, cats) +} + +func includeAllGroupWithCategories(ins idname.Cacher, categories []string) *selectors.GroupsBackup { + return addGroupsCategories(selectors.NewGroupsBackup(ins.IDs()), categories) +} + +func addGroupsCategories(sel *selectors.GroupsBackup, cats []string) *selectors.GroupsBackup { + if len(cats) == 0 { + sel.Include(sel.AllData()) + } + + // TODO(meain): handle filtering + // for _, d := range cats { + // switch d { + // case dataLibraries: + // sel.Include(sel.LibraryFolders(selectors.Any())) + // case dataPages: + // sel.Include(sel.Pages(selectors.Any())) + // } + // } + + return sel +} diff --git a/src/cli/utils/groups.go b/src/cli/utils/groups.go index 9b0827d46..cabc9f3c6 100644 --- a/src/cli/utils/groups.go +++ b/src/cli/utils/groups.go @@ -1,9 +1,13 @@ package utils import ( + "context" + + "github.com/alcionai/clues" "github.com/spf13/cobra" "github.com/alcionai/corso/src/cli/flags" + "github.com/alcionai/corso/src/pkg/selectors" ) type GroupsOpts struct { @@ -28,3 +32,56 @@ func MakeGroupsOpts(cmd *cobra.Command) GroupsOpts { Populated: flags.GetPopulatedFlags(cmd), } } + +// ValidateGroupsRestoreFlags checks common flags for correctness and interdependencies +func ValidateGroupsRestoreFlags(backupID string, opts GroupsOpts) error { + if len(backupID) == 0 { + return clues.New("a backup ID is required") + } + + // TODO(meain): selectors (refer sharepoint) + + return validateRestoreConfigFlags(flags.CollisionsFV, opts.RestoreCfg) +} + +// AddGroupInfo adds the scope of the provided values to the selector's +// filter set +func AddGroupInfo( + sel *selectors.GroupsRestore, + v string, + f func(string) []selectors.GroupsScope, +) { + if len(v) == 0 { + return + } + + sel.Filter(f(v)) +} + +// IncludeGroupsRestoreDataSelectors builds the common data-selector +// inclusions for Group commands. +func IncludeGroupsRestoreDataSelectors(ctx context.Context, opts GroupsOpts) *selectors.GroupsRestore { + groups := opts.Groups + + ls := len(opts.Groups) + + if ls == 0 { + groups = selectors.Any() + } + + sel := selectors.NewGroupsRestore(groups) + + // TODO(meain): add selectors + sel.Include(sel.AllData()) + + return sel +} + +// FilterGroupsRestoreInfoSelectors builds the common info-selector filters. 
+func FilterGroupsRestoreInfoSelectors( + sel *selectors.GroupsRestore, + opts GroupsOpts, +) { + // TODO(meain) + // AddGroupInfo(sel, opts.GroupID, sel.Library) +} diff --git a/src/cli/utils/groups_test.go b/src/cli/utils/groups_test.go new file mode 100644 index 000000000..e2a48faf0 --- /dev/null +++ b/src/cli/utils/groups_test.go @@ -0,0 +1,161 @@ +package utils_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" + + "github.com/alcionai/corso/src/cli/utils" + "github.com/alcionai/corso/src/internal/tester" +) + +type GroupsUtilsSuite struct { + tester.Suite +} + +func TestGroupsUtilsSuite(t *testing.T) { + suite.Run(t, &GroupsUtilsSuite{Suite: tester.NewUnitSuite(t)}) +} + +// Tests selector build for Groups properly +// differentiates between the 3 categories: Pages, Libraries and Lists CLI +func (suite *GroupsUtilsSuite) TestIncludeGroupsRestoreDataSelectors() { + var ( + empty = []string{} + single = []string{"single"} + multi = []string{"more", "than", "one"} + ) + + table := []struct { + name string + opts utils.GroupsOpts + expectIncludeLen int + }{ + { + name: "no inputs", + opts: utils.GroupsOpts{}, + expectIncludeLen: 2, + }, + { + name: "empty", + opts: utils.GroupsOpts{ + Groups: empty, + }, + expectIncludeLen: 2, + }, + { + name: "single inputs", + opts: utils.GroupsOpts{ + Groups: single, + }, + expectIncludeLen: 2, + }, + { + name: "multi inputs", + opts: utils.GroupsOpts{ + Groups: multi, + }, + expectIncludeLen: 2, + }, + // TODO Add library specific tests once we have filters based + // on library folders + } + for _, test := range table { + suite.Run(test.name, func() { + t := suite.T() + + ctx, flush := tester.NewContext(t) + defer flush() + + sel := utils.IncludeGroupsRestoreDataSelectors(ctx, test.opts) + assert.Len(suite.T(), sel.Includes, test.expectIncludeLen) + }) + } +} + +func (suite *GroupsUtilsSuite) TestValidateGroupsRestoreFlags() { + table := []struct { + name string + backupID string + opts utils.GroupsOpts + expect assert.ErrorAssertionFunc + }{ + { + name: "no opts", + backupID: "id", + opts: utils.GroupsOpts{}, + expect: assert.NoError, + }, + { + name: "no backupID", + backupID: "", + opts: utils.GroupsOpts{}, + expect: assert.Error, + }, + // TODO: Add tests for selectors once we have them + // { + // name: "all valid", + // backupID: "id", + // opts: utils.GroupsOpts{ + // Populated: flags.PopulatedFlags{ + // flags.FileCreatedAfterFN: struct{}{}, + // flags.FileCreatedBeforeFN: struct{}{}, + // flags.FileModifiedAfterFN: struct{}{}, + // flags.FileModifiedBeforeFN: struct{}{}, + // }, + // }, + // expect: assert.NoError, + // }, + // { + // name: "invalid file created after", + // backupID: "id", + // opts: utils.GroupsOpts{ + // FileCreatedAfter: "1235", + // Populated: flags.PopulatedFlags{ + // flags.FileCreatedAfterFN: struct{}{}, + // }, + // }, + // expect: assert.Error, + // }, + // { + // name: "invalid file created before", + // backupID: "id", + // opts: utils.GroupsOpts{ + // FileCreatedBefore: "1235", + // Populated: flags.PopulatedFlags{ + // flags.FileCreatedBeforeFN: struct{}{}, + // }, + // }, + // expect: assert.Error, + // }, + // { + // name: "invalid file modified after", + // backupID: "id", + // opts: utils.GroupsOpts{ + // FileModifiedAfter: "1235", + // Populated: flags.PopulatedFlags{ + // flags.FileModifiedAfterFN: struct{}{}, + // }, + // }, + // expect: assert.Error, + // }, + // { + // name: "invalid file modified before", + // backupID: "id", + // opts: 
utils.GroupsOpts{ + // FileModifiedBefore: "1235", + // Populated: flags.PopulatedFlags{ + // flags.FileModifiedBeforeFN: struct{}{}, + // }, + // }, + // expect: assert.Error, + // }, + } + for _, test := range table { + suite.Run(test.name, func() { + t := suite.T() + test.expect(t, utils.ValidateGroupsRestoreFlags(test.backupID, test.opts)) + }) + } +} diff --git a/src/internal/m365/backup.go b/src/internal/m365/backup.go index 805dcebd1..55c4c7fdb 100644 --- a/src/internal/m365/backup.go +++ b/src/internal/m365/backup.go @@ -156,6 +156,17 @@ func (ctrl *Controller) IsBackupRunnable( service path.ServiceType, resourceOwner string, ) (bool, error) { + if service == path.GroupsService { + _, err := ctrl.AC.Groups().GetByID(ctx, resourceOwner) + if err != nil { + // TODO(meain): check for error message in case groups are + // not enabled at all similar to sharepoint + return false, err + } + + return true, nil + } + if service == path.SharePointService { _, err := ctrl.AC.Sites().GetRoot(ctx) if err != nil { @@ -181,7 +192,7 @@ func (ctrl *Controller) IsBackupRunnable( return true, nil } -func verifyBackupInputs(sels selectors.Selector, siteIDs []string) error { +func verifyBackupInputs(sels selectors.Selector, cachedIDs []string) error { var ids []string switch sels.Service { @@ -189,16 +200,13 @@ func verifyBackupInputs(sels selectors.Selector, siteIDs []string) error { // Exchange and OneDrive user existence now checked in checkServiceEnabled. return nil - case selectors.ServiceGroups: - // TODO(meain): check for group existence. - return nil - - case selectors.ServiceSharePoint: - ids = siteIDs + case selectors.ServiceSharePoint, selectors.ServiceGroups: + ids = cachedIDs } if !filters.Contains(ids).Compare(sels.ID()) { - return clues.Stack(graph.ErrResourceOwnerNotFound).With("missing_protected_resource", sels.DiscreteOwner) + return clues.Stack(graph.ErrResourceOwnerNotFound). + With("selector_protected_resource", sels.DiscreteOwner) } return nil diff --git a/src/internal/m365/collection/drive/group_handler.go b/src/internal/m365/collection/drive/group_handler.go index 81bbf36af..136d61b2d 100644 --- a/src/internal/m365/collection/drive/group_handler.go +++ b/src/internal/m365/collection/drive/group_handler.go @@ -100,6 +100,7 @@ func (h groupBackupHandler) NewLocationIDer( driveID string, elems ...string, ) details.LocationIDer { + // TODO(meain): path fixes return details.NewSharePointLocationIDer(driveID, elems...) 
} @@ -124,7 +125,6 @@ func (h groupBackupHandler) IsAllPass() bool { func (h groupBackupHandler) IncludesDir(dir string) bool { // TODO(meain) - // return h.scope.Matches(selectors.SharePointGroupFolder, dir) return true } @@ -138,7 +138,7 @@ func augmentGroupItemInfo( size int64, parentPath *path.Builder, ) details.ItemInfo { - var driveName, driveID, creatorEmail string + var driveName, driveID, creatorEmail, siteID, weburl string // TODO: we rely on this info for details/restore lookups, // so if it's nil we have an issue, and will need an alternative @@ -159,15 +159,15 @@ func augmentGroupItemInfo( } } - // gsi := item.GetSharepointIds() - // if gsi != nil { - // siteID = ptr.Val(gsi.GetSiteId()) - // weburl = ptr.Val(gsi.GetSiteUrl()) + gsi := item.GetSharepointIds() + if gsi != nil { + siteID = ptr.Val(gsi.GetSiteId()) + weburl = ptr.Val(gsi.GetSiteUrl()) - // if len(weburl) == 0 { - // weburl = constructWebURL(item.GetAdditionalData()) - // } - // } + if len(weburl) == 0 { + weburl = constructWebURL(item.GetAdditionalData()) + } + } if item.GetParentReference() != nil { driveID = ptr.Val(item.GetParentReference().GetDriveId()) @@ -179,6 +179,7 @@ func augmentGroupItemInfo( pps = parentPath.String() } + // TODO: Add channel name and ID dii.Groups = &details.GroupsInfo{ Created: ptr.Val(item.GetCreatedDateTime()), DriveID: driveID, @@ -189,6 +190,8 @@ func augmentGroupItemInfo( Owner: creatorEmail, ParentPath: pps, Size: size, + SiteID: siteID, + WebURL: weburl, } dii.Extension = &details.ExtensionData{} diff --git a/src/pkg/backup/details/groups.go b/src/pkg/backup/details/groups.go index 398d8f529..9065d6bbb 100644 --- a/src/pkg/backup/details/groups.go +++ b/src/pkg/backup/details/groups.go @@ -11,24 +11,48 @@ import ( // NewGroupsLocationIDer builds a LocationIDer for the groups. func NewGroupsLocationIDer( + category path.CategoryType, driveID string, escapedFolders ...string, -) uniqueLoc { - // TODO: implement - return uniqueLoc{} +) (uniqueLoc, error) { + // TODO(meain): path fixes + if err := path.ValidateServiceAndCategory(path.GroupsService, category); err != nil { + return uniqueLoc{}, clues.Wrap(err, "making groups LocationIDer") + } + + pb := path.Builder{}.Append(category.String()) + prefixElems := 1 + + if driveID != "" { // non sp paths don't have driveID + pb.Append(driveID) + + prefixElems = 2 + } + + pb.Append(escapedFolders...) 
+ + return uniqueLoc{pb, prefixElems}, nil } // GroupsInfo describes a groups item type GroupsInfo struct { Created time.Time `json:"created,omitempty"` - DriveName string `json:"driveName,omitempty"` - DriveID string `json:"driveID,omitempty"` ItemName string `json:"itemName,omitempty"` ItemType ItemType `json:"itemType,omitempty"` Modified time.Time `json:"modified,omitempty"` Owner string `json:"owner,omitempty"` ParentPath string `json:"parentPath,omitempty"` Size int64 `json:"size,omitempty"` + + // Channels Specific + ChannelName string `json:"channelName,omitempty"` + ChannelID string `json:"channelID,omitempty"` + + // SharePoint specific + DriveName string `json:"driveName,omitempty"` + DriveID string `json:"driveID,omitempty"` + SiteID string `json:"siteID,omitempty"` + WebURL string `json:"webURL,omitempty"` } // Headers returns the human-readable names of properties in a SharePointInfo @@ -51,9 +75,27 @@ func (i *GroupsInfo) UpdateParentPath(newLocPath *path.Builder) { } func (i *GroupsInfo) uniqueLocation(baseLoc *path.Builder) (*uniqueLoc, error) { - return nil, clues.New("not yet implemented") + var category path.CategoryType + + switch i.ItemType { + case SharePointLibrary: + category = path.LibrariesCategory + + if len(i.DriveID) == 0 { + return nil, clues.New("empty drive ID") + } + } + + loc, err := NewGroupsLocationIDer(category, i.DriveID, baseLoc.Elements()...) + + return &loc, err } func (i *GroupsInfo) updateFolder(f *FolderInfo) error { - return clues.New("not yet implemented") + // TODO(meain): path updates if any + if i.ItemType == SharePointLibrary { + return updateFolderWithinDrive(SharePointLibrary, i.DriveName, i.DriveID, f) + } + + return clues.New("unsupported ItemType for GroupsInfo").With("item_type", i.ItemType) } diff --git a/src/pkg/backup/details/iteminfo.go b/src/pkg/backup/details/iteminfo.go index fbd6a92cd..a8ba23100 100644 --- a/src/pkg/backup/details/iteminfo.go +++ b/src/pkg/backup/details/iteminfo.go @@ -28,7 +28,7 @@ const ( ExchangeMail ItemType = 3 // SharePoint (10x) - SharePointLibrary ItemType = 101 + SharePointLibrary ItemType = 101 // also used for groups SharePointList ItemType = 102 SharePointPage ItemType = 103 diff --git a/src/pkg/selectors/groups.go b/src/pkg/selectors/groups.go index 50aa3db74..6f1bd1d74 100644 --- a/src/pkg/selectors/groups.go +++ b/src/pkg/selectors/groups.go @@ -425,7 +425,6 @@ func (c groupsCategory) pathValues( folderCat, itemCat = GroupsLibraryFolder, GroupsLibraryItem rFld = ent.Groups.ParentPath - default: return nil, clues.New("unrecognized groupsCategory").With("category", c) } diff --git a/src/pkg/services/m365/groups.go b/src/pkg/services/m365/groups.go index f4924be22..a32195c1c 100644 --- a/src/pkg/services/m365/groups.go +++ b/src/pkg/services/m365/groups.go @@ -6,6 +6,7 @@ import ( "github.com/alcionai/clues" "github.com/microsoftgraph/msgraph-sdk-go/models" + "github.com/alcionai/corso/src/internal/common/idname" "github.com/alcionai/corso/src/internal/common/ptr" "github.com/alcionai/corso/src/pkg/account" "github.com/alcionai/corso/src/pkg/fault" @@ -80,7 +81,7 @@ func getAllGroups( // helpers // --------------------------------------------------------------------------- -// parseUser extracts information from `models.Groupable` we care about +// parseGroup extracts information from `models.Groupable` we care about func parseGroup(ctx context.Context, mg models.Groupable) (*Group, error) { if mg.GetDisplayName() == nil { return nil, clues.New("group missing display name"). 
@@ -95,3 +96,23 @@ func parseGroup(ctx context.Context, mg models.Groupable) (*Group, error) { return u, nil } + +// GroupsMap retrieves an id-name cache of all groups in the tenant. +func GroupsMap( + ctx context.Context, + acct account.Account, + errs *fault.Bus, +) (idname.Cacher, error) { + groups, err := Groups(ctx, acct, errs) + if err != nil { + return idname.NewCache(nil), err + } + + itn := make(map[string]string, len(groups)) + + for _, s := range groups { + itn[s.ID] = s.DisplayName + } + + return idname.NewCache(itn), nil +} diff --git a/src/pkg/services/m365/groups_test.go b/src/pkg/services/m365/groups_test.go index 8fa650a98..9219209f0 100644 --- a/src/pkg/services/m365/groups_test.go +++ b/src/pkg/services/m365/groups_test.go @@ -68,6 +68,31 @@ func (suite *GroupsIntgSuite) TestGroups() { } } +func (suite *GroupsIntgSuite) TestGroupsMap() { + t := suite.T() + + ctx, flush := tester.NewContext(t) + defer flush() + + graph.InitializeConcurrencyLimiter(ctx, true, 4) + + gm, err := m365.GroupsMap(ctx, suite.acct, fault.New(true)) + assert.NoError(t, err, clues.ToCore(err)) + assert.NotEmpty(t, gm) + + for _, gid := range gm.IDs() { + suite.Run("group_"+gid, func() { + t := suite.T() + + assert.NotEmpty(t, gid) + + name, ok := gm.NameOf(gid) + assert.True(t, ok) + assert.NotEmpty(t, name) + }) + } +} + func (suite *GroupsIntgSuite) TestGroups_InvalidCredentials() { table := []struct { name string From 9664846d22cc335779888f7a886392c2dd377159 Mon Sep 17 00:00:00 2001 From: Abin Simon Date: Wed, 23 Aug 2023 15:05:37 +0530 Subject: [PATCH 25/32] Simplify group_handler by leveraging library_handler (#4084) https://github.com/alcionai/corso/pull/4030#discussion_r1296246853 --- #### Does this PR need a docs update or release note? - [ ] :white_check_mark: Yes, it's included - [x] :clock1: Yes, but in a later PR - [ ] :no_entry: No #### Type of change - [ ] :sunflower: Feature - [ ] :bug: Bugfix - [ ] :world_map: Documentation - [ ] :robot: Supportability/Tests - [ ] :computer: CI/Deployment - [x] :broom: Tech Debt/Cleanup #### Issue(s) * https://github.com/alcionai/corso/issues/3990 #### Test Plan - [ ] :muscle: Manual - [ ] :zap: Unit test - [ ] :green_heart: E2E --- .../m365/collection/drive/group_handler.go | 182 ++---------------- .../m365/collection/drive/handler_utils.go | 112 +++++++++++ .../m365/collection/drive/item_handler.go | 54 +----- .../m365/collection/drive/library_handler.go | 99 ++-------- .../collection/drive/library_handler_test.go | 2 +- src/internal/m365/collection/site/restore.go | 2 +- .../m365/service/sharepoint/backup.go | 2 +- .../m365/service/sharepoint/backup_test.go | 5 +- .../m365/service/sharepoint/restore.go | 2 +- .../operations/test/sharepoint_test.go | 2 +- 10 files changed, 155 insertions(+), 307 deletions(-) create mode 100644 src/internal/m365/collection/drive/handler_utils.go diff --git a/src/internal/m365/collection/drive/group_handler.go b/src/internal/m365/collection/drive/group_handler.go index 136d61b2d..d6faad8fb 100644 --- a/src/internal/m365/collection/drive/group_handler.go +++ b/src/internal/m365/collection/drive/group_handler.go @@ -1,15 +1,6 @@ package drive import ( - "context" - "net/http" - "strings" - - "github.com/microsoftgraph/msgraph-sdk-go/models" - - "github.com/alcionai/corso/src/internal/common/ptr" - odConsts "github.com/alcionai/corso/src/internal/m365/service/onedrive/consts" - "github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/selectors" 
"github.com/alcionai/corso/src/pkg/services/m365/api" @@ -18,183 +9,40 @@ import ( var _ BackupHandler = &groupBackupHandler{} type groupBackupHandler struct { + libraryBackupHandler groupID string - ac api.Drives scope selectors.GroupsScope } func NewGroupBackupHandler(groupID string, ac api.Drives, scope selectors.GroupsScope) groupBackupHandler { - return groupBackupHandler{groupID, ac, scope} -} - -func (h groupBackupHandler) Get( - ctx context.Context, - url string, - headers map[string]string, -) (*http.Response, error) { - return h.ac.Get(ctx, url, headers) -} - -func (h groupBackupHandler) PathPrefix( - tenantID, resourceOwner, driveID string, -) (path.Path, error) { - return path.Build( - tenantID, - resourceOwner, - path.GroupsService, - path.LibrariesCategory, // TODO(meain) - false, - odConsts.DrivesPathDir, - driveID, - odConsts.RootPathDir) + return groupBackupHandler{ + libraryBackupHandler{ + ac: ac, + // Not adding scope here. Anything that needs scope has to + // be from group handler + service: path.GroupsService, + }, + groupID, + scope, + } } func (h groupBackupHandler) CanonicalPath( folders *path.Builder, tenantID, resourceOwner string, ) (path.Path, error) { - // TODO(meain): path fixes: sharepoint site ids should be in the path - return folders.ToDataLayerPath( - tenantID, - h.groupID, - path.GroupsService, - path.LibrariesCategory, - false) + // TODO(meain): path fixes + return folders.ToDataLayerPath(tenantID, h.groupID, h.service, path.LibrariesCategory, false) } func (h groupBackupHandler) ServiceCat() (path.ServiceType, path.CategoryType) { return path.GroupsService, path.LibrariesCategory } -func (h groupBackupHandler) NewDrivePager( - resourceOwner string, - fields []string, -) api.DrivePager { - return h.ac.NewSiteDrivePager(resourceOwner, fields) -} - -func (h groupBackupHandler) NewItemPager( - driveID, link string, - fields []string, -) api.DriveItemDeltaEnumerator { - return h.ac.NewDriveItemDeltaPager(driveID, link, fields) -} - -func (h groupBackupHandler) AugmentItemInfo( - dii details.ItemInfo, - item models.DriveItemable, - size int64, - parentPath *path.Builder, -) details.ItemInfo { - return augmentGroupItemInfo(dii, item, size, parentPath) -} - -func (h groupBackupHandler) FormatDisplayPath( - driveName string, - pb *path.Builder, -) string { - return "/" + driveName + "/" + pb.String() -} - -func (h groupBackupHandler) NewLocationIDer( - driveID string, - elems ...string, -) details.LocationIDer { - // TODO(meain): path fixes - return details.NewSharePointLocationIDer(driveID, elems...) 
-} - -func (h groupBackupHandler) GetItemPermission( - ctx context.Context, - driveID, itemID string, -) (models.PermissionCollectionResponseable, error) { - return h.ac.GetItemPermission(ctx, driveID, itemID) -} - -func (h groupBackupHandler) GetItem( - ctx context.Context, - driveID, itemID string, -) (models.DriveItemable, error) { - return h.ac.GetItem(ctx, driveID, itemID) -} - func (h groupBackupHandler) IsAllPass() bool { - // TODO(meain) - return true + return h.scope.IsAny(selectors.GroupsLibraryFolder) } func (h groupBackupHandler) IncludesDir(dir string) bool { - // TODO(meain) - return true -} - -// --------------------------------------------------------------------------- -// Common -// --------------------------------------------------------------------------- - -func augmentGroupItemInfo( - dii details.ItemInfo, - item models.DriveItemable, - size int64, - parentPath *path.Builder, -) details.ItemInfo { - var driveName, driveID, creatorEmail, siteID, weburl string - - // TODO: we rely on this info for details/restore lookups, - // so if it's nil we have an issue, and will need an alternative - // way to source the data. - - if item.GetCreatedBy() != nil && item.GetCreatedBy().GetUser() != nil { - // User is sometimes not available when created via some - // external applications (like backup/restore solutions) - additionalData := item.GetCreatedBy().GetUser().GetAdditionalData() - - ed, ok := additionalData["email"] - if !ok { - ed = additionalData["displayName"] - } - - if ed != nil { - creatorEmail = *ed.(*string) - } - } - - gsi := item.GetSharepointIds() - if gsi != nil { - siteID = ptr.Val(gsi.GetSiteId()) - weburl = ptr.Val(gsi.GetSiteUrl()) - - if len(weburl) == 0 { - weburl = constructWebURL(item.GetAdditionalData()) - } - } - - if item.GetParentReference() != nil { - driveID = ptr.Val(item.GetParentReference().GetDriveId()) - driveName = strings.TrimSpace(ptr.Val(item.GetParentReference().GetName())) - } - - var pps string - if parentPath != nil { - pps = parentPath.String() - } - - // TODO: Add channel name and ID - dii.Groups = &details.GroupsInfo{ - Created: ptr.Val(item.GetCreatedDateTime()), - DriveID: driveID, - DriveName: driveName, - ItemName: ptr.Val(item.GetName()), - ItemType: details.SharePointLibrary, - Modified: ptr.Val(item.GetLastModifiedDateTime()), - Owner: creatorEmail, - ParentPath: pps, - Size: size, - SiteID: siteID, - WebURL: weburl, - } - - dii.Extension = &details.ExtensionData{} - - return dii + return h.scope.Matches(selectors.GroupsLibraryFolder, dir) } diff --git a/src/internal/m365/collection/drive/handler_utils.go b/src/internal/m365/collection/drive/handler_utils.go new file mode 100644 index 000000000..6dc4be66e --- /dev/null +++ b/src/internal/m365/collection/drive/handler_utils.go @@ -0,0 +1,112 @@ +package drive + +import ( + "strings" + + "github.com/microsoftgraph/msgraph-sdk-go/models" + + "github.com/alcionai/corso/src/internal/common/ptr" + "github.com/alcionai/corso/src/pkg/backup/details" + "github.com/alcionai/corso/src/pkg/path" +) + +func augmentItemInfo( + dii details.ItemInfo, + service path.ServiceType, + item models.DriveItemable, + size int64, + parentPath *path.Builder, +) details.ItemInfo { + var driveName, siteID, driveID, weburl, creatorEmail string + + // TODO: we rely on this info for details/restore lookups, + // so if it's nil we have an issue, and will need an alternative + // way to source the data. 
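+	//
+	// Note: the lookup below prefers the "email" key and falls back to
+	// "displayName"; both values are stored in the additional-data map
+	// as *string, hence the type assertion when populating creatorEmail.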
+ + if item.GetCreatedBy() != nil && item.GetCreatedBy().GetUser() != nil { + // User is sometimes not available when created via some + // external applications (like backup/restore solutions) + additionalData := item.GetCreatedBy().GetUser().GetAdditionalData() + + ed, ok := additionalData["email"] + if !ok { + ed = additionalData["displayName"] + } + + if ed != nil { + creatorEmail = *ed.(*string) + } + } + + if service == path.SharePointService || + service == path.GroupsService { + gsi := item.GetSharepointIds() + if gsi != nil { + siteID = ptr.Val(gsi.GetSiteId()) + weburl = ptr.Val(gsi.GetSiteUrl()) + + if len(weburl) == 0 { + weburl = constructWebURL(item.GetAdditionalData()) + } + } + } + + if item.GetParentReference() != nil { + driveID = ptr.Val(item.GetParentReference().GetDriveId()) + driveName = strings.TrimSpace(ptr.Val(item.GetParentReference().GetName())) + } + + var pps string + if parentPath != nil { + pps = parentPath.String() + } + + switch service { + case path.OneDriveService: + dii.OneDrive = &details.OneDriveInfo{ + Created: ptr.Val(item.GetCreatedDateTime()), + DriveID: driveID, + DriveName: driveName, + ItemName: ptr.Val(item.GetName()), + ItemType: details.OneDriveItem, + Modified: ptr.Val(item.GetLastModifiedDateTime()), + Owner: creatorEmail, + ParentPath: pps, + Size: size, + } + case path.SharePointService: + dii.SharePoint = &details.SharePointInfo{ + Created: ptr.Val(item.GetCreatedDateTime()), + DriveID: driveID, + DriveName: driveName, + ItemName: ptr.Val(item.GetName()), + ItemType: details.SharePointLibrary, + Modified: ptr.Val(item.GetLastModifiedDateTime()), + Owner: creatorEmail, + ParentPath: pps, + SiteID: siteID, + Size: size, + WebURL: weburl, + } + + case path.GroupsService: + // TODO: Add channel name and ID + dii.Groups = &details.GroupsInfo{ + Created: ptr.Val(item.GetCreatedDateTime()), + DriveID: driveID, + DriveName: driveName, + ItemName: ptr.Val(item.GetName()), + ItemType: details.SharePointLibrary, + Modified: ptr.Val(item.GetLastModifiedDateTime()), + Owner: creatorEmail, + ParentPath: pps, + SiteID: siteID, + Size: size, + WebURL: weburl, + } + } + + dii.Extension = &details.ExtensionData{} + + return dii +} diff --git a/src/internal/m365/collection/drive/item_handler.go b/src/internal/m365/collection/drive/item_handler.go index 929649aae..58fccd7a8 100644 --- a/src/internal/m365/collection/drive/item_handler.go +++ b/src/internal/m365/collection/drive/item_handler.go @@ -3,13 +3,11 @@ package drive import ( "context" "net/http" - "strings" "github.com/alcionai/clues" "github.com/microsoftgraph/msgraph-sdk-go/drives" "github.com/microsoftgraph/msgraph-sdk-go/models" - "github.com/alcionai/corso/src/internal/common/ptr" odConsts "github.com/alcionai/corso/src/internal/m365/service/onedrive/consts" "github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/control" @@ -85,7 +83,7 @@ func (h itemBackupHandler) AugmentItemInfo( size int64, parentPath *path.Builder, ) details.ItemInfo { - return augmentItemInfo(dii, item, size, parentPath) + return augmentItemInfo(dii, path.OneDriveService, item, size, parentPath) } func (h itemBackupHandler) FormatDisplayPath( @@ -162,7 +160,7 @@ func (h itemRestoreHandler) AugmentItemInfo( size int64, parentPath *path.Builder, ) details.ItemInfo { - return augmentItemInfo(dii, item, size, parentPath) + return augmentItemInfo(dii, path.OneDriveService, item, size, parentPath) } func (h itemRestoreHandler) DeleteItem( @@ -236,51 +234,3 @@ func (h itemRestoreHandler) 
GetRootFolder( ) (models.DriveItemable, error) { return h.ac.GetRootFolder(ctx, driveID) } - -// --------------------------------------------------------------------------- -// Common -// --------------------------------------------------------------------------- - -func augmentItemInfo( - dii details.ItemInfo, - item models.DriveItemable, - size int64, - parentPath *path.Builder, -) details.ItemInfo { - var email, driveName, driveID string - - if item.GetCreatedBy() != nil && item.GetCreatedBy().GetUser() != nil { - // User is sometimes not available when created via some - // external applications (like backup/restore solutions) - ed, ok := item.GetCreatedBy().GetUser().GetAdditionalData()["email"] - if ok { - email = *ed.(*string) - } - } - - if item.GetParentReference() != nil { - driveID = ptr.Val(item.GetParentReference().GetDriveId()) - driveName = strings.TrimSpace(ptr.Val(item.GetParentReference().GetName())) - } - - var pps string - if parentPath != nil { - pps = parentPath.String() - } - - dii.OneDrive = &details.OneDriveInfo{ - Created: ptr.Val(item.GetCreatedDateTime()), - DriveID: driveID, - DriveName: driveName, - ItemName: ptr.Val(item.GetName()), - ItemType: details.OneDriveItem, - Modified: ptr.Val(item.GetLastModifiedDateTime()), - Owner: email, - ParentPath: pps, - Size: size, - } - - dii.Extension = &details.ExtensionData{} - - return dii -} diff --git a/src/internal/m365/collection/drive/library_handler.go b/src/internal/m365/collection/drive/library_handler.go index 4649e458c..e06a279db 100644 --- a/src/internal/m365/collection/drive/library_handler.go +++ b/src/internal/m365/collection/drive/library_handler.go @@ -20,12 +20,17 @@ import ( var _ BackupHandler = &libraryBackupHandler{} type libraryBackupHandler struct { - ac api.Drives - scope selectors.SharePointScope + ac api.Drives + scope selectors.SharePointScope + service path.ServiceType } -func NewLibraryBackupHandler(ac api.Drives, scope selectors.SharePointScope) libraryBackupHandler { - return libraryBackupHandler{ac, scope} +func NewLibraryBackupHandler( + ac api.Drives, + scope selectors.SharePointScope, + service path.ServiceType, +) libraryBackupHandler { + return libraryBackupHandler{ac, scope, service} } func (h libraryBackupHandler) Get( @@ -42,7 +47,7 @@ func (h libraryBackupHandler) PathPrefix( return path.Build( tenantID, resourceOwner, - path.SharePointService, + h.service, path.LibrariesCategory, false, odConsts.DrivesPathDir, @@ -54,7 +59,7 @@ func (h libraryBackupHandler) CanonicalPath( folders *path.Builder, tenantID, resourceOwner string, ) (path.Path, error) { - return folders.ToDataLayerSharePointPath(tenantID, resourceOwner, path.LibrariesCategory, false) + return folders.ToDataLayerPath(tenantID, resourceOwner, h.service, path.LibrariesCategory, false) } func (h libraryBackupHandler) ServiceCat() (path.ServiceType, path.CategoryType) { @@ -81,7 +86,7 @@ func (h libraryBackupHandler) AugmentItemInfo( size int64, parentPath *path.Builder, ) details.ItemInfo { - return augmentLibraryItemInfo(dii, item, size, parentPath) + return augmentItemInfo(dii, h.service, item, size, parentPath) } // constructWebURL is a helper function for recreating the webURL @@ -128,6 +133,7 @@ func (h libraryBackupHandler) NewLocationIDer( driveID string, elems ...string, ) details.LocationIDer { + // TODO(meain): path related changes for groups return details.NewSharePointLocationIDer(driveID, elems...) 
} @@ -160,11 +166,12 @@ func (h libraryBackupHandler) IncludesDir(dir string) bool { var _ RestoreHandler = &libraryRestoreHandler{} type libraryRestoreHandler struct { - ac api.Client + ac api.Client + service path.ServiceType } -func NewLibraryRestoreHandler(ac api.Client) libraryRestoreHandler { - return libraryRestoreHandler{ac} +func NewLibraryRestoreHandler(ac api.Client, service path.ServiceType) libraryRestoreHandler { + return libraryRestoreHandler{ac, service} } func (h libraryRestoreHandler) PostDrive( @@ -187,7 +194,7 @@ func (h libraryRestoreHandler) AugmentItemInfo( size int64, parentPath *path.Builder, ) details.ItemInfo { - return augmentLibraryItemInfo(dii, item, size, parentPath) + return augmentItemInfo(dii, h.service, item, size, parentPath) } func (h libraryRestoreHandler) DeleteItem( @@ -261,73 +268,3 @@ func (h libraryRestoreHandler) GetRootFolder( ) (models.DriveItemable, error) { return h.ac.Drives().GetRootFolder(ctx, driveID) } - -// --------------------------------------------------------------------------- -// Common -// --------------------------------------------------------------------------- - -func augmentLibraryItemInfo( - dii details.ItemInfo, - item models.DriveItemable, - size int64, - parentPath *path.Builder, -) details.ItemInfo { - var driveName, siteID, driveID, weburl, creatorEmail string - - // TODO: we rely on this info for details/restore lookups, - // so if it's nil we have an issue, and will need an alternative - // way to source the data. - - if item.GetCreatedBy() != nil && item.GetCreatedBy().GetUser() != nil { - // User is sometimes not available when created via some - // external applications (like backup/restore solutions) - additionalData := item.GetCreatedBy().GetUser().GetAdditionalData() - - ed, ok := additionalData["email"] - if !ok { - ed = additionalData["displayName"] - } - - if ed != nil { - creatorEmail = *ed.(*string) - } - } - - gsi := item.GetSharepointIds() - if gsi != nil { - siteID = ptr.Val(gsi.GetSiteId()) - weburl = ptr.Val(gsi.GetSiteUrl()) - - if len(weburl) == 0 { - weburl = constructWebURL(item.GetAdditionalData()) - } - } - - if item.GetParentReference() != nil { - driveID = ptr.Val(item.GetParentReference().GetDriveId()) - driveName = strings.TrimSpace(ptr.Val(item.GetParentReference().GetName())) - } - - var pps string - if parentPath != nil { - pps = parentPath.String() - } - - dii.SharePoint = &details.SharePointInfo{ - Created: ptr.Val(item.GetCreatedDateTime()), - DriveID: driveID, - DriveName: driveName, - ItemName: ptr.Val(item.GetName()), - ItemType: details.SharePointLibrary, - Modified: ptr.Val(item.GetLastModifiedDateTime()), - Owner: creatorEmail, - ParentPath: pps, - SiteID: siteID, - Size: size, - WebURL: weburl, - } - - dii.Extension = &details.ExtensionData{} - - return dii -} diff --git a/src/internal/m365/collection/drive/library_handler_test.go b/src/internal/m365/collection/drive/library_handler_test.go index 1646868e0..93ff8d2ae 100644 --- a/src/internal/m365/collection/drive/library_handler_test.go +++ b/src/internal/m365/collection/drive/library_handler_test.go @@ -36,7 +36,7 @@ func (suite *LibraryBackupHandlerUnitSuite) TestCanonicalPath() { for _, test := range table { suite.Run(test.name, func() { t := suite.T() - h := libraryBackupHandler{} + h := libraryBackupHandler{service: path.SharePointService} p := path.Builder{}.Append("prefix") result, err := h.CanonicalPath(p, tenantID, resourceOwner) diff --git a/src/internal/m365/collection/site/restore.go 
b/src/internal/m365/collection/site/restore.go index 7b13df1a5..c83dd6290 100644 --- a/src/internal/m365/collection/site/restore.go +++ b/src/internal/m365/collection/site/restore.go @@ -41,7 +41,7 @@ func ConsumeRestoreCollections( ctr *count.Bus, ) (*support.ControllerOperationStatus, error) { var ( - lrh = drive.NewLibraryRestoreHandler(ac) + lrh = drive.NewLibraryRestoreHandler(ac, rcc.Selector.PathService()) restoreMetrics support.CollectionMetrics caches = drive.NewRestoreCaches(backupDriveIDNames) el = errs.Local() diff --git a/src/internal/m365/service/sharepoint/backup.go b/src/internal/m365/service/sharepoint/backup.go index ce7789b64..ad34ef9c9 100644 --- a/src/internal/m365/service/sharepoint/backup.go +++ b/src/internal/m365/service/sharepoint/backup.go @@ -80,7 +80,7 @@ func ProduceBackupCollections( spcs, canUsePreviousBackup, err = site.CollectLibraries( ctx, bpc, - drive.NewLibraryBackupHandler(ac.Drives(), scope), + drive.NewLibraryBackupHandler(ac.Drives(), scope, bpc.Selector.PathService()), creds.AzureTenantID, ssmb, su, diff --git a/src/internal/m365/service/sharepoint/backup_test.go b/src/internal/m365/service/sharepoint/backup_test.go index 2a7c6aad8..8365cb099 100644 --- a/src/internal/m365/service/sharepoint/backup_test.go +++ b/src/internal/m365/service/sharepoint/backup_test.go @@ -50,7 +50,8 @@ func (suite *LibrariesBackupUnitSuite) TestUpdateCollections() { ) pb := path.Builder{}.Append(testBaseDrivePath.Elements()...) - ep, err := drive.NewLibraryBackupHandler(api.Drives{}, nil).CanonicalPath(pb, tenantID, siteID) + ep, err := drive.NewLibraryBackupHandler(api.Drives{}, nil, path.SharePointService). + CanonicalPath(pb, tenantID, siteID) require.NoError(suite.T(), err, clues.ToCore(err)) tests := []struct { @@ -100,7 +101,7 @@ func (suite *LibrariesBackupUnitSuite) TestUpdateCollections() { ) c := drive.NewCollections( - drive.NewLibraryBackupHandler(api.Drives{}, test.scope), + drive.NewLibraryBackupHandler(api.Drives{}, test.scope, path.SharePointService), tenantID, siteID, nil, diff --git a/src/internal/m365/service/sharepoint/restore.go b/src/internal/m365/service/sharepoint/restore.go index 35e1c67cd..e4336dbd2 100644 --- a/src/internal/m365/service/sharepoint/restore.go +++ b/src/internal/m365/service/sharepoint/restore.go @@ -33,7 +33,7 @@ func ConsumeRestoreCollections( ctr *count.Bus, ) (*support.ControllerOperationStatus, error) { var ( - lrh = drive.NewLibraryRestoreHandler(ac) + lrh = drive.NewLibraryRestoreHandler(ac, rcc.Selector.PathService()) restoreMetrics support.CollectionMetrics caches = drive.NewRestoreCaches(backupDriveIDNames) el = errs.Local() diff --git a/src/internal/operations/test/sharepoint_test.go b/src/internal/operations/test/sharepoint_test.go index dea0c23bf..8ab7d3e4d 100644 --- a/src/internal/operations/test/sharepoint_test.go +++ b/src/internal/operations/test/sharepoint_test.go @@ -74,7 +74,7 @@ func (suite *SharePointBackupIntgSuite) TestBackup_Run_incrementalSharePoint() { } grh := func(ac api.Client) drive.RestoreHandler { - return drive.NewLibraryRestoreHandler(ac) + return drive.NewLibraryRestoreHandler(ac, path.SharePointService) } runDriveIncrementalTest( From bc5bb8f0dcfc35fbdcfd9f88262847a91c5ca72e Mon Sep 17 00:00:00 2001 From: ashmrtn <3891298+ashmrtn@users.noreply.github.com> Date: Wed, 23 Aug 2023 09:07:47 -0700 Subject: [PATCH 26/32] Update kopia version (#4089) Pull in recent upstream changes around having the repo in read-only mode. --- #### Does this PR need a docs update or release note? 
- [ ] :white_check_mark: Yes, it's included - [ ] :clock1: Yes, but in a later PR - [x] :no_entry: No #### Type of change - [ ] :sunflower: Feature - [x] :bug: Bugfix - [ ] :world_map: Documentation - [ ] :robot: Supportability/Tests - [ ] :computer: CI/Deployment - [ ] :broom: Tech Debt/Cleanup #### Issue(s) * #4031 #### Test Plan - [ ] :muscle: Manual - [x] :zap: Unit test - [x] :green_heart: E2E --- src/go.mod | 5 +++-- src/go.sum | 12 +++++++----- 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/src/go.mod b/src/go.mod index eca537743..0fe31a67c 100644 --- a/src/go.mod +++ b/src/go.mod @@ -2,7 +2,7 @@ module github.com/alcionai/corso/src go 1.20 -replace github.com/kopia/kopia => github.com/alcionai/kopia v0.12.2-0.20230803184432-5f2a35eade6b +replace github.com/kopia/kopia => github.com/alcionai/kopia v0.12.2-0.20230822191057-17d4deff94a3 require ( github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.1 @@ -46,6 +46,7 @@ require ( github.com/gofrs/flock v0.8.1 // indirect github.com/golang-jwt/jwt/v5 v5.0.0 // indirect github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 // indirect + github.com/hashicorp/cronexpr v1.1.2 // indirect github.com/hashicorp/go-immutable-radix v1.3.1 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/magiconair/properties v1.8.7 // indirect @@ -59,7 +60,7 @@ require ( github.com/valyala/bytebufferpool v1.0.0 // indirect github.com/valyala/fasthttp v1.48.0 // indirect go.opentelemetry.io/otel/metric v1.16.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230803162519-f966b187b2e5 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230807174057-1744710a1577 // indirect ) require ( diff --git a/src/go.sum b/src/go.sum index 9bd8f2480..ec27f3a4b 100644 --- a/src/go.sum +++ b/src/go.sum @@ -55,8 +55,8 @@ github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpH github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo= github.com/alcionai/clues v0.0.0-20230728164842-7dc4795a43e4 h1:husF7eAYw2HEzgjfAmNy+ZLzyztJV2SyoUngSUo829Y= github.com/alcionai/clues v0.0.0-20230728164842-7dc4795a43e4/go.mod h1:MLEWSZ0cjEMg6hiGCRvE7AtrOhs7deBcm7ZrJBpfGRM= -github.com/alcionai/kopia v0.12.2-0.20230803184432-5f2a35eade6b h1:pkTllM0wtHVFnHfI3vXPYh1ObD4FKo2G2G/qWqzmIfY= -github.com/alcionai/kopia v0.12.2-0.20230803184432-5f2a35eade6b/go.mod h1:WH725ws0BYpZpTkVh4uqFHHPiiJuirl1Cm73jv5RYyA= +github.com/alcionai/kopia v0.12.2-0.20230822191057-17d4deff94a3 h1:6YjRGjEZr/Bmux1XkS13Re1m1LI7VAcbFsA3PiqO2BI= +github.com/alcionai/kopia v0.12.2-0.20230822191057-17d4deff94a3/go.mod h1:u5wAx1XN07PJsO1BLBkGicwSrbmAC1biONnumSCA210= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= @@ -204,6 +204,8 @@ github.com/h2non/gock v1.2.0/go.mod h1:tNhoxHYW2W42cYkYb1WqzdbYIieALC99kpYr7rH/B github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 h1:2VTzZjLZBgl62/EtslCrtky5vbi9dd7HrQPQIx6wqiw= github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542/go.mod h1:Ow0tF8D4Kplbc8s8sSb3V2oUCygFHVp8gC3Dn6U4MNI= github.com/hanwen/go-fuse/v2 v2.3.0 h1:t5ivNIH2PK+zw4OBul/iJjsoG9K6kXo4nMDoBpciC8A= +github.com/hashicorp/cronexpr v1.1.2 
h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A=
+github.com/hashicorp/cronexpr v1.1.2/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4=
 github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
 github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
 github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
@@ -244,7 +246,7 @@ github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQ
 github.com/klauspost/reedsolomon v1.11.8 h1:s8RpUW5TK4hjr+djiOpbZJB4ksx+TdYbRH7vHQpwPOY=
 github.com/klauspost/reedsolomon v1.11.8/go.mod h1:4bXRN+cVzMdml6ti7qLouuYi32KHJ5MGv0Qd8a47h6A=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/kopia/htmluibuild v0.0.0-20230605144737-e386b860759d h1:qvV3TN5X/RsgmckkxsKh9P7Vtf9GYy6vOPzQY1SY4qM=
+github.com/kopia/htmluibuild v0.0.0-20230716183504-d78b44b3a9bd h1:Vskpc00T65HkkDSWbkiXOG5yYsgWg5LN48daUfGZ+u0=
 github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
 github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
@@ -736,8 +738,8 @@ google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6D
 google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20230803162519-f966b187b2e5 h1:eSaPbMR4T7WfH9FvABk36NBMacoTUKdWCvV0dx+KfOg=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20230803162519-f966b187b2e5/go.mod h1:zBEcrKX2ZOcEkHWxBPAIvYUWOKKMIhYcmNiUIu2ji3I=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230807174057-1744710a1577 h1:wukfNtZmZUurLN/atp2hiIeTKn7QJWIQdHzqmsOnAOk=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230807174057-1744710a1577/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
 google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=

From af5d98e182971ccb59e75c8338f53a02cd69d707 Mon Sep 17 00:00:00 2001
From: Keepers
Date: Wed, 23 Aug 2023 10:46:28 -0600
Subject: [PATCH 27/32] Cleanup services (#4059)

Some quick tech-debt cleanup: splitting up the code in services/m365.

---

#### Does this PR need a docs update or release note?
- [x] :no_entry: No #### Type of change - [x] :broom: Tech Debt/Cleanup #### Test Plan - [x] :zap: Unit test - [x] :green_heart: E2E --- src/pkg/services/m365/m365.go | 291 ----------- src/pkg/services/m365/sites.go | 99 ++++ src/pkg/services/m365/sites_test.go | 191 +++++++ src/pkg/services/m365/users.go | 211 ++++++++ .../m365/{m365_test.go => users_test.go} | 479 ++++++------------ 5 files changed, 653 insertions(+), 618 deletions(-) create mode 100644 src/pkg/services/m365/sites.go create mode 100644 src/pkg/services/m365/sites_test.go create mode 100644 src/pkg/services/m365/users.go rename src/pkg/services/m365/{m365_test.go => users_test.go} (64%) diff --git a/src/pkg/services/m365/m365.go b/src/pkg/services/m365/m365.go index 469f4d08f..6bb8125c4 100644 --- a/src/pkg/services/m365/m365.go +++ b/src/pkg/services/m365/m365.go @@ -6,9 +6,6 @@ import ( "github.com/alcionai/clues" "github.com/microsoftgraph/msgraph-sdk-go/models" - "github.com/alcionai/corso/src/internal/common/idname" - "github.com/alcionai/corso/src/internal/common/ptr" - "github.com/alcionai/corso/src/internal/m365/graph" "github.com/alcionai/corso/src/pkg/account" "github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/fault" @@ -28,294 +25,6 @@ type getAller[T any] interface { GetAll(ctx context.Context, errs *fault.Bus) ([]T, error) } -// --------------------------------------------------------------------------- -// Users -// --------------------------------------------------------------------------- - -// User is the minimal information required to identify and display a user. -type User struct { - PrincipalName string - ID string - Name string - Info api.UserInfo -} - -// UserNoInfo is the minimal information required to identify and display a user. -// TODO: Remove this once `UsersCompatNoInfo` is removed -type UserNoInfo struct { - PrincipalName string - ID string - Name string -} - -// UsersCompat returns a list of users in the specified M365 tenant. -// TODO(ashmrtn): Remove when upstream consumers of the SDK support the fault -// package. -func UsersCompat(ctx context.Context, acct account.Account) ([]*User, error) { - errs := fault.New(true) - - us, err := Users(ctx, acct, errs) - if err != nil { - return nil, err - } - - return us, errs.Failure() -} - -// UsersCompatNoInfo returns a list of users in the specified M365 tenant. 
-// TODO: Remove this once `Info` is removed from the `User` struct and callers -// have switched over -func UsersCompatNoInfo(ctx context.Context, acct account.Account) ([]*UserNoInfo, error) { - errs := fault.New(true) - - us, err := usersNoInfo(ctx, acct, errs) - if err != nil { - return nil, err - } - - return us, errs.Failure() -} - -// UserHasMailbox returns true if the user has an exchange mailbox enabled -// false otherwise, and a nil pointer and an error in case of error -func UserHasMailbox(ctx context.Context, acct account.Account, userID string) (bool, error) { - ac, err := makeAC(ctx, acct, path.ExchangeService) - if err != nil { - return false, clues.Stack(err).WithClues(ctx) - } - - _, err = ac.Users().GetMailInbox(ctx, userID) - if err != nil { - if err := api.EvaluateMailboxError(err); err != nil { - return false, clues.Stack(err) - } - - return false, nil - } - - return true, nil -} - -// UserHasDrives returns true if the user has any drives -// false otherwise, and a nil pointer and an error in case of error -func UserHasDrives(ctx context.Context, acct account.Account, userID string) (bool, error) { - ac, err := makeAC(ctx, acct, path.OneDriveService) - if err != nil { - return false, clues.Stack(err).WithClues(ctx) - } - - return checkUserHasDrives(ctx, ac.Users(), userID) -} - -func checkUserHasDrives(ctx context.Context, dgdd getDefaultDriver, userID string) (bool, error) { - _, err := dgdd.GetDefaultDrive(ctx, userID) - if err != nil { - // we consider this a non-error case, since it - // answers the question the caller is asking. - if clues.HasLabel(err, graph.LabelsMysiteNotFound) || clues.HasLabel(err, graph.LabelsNoSharePointLicense) { - return false, nil - } - - if graph.IsErrUserNotFound(err) { - return false, clues.Stack(graph.ErrResourceOwnerNotFound, err) - } - - return false, clues.Stack(err) - } - - return true, nil -} - -// usersNoInfo returns a list of users in the specified M365 tenant - with no info -// TODO: Remove this once we remove `Info` from `Users` and instead rely on the `GetUserInfo` API -// to get user information -func usersNoInfo(ctx context.Context, acct account.Account, errs *fault.Bus) ([]*UserNoInfo, error) { - ac, err := makeAC(ctx, acct, path.UnknownService) - if err != nil { - return nil, clues.Stack(err).WithClues(ctx) - } - - us, err := ac.Users().GetAll(ctx, errs) - if err != nil { - return nil, err - } - - ret := make([]*UserNoInfo, 0, len(us)) - - for _, u := range us { - pu, err := parseUser(u) - if err != nil { - return nil, clues.Wrap(err, "formatting user data") - } - - puNoInfo := &UserNoInfo{ - PrincipalName: pu.PrincipalName, - ID: pu.ID, - Name: pu.Name, - } - - ret = append(ret, puNoInfo) - } - - return ret, nil -} - -// Users returns a list of users in the specified M365 tenant -func Users(ctx context.Context, acct account.Account, errs *fault.Bus) ([]*User, error) { - ac, err := makeAC(ctx, acct, path.ExchangeService) - if err != nil { - return nil, clues.Stack(err).WithClues(ctx) - } - - us, err := ac.Users().GetAll(ctx, errs) - if err != nil { - return nil, err - } - - ret := make([]*User, 0, len(us)) - - for _, u := range us { - pu, err := parseUser(u) - if err != nil { - return nil, clues.Wrap(err, "formatting user data") - } - - userInfo, err := ac.Users().GetInfo(ctx, pu.ID) - if err != nil { - return nil, clues.Wrap(err, "getting user details") - } - - pu.Info = *userInfo - - ret = append(ret, pu) - } - - return ret, nil -} - -// parseUser extracts information from `models.Userable` we care about -func 
parseUser(item models.Userable) (*User, error) { - if item.GetUserPrincipalName() == nil { - return nil, clues.New("user missing principal name"). - With("user_id", ptr.Val(item.GetId())) - } - - u := &User{ - PrincipalName: ptr.Val(item.GetUserPrincipalName()), - ID: ptr.Val(item.GetId()), - Name: ptr.Val(item.GetDisplayName()), - } - - return u, nil -} - -// UserInfo returns the corso-specific set of user metadata. -func GetUserInfo( - ctx context.Context, - acct account.Account, - userID string, -) (*api.UserInfo, error) { - ac, err := makeAC(ctx, acct, path.ExchangeService) - if err != nil { - return nil, clues.Stack(err).WithClues(ctx) - } - - ui, err := ac.Users().GetInfo(ctx, userID) - if err != nil { - return nil, err - } - - return ui, nil -} - -// --------------------------------------------------------------------------- -// Sites -// --------------------------------------------------------------------------- - -// Site is the minimal information required to identify and display a SharePoint site. -type Site struct { - // WebURL is the url for the site, works as an alias for the user name. - WebURL string - - // ID is of the format: .. - // for example: contoso.sharepoint.com,abcdeab3-0ccc-4ce1-80ae-b32912c9468d,xyzud296-9f7c-44e1-af81-3c06d0d43007 - ID string - - // DisplayName is the human-readable name of the site. Normally the plaintext name that the - // user provided when they created the site, though it can be changed across time. - // Ex: webUrl: https://host.com/sites/TestingSite, displayName: "Testing Site" - DisplayName string -} - -// Sites returns a list of Sites in a specified M365 tenant -func Sites(ctx context.Context, acct account.Account, errs *fault.Bus) ([]*Site, error) { - ac, err := makeAC(ctx, acct, path.SharePointService) - if err != nil { - return nil, clues.Stack(err).WithClues(ctx) - } - - return getAllSites(ctx, ac.Sites()) -} - -func getAllSites( - ctx context.Context, - ga getAller[models.Siteable], -) ([]*Site, error) { - sites, err := ga.GetAll(ctx, fault.New(true)) - if err != nil { - if clues.HasLabel(err, graph.LabelsNoSharePointLicense) { - return nil, clues.Stack(graph.ErrServiceNotEnabled, err) - } - - return nil, clues.Wrap(err, "retrieving sites") - } - - ret := make([]*Site, 0, len(sites)) - - for _, s := range sites { - ps, err := parseSite(s) - if err != nil { - return nil, clues.Wrap(err, "parsing siteable") - } - - ret = append(ret, ps) - } - - return ret, nil -} - -// parseSite extracts the information from `models.Siteable` we care about -func parseSite(item models.Siteable) (*Site, error) { - s := &Site{ - ID: ptr.Val(item.GetId()), - WebURL: ptr.Val(item.GetWebUrl()), - DisplayName: ptr.Val(item.GetDisplayName()), - } - - return s, nil -} - -// SitesMap retrieves all sites in the tenant, and returns two maps: one id-to-webURL, -// and one webURL-to-id. 
-func SitesMap( - ctx context.Context, - acct account.Account, - errs *fault.Bus, -) (idname.Cacher, error) { - sites, err := Sites(ctx, acct, errs) - if err != nil { - return idname.NewCache(nil), err - } - - itn := make(map[string]string, len(sites)) - - for _, s := range sites { - itn[s.ID] = s.WebURL - } - - return idname.NewCache(itn), nil -} - // --------------------------------------------------------------------------- // helpers // --------------------------------------------------------------------------- diff --git a/src/pkg/services/m365/sites.go b/src/pkg/services/m365/sites.go new file mode 100644 index 000000000..ab7b28bca --- /dev/null +++ b/src/pkg/services/m365/sites.go @@ -0,0 +1,99 @@ +package m365 + +import ( + "context" + + "github.com/alcionai/clues" + "github.com/microsoftgraph/msgraph-sdk-go/models" + + "github.com/alcionai/corso/src/internal/common/idname" + "github.com/alcionai/corso/src/internal/common/ptr" + "github.com/alcionai/corso/src/internal/m365/graph" + "github.com/alcionai/corso/src/pkg/account" + "github.com/alcionai/corso/src/pkg/fault" + "github.com/alcionai/corso/src/pkg/path" +) + +// Site is the minimal information required to identify and display a SharePoint site. +type Site struct { + // WebURL is the url for the site, works as an alias for the user name. + WebURL string + + // ID is of the format: .. + // for example: contoso.sharepoint.com,abcdeab3-0ccc-4ce1-80ae-b32912c9468d,xyzud296-9f7c-44e1-af81-3c06d0d43007 + ID string + + // DisplayName is the human-readable name of the site. Normally the plaintext name that the + // user provided when they created the site, though it can be changed across time. + // Ex: webUrl: https://host.com/sites/TestingSite, displayName: "Testing Site" + DisplayName string +} + +// Sites returns a list of Sites in a specified M365 tenant +func Sites(ctx context.Context, acct account.Account, errs *fault.Bus) ([]*Site, error) { + ac, err := makeAC(ctx, acct, path.SharePointService) + if err != nil { + return nil, clues.Stack(err).WithClues(ctx) + } + + return getAllSites(ctx, ac.Sites()) +} + +func getAllSites( + ctx context.Context, + ga getAller[models.Siteable], +) ([]*Site, error) { + sites, err := ga.GetAll(ctx, fault.New(true)) + if err != nil { + if clues.HasLabel(err, graph.LabelsNoSharePointLicense) { + return nil, clues.Stack(graph.ErrServiceNotEnabled, err) + } + + return nil, clues.Wrap(err, "retrieving sites") + } + + ret := make([]*Site, 0, len(sites)) + + for _, s := range sites { + ps, err := parseSite(s) + if err != nil { + return nil, clues.Wrap(err, "parsing siteable") + } + + ret = append(ret, ps) + } + + return ret, nil +} + +// parseSite extracts the information from `models.Siteable` we care about +func parseSite(item models.Siteable) (*Site, error) { + s := &Site{ + ID: ptr.Val(item.GetId()), + WebURL: ptr.Val(item.GetWebUrl()), + DisplayName: ptr.Val(item.GetDisplayName()), + } + + return s, nil +} + +// SitesMap retrieves all sites in the tenant, and returns two maps: one id-to-webURL, +// and one webURL-to-id. 
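+// (Both lookup directions are served by the single idname.Cacher returned here.)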
+func SitesMap( + ctx context.Context, + acct account.Account, + errs *fault.Bus, +) (idname.Cacher, error) { + sites, err := Sites(ctx, acct, errs) + if err != nil { + return idname.NewCache(nil), err + } + + itn := make(map[string]string, len(sites)) + + for _, s := range sites { + itn[s.ID] = s.WebURL + } + + return idname.NewCache(itn), nil +} diff --git a/src/pkg/services/m365/sites_test.go b/src/pkg/services/m365/sites_test.go new file mode 100644 index 000000000..a4d6a597d --- /dev/null +++ b/src/pkg/services/m365/sites_test.go @@ -0,0 +1,191 @@ +package m365 + +import ( + "context" + "testing" + + "github.com/alcionai/clues" + "github.com/microsoftgraph/msgraph-sdk-go/models" + "github.com/microsoftgraph/msgraph-sdk-go/models/odataerrors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + "github.com/alcionai/corso/src/internal/common/ptr" + "github.com/alcionai/corso/src/internal/m365/graph" + "github.com/alcionai/corso/src/internal/tester" + "github.com/alcionai/corso/src/internal/tester/tconfig" + "github.com/alcionai/corso/src/pkg/account" + "github.com/alcionai/corso/src/pkg/credentials" + "github.com/alcionai/corso/src/pkg/fault" +) + +type siteIntegrationSuite struct { + tester.Suite +} + +func TestSiteIntegrationSuite(t *testing.T) { + suite.Run(t, &siteIntegrationSuite{ + Suite: tester.NewIntegrationSuite( + t, + [][]string{tconfig.M365AcctCredEnvs}), + }) +} + +func (suite *siteIntegrationSuite) SetupSuite() { + ctx, flush := tester.NewContext(suite.T()) + defer flush() + + graph.InitializeConcurrencyLimiter(ctx, true, 4) +} + +func (suite *siteIntegrationSuite) TestSites() { + t := suite.T() + + ctx, flush := tester.NewContext(t) + defer flush() + + acct := tconfig.NewM365Account(t) + + sites, err := Sites(ctx, acct, fault.New(true)) + assert.NoError(t, err, clues.ToCore(err)) + assert.NotEmpty(t, sites) + + for _, s := range sites { + suite.Run("site_"+s.ID, func() { + t := suite.T() + assert.NotEmpty(t, s.WebURL) + assert.NotEmpty(t, s.ID) + assert.NotEmpty(t, s.DisplayName) + }) + } +} + +func (suite *siteIntegrationSuite) TestSites_InvalidCredentials() { + table := []struct { + name string + acct func(t *testing.T) account.Account + }{ + { + name: "Invalid Credentials", + acct: func(t *testing.T) account.Account { + a, err := account.NewAccount( + account.ProviderM365, + account.M365Config{ + M365: credentials.M365{ + AzureClientID: "Test", + AzureClientSecret: "without", + }, + AzureTenantID: "data", + }, + ) + require.NoError(t, err, clues.ToCore(err)) + + return a + }, + }, + { + name: "Empty Credentials", + acct: func(t *testing.T) account.Account { + // intentionally swallowing the error here + a, _ := account.NewAccount(account.ProviderM365) + return a + }, + }, + } + + for _, test := range table { + suite.Run(test.name, func() { + t := suite.T() + + ctx, flush := tester.NewContext(t) + defer flush() + + sites, err := Sites(ctx, test.acct(t), fault.New(true)) + assert.Empty(t, sites, "returned some sites") + assert.NotNil(t, err) + }) + } +} + +// --------------------------------------------------------------------------- +// Unit +// --------------------------------------------------------------------------- + +type siteUnitSuite struct { + tester.Suite +} + +func TestSiteUnitSuite(t *testing.T) { + suite.Run(t, &siteUnitSuite{Suite: tester.NewUnitSuite(t)}) +} + +type mockGASites struct { + response []models.Siteable + err error +} + +func (m mockGASites) GetAll(context.Context, *fault.Bus) 
([]models.Siteable, error) { + return m.response, m.err +} + +func (suite *siteUnitSuite) TestGetAllSites() { + table := []struct { + name string + mock func(context.Context) getAller[models.Siteable] + expectErr func(*testing.T, error) + }{ + { + name: "ok", + mock: func(ctx context.Context) getAller[models.Siteable] { + return mockGASites{[]models.Siteable{}, nil} + }, + expectErr: func(t *testing.T, err error) { + assert.NoError(t, err, clues.ToCore(err)) + }, + }, + { + name: "no sharepoint license", + mock: func(ctx context.Context) getAller[models.Siteable] { + odErr := odataerrors.NewODataError() + merr := odataerrors.NewMainError() + merr.SetCode(ptr.To("code")) + merr.SetMessage(ptr.To(string(graph.NoSPLicense))) + odErr.SetErrorEscaped(merr) + + return mockGASites{nil, graph.Stack(ctx, odErr)} + }, + expectErr: func(t *testing.T, err error) { + assert.ErrorIs(t, err, graph.ErrServiceNotEnabled, clues.ToCore(err)) + }, + }, + { + name: "arbitrary error", + mock: func(ctx context.Context) getAller[models.Siteable] { + odErr := odataerrors.NewODataError() + merr := odataerrors.NewMainError() + merr.SetCode(ptr.To("code")) + merr.SetMessage(ptr.To("message")) + odErr.SetErrorEscaped(merr) + + return mockGASites{nil, graph.Stack(ctx, odErr)} + }, + expectErr: func(t *testing.T, err error) { + assert.Error(t, err, clues.ToCore(err)) + }, + }, + } + for _, test := range table { + suite.Run(test.name, func() { + t := suite.T() + + ctx, flush := tester.NewContext(t) + defer flush() + + gas := test.mock(ctx) + + _, err := getAllSites(ctx, gas) + test.expectErr(t, err) + }) + } +} diff --git a/src/pkg/services/m365/users.go b/src/pkg/services/m365/users.go new file mode 100644 index 000000000..35b3a0630 --- /dev/null +++ b/src/pkg/services/m365/users.go @@ -0,0 +1,211 @@ +package m365 + +import ( + "context" + + "github.com/alcionai/clues" + "github.com/microsoftgraph/msgraph-sdk-go/models" + + "github.com/alcionai/corso/src/internal/common/ptr" + "github.com/alcionai/corso/src/internal/m365/graph" + "github.com/alcionai/corso/src/pkg/account" + "github.com/alcionai/corso/src/pkg/fault" + "github.com/alcionai/corso/src/pkg/path" + "github.com/alcionai/corso/src/pkg/services/m365/api" +) + +// User is the minimal information required to identify and display a user. +type User struct { + PrincipalName string + ID string + Name string + Info api.UserInfo +} + +// UserNoInfo is the minimal information required to identify and display a user. +// TODO: Remove this once `UsersCompatNoInfo` is removed +type UserNoInfo struct { + PrincipalName string + ID string + Name string +} + +// UsersCompat returns a list of users in the specified M365 tenant. +// TODO(ashmrtn): Remove when upstream consumers of the SDK support the fault +// package. +func UsersCompat(ctx context.Context, acct account.Account) ([]*User, error) { + errs := fault.New(true) + + us, err := Users(ctx, acct, errs) + if err != nil { + return nil, err + } + + return us, errs.Failure() +} + +// UsersCompatNoInfo returns a list of users in the specified M365 tenant. 
+// TODO: Remove this once `Info` is removed from the `User` struct and callers +// have switched over +func UsersCompatNoInfo(ctx context.Context, acct account.Account) ([]*UserNoInfo, error) { + errs := fault.New(true) + + us, err := usersNoInfo(ctx, acct, errs) + if err != nil { + return nil, err + } + + return us, errs.Failure() +} + +// UserHasMailbox returns true if the user has an exchange mailbox enabled +// false otherwise, and a nil pointer and an error in case of error +func UserHasMailbox(ctx context.Context, acct account.Account, userID string) (bool, error) { + ac, err := makeAC(ctx, acct, path.ExchangeService) + if err != nil { + return false, clues.Stack(err).WithClues(ctx) + } + + _, err = ac.Users().GetMailInbox(ctx, userID) + if err != nil { + if err := api.EvaluateMailboxError(err); err != nil { + return false, clues.Stack(err) + } + + return false, nil + } + + return true, nil +} + +// UserHasDrives returns true if the user has any drives +// false otherwise, and a nil pointer and an error in case of error +func UserHasDrives(ctx context.Context, acct account.Account, userID string) (bool, error) { + ac, err := makeAC(ctx, acct, path.OneDriveService) + if err != nil { + return false, clues.Stack(err).WithClues(ctx) + } + + return checkUserHasDrives(ctx, ac.Users(), userID) +} + +func checkUserHasDrives(ctx context.Context, dgdd getDefaultDriver, userID string) (bool, error) { + _, err := dgdd.GetDefaultDrive(ctx, userID) + if err != nil { + // we consider this a non-error case, since it + // answers the question the caller is asking. + if clues.HasLabel(err, graph.LabelsMysiteNotFound) || clues.HasLabel(err, graph.LabelsNoSharePointLicense) { + return false, nil + } + + if graph.IsErrUserNotFound(err) { + return false, clues.Stack(graph.ErrResourceOwnerNotFound, err) + } + + return false, clues.Stack(err) + } + + return true, nil +} + +// usersNoInfo returns a list of users in the specified M365 tenant - with no info +// TODO: Remove this once we remove `Info` from `Users` and instead rely on the `GetUserInfo` API +// to get user information +func usersNoInfo(ctx context.Context, acct account.Account, errs *fault.Bus) ([]*UserNoInfo, error) { + ac, err := makeAC(ctx, acct, path.UnknownService) + if err != nil { + return nil, clues.Stack(err).WithClues(ctx) + } + + us, err := ac.Users().GetAll(ctx, errs) + if err != nil { + return nil, err + } + + ret := make([]*UserNoInfo, 0, len(us)) + + for _, u := range us { + pu, err := parseUser(u) + if err != nil { + return nil, clues.Wrap(err, "formatting user data") + } + + puNoInfo := &UserNoInfo{ + PrincipalName: pu.PrincipalName, + ID: pu.ID, + Name: pu.Name, + } + + ret = append(ret, puNoInfo) + } + + return ret, nil +} + +// Users returns a list of users in the specified M365 tenant +func Users(ctx context.Context, acct account.Account, errs *fault.Bus) ([]*User, error) { + ac, err := makeAC(ctx, acct, path.ExchangeService) + if err != nil { + return nil, clues.Stack(err).WithClues(ctx) + } + + us, err := ac.Users().GetAll(ctx, errs) + if err != nil { + return nil, err + } + + ret := make([]*User, 0, len(us)) + + for _, u := range us { + pu, err := parseUser(u) + if err != nil { + return nil, clues.Wrap(err, "formatting user data") + } + + userInfo, err := ac.Users().GetInfo(ctx, pu.ID) + if err != nil { + return nil, clues.Wrap(err, "getting user details") + } + + pu.Info = *userInfo + + ret = append(ret, pu) + } + + return ret, nil +} + +// parseUser extracts information from `models.Userable` we care about +func 
parseUser(item models.Userable) (*User, error) { + if item.GetUserPrincipalName() == nil { + return nil, clues.New("user missing principal name"). + With("user_id", ptr.Val(item.GetId())) + } + + u := &User{ + PrincipalName: ptr.Val(item.GetUserPrincipalName()), + ID: ptr.Val(item.GetId()), + Name: ptr.Val(item.GetDisplayName()), + } + + return u, nil +} + +// UserInfo returns the corso-specific set of user metadata. +func GetUserInfo( + ctx context.Context, + acct account.Account, + userID string, +) (*api.UserInfo, error) { + ac, err := makeAC(ctx, acct, path.ExchangeService) + if err != nil { + return nil, clues.Stack(err).WithClues(ctx) + } + + ui, err := ac.Users().GetInfo(ctx, userID) + if err != nil { + return nil, err + } + + return ui, nil +} diff --git a/src/pkg/services/m365/m365_test.go b/src/pkg/services/m365/users_test.go similarity index 64% rename from src/pkg/services/m365/m365_test.go rename to src/pkg/services/m365/users_test.go index 0124f13f2..78a27e111 100644 --- a/src/pkg/services/m365/m365_test.go +++ b/src/pkg/services/m365/users_test.go @@ -23,26 +23,29 @@ import ( "github.com/alcionai/corso/src/pkg/services/m365/api" ) -type M365IntegrationSuite struct { +type userIntegrationSuite struct { tester.Suite + acct account.Account } -func TestM365IntegrationSuite(t *testing.T) { - suite.Run(t, &M365IntegrationSuite{ +func TestUserIntegrationSuite(t *testing.T) { + suite.Run(t, &userIntegrationSuite{ Suite: tester.NewIntegrationSuite( t, [][]string{tconfig.M365AcctCredEnvs}), }) } -func (suite *M365IntegrationSuite) SetupSuite() { +func (suite *userIntegrationSuite) SetupSuite() { ctx, flush := tester.NewContext(suite.T()) defer flush() graph.InitializeConcurrencyLimiter(ctx, true, 4) + + suite.acct = tconfig.NewM365Account(suite.T()) } -func (suite *M365IntegrationSuite) TestUsers() { +func (suite *userIntegrationSuite) TestUsers() { t := suite.T() ctx, flush := tester.NewContext(t) @@ -50,9 +53,7 @@ func (suite *M365IntegrationSuite) TestUsers() { graph.InitializeConcurrencyLimiter(ctx, true, 4) - acct := tconfig.NewM365Account(suite.T()) - - users, err := Users(ctx, acct, fault.New(true)) + users, err := Users(ctx, suite.acct, fault.New(true)) assert.NoError(t, err, clues.ToCore(err)) assert.NotEmpty(t, users) @@ -68,7 +69,7 @@ func (suite *M365IntegrationSuite) TestUsers() { } } -func (suite *M365IntegrationSuite) TestUsersCompat_HasNoInfo() { +func (suite *userIntegrationSuite) TestUsersCompat_HasNoInfo() { t := suite.T() ctx, flush := tester.NewContext(t) @@ -91,7 +92,7 @@ func (suite *M365IntegrationSuite) TestUsersCompat_HasNoInfo() { } } -func (suite *M365IntegrationSuite) TestUserHasMailbox() { +func (suite *userIntegrationSuite) TestUserHasMailbox() { t := suite.T() ctx, flush := tester.NewContext(t) @@ -107,7 +108,7 @@ func (suite *M365IntegrationSuite) TestUserHasMailbox() { assert.True(t, enabled) } -func (suite *M365IntegrationSuite) TestUserHasDrive() { +func (suite *userIntegrationSuite) TestUserHasDrive() { t := suite.T() ctx, flush := tester.NewContext(t) @@ -123,34 +124,155 @@ func (suite *M365IntegrationSuite) TestUserHasDrive() { assert.True(t, enabled) } -func (suite *M365IntegrationSuite) TestSites() { - t := suite.T() +func (suite *userIntegrationSuite) TestUsers_InvalidCredentials() { + table := []struct { + name string + acct func(t *testing.T) account.Account + }{ + { + name: "Invalid Credentials", + acct: func(t *testing.T) account.Account { + a, err := account.NewAccount( + account.ProviderM365, + account.M365Config{ + M365: 
credentials.M365{ + AzureClientID: "Test", + AzureClientSecret: "without", + }, + AzureTenantID: "data", + }, + ) + require.NoError(t, err, clues.ToCore(err)) - ctx, flush := tester.NewContext(t) - defer flush() + return a + }, + }, + } - acct := tconfig.NewM365Account(t) - - sites, err := Sites(ctx, acct, fault.New(true)) - assert.NoError(t, err, clues.ToCore(err)) - assert.NotEmpty(t, sites) - - for _, s := range sites { - suite.Run("site_"+s.ID, func() { + for _, test := range table { + suite.Run(test.name, func() { t := suite.T() - assert.NotEmpty(t, s.WebURL) - assert.NotEmpty(t, s.ID) - assert.NotEmpty(t, s.DisplayName) + + ctx, flush := tester.NewContext(t) + defer flush() + + users, err := Users(ctx, test.acct(t), fault.New(true)) + assert.Empty(t, users, "returned some users") + assert.NotNil(t, err) }) } } -type m365UnitSuite struct { +func (suite *userIntegrationSuite) TestGetUserInfo() { + table := []struct { + name string + user string + expect *api.UserInfo + expectErr require.ErrorAssertionFunc + }{ + { + name: "standard test user", + user: tconfig.M365UserID(suite.T()), + expect: &api.UserInfo{ + ServicesEnabled: map[path.ServiceType]struct{}{ + path.ExchangeService: {}, + path.OneDriveService: {}, + }, + Mailbox: api.MailboxInfo{ + Purpose: "user", + ErrGetMailBoxSetting: nil, + }, + }, + expectErr: require.NoError, + }, + { + name: "user does not exist", + user: uuid.NewString(), + expect: &api.UserInfo{ + ServicesEnabled: map[path.ServiceType]struct{}{}, + Mailbox: api.MailboxInfo{}, + }, + expectErr: require.Error, + }, + } + for _, test := range table { + suite.Run(test.name, func() { + t := suite.T() + + ctx, flush := tester.NewContext(t) + defer flush() + + result, err := GetUserInfo(ctx, suite.acct, test.user) + test.expectErr(t, err, clues.ToCore(err)) + + if err != nil { + return + } + + assert.Equal(t, test.expect.ServicesEnabled, result.ServicesEnabled) + }) + } +} + +func (suite *userIntegrationSuite) TestGetUserInfo_userWithoutDrive() { + userID := tconfig.M365UserID(suite.T()) + + table := []struct { + name string + user string + expect *api.UserInfo + }{ + { + name: "user without drive and exchange", + user: "a53c26f7-5100-4acb-a910-4d20960b2c19", // User: testevents@10rqc2.onmicrosoft.com + expect: &api.UserInfo{ + ServicesEnabled: map[path.ServiceType]struct{}{}, + Mailbox: api.MailboxInfo{ + ErrGetMailBoxSetting: []error{api.ErrMailBoxSettingsNotFound}, + }, + }, + }, + { + name: "user with drive and exchange", + user: userID, + expect: &api.UserInfo{ + ServicesEnabled: map[path.ServiceType]struct{}{ + path.ExchangeService: {}, + path.OneDriveService: {}, + }, + Mailbox: api.MailboxInfo{ + Purpose: "user", + ErrGetMailBoxSetting: []error{}, + }, + }, + }, + } + for _, test := range table { + suite.Run(test.name, func() { + t := suite.T() + + ctx, flush := tester.NewContext(t) + defer flush() + + result, err := GetUserInfo(ctx, suite.acct, test.user) + require.NoError(t, err, clues.ToCore(err)) + assert.Equal(t, test.expect.ServicesEnabled, result.ServicesEnabled) + assert.Equal(t, test.expect.Mailbox.ErrGetMailBoxSetting, result.Mailbox.ErrGetMailBoxSetting) + assert.Equal(t, test.expect.Mailbox.Purpose, result.Mailbox.Purpose) + }) + } +} + +// --------------------------------------------------------------------------- +// Unit +// --------------------------------------------------------------------------- + +type userUnitSuite struct { tester.Suite } -func TestM365UnitSuite(t *testing.T) { - suite.Run(t, &m365UnitSuite{Suite: tester.NewUnitSuite(t)}) 
+func TestUserUnitSuite(t *testing.T) { + suite.Run(t, &userUnitSuite{Suite: tester.NewUnitSuite(t)}) } type mockDGDD struct { @@ -162,7 +284,7 @@ func (m mockDGDD) GetDefaultDrive(context.Context, string) (models.Driveable, er return m.response, m.err } -func (suite *m365UnitSuite) TestCheckUserHasDrives() { +func (suite *userUnitSuite) TestCheckUserHasDrives() { table := []struct { name string mock func(context.Context) getDefaultDriver @@ -275,300 +397,3 @@ func (suite *m365UnitSuite) TestCheckUserHasDrives() { }) } } - -type mockGASites struct { - response []models.Siteable - err error -} - -func (m mockGASites) GetAll(context.Context, *fault.Bus) ([]models.Siteable, error) { - return m.response, m.err -} - -func (suite *m365UnitSuite) TestGetAllSites() { - table := []struct { - name string - mock func(context.Context) getAller[models.Siteable] - expectErr func(*testing.T, error) - }{ - { - name: "ok", - mock: func(ctx context.Context) getAller[models.Siteable] { - return mockGASites{[]models.Siteable{}, nil} - }, - expectErr: func(t *testing.T, err error) { - assert.NoError(t, err, clues.ToCore(err)) - }, - }, - { - name: "no sharepoint license", - mock: func(ctx context.Context) getAller[models.Siteable] { - odErr := odataerrors.NewODataError() - merr := odataerrors.NewMainError() - merr.SetCode(ptr.To("code")) - merr.SetMessage(ptr.To(string(graph.NoSPLicense))) - odErr.SetErrorEscaped(merr) - - return mockGASites{nil, graph.Stack(ctx, odErr)} - }, - expectErr: func(t *testing.T, err error) { - assert.ErrorIs(t, err, graph.ErrServiceNotEnabled, clues.ToCore(err)) - }, - }, - { - name: "arbitrary error", - mock: func(ctx context.Context) getAller[models.Siteable] { - odErr := odataerrors.NewODataError() - merr := odataerrors.NewMainError() - merr.SetCode(ptr.To("code")) - merr.SetMessage(ptr.To("message")) - odErr.SetErrorEscaped(merr) - - return mockGASites{nil, graph.Stack(ctx, odErr)} - }, - expectErr: func(t *testing.T, err error) { - assert.Error(t, err, clues.ToCore(err)) - }, - }, - } - for _, test := range table { - suite.Run(test.name, func() { - t := suite.T() - - ctx, flush := tester.NewContext(t) - defer flush() - - gas := test.mock(ctx) - - _, err := getAllSites(ctx, gas) - test.expectErr(t, err) - }) - } -} - -type DiscoveryIntgSuite struct { - tester.Suite - acct account.Account -} - -func TestDiscoveryIntgSuite(t *testing.T) { - suite.Run(t, &DiscoveryIntgSuite{ - Suite: tester.NewIntegrationSuite( - t, - [][]string{tconfig.M365AcctCredEnvs}), - }) -} - -func (suite *DiscoveryIntgSuite) SetupSuite() { - t := suite.T() - - ctx, flush := tester.NewContext(t) - defer flush() - - graph.InitializeConcurrencyLimiter(ctx, true, 4) - - suite.acct = tconfig.NewM365Account(t) -} - -func (suite *DiscoveryIntgSuite) TestUsers() { - t := suite.T() - - ctx, flush := tester.NewContext(t) - defer flush() - - errs := fault.New(true) - - users, err := Users(ctx, suite.acct, errs) - assert.NoError(t, err, clues.ToCore(err)) - - ferrs := errs.Errors() - assert.Nil(t, ferrs.Failure) - assert.Empty(t, ferrs.Recovered) - assert.NotEmpty(t, users) -} - -func (suite *DiscoveryIntgSuite) TestUsers_InvalidCredentials() { - table := []struct { - name string - acct func(t *testing.T) account.Account - }{ - { - name: "Invalid Credentials", - acct: func(t *testing.T) account.Account { - a, err := account.NewAccount( - account.ProviderM365, - account.M365Config{ - M365: credentials.M365{ - AzureClientID: "Test", - AzureClientSecret: "without", - }, - AzureTenantID: "data", - }, - ) - 
require.NoError(t, err, clues.ToCore(err)) - - return a - }, - }, - } - - for _, test := range table { - suite.Run(test.name, func() { - t := suite.T() - - ctx, flush := tester.NewContext(t) - defer flush() - - users, err := Users(ctx, test.acct(t), fault.New(true)) - assert.Empty(t, users, "returned some users") - assert.NotNil(t, err) - }) - } -} - -func (suite *DiscoveryIntgSuite) TestSites_InvalidCredentials() { - table := []struct { - name string - acct func(t *testing.T) account.Account - }{ - { - name: "Invalid Credentials", - acct: func(t *testing.T) account.Account { - a, err := account.NewAccount( - account.ProviderM365, - account.M365Config{ - M365: credentials.M365{ - AzureClientID: "Test", - AzureClientSecret: "without", - }, - AzureTenantID: "data", - }, - ) - require.NoError(t, err, clues.ToCore(err)) - - return a - }, - }, - { - name: "Empty Credentials", - acct: func(t *testing.T) account.Account { - // intentionally swallowing the error here - a, _ := account.NewAccount(account.ProviderM365) - return a - }, - }, - } - - for _, test := range table { - suite.Run(test.name, func() { - t := suite.T() - - ctx, flush := tester.NewContext(t) - defer flush() - - sites, err := Sites(ctx, test.acct(t), fault.New(true)) - assert.Empty(t, sites, "returned some sites") - assert.NotNil(t, err) - }) - } -} - -func (suite *DiscoveryIntgSuite) TestGetUserInfo() { - table := []struct { - name string - user string - expect *api.UserInfo - expectErr require.ErrorAssertionFunc - }{ - { - name: "standard test user", - user: tconfig.M365UserID(suite.T()), - expect: &api.UserInfo{ - ServicesEnabled: map[path.ServiceType]struct{}{ - path.ExchangeService: {}, - path.OneDriveService: {}, - }, - Mailbox: api.MailboxInfo{ - Purpose: "user", - ErrGetMailBoxSetting: nil, - }, - }, - expectErr: require.NoError, - }, - { - name: "user does not exist", - user: uuid.NewString(), - expect: &api.UserInfo{ - ServicesEnabled: map[path.ServiceType]struct{}{}, - Mailbox: api.MailboxInfo{}, - }, - expectErr: require.Error, - }, - } - for _, test := range table { - suite.Run(test.name, func() { - t := suite.T() - - ctx, flush := tester.NewContext(t) - defer flush() - - result, err := GetUserInfo(ctx, suite.acct, test.user) - test.expectErr(t, err, clues.ToCore(err)) - - if err != nil { - return - } - - assert.Equal(t, test.expect.ServicesEnabled, result.ServicesEnabled) - }) - } -} - -func (suite *DiscoveryIntgSuite) TestGetUserInfo_userWithoutDrive() { - userID := tconfig.M365UserID(suite.T()) - - table := []struct { - name string - user string - expect *api.UserInfo - }{ - { - name: "user without drive and exchange", - user: "a53c26f7-5100-4acb-a910-4d20960b2c19", // User: testevents@10rqc2.onmicrosoft.com - expect: &api.UserInfo{ - ServicesEnabled: map[path.ServiceType]struct{}{}, - Mailbox: api.MailboxInfo{ - ErrGetMailBoxSetting: []error{api.ErrMailBoxSettingsNotFound}, - }, - }, - }, - { - name: "user with drive and exchange", - user: userID, - expect: &api.UserInfo{ - ServicesEnabled: map[path.ServiceType]struct{}{ - path.ExchangeService: {}, - path.OneDriveService: {}, - }, - Mailbox: api.MailboxInfo{ - Purpose: "user", - ErrGetMailBoxSetting: []error{}, - }, - }, - }, - } - for _, test := range table { - suite.Run(test.name, func() { - t := suite.T() - - ctx, flush := tester.NewContext(t) - defer flush() - - result, err := GetUserInfo(ctx, suite.acct, test.user) - require.NoError(t, err, clues.ToCore(err)) - assert.Equal(t, test.expect.ServicesEnabled, result.ServicesEnabled) - assert.Equal(t, 
test.expect.Mailbox.ErrGetMailBoxSetting, result.Mailbox.ErrGetMailBoxSetting) - assert.Equal(t, test.expect.Mailbox.Purpose, result.Mailbox.Purpose) - }) - } -} From 719d6b98cd9091b1f3b4e62dd08762fcb0e25b01 Mon Sep 17 00:00:00 2001 From: Keepers Date: Wed, 23 Aug 2023 11:22:27 -0600 Subject: [PATCH 28/32] add channel message item info to details (#4092) #### Does this PR need a docs update or release note? - [x] :no_entry: No #### Type of change - [x] :sunflower: Feature #### Issue(s) * #3989 #### Test Plan - [x] :zap: Unit test - [x] :green_heart: E2E --- src/pkg/backup/details/groups.go | 44 ++++++++++++++++++++++++++---- src/pkg/backup/details/iteminfo.go | 3 ++ 2 files changed, 41 insertions(+), 6 deletions(-) diff --git a/src/pkg/backup/details/groups.go b/src/pkg/backup/details/groups.go index 9065d6bbb..1b67dac4f 100644 --- a/src/pkg/backup/details/groups.go +++ b/src/pkg/backup/details/groups.go @@ -1,9 +1,11 @@ package details import ( + "strconv" "time" "github.com/alcionai/clues" + "github.com/dustin/go-humanize" "github.com/alcionai/corso/src/internal/common/dttm" "github.com/alcionai/corso/src/pkg/path" @@ -45,8 +47,12 @@ type GroupsInfo struct { Size int64 `json:"size,omitempty"` // Channels Specific - ChannelName string `json:"channelName,omitempty"` - ChannelID string `json:"channelID,omitempty"` + ChannelName string `json:"channelName,omitempty"` + ChannelID string `json:"channelID,omitempty"` + LastResponseAt time.Time `json:"lastResponseAt,omitempty"` + MessageCreator string `json:"messageCreator,omitempty"` + MessagePreview string `json:"messagePreview,omitempty"` + ReplyCount int `json:"replyCount,omitempty"` // SharePoint specific DriveName string `json:"driveName,omitempty"` @@ -58,16 +64,42 @@ type GroupsInfo struct { // Headers returns the human-readable names of properties in a SharePointInfo // for printing out to a terminal in a columnar display. func (i GroupsInfo) Headers() []string { - return []string{"Created", "Modified"} + switch i.ItemType { + case SharePointLibrary: + return []string{"ItemName", "Library", "ParentPath", "Size", "Owner", "Created", "Modified"} + case TeamsChannelMessage: + return []string{"Message", "Channel", "Replies", "Creator", "Created", "Last Response"} + } + + return []string{} } // Values returns the values matching the Headers list for printing // out to a terminal in a columnar display. 
 func (i GroupsInfo) Values() []string {
-	return []string{
-		dttm.FormatToTabularDisplay(i.Created),
-		dttm.FormatToTabularDisplay(i.Modified),
+	switch i.ItemType {
+	case SharePointLibrary:
+		return []string{
+			i.ItemName,
+			i.DriveName,
+			i.ParentPath,
+			humanize.Bytes(uint64(i.Size)),
+			i.Owner,
+			dttm.FormatToTabularDisplay(i.Created),
+			dttm.FormatToTabularDisplay(i.Modified),
+		}
+	case TeamsChannelMessage:
+		return []string{
+			i.MessagePreview,
+			i.ChannelName,
+			strconv.Itoa(i.ReplyCount),
+			i.MessageCreator,
+			dttm.FormatToTabularDisplay(i.Created),
+			dttm.FormatToTabularDisplay(i.Modified),
+		}
 	}
+
+	return []string{}
 }
 
 func (i *GroupsInfo) UpdateParentPath(newLocPath *path.Builder) {
diff --git a/src/pkg/backup/details/iteminfo.go b/src/pkg/backup/details/iteminfo.go
index a8ba23100..ef71343dd 100644
--- a/src/pkg/backup/details/iteminfo.go
+++ b/src/pkg/backup/details/iteminfo.go
@@ -37,6 +37,9 @@ const (
 
 	// Folder Management(30x)
 	FolderItem ItemType = 306
+
+	// Groups/Teams(40x)
+	TeamsChannelMessage ItemType = 401
 )
 
 func UpdateItem(item *ItemInfo, newLocPath *path.Builder) {
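
As a quick illustration of the new pairing above (a sketch, not code from this PR: the entry values are made up, and tabwriter is just one way to render the columns; the only real requirement is that Headers() and Values() stay index-aligned):

```go
package main

import (
	"fmt"
	"os"
	"strings"
	"text/tabwriter"

	"github.com/alcionai/corso/src/pkg/backup/details"
)

func main() {
	// A hypothetical channel-message entry.
	info := details.GroupsInfo{
		ItemType:       details.TeamsChannelMessage,
		ChannelName:    "general",
		MessagePreview: "standup notes",
		MessageCreator: "user@example.com",
		ReplyCount:     3,
	}

	// Headers() and Values() produce matching columns for tabular display.
	w := tabwriter.NewWriter(os.Stdout, 0, 2, 2, ' ', 0)
	fmt.Fprintln(w, strings.Join(info.Headers(), "\t"))
	fmt.Fprintln(w, strings.Join(info.Values(), "\t"))
	w.Flush()
}
```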

From 3a5fcdce99fdea25709569a2d15867c09051219e Mon Sep 17 00:00:00 2001
From: ashmrtn <3891298+ashmrtn@users.noreply.github.com>
Date: Wed, 23 Aug 2023 10:58:24 -0700
Subject: [PATCH 29/32] Reenable point-in-time longevity test (#4090)

Now that kopia won't try to mutate state in read-only mode, it's safe to
re-enable the test that ensures backups that were deleted are still
available when opening the repo at a specific point in time.

This reverts commit 8d3fdeeb8dbd8dc68284c1d33b0b431d309b4f5b.

---

#### Does this PR need a docs update or release note?

- [ ] :white_check_mark: Yes, it's included
- [ ] :clock1: Yes, but in a later PR
- [x] :no_entry: No

#### Type of change

- [ ] :sunflower: Feature
- [ ] :bug: Bugfix
- [ ] :world_map: Documentation
- [x] :robot: Supportability/Tests
- [x] :computer: CI/Deployment
- [ ] :broom: Tech Debt/Cleanup

#### Issue(s)

* closes #4031

merge after:
* #4089

#### Test Plan

- [x] :muscle: Manual
- [ ] :zap: Unit test
- [ ] :green_heart: E2E
---
 src/cmd/longevity_test/longevity.go | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/src/cmd/longevity_test/longevity.go b/src/cmd/longevity_test/longevity.go
index efe3bd352..b3d6f865d 100644
--- a/src/cmd/longevity_test/longevity.go
+++ b/src/cmd/longevity_test/longevity.go
@@ -67,9 +67,6 @@ func deleteBackups(
 // pitrListBackups connects to the repository at the given point in time and
 // lists the backups for service. It then checks the list of backups contains
 // the backups in backupIDs.
-//
-//nolint:unused
-//lint:ignore U1000 Waiting for upstream fix tracked by 4031
 func pitrListBackups(
 	ctx context.Context,
 	service path.ServiceType,
@@ -159,10 +156,16 @@ func main() {
 		fatal(ctx, "invalid number of days provided", nil)
 	}
 
-	_, err = deleteBackups(ctx, service, days)
+	beforeDel := time.Now()
+
+	backups, err := deleteBackups(ctx, service, days)
 	if err != nil {
 		fatal(ctx, "deleting backups", clues.Stack(err))
 	}
+
+	if err := pitrListBackups(ctx, service, beforeDel, backups); err != nil {
+		fatal(ctx, "listing backups from point in time", clues.Stack(err))
+	}
 }
 
 func fatal(ctx context.Context, msg string, err error) {
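
In sketch form, the check being re-enabled here amounts to the following (this is a simplified illustration only; listBackupIDs is a hypothetical stand-in for the repo-connection and listing plumbing that lives in longevity.go):

```go
// verifyPITR confirms every deleted backup ID is still listed when the
// repository is opened as of pointInTime. Sketch only: listBackupIDs is
// a hypothetical helper, not part of this PR.
func verifyPITR(ctx context.Context, pointInTime time.Time, deleted []string) error {
	listed, err := listBackupIDs(ctx, pointInTime)
	if err != nil {
		return clues.Wrap(err, "listing backups at point in time")
	}

	found := map[string]struct{}{}
	for _, id := range listed {
		found[id] = struct{}{}
	}

	// Every backup deleted after pointInTime must still be visible.
	for _, id := range deleted {
		if _, ok := found[id]; !ok {
			return clues.New("backup missing at point in time").With("backup_id", id)
		}
	}

	return nil
}
```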

From a39526f64ebeb46357b1d1c8540981dc6ddecf07 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 23 Aug 2023 18:35:27 +0000
Subject: [PATCH 30/32] =?UTF-8?q?=E2=AC=86=EF=B8=8F=20Bump=20github.com/mi?=
 =?UTF-8?q?crosoftgraph/msgraph-sdk-go=20from=201.15.0=20to=201.16.0=20in?=
 =?UTF-8?q?=20/src=20(#4098)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Bumps [github.com/microsoftgraph/msgraph-sdk-go](https://github.com/microsoftgraph/msgraph-sdk-go) from 1.15.0 to 1.16.0.

Changelog

Sourced from github.com/microsoftgraph/msgraph-sdk-go's changelog.

[1.16.0] - 2023-08-23

Changed

  • Weekly generation.
Commits
  • cd8edf7 Generated models and request builders (#557)
  • d4cd019 Merge pull request #556 from microsoftgraph/dependabot/go_modules/github.com/...
  • 0a454be Bump github.com/google/uuid from 1.3.0 to 1.3.1

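For anyone reproducing the bump locally, the standard Go module workflow applies (general tooling, not specific to this PR): `go get github.com/microsoftgraph/msgraph-sdk-go@v1.16.0` followed by `go mod tidy`, run from /src.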
--- src/go.mod | 2 +- src/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/go.mod b/src/go.mod index 0fe31a67c..01e9a2c02 100644 --- a/src/go.mod +++ b/src/go.mod @@ -19,7 +19,7 @@ require ( github.com/microsoft/kiota-http-go v1.1.0 github.com/microsoft/kiota-serialization-form-go v1.0.0 github.com/microsoft/kiota-serialization-json-go v1.0.4 - github.com/microsoftgraph/msgraph-sdk-go v1.15.0 + github.com/microsoftgraph/msgraph-sdk-go v1.16.0 github.com/microsoftgraph/msgraph-sdk-go-core v1.0.0 github.com/pkg/errors v0.9.1 github.com/puzpuzpuz/xsync/v2 v2.5.0 diff --git a/src/go.sum b/src/go.sum index ec27f3a4b..5e3687cbc 100644 --- a/src/go.sum +++ b/src/go.sum @@ -287,8 +287,8 @@ github.com/microsoft/kiota-serialization-multipart-go v1.0.0 h1:3O5sb5Zj+moLBiJy github.com/microsoft/kiota-serialization-multipart-go v1.0.0/go.mod h1:yauLeBTpANk4L03XD985akNysG24SnRJGaveZf+p4so= github.com/microsoft/kiota-serialization-text-go v1.0.0 h1:XOaRhAXy+g8ZVpcq7x7a0jlETWnWrEum0RhmbYrTFnA= github.com/microsoft/kiota-serialization-text-go v1.0.0/go.mod h1:sM1/C6ecnQ7IquQOGUrUldaO5wj+9+v7G2W3sQ3fy6M= -github.com/microsoftgraph/msgraph-sdk-go v1.15.0 h1:cdz6Bs0T0Hl/NTdUAZq8TRJwidTmX741X2SnVIsn5l4= -github.com/microsoftgraph/msgraph-sdk-go v1.15.0/go.mod h1:YfKdWdUwQWuS6E+Qg6+SZnHxJ/kvG2nYQutwzGa5NZs= +github.com/microsoftgraph/msgraph-sdk-go v1.16.0 h1:6YjL2f8PZFlJUuCoX1yJwhDFYKPtogxYr/SnKJHAHZ4= +github.com/microsoftgraph/msgraph-sdk-go v1.16.0/go.mod h1:DdshtIL3VJ3abSG6O+gmlvbc/pX7Xh7xbruLTWoRjfU= github.com/microsoftgraph/msgraph-sdk-go-core v1.0.0 h1:7NWTfyXvOjoizW7PmxNp3+8wCKPgpODs/D1cUZ3fkAY= github.com/microsoftgraph/msgraph-sdk-go-core v1.0.0/go.mod h1:tQb4q3YMIj2dWhhXhQSJ4ELpol931ANKzHSYK5kX1qE= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= From 7972ff6e8f35a635f7cbd9ae92aafcc30dc70896 Mon Sep 17 00:00:00 2001 From: ashmrtn <3891298+ashmrtn@users.noreply.github.com> Date: Wed, 23 Aug 2023 12:16:42 -0700 Subject: [PATCH 31/32] Shuffle around logic for details merging (#4087) Shuffle around some logic for details merging so that we always attempt to extract a LocationRef from the backup base entry that's currently being examined. A LocationRef should always be available from either the LocationRef field in the details entry (newer backups) or by extracting it from the RepoRef (older backups) Manually tested incremental backups for exchange in the following scenarios: 1. v0.3.0 backup (calendars use IDs in RepoRef) -> incremental with this patch -> incremental with this patch 1. v0.2.0 backup (exchange uses folder names in RepoRef) -> incremental with this patch -> incremental with this patch The above tests should cover the cases where: * base backup details don't have LocationRef for exchange items * base backup details have LocationRef for exchange items --- #### Does this PR need a docs update or release note? 
- [ ] :white_check_mark: Yes, it's included - [ ] :clock1: Yes, but in a later PR - [x] :no_entry: No #### Type of change - [ ] :sunflower: Feature - [ ] :bug: Bugfix - [ ] :world_map: Documentation - [ ] :robot: Supportability/Tests - [ ] :computer: CI/Deployment - [x] :broom: Tech Debt/Cleanup #### Issue(s) * closes #3716 #### Test Plan - [x] :muscle: Manual - [x] :zap: Unit test - [ ] :green_heart: E2E --- src/internal/operations/backup.go | 25 ------------ src/internal/operations/backup_test.go | 48 ++++++++++++++++++++++ src/pkg/backup/details/details_test.go | 50 +++++++++++++++++++++-- src/pkg/backup/details/entry.go | 55 ++++++++++++++++---------- 4 files changed, 129 insertions(+), 49 deletions(-) diff --git a/src/internal/operations/backup.go b/src/internal/operations/backup.go index d3d4f0a9f..9215bc0d5 100644 --- a/src/internal/operations/backup.go +++ b/src/internal/operations/backup.go @@ -559,31 +559,6 @@ func getNewPathRefs( repoRef path.Path, backupVersion int, ) (path.Path, *path.Builder, error) { - // Right now we can't guarantee that we have an old location in the - // previous details entry so first try a lookup without a location to see - // if it matches so we don't need to try parsing from the old entry. - // - // TODO(ashmrtn): In the future we can remove this first check as we'll be - // able to assume we always have the location in the previous entry. We'll end - // up doing some extra parsing, but it will simplify this code. - if repoRef.Service() == path.ExchangeService { - newPath, newLoc, err := dataFromBackup.GetNewPathRefs( - repoRef.ToBuilder(), - entry.Modified(), - nil) - if err != nil { - return nil, nil, clues.Wrap(err, "getting new paths") - } else if newPath == nil { - // This entry doesn't need merging. - return nil, nil, nil - } else if newLoc == nil { - return nil, nil, clues.New("unable to find new exchange location") - } - - return newPath, newLoc, nil - } - - // We didn't have an exact entry, so retry with a location. 
locRef, err := entry.ToLocationIDer(backupVersion) if err != nil { return nil, nil, clues.Wrap(err, "getting previous item location") diff --git a/src/internal/operations/backup_test.go b/src/internal/operations/backup_test.go index 5b489e769..e8970ea1c 100644 --- a/src/internal/operations/backup_test.go +++ b/src/internal/operations/backup_test.go @@ -606,6 +606,24 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems time1 = time.Now() time2 = time1.Add(time.Hour) + + exchangeItemPath1 = makePath( + suite.T(), + []string{ + tenant, + path.ExchangeService.String(), + ro, + path.EmailCategory.String(), + "work", + "item1", + }, + true) + exchangeLocationPath1 = path.Builder{}.Append("work-display-name") + exchangePathReason1 = kopia.NewReason( + "", + exchangeItemPath1.ResourceOwner(), + exchangeItemPath1.Service(), + exchangeItemPath1.Category()) ) itemParents1, err := path.GetDriveFolderPath(itemPath1) @@ -803,6 +821,36 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems makeDetailsEntry(suite.T(), itemPath1, locationPath1, 42, false), }, }, + { + name: "ExchangeItemMerged", + mdm: func() *mockDetailsMergeInfoer { + res := newMockDetailsMergeInfoer() + res.add(exchangeItemPath1, exchangeItemPath1, exchangeLocationPath1) + + return res + }(), + inputBackups: []kopia.BackupEntry{ + { + Backup: &backup1, + Reasons: []identity.Reasoner{ + exchangePathReason1, + }, + }, + }, + populatedDetails: map[string]*details.Details{ + backup1.DetailsID: { + DetailsModel: details.DetailsModel{ + Entries: []details.Entry{ + *makeDetailsEntry(suite.T(), exchangeItemPath1, exchangeLocationPath1, 42, false), + }, + }, + }, + }, + errCheck: assert.NoError, + expectedEntries: []*details.Entry{ + makeDetailsEntry(suite.T(), exchangeItemPath1, exchangeLocationPath1, 42, false), + }, + }, { name: "ItemMergedSameLocation", mdm: func() *mockDetailsMergeInfoer { diff --git a/src/pkg/backup/details/details_test.go b/src/pkg/backup/details/details_test.go index 43883ed5a..1d5e57035 100644 --- a/src/pkg/backup/details/details_test.go +++ b/src/pkg/backup/details/details_test.go @@ -1353,7 +1353,7 @@ func (suite *DetailsUnitSuite) TestLocationIDer_FromEntry() { expectedUniqueLoc: fmt.Sprintf(expectedExchangeUniqueLocFmt, path.EmailCategory), }, { - name: "Exchange Email Without LocationRef Old Version Errors", + name: "Exchange Email Without LocationRef Old Version", service: path.ExchangeService.String(), category: path.EmailCategory.String(), itemInfo: ItemInfo{ @@ -1361,11 +1361,13 @@ func (suite *DetailsUnitSuite) TestLocationIDer_FromEntry() { ItemType: ExchangeMail, }, }, - backupVersion: version.OneDrive7LocationRef - 1, - expectedErr: require.Error, + backupVersion: version.OneDrive7LocationRef - 1, + hasLocRef: true, + expectedErr: require.NoError, + expectedUniqueLoc: fmt.Sprintf(expectedExchangeUniqueLocFmt, path.EmailCategory), }, { - name: "Exchange Email Without LocationRef New Version Errors", + name: "Exchange Email Without LocationRef New Version", service: path.ExchangeService.String(), category: path.EmailCategory.String(), itemInfo: ItemInfo{ @@ -1373,9 +1375,49 @@ func (suite *DetailsUnitSuite) TestLocationIDer_FromEntry() { ItemType: ExchangeMail, }, }, + backupVersion: version.OneDrive7LocationRef, + hasLocRef: true, + expectedErr: require.NoError, + expectedUniqueLoc: fmt.Sprintf(expectedExchangeUniqueLocFmt, path.EmailCategory), + }, + { + name: "Exchange Email Bad RepoRef Fails", + service: path.OneDriveService.String(), + category: 
path.EmailCategory.String(), + itemInfo: ItemInfo{ + Exchange: &ExchangeInfo{ + ItemType: ExchangeMail, + }, + }, backupVersion: version.OneDrive7LocationRef, expectedErr: require.Error, }, + { + name: "Exchange Event Empty LocationRef New Version Fails", + service: path.ExchangeService.String(), + category: path.EventsCategory.String(), + itemInfo: ItemInfo{ + Exchange: &ExchangeInfo{ + ItemType: ExchangeEvent, + }, + }, + backupVersion: 2, + expectedErr: require.Error, + }, + { + name: "Exchange Event Empty LocationRef Old Version", + service: path.ExchangeService.String(), + category: path.EventsCategory.String(), + itemInfo: ItemInfo{ + Exchange: &ExchangeInfo{ + ItemType: ExchangeEvent, + }, + }, + backupVersion: version.OneDrive1DataAndMetaFiles, + hasLocRef: true, + expectedErr: require.NoError, + expectedUniqueLoc: fmt.Sprintf(expectedExchangeUniqueLocFmt, path.EventsCategory), + }, } for _, test := range table { diff --git a/src/pkg/backup/details/entry.go b/src/pkg/backup/details/entry.go index 47e2d5196..cfadd8641 100644 --- a/src/pkg/backup/details/entry.go +++ b/src/pkg/backup/details/entry.go @@ -56,6 +56,9 @@ type Entry struct { // ToLocationIDer takes a backup version and produces the unique location for // this entry if possible. Reasons it may not be possible to produce the unique // location include an unsupported backup version or missing information. +// +// TODO(ashmrtn): Remove this function completely if we ever decide to sunset +// older corso versions that didn't populate LocationRef. func (de Entry) ToLocationIDer(backupVersion int) (LocationIDer, error) { if len(de.LocationRef) > 0 { baseLoc, err := path.Builder{}.SplitUnescapeAppend(de.LocationRef) @@ -68,32 +71,44 @@ func (de Entry) ToLocationIDer(backupVersion int) (LocationIDer, error) { return de.ItemInfo.uniqueLocation(baseLoc) } - if backupVersion >= version.OneDrive7LocationRef || - (de.ItemInfo.infoType() != OneDriveItem && - de.ItemInfo.infoType() != SharePointLibrary) { - return nil, clues.New("no previous location for entry") - } - - // This is a little hacky, but we only want to try to extract the old - // location if it's OneDrive or SharePoint libraries and it's known to - // be an older backup version. - // - // TODO(ashmrtn): Remove this code once OneDrive/SharePoint libraries - // LocationRef code has been out long enough that all delta tokens for - // previous backup versions will have expired. At that point, either - // we'll do a full backup (token expired, no newer backups) or have a - // backup of a higher version with the information we need. rr, err := path.FromDataLayerPath(de.RepoRef, true) if err != nil { - return nil, clues.Wrap(err, "getting item RepoRef") + return nil, clues.Wrap(err, "getting item RepoRef"). + With("repo_ref", de.RepoRef) } - p, err := path.ToDrivePath(rr) - if err != nil { - return nil, clues.New("converting RepoRef to drive path") + var baseLoc *path.Builder + + switch de.ItemInfo.infoType() { + case ExchangeEvent: + if backupVersion >= 2 { + return nil, clues.New("no previous location for calendar entry"). + With("repo_ref", rr) + } + + fallthrough + case ExchangeMail, ExchangeContact: + baseLoc = path.Builder{}.Append(rr.Folders()...) + + case OneDriveItem, SharePointLibrary: + if backupVersion >= version.OneDrive7LocationRef { + return nil, clues.New("no previous location for drive entry"). + With("repo_ref", rr) + } + + p, err := path.ToDrivePath(rr) + if err != nil { + return nil, clues.New("converting RepoRef to drive path"). 
+ With("repo_ref", rr) + } + + baseLoc = path.Builder{}.Append(p.Root).Append(p.Folders...) } - baseLoc := path.Builder{}.Append(p.Root).Append(p.Folders...) + if baseLoc == nil { + return nil, clues.New("unable to extract LocationRef from RepoRef"). + With("repo_ref", rr) + } // Individual services may add additional info to the base and return that. return de.ItemInfo.uniqueLocation(baseLoc) From c7dff3c42ba2ea076ea780ac28b93c9401797267 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 24 Aug 2023 06:51:03 +0000 Subject: [PATCH 32/32] =?UTF-8?q?=E2=AC=86=EF=B8=8F=20Bump=20github.com/aw?= =?UTF-8?q?s/aws-sdk-go=20from=201.44.329=20to=201.44.330=20in=20/src=20(#?= =?UTF-8?q?4103)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.44.329 to 1.44.330.

From c7dff3c42ba2ea076ea780ac28b93c9401797267 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Thu, 24 Aug 2023 06:51:03 +0000
Subject: [PATCH 32/32] =?UTF-8?q?=E2=AC=86=EF=B8=8F=20Bump=20github.com/aw?=
 =?UTF-8?q?s/aws-sdk-go=20from=201.44.329=20to=201.44.330=20in=20/src=20(#?=
 =?UTF-8?q?4103)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.44.329 to 1.44.330.

Release notes

Sourced from github.com/aws/aws-sdk-go's releases.

Release v1.44.330 (2023-08-23)

Service Client Updates

  • service/apigateway: Updates service API and documentation
    • This release adds RootResourceId to GetRestApi response.
  • service/ec2: Updates service API and documentation
    • Marking fields as sensitive on BundleTask and GetPasswordData
  • service/polly: Updates service API
    • Amazon Polly adds 1 new voice - Zayd (ar-AE)

--- src/go.mod | 2 +- src/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/go.mod b/src/go.mod index 01e9a2c02..5708b40a4 100644 --- a/src/go.mod +++ b/src/go.mod @@ -8,7 +8,7 @@ require ( github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.1 github.com/alcionai/clues v0.0.0-20230728164842-7dc4795a43e4 github.com/armon/go-metrics v0.4.1 - github.com/aws/aws-sdk-go v1.44.329 + github.com/aws/aws-sdk-go v1.44.330 github.com/aws/aws-xray-sdk-go v1.8.1 github.com/cenkalti/backoff/v4 v4.2.1 github.com/google/uuid v1.3.1 diff --git a/src/go.sum b/src/go.sum index 5e3687cbc..72ef9231f 100644 --- a/src/go.sum +++ b/src/go.sum @@ -66,8 +66,8 @@ github.com/andybalholm/brotli v1.0.5 h1:8uQZIdzKmjc/iuPu7O2ioW48L81FgatrcpfFmiq/ github.com/andybalholm/brotli v1.0.5/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= -github.com/aws/aws-sdk-go v1.44.329 h1:Rqy+wYI8h+iq+FphR59KKTsHR1Lz7YiwRqFzWa7xoYU= -github.com/aws/aws-sdk-go v1.44.329/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go v1.44.330 h1:kO41s8I4hRYtWSIuMc/O053wmEGfMTT8D4KtPSojUkA= +github.com/aws/aws-sdk-go v1.44.330/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/aws/aws-xray-sdk-go v1.8.1 h1:O4pXV+hnCskaamGsZnFpzHyAmgPGusBMN6i7nnsy0Fo= github.com/aws/aws-xray-sdk-go v1.8.1/go.mod h1:wMmVYzej3sykAttNBkXQHK/+clAPWTOrPiajEk7Cp3A= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A=