From adbc7fe03e8c58bf8bc45a92da70f500d3c69e76 Mon Sep 17 00:00:00 2001 From: neha_gupta Date: Tue, 26 Sep 2023 14:23:14 +0530 Subject: [PATCH 01/14] allow delete of multiple backup IDs (#4335) allow deletion of multiple IDs in single command #### Does this PR need a docs update or release note? - [ ] :white_check_mark: Yes, it's included - [ ] :clock1: Yes, but in a later PR - [ ] :no_entry: No #### Type of change - [ ] :sunflower: Feature #### Issue(s) * https://github.com/alcionai/corso/issues/4119 #### Test Plan - [ ] :muscle: Manual - [ ] :green_heart: E2E --- CHANGELOG.md | 1 + src/cli/backup/backup.go | 10 ++-- src/cli/backup/exchange.go | 22 +++++-- src/cli/backup/exchange_e2e_test.go | 78 ++++++++++++++++++++++-- src/cli/backup/groups.go | 22 +++++-- src/cli/backup/groups_e2e_test.go | 69 ++++++++++++++++++++-- src/cli/backup/onedrive.go | 22 +++++-- src/cli/backup/onedrive_e2e_test.go | 85 +++++++++++++++++++++++++-- src/cli/backup/sharepoint.go | 22 +++++-- src/cli/backup/sharepoint_e2e_test.go | 43 ++++++++++++-- src/cli/flags/repo.go | 14 +++++ src/cli/restore/groups.go | 2 +- 12 files changed, 346 insertions(+), 44 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 862a58f08..7d5ad8ce4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Enables local or network-attached storage for Corso repositories. - Reduce backup runtime for OneDrive and SharePoint incremental backups that have no file changes. - Increase Exchange backup performance by lazily fetching data only for items whose content changed. +- Added `--backups` flag to delete multiple backups in `corso backup delete` command. 
## [v0.13.0] (beta) - 2023-09-18 diff --git a/src/cli/backup/backup.go b/src/cli/backup/backup.go index a79a8afb2..51f712e53 100644 --- a/src/cli/backup/backup.go +++ b/src/cli/backup/backup.go @@ -252,8 +252,8 @@ func runBackups( func genericDeleteCommand( cmd *cobra.Command, pst path.ServiceType, - bID, designation string, - args []string, + designation string, + bID, args []string, ) error { if utils.HasNoFlagsAndShownHelp(cmd) { return nil @@ -275,11 +275,11 @@ func genericDeleteCommand( defer utils.CloseRepo(ctx, r) - if err := r.DeleteBackups(ctx, true, bID); err != nil { - return Only(ctx, clues.Wrap(err, "Deleting backup "+bID)) + if err := r.DeleteBackups(ctx, true, bID...); err != nil { + return Only(ctx, clues.Wrap(err, fmt.Sprintf("Deleting backup %v", bID))) } - Infof(ctx, "Deleted %s backup %s", designation, bID) + Infof(ctx, "Deleted %s backup %v", designation, bID) return nil } diff --git a/src/cli/backup/exchange.go b/src/cli/backup/exchange.go index 4a37c032b..85b474bbc 100644 --- a/src/cli/backup/exchange.go +++ b/src/cli/backup/exchange.go @@ -32,7 +32,7 @@ const ( const ( exchangeServiceCommand = "exchange" exchangeServiceCommandCreateUseSuffix = "--mailbox | '" + flags.Wildcard + "'" - exchangeServiceCommandDeleteUseSuffix = "--backup " + exchangeServiceCommandDeleteUseSuffix = "--backups " exchangeServiceCommandDetailsUseSuffix = "--backup " ) @@ -46,8 +46,9 @@ corso backup create exchange --mailbox alice@example.com,bob@example.com --data # Backup all Exchange data for all M365 users corso backup create exchange --mailbox '*'` - exchangeServiceCommandDeleteExamples = `# Delete Exchange backup with ID 1234abcd-12ab-cd34-56de-1234abcd -corso backup delete exchange --backup 1234abcd-12ab-cd34-56de-1234abcd` + exchangeServiceCommandDeleteExamples = `# Delete Exchange backup with IDs 1234abcd-12ab-cd34-56de-1234abcd \ +and 1234abcd-12ab-cd34-56de-1234abce +corso backup delete exchange --backups 
1234abcd-12ab-cd34-56de-1234abcd,1234abcd-12ab-cd34-56de-1234abce` exchangeServiceCommandDetailsExamples = `# Explore items in Alice's latest backup (1234abcd...) corso backup details exchange --backup 1234abcd-12ab-cd34-56de-1234abcd @@ -121,7 +122,8 @@ func addExchangeCommands(cmd *cobra.Command) *cobra.Command { c.Use = c.Use + " " + exchangeServiceCommandDeleteUseSuffix c.Example = exchangeServiceCommandDeleteExamples - flags.AddBackupIDFlag(c, true) + flags.AddMultipleBackupIDsFlag(c, false) + flags.AddBackupIDFlag(c, false) } return c @@ -352,5 +354,15 @@ func exchangeDeleteCmd() *cobra.Command { // deletes an exchange service backup. func deleteExchangeCmd(cmd *cobra.Command, args []string) error { - return genericDeleteCommand(cmd, path.ExchangeService, flags.BackupIDFV, "Exchange", args) + var backupIDValue []string + + if len(flags.BackupIDsFV) > 0 { + backupIDValue = flags.BackupIDsFV + } else if len(flags.BackupIDFV) > 0 { + backupIDValue = append(backupIDValue, flags.BackupIDFV) + } else { + return clues.New("either --backup or --backups flag is required") + } + + return genericDeleteCommand(cmd, path.ExchangeService, "Exchange", backupIDValue, args) } diff --git a/src/cli/backup/exchange_e2e_test.go b/src/cli/backup/exchange_e2e_test.go index 992787997..9965652d6 100644 --- a/src/cli/backup/exchange_e2e_test.go +++ b/src/cli/backup/exchange_e2e_test.go @@ -561,8 +561,9 @@ func runExchangeDetailsCmdTest(suite *PreparedBackupExchangeE2ESuite, category p type BackupDeleteExchangeE2ESuite struct { tester.Suite - dpnd dependencies - backupOp operations.BackupOperation + dpnd dependencies + backupOp operations.BackupOperation + secondaryBackupOp operations.BackupOperation } func TestBackupDeleteExchangeE2ESuite(t *testing.T) { @@ -595,6 +596,14 @@ func (suite *BackupDeleteExchangeE2ESuite) SetupSuite() { err = suite.backupOp.Run(ctx) require.NoError(t, err, clues.ToCore(err)) + + backupOp2, err := suite.dpnd.repo.NewBackup(ctx, sel.Selector) + 
require.NoError(t, err, clues.ToCore(err)) + + suite.secondaryBackupOp = backupOp2 + + err = suite.secondaryBackupOp.Run(ctx) + require.NoError(t, err, clues.ToCore(err)) } func (suite *BackupDeleteExchangeE2ESuite) TestExchangeBackupDeleteCmd() { @@ -608,7 +617,10 @@ func (suite *BackupDeleteExchangeE2ESuite) TestExchangeBackupDeleteCmd() { cmd := cliTD.StubRootCmd( "backup", "delete", "exchange", "--config-file", suite.dpnd.configFilePath, - "--"+flags.BackupFN, string(suite.backupOp.Results.BackupID)) + "--"+flags.BackupIDsFN, + fmt.Sprintf("%s,%s", + string(suite.backupOp.Results.BackupID), + string(suite.secondaryBackupOp.Results.BackupID))) cli.BuildCommandTree(cmd) // run the command @@ -624,6 +636,46 @@ func (suite *BackupDeleteExchangeE2ESuite) TestExchangeBackupDeleteCmd() { err = cmd.ExecuteContext(ctx) require.Error(t, err, clues.ToCore(err)) + + // a follow-up details call should fail, due to the backup ID being deleted + cmd = cliTD.StubRootCmd( + "backup", "details", "exchange", + "--config-file", suite.dpnd.configFilePath, + "--backup", string(suite.secondaryBackupOp.Results.BackupID)) + cli.BuildCommandTree(cmd) + + err = cmd.ExecuteContext(ctx) + require.Error(t, err, clues.ToCore(err)) +} + +func (suite *BackupDeleteExchangeE2ESuite) TestExchangeBackupDeleteCmd_SingleID() { + t := suite.T() + + ctx, flush := tester.NewContext(t) + ctx = config.SetViper(ctx, suite.dpnd.vpr) + + defer flush() + + cmd := cliTD.StubRootCmd( + "backup", "delete", "exchange", + "--config-file", suite.dpnd.configFilePath, + "--"+flags.BackupFN, + string(suite.backupOp.Results.BackupID)) + cli.BuildCommandTree(cmd) + + // run the command + err := cmd.ExecuteContext(ctx) + require.NoError(t, err, clues.ToCore(err)) + + // a follow-up details call should fail, due to the backup ID being deleted + cmd = cliTD.StubRootCmd( + "backup", "details", "exchange", + "--config-file", suite.dpnd.configFilePath, + "--backup", string(suite.secondaryBackupOp.Results.BackupID)) + 
cli.BuildCommandTree(cmd) + + err = cmd.ExecuteContext(ctx) + require.Error(t, err, clues.ToCore(err)) } func (suite *BackupDeleteExchangeE2ESuite) TestExchangeBackupDeleteCmd_UnknownID() { @@ -637,10 +689,28 @@ func (suite *BackupDeleteExchangeE2ESuite) TestExchangeBackupDeleteCmd_UnknownID cmd := cliTD.StubRootCmd( "backup", "delete", "exchange", "--config-file", suite.dpnd.configFilePath, - "--"+flags.BackupFN, uuid.NewString()) + "--"+flags.BackupIDsFN, uuid.NewString()) cli.BuildCommandTree(cmd) // unknown backupIDs should error since the modelStore can't find the backup err := cmd.ExecuteContext(ctx) require.Error(t, err, clues.ToCore(err)) } + +func (suite *BackupDeleteExchangeE2ESuite) TestExchangeBackupDeleteCmd_NoBackupID() { + t := suite.T() + + ctx, flush := tester.NewContext(t) + ctx = config.SetViper(ctx, suite.dpnd.vpr) + + defer flush() + + cmd := cliTD.StubRootCmd( + "backup", "delete", "exchange", + "--config-file", suite.dpnd.configFilePath) + cli.BuildCommandTree(cmd) + + // empty backupIDs should error since no data provided + err := cmd.ExecuteContext(ctx) + require.Error(t, err, clues.ToCore(err)) +} diff --git a/src/cli/backup/groups.go b/src/cli/backup/groups.go index 7e3e041db..84db6ebde 100644 --- a/src/cli/backup/groups.go +++ b/src/cli/backup/groups.go @@ -32,7 +32,7 @@ const ( groupsServiceCommand = "groups" teamsServiceCommand = "teams" groupsServiceCommandCreateUseSuffix = "--group | '" + flags.Wildcard + "'" - groupsServiceCommandDeleteUseSuffix = "--backup " + groupsServiceCommandDeleteUseSuffix = "--backups " groupsServiceCommandDetailsUseSuffix = "--backup " ) @@ -46,8 +46,9 @@ corso backup create groups --group Marketing --data messages # Backup all Groups and Teams data for all groups corso backup create groups --group '*'` - groupsServiceCommandDeleteExamples = `# Delete Groups backup with ID 1234abcd-12ab-cd34-56de-1234abcd -corso backup delete groups --backup 1234abcd-12ab-cd34-56de-1234abcd` + 
groupsServiceCommandDeleteExamples = `# Delete Groups backup with ID 1234abcd-12ab-cd34-56de-1234abcd \ +and 1234abcd-12ab-cd34-56de-1234abce +corso backup delete groups --backups 1234abcd-12ab-cd34-56de-1234abcd,1234abcd-12ab-cd34-56de-1234abce` groupsServiceCommandDetailsExamples = `# Explore items in Marketing's latest backup (1234abcd...) corso backup details groups --backup 1234abcd-12ab-cd34-56de-1234abcd @@ -110,7 +111,8 @@ func addGroupsCommands(cmd *cobra.Command) *cobra.Command { c.Use = c.Use + " " + groupsServiceCommandDeleteUseSuffix c.Example = groupsServiceCommandDeleteExamples - flags.AddBackupIDFlag(c, true) + flags.AddMultipleBackupIDsFlag(c, false) + flags.AddBackupIDFlag(c, false) } return c @@ -305,7 +307,17 @@ func groupsDeleteCmd() *cobra.Command { // deletes an groups service backup. func deleteGroupsCmd(cmd *cobra.Command, args []string) error { - return genericDeleteCommand(cmd, path.GroupsService, flags.BackupIDFV, "Groups", args) + backupIDValue := []string{} + + if len(flags.BackupIDsFV) > 0 { + backupIDValue = flags.BackupIDsFV + } else if len(flags.BackupIDFV) > 0 { + backupIDValue = append(backupIDValue, flags.BackupIDFV) + } else { + return clues.New("either --backup or --backups flag is required") + } + + return genericDeleteCommand(cmd, path.GroupsService, "Groups", backupIDValue, args) } // --------------------------------------------------------------------------- diff --git a/src/cli/backup/groups_e2e_test.go b/src/cli/backup/groups_e2e_test.go index 036b2cbb6..66de86eb4 100644 --- a/src/cli/backup/groups_e2e_test.go +++ b/src/cli/backup/groups_e2e_test.go @@ -497,8 +497,9 @@ func runGroupsDetailsCmdTest(suite *PreparedBackupGroupsE2ESuite, category path. 
type BackupDeleteGroupsE2ESuite struct { tester.Suite - dpnd dependencies - backupOp operations.BackupOperation + dpnd dependencies + backupOp operations.BackupOperation + secondaryBackupOp operations.BackupOperation } func TestBackupDeleteGroupsE2ESuite(t *testing.T) { @@ -531,6 +532,15 @@ func (suite *BackupDeleteGroupsE2ESuite) SetupSuite() { err = suite.backupOp.Run(ctx) require.NoError(t, err, clues.ToCore(err)) + + // secondary backup + secondaryBackupOp, err := suite.dpnd.repo.NewBackup(ctx, sel.Selector) + require.NoError(t, err, clues.ToCore(err)) + + suite.secondaryBackupOp = secondaryBackupOp + + err = suite.secondaryBackupOp.Run(ctx) + require.NoError(t, err, clues.ToCore(err)) } func (suite *BackupDeleteGroupsE2ESuite) TestGroupsBackupDeleteCmd() { @@ -544,7 +554,40 @@ func (suite *BackupDeleteGroupsE2ESuite) TestGroupsBackupDeleteCmd() { cmd := cliTD.StubRootCmd( "backup", "delete", "groups", "--config-file", suite.dpnd.configFilePath, - "--"+flags.BackupFN, string(suite.backupOp.Results.BackupID)) + "--"+flags.BackupIDsFN, + fmt.Sprintf("%s,%s", + string(suite.backupOp.Results.BackupID), + string(suite.secondaryBackupOp.Results.BackupID))) + cli.BuildCommandTree(cmd) + + // run the command + err := cmd.ExecuteContext(ctx) + require.NoError(t, err, clues.ToCore(err)) + + // a follow-up details call should fail, due to the backup ID being deleted + cmd = cliTD.StubRootCmd( + "backup", "details", "groups", + "--config-file", suite.dpnd.configFilePath, + "--backups", string(suite.backupOp.Results.BackupID)) + cli.BuildCommandTree(cmd) + + err = cmd.ExecuteContext(ctx) + require.Error(t, err, clues.ToCore(err)) +} + +func (suite *BackupDeleteGroupsE2ESuite) TestGroupsBackupDeleteCmd_SingleID() { + t := suite.T() + + ctx, flush := tester.NewContext(t) + ctx = config.SetViper(ctx, suite.dpnd.vpr) + + defer flush() + + cmd := cliTD.StubRootCmd( + "backup", "delete", "groups", + "--config-file", suite.dpnd.configFilePath, + "--"+flags.BackupFN, + 
string(suite.backupOp.Results.BackupID)) cli.BuildCommandTree(cmd) // run the command @@ -573,7 +616,7 @@ func (suite *BackupDeleteGroupsE2ESuite) TestGroupsBackupDeleteCmd_UnknownID() { cmd := cliTD.StubRootCmd( "backup", "delete", "groups", "--config-file", suite.dpnd.configFilePath, - "--"+flags.BackupFN, uuid.NewString()) + "--"+flags.BackupIDsFN, uuid.NewString()) cli.BuildCommandTree(cmd) // unknown backupIDs should error since the modelStore can't find the backup @@ -581,6 +624,24 @@ func (suite *BackupDeleteGroupsE2ESuite) TestGroupsBackupDeleteCmd_UnknownID() { require.Error(t, err, clues.ToCore(err)) } +func (suite *BackupDeleteGroupsE2ESuite) TestGroupsBackupDeleteCmd_NoBackupID() { + t := suite.T() + + ctx, flush := tester.NewContext(t) + ctx = config.SetViper(ctx, suite.dpnd.vpr) + + defer flush() + + cmd := cliTD.StubRootCmd( + "backup", "delete", "groups", + "--config-file", suite.dpnd.configFilePath) + cli.BuildCommandTree(cmd) + + // empty backupIDs should error since no data provided + err := cmd.ExecuteContext(ctx) + require.Error(t, err, clues.ToCore(err)) +} + // --------------------------------------------------------------------------- // helpers // --------------------------------------------------------------------------- diff --git a/src/cli/backup/onedrive.go b/src/cli/backup/onedrive.go index c870f84ee..0eeb13308 100644 --- a/src/cli/backup/onedrive.go +++ b/src/cli/backup/onedrive.go @@ -26,7 +26,7 @@ import ( const ( oneDriveServiceCommand = "onedrive" oneDriveServiceCommandCreateUseSuffix = "--user | '" + flags.Wildcard + "'" - oneDriveServiceCommandDeleteUseSuffix = "--backup " + oneDriveServiceCommandDeleteUseSuffix = "--backups " oneDriveServiceCommandDetailsUseSuffix = "--backup " ) @@ -40,8 +40,9 @@ corso backup create onedrive --user alice@example.com,bob@example.com # Backup all OneDrive data for all M365 users corso backup create onedrive --user '*'` - oneDriveServiceCommandDeleteExamples = `# Delete OneDrive backup with ID 
1234abcd-12ab-cd34-56de-1234abcd -corso backup delete onedrive --backup 1234abcd-12ab-cd34-56de-1234abcd` + oneDriveServiceCommandDeleteExamples = `# Delete OneDrive backup with ID 1234abcd-12ab-cd34-56de-1234abcd \ +and 1234abcd-12ab-cd34-56de-1234abce +corso backup delete onedrive --backups 1234abcd-12ab-cd34-56de-1234abcd,1234abcd-12ab-cd34-56de-1234abce` oneDriveServiceCommandDetailsExamples = `# Explore items in Bob's latest backup (1234abcd...) corso backup details onedrive --backup 1234abcd-12ab-cd34-56de-1234abcd @@ -100,7 +101,8 @@ func addOneDriveCommands(cmd *cobra.Command) *cobra.Command { c.Use = c.Use + " " + oneDriveServiceCommandDeleteUseSuffix c.Example = oneDriveServiceCommandDeleteExamples - flags.AddBackupIDFlag(c, true) + flags.AddMultipleBackupIDsFlag(c, false) + flags.AddBackupIDFlag(c, false) } return c @@ -306,5 +308,15 @@ func oneDriveDeleteCmd() *cobra.Command { // deletes a oneDrive service backup. func deleteOneDriveCmd(cmd *cobra.Command, args []string) error { - return genericDeleteCommand(cmd, path.OneDriveService, flags.BackupIDFV, "OneDrive", args) + backupIDValue := []string{} + + if len(flags.BackupIDsFV) > 0 { + backupIDValue = flags.BackupIDsFV + } else if len(flags.BackupIDFV) > 0 { + backupIDValue = append(backupIDValue, flags.BackupIDFV) + } else { + return clues.New("either --backup or --backups flag is required") + } + + return genericDeleteCommand(cmd, path.OneDriveService, "OneDrive", backupIDValue, args) } diff --git a/src/cli/backup/onedrive_e2e_test.go b/src/cli/backup/onedrive_e2e_test.go index ea5e808b4..77d599862 100644 --- a/src/cli/backup/onedrive_e2e_test.go +++ b/src/cli/backup/onedrive_e2e_test.go @@ -121,8 +121,9 @@ func (suite *NoBackupOneDriveE2ESuite) TestOneDriveBackupCmd_userNotInTenant() { type BackupDeleteOneDriveE2ESuite struct { tester.Suite - dpnd dependencies - backupOp operations.BackupOperation + dpnd dependencies + backupOp operations.BackupOperation + secondaryBackupOp 
operations.BackupOperation } func TestBackupDeleteOneDriveE2ESuite(t *testing.T) { @@ -158,6 +159,15 @@ func (suite *BackupDeleteOneDriveE2ESuite) SetupSuite() { err = suite.backupOp.Run(ctx) require.NoError(t, err, clues.ToCore(err)) + + // secondary backup + secondaryBackupOp, err := suite.dpnd.repo.NewBackupWithLookup(ctx, sel.Selector, ins) + require.NoError(t, err, clues.ToCore(err)) + + suite.secondaryBackupOp = secondaryBackupOp + + err = suite.secondaryBackupOp.Run(ctx) + require.NoError(t, err, clues.ToCore(err)) } func (suite *BackupDeleteOneDriveE2ESuite) TestOneDriveBackupDeleteCmd() { @@ -173,7 +183,10 @@ func (suite *BackupDeleteOneDriveE2ESuite) TestOneDriveBackupDeleteCmd() { cmd := cliTD.StubRootCmd( "backup", "delete", "onedrive", "--config-file", suite.dpnd.configFilePath, - "--"+flags.BackupFN, string(suite.backupOp.Results.BackupID)) + "--"+flags.BackupIDsFN, + fmt.Sprintf("%s,%s", + string(suite.backupOp.Results.BackupID), + string(suite.secondaryBackupOp.Results.BackupID))) cli.BuildCommandTree(cmd) cmd.SetErr(&suite.dpnd.recorder) @@ -187,7 +200,51 @@ func (suite *BackupDeleteOneDriveE2ESuite) TestOneDriveBackupDeleteCmd() { assert.True(t, strings.HasSuffix( result, - fmt.Sprintf("Deleted OneDrive backup %s\n", string(suite.backupOp.Results.BackupID)))) + fmt.Sprintf("Deleted OneDrive backup [%s %s]\n", + string(suite.backupOp.Results.BackupID), + string(suite.secondaryBackupOp.Results.BackupID)))) + + // a follow-up details call should fail, due to the backup ID being deleted + cmd = cliTD.StubRootCmd( + "backup", "details", "onedrive", + "--config-file", suite.dpnd.configFilePath, + "--backups", string(suite.backupOp.Results.BackupID)) + cli.BuildCommandTree(cmd) + + err = cmd.ExecuteContext(ctx) + require.Error(t, err, clues.ToCore(err)) +} + +func (suite *BackupDeleteOneDriveE2ESuite) TestOneDriveBackupDeleteCmd_SingleID() { + t := suite.T() + + ctx, flush := tester.NewContext(t) + ctx = config.SetViper(ctx, suite.dpnd.vpr) + + defer 
flush() + + suite.dpnd.recorder.Reset() + + cmd := cliTD.StubRootCmd( + "backup", "delete", "onedrive", + "--config-file", suite.dpnd.configFilePath, + "--"+flags.BackupFN, + string(suite.backupOp.Results.BackupID)) + cli.BuildCommandTree(cmd) + cmd.SetErr(&suite.dpnd.recorder) + + ctx = print.SetRootCmd(ctx, cmd) + + // run the command + err := cmd.ExecuteContext(ctx) + require.NoError(t, err, clues.ToCore(err)) + + result := suite.dpnd.recorder.String() + assert.True(t, + strings.HasSuffix( + result, + fmt.Sprintf("Deleted OneDrive backup [%s]\n", + string(suite.backupOp.Results.BackupID)))) // a follow-up details call should fail, due to the backup ID being deleted cmd = cliTD.StubRootCmd( @@ -211,10 +268,28 @@ func (suite *BackupDeleteOneDriveE2ESuite) TestOneDriveBackupDeleteCmd_unknownID cmd := cliTD.StubRootCmd( "backup", "delete", "onedrive", "--config-file", suite.dpnd.configFilePath, - "--"+flags.BackupFN, uuid.NewString()) + "--"+flags.BackupIDsFN, uuid.NewString()) cli.BuildCommandTree(cmd) // unknown backupIDs should error since the modelStore can't find the backup err := cmd.ExecuteContext(ctx) require.Error(t, err, clues.ToCore(err)) } + +func (suite *BackupDeleteOneDriveE2ESuite) TestOneDriveBackupDeleteCmd_NoBackupID() { + t := suite.T() + + ctx, flush := tester.NewContext(t) + ctx = config.SetViper(ctx, suite.dpnd.vpr) + + defer flush() + + cmd := cliTD.StubRootCmd( + "backup", "delete", "onedrive", + "--config-file", suite.dpnd.configFilePath) + cli.BuildCommandTree(cmd) + + // empty backupIDs should error since no data provided + err := cmd.ExecuteContext(ctx) + require.Error(t, err, clues.ToCore(err)) +} diff --git a/src/cli/backup/sharepoint.go b/src/cli/backup/sharepoint.go index 2b154573e..71a5ca524 100644 --- a/src/cli/backup/sharepoint.go +++ b/src/cli/backup/sharepoint.go @@ -30,7 +30,7 @@ import ( const ( sharePointServiceCommand = "sharepoint" sharePointServiceCommandCreateUseSuffix = "--site | '" + flags.Wildcard + "'" - 
sharePointServiceCommandDeleteUseSuffix = "--backup " + sharePointServiceCommandDeleteUseSuffix = "--backups " sharePointServiceCommandDetailsUseSuffix = "--backup " ) @@ -44,8 +44,9 @@ corso backup create sharepoint --site https://example.com/hr,https://example.com # Backup all SharePoint data for all Sites corso backup create sharepoint --site '*'` - sharePointServiceCommandDeleteExamples = `# Delete SharePoint backup with ID 1234abcd-12ab-cd34-56de-1234abcd -corso backup delete sharepoint --backup 1234abcd-12ab-cd34-56de-1234abcd` + sharePointServiceCommandDeleteExamples = `# Delete SharePoint backup with ID 1234abcd-12ab-cd34-56de-1234abcd \ +and 1234abcd-12ab-cd34-56de-1234abce +corso backup delete sharepoint --backups 1234abcd-12ab-cd34-56de-1234abcd,1234abcd-12ab-cd34-56de-1234abce` sharePointServiceCommandDetailsExamples = `# Explore items in the HR site's latest backup (1234abcd...) corso backup details sharepoint --backup 1234abcd-12ab-cd34-56de-1234abcd @@ -111,7 +112,8 @@ func addSharePointCommands(cmd *cobra.Command) *cobra.Command { c.Use = c.Use + " " + sharePointServiceCommandDeleteUseSuffix c.Example = sharePointServiceCommandDeleteExamples - flags.AddBackupIDFlag(c, true) + flags.AddMultipleBackupIDsFlag(c, false) + flags.AddBackupIDFlag(c, false) } return c @@ -284,7 +286,17 @@ func sharePointDeleteCmd() *cobra.Command { // deletes a sharePoint service backup. 
func deleteSharePointCmd(cmd *cobra.Command, args []string) error { - return genericDeleteCommand(cmd, path.SharePointService, flags.BackupIDFV, "SharePoint", args) + backupIDValue := []string{} + + if len(flags.BackupIDsFV) > 0 { + backupIDValue = flags.BackupIDsFV + } else if len(flags.BackupIDFV) > 0 { + backupIDValue = append(backupIDValue, flags.BackupIDFV) + } else { + return clues.New("either --backup or --backups flag is required") + } + + return genericDeleteCommand(cmd, path.SharePointService, "SharePoint", backupIDValue, args) } // ------------------------------------------------------------------------------------------------ diff --git a/src/cli/backup/sharepoint_e2e_test.go b/src/cli/backup/sharepoint_e2e_test.go index 138247850..bfb67f85a 100644 --- a/src/cli/backup/sharepoint_e2e_test.go +++ b/src/cli/backup/sharepoint_e2e_test.go @@ -84,8 +84,9 @@ func (suite *NoBackupSharePointE2ESuite) TestSharePointBackupListCmd_empty() { type BackupDeleteSharePointE2ESuite struct { tester.Suite - dpnd dependencies - backupOp operations.BackupOperation + dpnd dependencies + backupOp operations.BackupOperation + secondaryBackupOp operations.BackupOperation } func TestBackupDeleteSharePointE2ESuite(t *testing.T) { @@ -121,6 +122,15 @@ func (suite *BackupDeleteSharePointE2ESuite) SetupSuite() { err = suite.backupOp.Run(ctx) require.NoError(t, err, clues.ToCore(err)) + + // secondary backup + secondaryBackupOp, err := suite.dpnd.repo.NewBackupWithLookup(ctx, sel.Selector, ins) + require.NoError(t, err, clues.ToCore(err)) + + suite.secondaryBackupOp = secondaryBackupOp + + err = suite.secondaryBackupOp.Run(ctx) + require.NoError(t, err, clues.ToCore(err)) } func (suite *BackupDeleteSharePointE2ESuite) TestSharePointBackupDeleteCmd() { @@ -136,7 +146,10 @@ func (suite *BackupDeleteSharePointE2ESuite) TestSharePointBackupDeleteCmd() { cmd := cliTD.StubRootCmd( "backup", "delete", "sharepoint", "--config-file", suite.dpnd.configFilePath, - "--"+flags.BackupFN, 
string(suite.backupOp.Results.BackupID)) + "--"+flags.BackupIDsFN, + fmt.Sprintf("%s,%s", + string(suite.backupOp.Results.BackupID), + string(suite.secondaryBackupOp.Results.BackupID))) cli.BuildCommandTree(cmd) cmd.SetErr(&suite.dpnd.recorder) @@ -150,7 +163,9 @@ func (suite *BackupDeleteSharePointE2ESuite) TestSharePointBackupDeleteCmd() { assert.True(t, strings.HasSuffix( result, - fmt.Sprintf("Deleted SharePoint backup %s\n", string(suite.backupOp.Results.BackupID)))) + fmt.Sprintf("Deleted SharePoint backup [%s %s]\n", + string(suite.backupOp.Results.BackupID), + string(suite.secondaryBackupOp.Results.BackupID)))) } // moved out of the func above to make the linter happy @@ -175,10 +190,28 @@ func (suite *BackupDeleteSharePointE2ESuite) TestSharePointBackupDeleteCmd_unkno cmd := cliTD.StubRootCmd( "backup", "delete", "sharepoint", "--config-file", suite.dpnd.configFilePath, - "--"+flags.BackupFN, uuid.NewString()) + "--"+flags.BackupIDsFN, uuid.NewString()) cli.BuildCommandTree(cmd) // unknown backupIDs should error since the modelStore can't find the backup err := cmd.ExecuteContext(ctx) require.Error(t, err, clues.ToCore(err)) } + +func (suite *BackupDeleteSharePointE2ESuite) TestSharePointBackupDeleteCmd_NoBackupID() { + t := suite.T() + + ctx, flush := tester.NewContext(t) + ctx = config.SetViper(ctx, suite.dpnd.vpr) + + defer flush() + + cmd := cliTD.StubRootCmd( + "backup", "delete", "groups", + "--config-file", suite.dpnd.configFilePath) + cli.BuildCommandTree(cmd) + + // empty backupIDs should error since no data provided + err := cmd.ExecuteContext(ctx) + require.Error(t, err, clues.ToCore(err)) +} diff --git a/src/cli/flags/repo.go b/src/cli/flags/repo.go index 44bc3a2a3..77495c08d 100644 --- a/src/cli/flags/repo.go +++ b/src/cli/flags/repo.go @@ -6,6 +6,7 @@ import ( const ( BackupFN = "backup" + BackupIDsFN = "backups" AWSAccessKeyFN = "aws-access-key" AWSSecretAccessKeyFN = "aws-secret-access-key" AWSSessionTokenFN = "aws-session-token" @@ -17,6 
+18,7 @@ const ( var ( BackupIDFV string + BackupIDsFV []string AWSAccessKeyFV string AWSSecretAccessKeyFV string AWSSessionTokenFV string @@ -24,6 +26,18 @@ var ( SucceedIfExistsFV bool ) +// AddMultipleBackupIDsFlag adds the --backups flag. +func AddMultipleBackupIDsFlag(cmd *cobra.Command, require bool) { + cmd.Flags().StringSliceVar( + &BackupIDsFV, + BackupIDsFN, nil, + "',' separated IDs of the backup to retrieve") + + if require { + cobra.CheckErr(cmd.MarkFlagRequired(BackupIDsFN)) + } +} + // AddBackupIDFlag adds the --backup flag. func AddBackupIDFlag(cmd *cobra.Command, require bool) { cmd.Flags().StringVar(&BackupIDFV, BackupFN, "", "ID of the backup to retrieve.") diff --git a/src/cli/restore/groups.go b/src/cli/restore/groups.go index 755d797e0..9e1f9cf5d 100644 --- a/src/cli/restore/groups.go +++ b/src/cli/restore/groups.go @@ -46,7 +46,7 @@ const ( corso restore groups --backup 1234abcd-12ab-cd34-56de-1234abcd --file 98765abcdef # Restore the file with ID 98765abcdef without its associated permissions -corso restore groups --backup 1234abcd-12ab-cd34-56de-1234abcd --file 98765abcdef --skip-permissions +corso restore groups --backup 1234abcd-12ab-cd34-56de-1234abcd --file 98765abcdef --no-permissions # Restore all files named "FY2021 Planning.xlsx" corso restore groups --backup 1234abcd-12ab-cd34-56de-1234abcd --file "FY2021 Planning.xlsx" From 8791c59c176b286e1b0c5e618c7c3b42693e1891 Mon Sep 17 00:00:00 2001 From: ashmrtn <3891298+ashmrtn@users.noreply.github.com> Date: Tue, 26 Sep 2023 09:57:42 -0700 Subject: [PATCH 02/14] Fix NPE in Exchange CLI restore E2E setup suite (#4374) #### Does this PR need a docs update or release note? 
- [ ] :white_check_mark: Yes, it's included - [ ] :clock1: Yes, but in a later PR - [x] :no_entry: No #### Type of change - [ ] :sunflower: Feature - [x] :bug: Bugfix - [ ] :world_map: Documentation - [x] :robot: Supportability/Tests - [ ] :computer: CI/Deployment - [ ] :broom: Tech Debt/Cleanup #### Test Plan - [x] :muscle: Manual - [ ] :zap: Unit test - [ ] :green_heart: E2E --- src/cli/restore/exchange_e2e_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/cli/restore/exchange_e2e_test.go b/src/cli/restore/exchange_e2e_test.go index 5e3d6181d..effbf14d8 100644 --- a/src/cli/restore/exchange_e2e_test.go +++ b/src/cli/restore/exchange_e2e_test.go @@ -86,7 +86,7 @@ func (suite *RestoreExchangeE2ESuite) SetupSuite() { ) // init the repo first - r, err := repository.New( + suite.repo, err = repository.New( ctx, suite.acct, suite.st, @@ -94,7 +94,7 @@ func (suite *RestoreExchangeE2ESuite) SetupSuite() { repository.NewRepoID) require.NoError(t, err, clues.ToCore(err)) - err = r.Initialize(ctx, ctrlRepo.Retention{}) + err = suite.repo.Initialize(ctx, ctrlRepo.Retention{}) require.NoError(t, err, clues.ToCore(err)) suite.backupOps = make(map[path.CategoryType]operations.BackupOperation) From 0474e5c95717eb5923edcc425645e6105249b387 Mon Sep 17 00:00:00 2001 From: Keepers Date: Tue, 26 Sep 2023 12:37:15 -0600 Subject: [PATCH 03/14] separate delta not supported from invalid (#4375) groups backup has a failing case where delta queries are not supported, but we aren't distinguishing that case from the invalid delta case as expected. This results in backup failures, instead of success with no data. --- #### Does this PR need a docs update or release note? 
- [x] :white_check_mark: Yes, it's included #### Type of change - [x] :bug: Bugfix #### Issue(s) * #3988 #### Test Plan - [x] :muscle: Manual - [x] :zap: Unit test - [x] :green_heart: E2E --- CHANGELOG.md | 3 ++ src/internal/m365/graph/errors.go | 27 ++++++++----- src/internal/m365/graph/errors_test.go | 54 ++++++++++++++++++++++--- src/pkg/services/m365/api/item_pager.go | 7 +++- 4 files changed, 76 insertions(+), 15 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7d5ad8ce4..0e8c12dfd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Increase Exchange backup performance by lazily fetching data only for items whose content changed. - Added `--backups` flag to delete multiple backups in `corso backup delete` command. +## Fixed +- Teams Channels that cannot support delta tokens (those without messages) fall back to non-delta enumeration and no longer fail a backup. + ## [v0.13.0] (beta) - 2023-09-18 ### Added diff --git a/src/internal/m365/graph/errors.go b/src/internal/m365/graph/errors.go index 91dc01bb0..f5c7824ab 100644 --- a/src/internal/m365/graph/errors.go +++ b/src/internal/m365/graph/errors.go @@ -95,6 +95,10 @@ var ( // https://learn.microsoft.com/en-us/graph/errors#code-property ErrInvalidDelta = clues.New("invalid delta token") + // Not all systems support delta queries. This must be handled separately + // from invalid delta token cases. + ErrDeltaNotSupported = clues.New("delta not supported") + // ErrItemAlreadyExistsConflict denotes that a post or put attempted to create // an item which already exists by some unique identifier. The identifier is // not always the id. 
For example, in onedrive, this error can be produced @@ -122,8 +126,8 @@ var ( ) func IsErrApplicationThrottled(err error) bool { - return hasErrorCode(err, applicationThrottled) || - errors.Is(err, ErrApplicationThrottled) + return errors.Is(err, ErrApplicationThrottled) || + hasErrorCode(err, applicationThrottled) } func IsErrAuthenticationError(err error) bool { @@ -151,9 +155,13 @@ func IsErrItemNotFound(err error) bool { } func IsErrInvalidDelta(err error) bool { - return hasErrorCode(err, syncStateNotFound, resyncRequired, syncStateInvalid) || - hasErrorMessage(err, parameterDeltaTokenNotSupported) || - errors.Is(err, ErrInvalidDelta) + return errors.Is(err, ErrInvalidDelta) || + hasErrorCode(err, syncStateNotFound, resyncRequired, syncStateInvalid) +} + +func IsErrDeltaNotSupported(err error) bool { + return errors.Is(err, ErrDeltaNotSupported) || + hasErrorMessage(err, parameterDeltaTokenNotSupported) } func IsErrQuotaExceeded(err error) bool { @@ -190,7 +198,8 @@ func IsErrCannotOpenFileAttachment(err error) bool { } func IsErrAccessDenied(err error) bool { - return hasErrorCode(err, ErrorAccessDenied) || clues.HasLabel(err, LabelStatus(http.StatusForbidden)) + return hasErrorCode(err, ErrorAccessDenied) || + clues.HasLabel(err, LabelStatus(http.StatusForbidden)) } func IsErrTimeout(err error) bool { @@ -218,8 +227,8 @@ func IsErrUnauthorized(err error) bool { } func IsErrItemAlreadyExistsConflict(err error) bool { - return hasErrorCode(err, nameAlreadyExists) || - errors.Is(err, ErrItemAlreadyExistsConflict) + return errors.Is(err, ErrItemAlreadyExistsConflict) || + hasErrorCode(err, nameAlreadyExists) } // LabelStatus transforms the provided statusCode into @@ -298,7 +307,7 @@ func hasErrorMessage(err error, msgs ...errorMessage) bool { cs[i] = string(c) } - return filters.Contains(cs).Compare(msg) + return filters.In(cs).Compare(msg) } // Wrap is a helper function that extracts ODataError metadata from diff --git a/src/internal/m365/graph/errors_test.go 
b/src/internal/m365/graph/errors_test.go index 606eb23a5..cf9f2f99d 100644 --- a/src/internal/m365/graph/errors_test.go +++ b/src/internal/m365/graph/errors_test.go @@ -247,11 +247,6 @@ func (suite *GraphErrorsUnitSuite) TestIsErrInvalidDelta() { err: odErr(string(syncStateInvalid)), expect: assert.True, }, - { - name: "deltatoken not supported oDataErrMsg", - err: odErrMsg("fnords", string(parameterDeltaTokenNotSupported)), - expect: assert.True, - }, // next two tests are to make sure the checks are case insensitive { name: "resync-required oDataErr camelcase", @@ -271,6 +266,55 @@ func (suite *GraphErrorsUnitSuite) TestIsErrInvalidDelta() { } } +func (suite *GraphErrorsUnitSuite) TestIsErrDeltaNotSupported() { + table := []struct { + name string + err error + expect assert.BoolAssertionFunc + }{ + { + name: "nil", + err: nil, + expect: assert.False, + }, + { + name: "non-matching", + err: assert.AnError, + expect: assert.False, + }, + { + name: "as", + err: ErrDeltaNotSupported, + expect: assert.True, + }, + { + name: "non-matching oDataErr", + err: odErr("fnords"), + expect: assert.False, + }, + { + name: "non-matching oDataErrMsg", + err: odErrMsg("fnords", "deltatoken not supported"), + expect: assert.False, + }, + { + name: "deltatoken not supported oDataErrMsg", + err: odErrMsg("fnords", string(parameterDeltaTokenNotSupported)), + expect: assert.True, + }, + { + name: "deltatoken not supported oDataErrMsg with punctuation", + err: odErrMsg("fnords", string(parameterDeltaTokenNotSupported)+"."), + expect: assert.True, + }, + } + for _, test := range table { + suite.Run(test.name, func() { + test.expect(suite.T(), IsErrDeltaNotSupported(test.err)) + }) + } +} + func (suite *GraphErrorsUnitSuite) TestIsErrQuotaExceeded() { table := []struct { name string diff --git a/src/pkg/services/m365/api/item_pager.go b/src/pkg/services/m365/api/item_pager.go index d8fed9116..3cd1deb53 100644 --- a/src/pkg/services/m365/api/item_pager.go +++ 
b/src/pkg/services/m365/api/item_pager.go @@ -138,6 +138,11 @@ func deltaEnumerateItems[T any]( // Loop through all pages returned by Graph API. for len(nextLink) > 0 { page, err := pager.GetPage(graph.ConsumeNTokens(ctx, graph.SingleGetOrDeltaLC)) + if graph.IsErrDeltaNotSupported(err) { + logger.Ctx(ctx).Infow("delta queries not supported") + return nil, DeltaUpdate{}, clues.Stack(graph.ErrDeltaNotSupported, err) + } + if graph.IsErrInvalidDelta(err) { logger.Ctx(ctx).Infow("invalid previous delta", "delta_link", prevDeltaLink) @@ -191,7 +196,7 @@ func getAddedAndRemovedItemIDs[T any]( ) (map[string]time.Time, bool, []string, DeltaUpdate, error) { if canMakeDeltaQueries { ts, du, err := deltaEnumerateItems[T](ctx, deltaPager, prevDeltaLink) - if err != nil && (!graph.IsErrInvalidDelta(err) || len(prevDeltaLink) == 0) { + if err != nil && !graph.IsErrInvalidDelta(err) && !graph.IsErrDeltaNotSupported(err) { return nil, false, nil, DeltaUpdate{}, graph.Stack(ctx, err) } From bc25869173df9052e358a055867539f32af8935c Mon Sep 17 00:00:00 2001 From: Abin Simon Date: Wed, 27 Sep 2023 01:15:44 +0530 Subject: [PATCH 04/14] Add sanity tests for Groups incrementals and restore (#4373) Need to add `CORSO_SECONDARY_M365_TEST_USER_ID` to the CI before we merge this. --- #### Does this PR need a docs update or release note? 
- [ ] :white_check_mark: Yes, it's included - [ ] :clock1: Yes, but in a later PR - [x] :no_entry: No #### Type of change - [ ] :sunflower: Feature - [ ] :bug: Bugfix - [ ] :world_map: Documentation - [x] :robot: Supportability/Tests - [ ] :computer: CI/Deployment - [ ] :broom: Tech Debt/Cleanup #### Issue(s) * # #### Test Plan - [ ] :muscle: Manual - [ ] :zap: Unit test - [x] :green_heart: E2E --- .github/workflows/sanity-test.yaml | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/.github/workflows/sanity-test.yaml b/.github/workflows/sanity-test.yaml index 096924dba..c24dc1141 100644 --- a/.github/workflows/sanity-test.yaml +++ b/.github/workflows/sanity-test.yaml @@ -364,10 +364,34 @@ jobs: service: groups kind: initial backup-args: '--group "${{ vars.CORSO_M365_TEST_TEAM_ID }}"' + restore-args: '--folder ${{ env.RESTORE_DEST_PFX }}${{ steps.new-data-creation-sharepoint.outputs.result }}' test-folder: '${{ env.RESTORE_DEST_PFX }}${{ steps.new-data-creation-groups.outputs.result }}' log-dir: ${{ env.CORSO_LOG_DIR }} + with-export: true - # TODO: incrementals + # generate some more enteries for incremental check + - name: Groups - Create new data (for incremental) + working-directory: ./src/cmd/factory + run: | + go run . 
sharepoint files \ + --site ${{ secrets.CORSO_M365_TEST_SITE_URL }} \ + --user ${{ env.TEST_USER }} \ + --secondaryuser ${{ env.CORSO_SECONDARY_M365_TEST_USER_ID }} \ + --tenant ${{ secrets.TENANT_ID }} \ + --destination ${{ env.RESTORE_DEST_PFX }}${{ steps.new-data-creation-groups.outputs.result }} \ + --count 4 + + - name: Groups - Incremental backup + id: groups-incremental + uses: ./.github/actions/backup-restore-test + with: + service: groups + kind: incremental + backup-args: '--site "${{ secrets.CORSO_M365_TEST_SITE_URL }}"' + restore-args: '--folder ${{ env.RESTORE_DEST_PFX }}${{ steps.new-data-creation-groups.outputs.result }}' + test-folder: '${{ env.RESTORE_DEST_PFX }}${{ steps.new-data-creation-groups.outputs.result }}' + log-dir: ${{ env.CORSO_LOG_DIR }} + with-export: true ########################################################################################################################################## From f37e58ee5c17b8ed9285f2295f3356a0ede3007d Mon Sep 17 00:00:00 2001 From: Keepers Date: Tue, 26 Sep 2023 16:01:00 -0600 Subject: [PATCH 05/14] graph debug logging improvements (#4368) #### Does this PR need a docs update or release note? - [x] :no_entry: No #### Type of change - [x] :robot: Supportability/Tests - [x] :broom: Tech Debt/Cleanup --- src/internal/m365/graph/http_wrapper.go | 10 ++-- src/internal/m365/graph/logging.go | 72 +++++++++++++++++++++++++ src/internal/m365/graph/middleware.go | 52 +----------------- src/internal/m365/graph/service.go | 21 ++++---- 4 files changed, 88 insertions(+), 67 deletions(-) create mode 100644 src/internal/m365/graph/logging.go diff --git a/src/internal/m365/graph/http_wrapper.go b/src/internal/m365/graph/http_wrapper.go index fdffb583f..0948b9b9e 100644 --- a/src/internal/m365/graph/http_wrapper.go +++ b/src/internal/m365/graph/http_wrapper.go @@ -105,7 +105,7 @@ func (hw httpWrapper) Request( // retry in the event of a `stream error`, which is not // a common expectation. 
for i := 0; i < hw.config.maxConnectionRetries+1; i++ { - ictx := clues.Add(ctx, "request_retry_iter", i) + ctx = clues.Add(ctx, "request_retry_iter", i) resp, err = hw.client.Do(req) @@ -114,15 +114,15 @@ func (hw httpWrapper) Request( } if IsErrApplicationThrottled(err) { - return nil, Stack(ictx, clues.Stack(ErrApplicationThrottled, err)) + return nil, Stack(ctx, clues.Stack(ErrApplicationThrottled, err)) } var http2StreamErr http2.StreamError if !errors.As(err, &http2StreamErr) { - return nil, Stack(ictx, err) + return nil, Stack(ctx, err) } - logger.Ctx(ictx).Debug("http2 stream error") + logger.Ctx(ctx).Debug("http2 stream error") events.Inc(events.APICall, "streamerror") time.Sleep(3 * time.Second) @@ -132,6 +132,8 @@ func (hw httpWrapper) Request( return nil, Stack(ctx, err) } + logResp(ctx, resp) + return resp, nil } diff --git a/src/internal/m365/graph/logging.go b/src/internal/m365/graph/logging.go new file mode 100644 index 000000000..4f96ae926 --- /dev/null +++ b/src/internal/m365/graph/logging.go @@ -0,0 +1,72 @@ +package graph + +import ( + "context" + "net/http" + "net/http/httputil" + "os" + + "github.com/alcionai/corso/src/pkg/logger" +) + +const ( + // 1 MB + logMBLimit = 1 * 1024 * 1024 + logGraphRequestsEnvKey = "LOG_GRAPH_REQUESTS" + log2xxGraphRequestsEnvKey = "LOG_2XX_GRAPH_REQUESTS" + log2xxGraphResponseEnvKey = "LOG_2XX_GRAPH_RESPONSES" +) + +// special cases where we always dump the response body, since the response +// details might be critical to understanding the response when debugging. 
+func shouldLogRespBody(resp *http.Response) bool { + return logger.DebugAPIFV || + os.Getenv(logGraphRequestsEnvKey) != "" || + resp.StatusCode == http.StatusBadRequest || + resp.StatusCode == http.StatusForbidden || + resp.StatusCode == http.StatusConflict +} + +func logResp(ctx context.Context, resp *http.Response) { + var ( + log = logger.Ctx(ctx) + respClass = resp.StatusCode / 100 + + // special cases where we always dump the response body, since the response + // details might be critical to understanding the response when debugging. + logBody = shouldLogRespBody(resp) + ) + + // special case: always info-level status 429 logs + if resp.StatusCode == http.StatusTooManyRequests { + log.With("response", getRespDump(ctx, resp, logBody)). + Info("graph api throttling") + return + } + + // Log api calls according to api debugging configurations. + switch respClass { + case 2: + if logBody { + // only dump the body if it's under a size limit. We don't want to copy gigs into memory for a log. + dump := getRespDump(ctx, resp, os.Getenv(log2xxGraphResponseEnvKey) != "" && resp.ContentLength < logMBLimit) + log.Infow("2xx graph api resp", "response", dump) + } + case 3: + log.With("redirect_location", LoggableURL(resp.Header.Get(locationHeader))). + With("response", getRespDump(ctx, resp, false)). + Info("graph api redirect: " + resp.Status) + default: + log.With("response", getRespDump(ctx, resp, logBody)). 
+ Error("graph api error: " + resp.Status) + } +} + +func getRespDump(ctx context.Context, resp *http.Response, getBody bool) string { + respDump, err := httputil.DumpResponse(resp, getBody) + if err != nil { + logger.CtxErr(ctx, err).Error("dumping http response") + } + + return string(respDump) +} diff --git a/src/internal/m365/graph/middleware.go b/src/internal/m365/graph/middleware.go index 63789ab23..d5935a5b0 100644 --- a/src/internal/m365/graph/middleware.go +++ b/src/internal/m365/graph/middleware.go @@ -4,8 +4,6 @@ import ( "context" "io" "net/http" - "net/http/httputil" - "os" "strconv" "strings" "time" @@ -115,9 +113,6 @@ func LoggableURL(url string) pii.SafeURL { } } -// 1 MB -const logMBLimit = 1 * 1048576 - func (mw *LoggingMiddleware) Intercept( pipeline khttp.Pipeline, middlewareIndex int, @@ -138,56 +133,11 @@ func (mw *LoggingMiddleware) Intercept( "resp_status_code", resp.StatusCode, "resp_content_len", resp.ContentLength) - var ( - log = logger.Ctx(ctx) - respClass = resp.StatusCode / 100 - - // special cases where we always dump the response body, since the response - // details might be critical to understanding the response when debugging. - logBody = logger.DebugAPIFV || - os.Getenv(logGraphRequestsEnvKey) != "" || - resp.StatusCode == http.StatusBadRequest || - resp.StatusCode == http.StatusForbidden || - resp.StatusCode == http.StatusConflict - ) - - // special case: always info-level status 429 logs - if resp.StatusCode == http.StatusTooManyRequests { - log.With("response", getRespDump(ctx, resp, logBody)). - Info("graph api throttling") - - return resp, err - } - - // Log api calls according to api debugging configurations. - switch respClass { - case 2: - if logBody { - // only dump the body if it's under a size limit. We don't want to copy gigs into memory for a log. 
- dump := getRespDump(ctx, resp, os.Getenv(log2xxGraphResponseEnvKey) != "" && resp.ContentLength < logMBLimit) - log.Infow("2xx graph api resp", "response", dump) - } - case 3: - log.With("redirect_location", LoggableURL(resp.Header.Get(locationHeader))). - With("response", getRespDump(ctx, resp, false)). - Info("graph api redirect: " + resp.Status) - default: - log.With("response", getRespDump(ctx, resp, logBody)). - Error("graph api error: " + resp.Status) - } + logResp(ctx, resp) return resp, err } -func getRespDump(ctx context.Context, resp *http.Response, getBody bool) string { - respDump, err := httputil.DumpResponse(resp, getBody) - if err != nil { - logger.CtxErr(ctx, err).Error("dumping http response") - } - - return string(respDump) -} - // --------------------------------------------------------------------------- // Retry & Backoff // --------------------------------------------------------------------------- diff --git a/src/internal/m365/graph/service.go b/src/internal/m365/graph/service.go index eedda99df..0b54b5589 100644 --- a/src/internal/m365/graph/service.go +++ b/src/internal/m365/graph/service.go @@ -23,18 +23,15 @@ import ( ) const ( - logGraphRequestsEnvKey = "LOG_GRAPH_REQUESTS" - log2xxGraphRequestsEnvKey = "LOG_2XX_GRAPH_REQUESTS" - log2xxGraphResponseEnvKey = "LOG_2XX_GRAPH_RESPONSES" - defaultMaxRetries = 3 - defaultDelay = 3 * time.Second - locationHeader = "Location" - rateLimitHeader = "RateLimit-Limit" - rateRemainingHeader = "RateLimit-Remaining" - rateResetHeader = "RateLimit-Reset" - retryAfterHeader = "Retry-After" - retryAttemptHeader = "Retry-Attempt" - defaultHTTPClientTimeout = 1 * time.Hour + defaultMaxRetries = 3 + defaultDelay = 3 * time.Second + locationHeader = "Location" + rateLimitHeader = "RateLimit-Limit" + rateRemainingHeader = "RateLimit-Remaining" + rateResetHeader = "RateLimit-Reset" + retryAfterHeader = "Retry-After" + retryAttemptHeader = "Retry-Attempt" + defaultHTTPClientTimeout = 1 * time.Hour ) type 
QueryParams struct { From a8a2aee99ddbcca3bceb9c86e922bcc7008b40ce Mon Sep 17 00:00:00 2001 From: Keepers Date: Tue, 26 Sep 2023 16:32:15 -0600 Subject: [PATCH 06/14] remove expand drive from root site (#4377) missed the root site call usage of expand drive on the last fix. --- #### Does this PR need a docs update or release note? - [x] :no_entry: No #### Type of change - [x] :bug: Bugfix #### Issue(s) * #4337 #### Test Plan - [x] :muscle: Manual - [x] :zap: Unit test - [x] :green_heart: E2E --- src/internal/m365/service/sharepoint/enabled.go | 5 +++-- .../m365/service/sharepoint/enabled_test.go | 6 +++++- src/pkg/services/m365/api/sites.go | 13 +++++++++---- src/pkg/services/m365/api/sites_test.go | 2 +- 4 files changed, 18 insertions(+), 8 deletions(-) diff --git a/src/internal/m365/service/sharepoint/enabled.go b/src/internal/m365/service/sharepoint/enabled.go index d816cad7f..b5507af0f 100644 --- a/src/internal/m365/service/sharepoint/enabled.go +++ b/src/internal/m365/service/sharepoint/enabled.go @@ -7,10 +7,11 @@ import ( "github.com/microsoftgraph/msgraph-sdk-go/models" "github.com/alcionai/corso/src/internal/m365/graph" + "github.com/alcionai/corso/src/pkg/services/m365/api" ) type getSiteRooter interface { - GetRoot(ctx context.Context) (models.Siteable, error) + GetRoot(ctx context.Context, cc api.CallConfig) (models.Siteable, error) } func IsServiceEnabled( @@ -18,7 +19,7 @@ func IsServiceEnabled( gsr getSiteRooter, resource string, ) (bool, error) { - _, err := gsr.GetRoot(ctx) + _, err := gsr.GetRoot(ctx, api.CallConfig{}) if err != nil { if clues.HasLabel(err, graph.LabelsNoSharePointLicense) { return false, nil diff --git a/src/internal/m365/service/sharepoint/enabled_test.go b/src/internal/m365/service/sharepoint/enabled_test.go index af9fe01fb..49d59b909 100644 --- a/src/internal/m365/service/sharepoint/enabled_test.go +++ b/src/internal/m365/service/sharepoint/enabled_test.go @@ -12,6 +12,7 @@ import ( 
"github.com/alcionai/corso/src/internal/m365/graph" "github.com/alcionai/corso/src/internal/tester" + "github.com/alcionai/corso/src/pkg/services/m365/api" ) type EnabledUnitSuite struct { @@ -29,7 +30,10 @@ type mockGSR struct { err error } -func (m mockGSR) GetRoot(context.Context) (models.Siteable, error) { +func (m mockGSR) GetRoot( + context.Context, + api.CallConfig, +) (models.Siteable, error) { return m.response, m.err } diff --git a/src/pkg/services/m365/api/sites.go b/src/pkg/services/m365/api/sites.go index 2ad3751f1..0865a4f47 100644 --- a/src/pkg/services/m365/api/sites.go +++ b/src/pkg/services/m365/api/sites.go @@ -35,11 +35,16 @@ type Sites struct { // api calls // --------------------------------------------------------------------------- -func (c Sites) GetRoot(ctx context.Context) (models.Siteable, error) { +func (c Sites) GetRoot( + ctx context.Context, + cc CallConfig, +) (models.Siteable, error) { options := &sites.SiteItemRequestBuilderGetRequestConfiguration{ - QueryParameters: &sites.SiteItemRequestBuilderGetQueryParameters{ - Expand: []string{"drive"}, - }, + QueryParameters: &sites.SiteItemRequestBuilderGetQueryParameters{}, + } + + if len(cc.Expand) > 0 { + options.QueryParameters.Expand = cc.Expand } resp, err := c.Stable. 
diff --git a/src/pkg/services/m365/api/sites_test.go b/src/pkg/services/m365/api/sites_test.go index 64b0387f6..ada517c92 100644 --- a/src/pkg/services/m365/api/sites_test.go +++ b/src/pkg/services/m365/api/sites_test.go @@ -256,7 +256,7 @@ func (suite *SitesIntgSuite) TestGetRoot() { ctx, flush := tester.NewContext(t) defer flush() - result, err := suite.its.ac.Sites().GetRoot(ctx) + result, err := suite.its.ac.Sites().GetRoot(ctx, api.CallConfig{Expand: []string{"drive"}}) require.NoError(t, err) require.NotNil(t, result, "must find the root site") require.NotEmpty(t, ptr.Val(result.GetId()), "must have an id") From f7ab320117f87eb1371f769f998b96cb2d821e3b Mon Sep 17 00:00:00 2001 From: jules <130390278+juleslasarte@users.noreply.github.com> Date: Tue, 26 Sep 2023 16:22:14 -0700 Subject: [PATCH 07/14] fix getting owner of sites (#4376) Fixes an issue where Get Site by id is looking for the `group` details in the additional data of the site items. It should be looking in `site.drive.owner` instead. --- #### Does this PR need a docs update or release note? - [ ] :white_check_mark: Yes, it's included - [ ] :clock1: Yes, but in a later PR - [x] :no_entry: No #### Type of change - [ ] :sunflower: Feature - [x] :bug: Bugfix - [ ] :world_map: Documentation - [ ] :robot: Supportability/Tests - [ ] :computer: CI/Deployment - [ ] :broom: Tech Debt/Cleanup #### Issue(s) * # #### Test Plan - [x] :muscle: Manual - [x] :zap: Unit test - [x] :green_heart: E2E Added a new test and tested using the `10rqc2` test tenant. 
``` CORSO_CI_TESTS=true go test -run TestSiteIntegrationSuite/TestSites_GetByID -v === RUN TestSiteIntegrationSuite tester.go:44: TestSiteIntegrationSuite run at 2023-09-26T17:34:05.383766Z === RUN TestSiteIntegrationSuite/TestSites_GetByID tester.go:44: TestSiteIntegrationSuite/TestSites_GetByID run at 2023-09-26T17:34:05.3883Z === RUN TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,96772abb-cfbe-496d-a770-301650888b3a,7ff3cdd8-1bf4-4e5d-999d-830a276c8490 === RUN TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,8be0b20e-6eff-4ac8-8505-53c7030c9fb8,316e3a01-1d93-412b-9f31-8758a267bb4a === RUN TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,32c3d871-e17d-47de-abff-b5d7e3491a31,316e3a01-1d93-412b-9f31-8758a267bb4a === RUN TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,7a8d49ec-d275-4589-8fc1-a8088ed31332,cd8ae214-0f28-43e1-8ec4-ad3b9fb11809 === RUN TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,0bd5608f-159c-488c-ac51-cd80f747bc1d,cd8ae214-0f28-43e1-8ec4-ad3b9fb11809 === RUN TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,4aae01c8-1d30-4202-9962-9e7b64b6bc10,cd8ae214-0f28-43e1-8ec4-ad3b9fb11809 === RUN TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,d530b5ab-0dd9-4858-a07c-9d76a8f09b83,cd8ae214-0f28-43e1-8ec4-ad3b9fb11809 === RUN TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,c87270a2-f375-44ec-b582-09f1b7ddd842,cd8ae214-0f28-43e1-8ec4-ad3b9fb11809 === RUN TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,055d896c-88bc-45c3-aa4b-5af71f326bcc,cd8ae214-0f28-43e1-8ec4-ad3b9fb11809 === RUN TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,13a2ff8b-537f-428f-aaf2-f2e4b08e4bde,cd8ae214-0f28-43e1-8ec4-ad3b9fb11809 === RUN 
TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,1e16d84e-9e42-406e-b628-5d4b303309ed,41e5f79b-fe25-47c0-996b-d3e77571dbb1 === RUN TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,2f9743f8-4ca3-411f-b85d-32e6441eefd2,11810f05-e8bd-45e2-9728-92e434260750 === RUN TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,4d799bb6-222e-46ce-a626-51051eb2f311,11810f05-e8bd-45e2-9728-92e434260750 === RUN TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,b0d4432d-432b-4681-9cdb-2ab789dfe145,ce88f380-fbaa-48aa-a3a1-e04599594e95 === RUN TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,f10c5f9a-1b2b-4764-8685-d08e9883888c,ce88f380-fbaa-48aa-a3a1-e04599594e95 === RUN TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,8df7082a-62eb-4165-97f9-ead8cf04c0d8,11810f05-e8bd-45e2-9728-92e434260750 === RUN TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,053684d8-ca6c-4376-a03e-2567816bb091,9b3e9abe-6a5e-4084-8b44-ea5a356fe02c === RUN TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,ba5dbd17-6f68-4405-b1b6-e1160c0ba746,8e229f4f-e962-42ef-a087-dc54daa2e48c === RUN TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,e946c519-47f6-443f-9544-574ab9f3966f,8e229f4f-e962-42ef-a087-dc54daa2e48c === RUN TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,846d8363-c646-496b-a603-06a2a22e0196,9b3e9abe-6a5e-4084-8b44-ea5a356fe02c === RUN TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,bf300606-e52e-41d7-a860-e5927f57a46d,8e229f4f-e962-42ef-a087-dc54daa2e48c === RUN TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,bf83915f-7454-4f2a-b3c1-1932251c714f,bf2bed00-f5ec-4551-96c1-dc7069392eb8 === RUN TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,038e0095-4509-4f53-b045-0c86a7971b6a,3086932f-402c-494b-9aea-bf2bb1bf8303 === RUN 
TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,25ca6d57-b92b-4731-9b87-23050e080eac,cf53a987-f1bb-4cea-a27e-62ee282ecca7 === RUN TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,8b672056-0409-46f6-98bb-4a9f5626830f,cf53a987-f1bb-4cea-a27e-62ee282ecca7 === RUN TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,e1d12403-62e6-44d0-9798-15fd194858f7,69ca49fc-e607-438b-ba24-3bb40fc7baba === RUN TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,4892edf5-2ebf-46be-a6e5-a40b2cbf1c1a,38ab6d06-fc82-4417-af93-22d8733c22be === RUN TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,6884eaa6-108e-42ee-ac1e-8f8d344d5e53,42c29db5-bf88-46b7-af47-05a5ca15c1b3 === RUN TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,5cdb15ec-4440-4115-be94-d9221b9135b3,42c29db5-bf88-46b7-af47-05a5ca15c1b3 === RUN TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,4637d353-1c6d-4967-98a3-a75db4e1a925,42c29db5-bf88-46b7-af47-05a5ca15c1b3 === RUN TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,8b510176-8837-4a81-a26e-2020565adb92,276af97a-8a5e-4001-aa86-53958ac9548c === RUN TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,3d374d63-f5fa-4c6a-b5c3-119280bd5442,42c29db5-bf88-46b7-af47-05a5ca15c1b3 === RUN TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,8fa2edf7-1f13-47e5-834d-ccdb81879302,276af97a-8a5e-4001-aa86-53958ac9548c === RUN TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,7a8dccfa-aa6b-4a75-b7bd-eab3f85af9b0,859ccc3c-2ed3-4258-84d1-ec735b39adc1 === RUN TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,54bc7530-a81c-4d90-8cfb-a8625168187f,859ccc3c-2ed3-4258-84d1-ec735b39adc1 === RUN TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,2d6ba44f-0638-4950-8fee-537c854e9e28,859ccc3c-2ed3-4258-84d1-ec735b39adc1 === RUN 
TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,23283ae3-170b-4db6-b50f-895b12373524,859ccc3c-2ed3-4258-84d1-ec735b39adc1 === RUN TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,f238d00a-af70-4273-9017-6042a6d8a8e1,859ccc3c-2ed3-4258-84d1-ec735b39adc1 === RUN TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,ee9af367-a8ef-4f56-99ad-72f53d1cd64f,859ccc3c-2ed3-4258-84d1-ec735b39adc1 === RUN TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,291e91dd-f9f9-4e0f-8abc-269ade15e1be,a470fff3-c3a7-456a-98c3-ad51d166e222 === RUN TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,5edef5e8-6654-4fb7-870b-52365d9818f4,859ccc3c-2ed3-4258-84d1-ec735b39adc1 === RUN TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,203db13c-aa6b-4cb4-bb11-7b5ce9c2b5b6,7f2cd296-9f7c-44e1-af81-3c06d0d43007 === RUN TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,b8fff07f-350b-4f62-b35f-854b0920f180,7f2cd296-9f7c-44e1-af81-3c06d0d43007 === RUN TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,26b58876-c2c2-4bee-9fae-a2acbb0ebeb0,7f2cd296-9f7c-44e1-af81-3c06d0d43007 === RUN TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,b39c8064-43bf-4740-83cf-8d4814ee8591,7f2cd296-9f7c-44e1-af81-3c06d0d43007 === RUN TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,01af6484-492e-4bae-bd3b-ed7d14ed3369,7f2cd296-9f7c-44e1-af81-3c06d0d43007 === RUN TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,6e27eab3-0ccc-4ce1-80ae-b32912c9468d,7f2cd296-9f7c-44e1-af81-3c06d0d43007 === RUN TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,bb5d5b4a-e089-4e66-9868-9e263ecc635d,4fa3a2c0-fa81-4e6f-8e8b-1479a8927bc6 === RUN TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,009d6803-aa00-4db7-8233-e2cbd7d9f503,5408143c-ebd0-4130-82e6-767375686853 --- PASS: 
TestSiteIntegrationSuite (30.98s) --- PASS: TestSiteIntegrationSuite/TestSites_GetByID (30.97s) --- PASS: TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,96772abb-cfbe-496d-a770-301650888b3a,7ff3cdd8-1bf4-4e5d-999d-830a276c8490 (0.61s) --- PASS: TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,8be0b20e-6eff-4ac8-8505-53c7030c9fb8,316e3a01-1d93-412b-9f31-8758a267bb4a (0.64s) --- PASS: TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,32c3d871-e17d-47de-abff-b5d7e3491a31,316e3a01-1d93-412b-9f31-8758a267bb4a (0.81s) --- PASS: TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,7a8d49ec-d275-4589-8fc1-a8088ed31332,cd8ae214-0f28-43e1-8ec4-ad3b9fb11809 (0.59s) --- PASS: TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,0bd5608f-159c-488c-ac51-cd80f747bc1d,cd8ae214-0f28-43e1-8ec4-ad3b9fb11809 (0.61s) --- PASS: TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,4aae01c8-1d30-4202-9962-9e7b64b6bc10,cd8ae214-0f28-43e1-8ec4-ad3b9fb11809 (0.65s) --- PASS: TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,d530b5ab-0dd9-4858-a07c-9d76a8f09b83,cd8ae214-0f28-43e1-8ec4-ad3b9fb11809 (0.61s) --- PASS: TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,c87270a2-f375-44ec-b582-09f1b7ddd842,cd8ae214-0f28-43e1-8ec4-ad3b9fb11809 (0.69s) --- PASS: TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,055d896c-88bc-45c3-aa4b-5af71f326bcc,cd8ae214-0f28-43e1-8ec4-ad3b9fb11809 (0.71s) --- PASS: TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,13a2ff8b-537f-428f-aaf2-f2e4b08e4bde,cd8ae214-0f28-43e1-8ec4-ad3b9fb11809 (0.65s) --- PASS: TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,1e16d84e-9e42-406e-b628-5d4b303309ed,41e5f79b-fe25-47c0-996b-d3e77571dbb1 (0.57s) --- PASS: 
TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,2f9743f8-4ca3-411f-b85d-32e6441eefd2,11810f05-e8bd-45e2-9728-92e434260750 (0.62s) --- PASS: TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,4d799bb6-222e-46ce-a626-51051eb2f311,11810f05-e8bd-45e2-9728-92e434260750 (0.79s) --- PASS: TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,b0d4432d-432b-4681-9cdb-2ab789dfe145,ce88f380-fbaa-48aa-a3a1-e04599594e95 (0.52s) --- PASS: TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,f10c5f9a-1b2b-4764-8685-d08e9883888c,ce88f380-fbaa-48aa-a3a1-e04599594e95 (0.51s) --- PASS: TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,8df7082a-62eb-4165-97f9-ead8cf04c0d8,11810f05-e8bd-45e2-9728-92e434260750 (0.62s) --- PASS: TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,053684d8-ca6c-4376-a03e-2567816bb091,9b3e9abe-6a5e-4084-8b44-ea5a356fe02c (0.55s) --- PASS: TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,ba5dbd17-6f68-4405-b1b6-e1160c0ba746,8e229f4f-e962-42ef-a087-dc54daa2e48c (0.63s) --- PASS: TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,e946c519-47f6-443f-9544-574ab9f3966f,8e229f4f-e962-42ef-a087-dc54daa2e48c (0.66s) --- PASS: TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,846d8363-c646-496b-a603-06a2a22e0196,9b3e9abe-6a5e-4084-8b44-ea5a356fe02c (0.50s) --- PASS: TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,bf300606-e52e-41d7-a860-e5927f57a46d,8e229f4f-e962-42ef-a087-dc54daa2e48c (0.73s) --- PASS: TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,bf83915f-7454-4f2a-b3c1-1932251c714f,bf2bed00-f5ec-4551-96c1-dc7069392eb8 (0.56s) --- PASS: TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,038e0095-4509-4f53-b045-0c86a7971b6a,3086932f-402c-494b-9aea-bf2bb1bf8303 (0.60s) --- PASS: 
TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,25ca6d57-b92b-4731-9b87-23050e080eac,cf53a987-f1bb-4cea-a27e-62ee282ecca7 (0.54s) --- PASS: TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,8b672056-0409-46f6-98bb-4a9f5626830f,cf53a987-f1bb-4cea-a27e-62ee282ecca7 (0.52s) --- PASS: TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,e1d12403-62e6-44d0-9798-15fd194858f7,69ca49fc-e607-438b-ba24-3bb40fc7baba (0.51s) --- PASS: TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,4892edf5-2ebf-46be-a6e5-a40b2cbf1c1a,38ab6d06-fc82-4417-af93-22d8733c22be (0.49s) --- PASS: TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,6884eaa6-108e-42ee-ac1e-8f8d344d5e53,42c29db5-bf88-46b7-af47-05a5ca15c1b3 (0.53s) --- PASS: TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,5cdb15ec-4440-4115-be94-d9221b9135b3,42c29db5-bf88-46b7-af47-05a5ca15c1b3 (0.50s) --- PASS: TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,4637d353-1c6d-4967-98a3-a75db4e1a925,42c29db5-bf88-46b7-af47-05a5ca15c1b3 (0.55s) --- PASS: TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,8b510176-8837-4a81-a26e-2020565adb92,276af97a-8a5e-4001-aa86-53958ac9548c (0.53s) --- PASS: TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,3d374d63-f5fa-4c6a-b5c3-119280bd5442,42c29db5-bf88-46b7-af47-05a5ca15c1b3 (0.54s) --- PASS: TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,8fa2edf7-1f13-47e5-834d-ccdb81879302,276af97a-8a5e-4001-aa86-53958ac9548c (0.53s) --- PASS: TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,7a8dccfa-aa6b-4a75-b7bd-eab3f85af9b0,859ccc3c-2ed3-4258-84d1-ec735b39adc1 (0.61s) --- PASS: TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,54bc7530-a81c-4d90-8cfb-a8625168187f,859ccc3c-2ed3-4258-84d1-ec735b39adc1 (0.54s) --- PASS: 
TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,2d6ba44f-0638-4950-8fee-537c854e9e28,859ccc3c-2ed3-4258-84d1-ec735b39adc1 (0.67s) --- PASS: TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,23283ae3-170b-4db6-b50f-895b12373524,859ccc3c-2ed3-4258-84d1-ec735b39adc1 (0.80s) --- PASS: TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,f238d00a-af70-4273-9017-6042a6d8a8e1,859ccc3c-2ed3-4258-84d1-ec735b39adc1 (0.74s) --- PASS: TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,ee9af367-a8ef-4f56-99ad-72f53d1cd64f,859ccc3c-2ed3-4258-84d1-ec735b39adc1 (0.76s) --- PASS: TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,291e91dd-f9f9-4e0f-8abc-269ade15e1be,a470fff3-c3a7-456a-98c3-ad51d166e222 (0.61s) --- PASS: TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,5edef5e8-6654-4fb7-870b-52365d9818f4,859ccc3c-2ed3-4258-84d1-ec735b39adc1 (0.59s) --- PASS: TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,203db13c-aa6b-4cb4-bb11-7b5ce9c2b5b6,7f2cd296-9f7c-44e1-af81-3c06d0d43007 (0.63s) --- PASS: TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,b8fff07f-350b-4f62-b35f-854b0920f180,7f2cd296-9f7c-44e1-af81-3c06d0d43007 (0.62s) --- PASS: TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,26b58876-c2c2-4bee-9fae-a2acbb0ebeb0,7f2cd296-9f7c-44e1-af81-3c06d0d43007 (0.58s) --- PASS: TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,b39c8064-43bf-4740-83cf-8d4814ee8591,7f2cd296-9f7c-44e1-af81-3c06d0d43007 (0.67s) --- PASS: TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,01af6484-492e-4bae-bd3b-ed7d14ed3369,7f2cd296-9f7c-44e1-af81-3c06d0d43007 (0.69s) --- PASS: TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,6e27eab3-0ccc-4ce1-80ae-b32912c9468d,7f2cd296-9f7c-44e1-af81-3c06d0d43007 (0.63s) --- PASS: 
TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,bb5d5b4a-e089-4e66-9868-9e263ecc635d,4fa3a2c0-fa81-4e6f-8e8b-1479a8927bc6 (0.67s) --- PASS: TestSiteIntegrationSuite/TestSites_GetByID/site_10rqc2.sharepoint.com,009d6803-aa00-4db7-8233-e2cbd7d9f503,5408143c-ebd0-4130-82e6-767375686853 (0.57s) PASS ok github.com/alcionai/corso/src/pkg/services/m365 31.635s ``` --- src/pkg/services/m365/sites.go | 28 ++++++++++++++++------------ src/pkg/services/m365/sites_test.go | 28 ++++++++++++++++++++++++++++ 2 files changed, 44 insertions(+), 12 deletions(-) diff --git a/src/pkg/services/m365/sites.go b/src/pkg/services/m365/sites.go index 439f53616..a70553208 100644 --- a/src/pkg/services/m365/sites.go +++ b/src/pkg/services/m365/sites.go @@ -114,22 +114,26 @@ func ParseSite(item models.Siteable) *Site { if item.GetDrive() != nil && item.GetDrive().GetOwner() != nil && - item.GetDrive().GetOwner().GetUser() != nil { + item.GetDrive().GetOwner().GetUser() != nil && + // some users might come back with a nil ID + // most likely in the case of deleted users + item.GetDrive().GetOwner().GetUser().GetId() != nil { s.OwnerType = SiteOwnerUser s.OwnerID = ptr.Val(item.GetDrive().GetOwner().GetUser().GetId()) - } + } else if item.GetDrive() != nil && item.GetDrive().GetOwner() != nil { + ownerItem := item.GetDrive().GetOwner() + if _, ok := ownerItem.GetAdditionalData()["group"]; ok { + s.OwnerType = SiteOwnerGroup - if _, ok := item.GetAdditionalData()["group"]; ok { - s.OwnerType = SiteOwnerGroup + group, err := tform.AnyValueToT[map[string]any]("group", ownerItem.GetAdditionalData()) + if err != nil { + return s + } - group, err := tform.AnyValueToT[map[string]any]("group", item.GetAdditionalData()) - if err != nil { - return s - } - - s.OwnerID, err = str.AnyValueToString("id", group) - if err != nil { - return s + s.OwnerID, err = str.AnyValueToString("id", group) + if err != nil { + return s + } } } diff --git a/src/pkg/services/m365/sites_test.go 
b/src/pkg/services/m365/sites_test.go index de67a6de2..b23d84226 100644 --- a/src/pkg/services/m365/sites_test.go +++ b/src/pkg/services/m365/sites_test.go @@ -61,6 +61,34 @@ func (suite *siteIntegrationSuite) TestSites() { } } +func (suite *siteIntegrationSuite) TestSites_GetByID() { + t := suite.T() + + ctx, flush := tester.NewContext(t) + defer flush() + + acct := tconfig.NewM365Account(t) + + sites, err := Sites(ctx, acct, fault.New(true)) + assert.NoError(t, err, clues.ToCore(err)) + assert.NotEmpty(t, sites) + + for _, s := range sites { + suite.Run("site_"+s.ID, func() { + t := suite.T() + site, err := SiteByID(ctx, acct, s.ID) + assert.NoError(t, err, clues.ToCore(err)) + assert.NotEmpty(t, site.WebURL) + assert.NotEmpty(t, site.ID) + assert.NotEmpty(t, site.DisplayName) + if site.OwnerType != SiteOwnerUnknown { + assert.NotEmpty(t, site.OwnerID) + assert.NotEmpty(t, site.OwnerType) + } + }) + } +} + func (suite *siteIntegrationSuite) TestSites_InvalidCredentials() { table := []struct { name string From b7c02e6c7a701760d7267ef93e8b14e813d0ba28 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 27 Sep 2023 05:32:40 +0000 Subject: [PATCH 08/14] =?UTF-8?q?=E2=AC=86=EF=B8=8F=20Bump=20sass=20from?= =?UTF-8?q?=201.67.0=20to=201.68.0=20in=20/website=20(#4380)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [sass](https://github.com/sass/dart-sass) from 1.67.0 to 1.68.0.
Release notes

Sourced from sass's releases.

Dart Sass 1.68.0

To install Sass 1.68.0, download one of the packages below and add it to your PATH, or see the Sass website for full installation instructions.

Changes

  • Fix the source spans associated with the abs-percent deprecation.

JS API

  • Non-filesystem importers can now set the nonCanonicalScheme field, which declares that one or more URL schemes (without :) will never be used for URLs returned by the canonicalize() method.

  • Add a containingUrl field to the canonicalize() and findFileUrl() methods of importers, which is set to the canonical URL of the stylesheet that contains the current load. For filesystem importers, this is always set; for other importers, it's set only if the current load has no URL scheme, or if its URL scheme is declared as non-canonical by the importer.

Dart API

  • Add AsyncImporter.isNonCanonicalScheme, which importers (async or sync) can use to indicate that a certain URL scheme will never be used for URLs returned by the canonicalize() method.

  • Add AsyncImporter.containingUrl, which is set during calls to the canonicalize() method to the canonical URL of the stylesheet that contains the current load. This is set only if the current load has no URL scheme, or if its URL scheme is declared as non-canonical by the importer.

Embedded Sass

  • The CalculationValue.interpolation field is deprecated and will be removed in a future version. It will no longer be set by the compiler, and if the host sets it it will be treated as equivalent to CalculationValue.string except that "(" and ")" will be added to the beginning and end of the string values.

  • Properly include TypeScript types in the sass-embedded package.

See the full changelog for changes in earlier releases.

Changelog

Sourced from sass's changelog.

1.68.0

  • Fix the source spans associated with the abs-percent deprecation.

JS API

  • Non-filesystem importers can now set the nonCanonicalScheme field, which declares that one or more URL schemes (without :) will never be used for URLs returned by the canonicalize() method.

  • Add a containingUrl field to the canonicalize() and findFileUrl() methods of importers, which is set to the canonical URL of the stylesheet that contains the current load. For filesystem importers, this is always set; for other importers, it's set only if the current load has no URL scheme, or if its URL scheme is declared as non-canonical by the importer.

Dart API

  • Add AsyncImporter.isNonCanonicalScheme, which importers (async or sync) can use to indicate that a certain URL scheme will never be used for URLs returned by the canonicalize() method.

  • Add AsyncImporter.containingUrl, which is set during calls to the canonicalize() method to the canonical URL of the stylesheet that contains the current load. This is set only if the current load has no URL scheme, or if its URL scheme is declared as non-canonical by the importer.

Embedded Sass

  • The CalculationValue.interpolation field is deprecated and will be removed in a future version. It will no longer be set by the compiler, and if the host sets it it will be treated as equivalent to CalculationValue.string except that "(" and ")" will be added to the beginning and end of the string values.

  • Properly include TypeScript types in the sass-embedded package.

Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=sass&package-manager=npm_and_yarn&previous-version=1.67.0&new-version=1.68.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) You can trigger a rebase of this PR by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- website/package-lock.json | 14 +++++++------- website/package.json | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/website/package-lock.json b/website/package-lock.json index 704e4c27c..267aa055c 100644 --- a/website/package-lock.json +++ b/website/package-lock.json @@ -24,7 +24,7 @@ "prism-react-renderer": "^1.3.5", "react": "^17.0.2", "react-dom": "^17.0.2", - "sass": "^1.67.0", + "sass": "^1.68.0", "tiny-slider": "^2.9.4", "tw-elements": "^1.0.0-alpha13", "wow.js": "^1.2.2" @@ -12658,9 +12658,9 @@ "license": "MIT" }, "node_modules/sass": { - "version": "1.67.0", - "resolved": "https://registry.npmjs.org/sass/-/sass-1.67.0.tgz", - "integrity": "sha512-SVrO9ZeX/QQyEGtuZYCVxoeAL5vGlYjJ9p4i4HFuekWl8y/LtJ7tJc10Z+ck1c8xOuoBm2MYzcLfTAffD0pl/A==", + "version": "1.68.0", + "resolved": "https://registry.npmjs.org/sass/-/sass-1.68.0.tgz", + "integrity": "sha512-Lmj9lM/fef0nQswm1J2HJcEsBUba4wgNx2fea6yJHODREoMFnwRpZydBnX/RjyXw2REIwdkbqE4hrTo4qfDBUA==", "dependencies": { "chokidar": ">=3.0.0 <4.0.0", "immutable": "^4.0.0", @@ -23971,9 +23971,9 @@ "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==" }, "sass": { - "version": "1.67.0", - "resolved": "https://registry.npmjs.org/sass/-/sass-1.67.0.tgz", - "integrity": "sha512-SVrO9ZeX/QQyEGtuZYCVxoeAL5vGlYjJ9p4i4HFuekWl8y/LtJ7tJc10Z+ck1c8xOuoBm2MYzcLfTAffD0pl/A==", + "version": "1.68.0", + "resolved": "https://registry.npmjs.org/sass/-/sass-1.68.0.tgz", + "integrity": "sha512-Lmj9lM/fef0nQswm1J2HJcEsBUba4wgNx2fea6yJHODREoMFnwRpZydBnX/RjyXw2REIwdkbqE4hrTo4qfDBUA==", "requires": { "chokidar": ">=3.0.0 <4.0.0", "immutable": "^4.0.0", diff --git a/website/package.json b/website/package.json index f65d2dd2d..08ddd9305 100644 --- a/website/package.json +++ b/website/package.json @@ -30,7 +30,7 @@ "prism-react-renderer": "^1.3.5", "react": "^17.0.2", "react-dom": "^17.0.2", - "sass": "^1.67.0", + "sass": "^1.68.0", "tiny-slider": "^2.9.4", "tw-elements": 
"^1.0.0-alpha13", "wow.js": "^1.2.2" From 91c0709d0974bfd981265a9f579ce247628e0c99 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 27 Sep 2023 15:44:22 +0000 Subject: [PATCH 09/14] =?UTF-8?q?=E2=AC=86=EF=B8=8F=20Bump=20github.com/pu?= =?UTF-8?q?zpuzpuz/xsync/v2=20from=202.5.0=20to=202.5.1=20in=20/src=20(#43?= =?UTF-8?q?81)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [github.com/puzpuzpuz/xsync/v2](https://github.com/puzpuzpuz/xsync) from 2.5.0 to 2.5.1.
Release notes

Sourced from github.com/puzpuzpuz/xsync/v2's releases.

v2.5.1

  • Speed up built-in string hash function (#106)
Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/puzpuzpuz/xsync/v2&package-manager=go_modules&previous-version=2.5.0&new-version=2.5.1)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) You can trigger a rebase of this PR by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- src/go.mod | 2 +- src/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/go.mod b/src/go.mod index d20785bf6..96d163731 100644 --- a/src/go.mod +++ b/src/go.mod @@ -21,7 +21,7 @@ require ( github.com/microsoftgraph/msgraph-sdk-go v1.19.0 github.com/microsoftgraph/msgraph-sdk-go-core v1.0.0 github.com/pkg/errors v0.9.1 - github.com/puzpuzpuz/xsync/v2 v2.5.0 + github.com/puzpuzpuz/xsync/v2 v2.5.1 github.com/rudderlabs/analytics-go v3.3.3+incompatible github.com/spatialcurrent/go-lazy v0.0.0-20211115014721-47315cc003d1 github.com/spf13/cast v1.5.1 diff --git a/src/go.sum b/src/go.sum index cda31894a..054c65071 100644 --- a/src/go.sum +++ b/src/go.sum @@ -360,8 +360,8 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= -github.com/puzpuzpuz/xsync/v2 v2.5.0 h1:2k4qrO/orvmEXZ3hmtHqIy9XaQtPTwzMZk1+iErpE8c= -github.com/puzpuzpuz/xsync/v2 v2.5.0/go.mod h1:gD2H2krq/w52MfPLE+Uy64TzJDVY7lP2znR9qmR35kU= +github.com/puzpuzpuz/xsync/v2 v2.5.1 h1:mVGYAvzDSu52+zaGyNjC+24Xw2bQi3kTr4QJ6N9pIIU= +github.com/puzpuzpuz/xsync/v2 v2.5.1/go.mod h1:gD2H2krq/w52MfPLE+Uy64TzJDVY7lP2znR9qmR35kU= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= From 38ef3b6ef62fc27e9389e7a46eaca3f1f16d3d89 Mon Sep 17 00:00:00 2001 From: neha_gupta Date: Wed, 27 Sep 2023 22:07:15 +0530 Subject: [PATCH 10/14] call repo connect in connect S3 cmd (#4383) Call repo.connect() in S3 connect command. 
Was this missed as part of - https://github.com/alcionai/corso/pull/4343/files or am I misunderstanding something. #### Does this PR need a docs update or release note? - [ ] :white_check_mark: Yes, it's included #### Type of change - [ ] :bug: Bugfix #### Issue(s) * # #### Test Plan - [ ] :muscle: Manual --- src/cli/repo/s3.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/cli/repo/s3.go b/src/cli/repo/s3.go index 343dddf03..a450def04 100644 --- a/src/cli/repo/s3.go +++ b/src/cli/repo/s3.go @@ -222,6 +222,10 @@ func connectS3Cmd(cmd *cobra.Command, args []string) error { opts, repoID) if err != nil { + return Only(ctx, clues.Wrap(err, "Failed to create a repository controller")) + } + + if err := r.Connect(ctx); err != nil { return Only(ctx, clues.Wrap(err, "Failed to connect to the S3 repository")) } From 3c96eb64372026a907e6ac4ff10165e4edac9dd5 Mon Sep 17 00:00:00 2001 From: Keepers Date: Wed, 27 Sep 2023 11:17:31 -0600 Subject: [PATCH 11/14] quick cleanup before next step (#4347) adds a container-of-things to reduce mostly-unused return value bloat, and updates some func names to be more appropriate to their behavior. No logical changes, just renaming/movement. --- #### Does this PR need a docs update or release note? 
- [x] :no_entry: No #### Type of change - [x] :broom: Tech Debt/Cleanup --- src/cli/backup/backup.go | 10 ++---- src/cli/backup/exchange.go | 12 ++++--- src/cli/backup/groups.go | 12 ++++--- src/cli/backup/onedrive.go | 12 ++++--- src/cli/backup/sharepoint.go | 12 ++++--- src/cli/export/export.go | 5 +-- src/cli/restore/restore.go | 5 +-- src/cli/utils/utils.go | 49 ++++++++++++++++------------- src/cmd/longevity_test/longevity.go | 6 +--- 9 files changed, 61 insertions(+), 62 deletions(-) diff --git a/src/cli/backup/backup.go b/src/cli/backup/backup.go index 51f712e53..71cb4595c 100644 --- a/src/cli/backup/backup.go +++ b/src/cli/backup/backup.go @@ -265,10 +265,7 @@ func genericDeleteCommand( ctx := clues.Add(cmd.Context(), "delete_backup_id", bID) - r, _, _, _, err := utils.GetAccountAndConnectWithOverrides( - ctx, - cmd, - pst) + r, _, err := utils.GetAccountAndConnect(ctx, cmd, pst) if err != nil { return Only(ctx, err) } @@ -298,10 +295,7 @@ func genericListCommand( return nil } - r, _, _, _, err := utils.GetAccountAndConnectWithOverrides( - ctx, - cmd, - service) + r, _, err := utils.GetAccountAndConnect(ctx, cmd, service) if err != nil { return Only(ctx, err) } diff --git a/src/cli/backup/exchange.go b/src/cli/backup/exchange.go index 85b474bbc..d4f0d9534 100644 --- a/src/cli/backup/exchange.go +++ b/src/cli/backup/exchange.go @@ -275,17 +275,19 @@ func detailsExchangeCmd(cmd *cobra.Command, args []string) error { ctx := cmd.Context() opts := utils.MakeExchangeOpts(cmd) - r, _, _, ctrlOpts, err := utils.GetAccountAndConnectWithOverrides( - ctx, - cmd, - path.ExchangeService) + r, rdao, err := utils.GetAccountAndConnect(ctx, cmd, path.ExchangeService) if err != nil { return Only(ctx, err) } defer utils.CloseRepo(ctx, r) - ds, err := runDetailsExchangeCmd(ctx, r, flags.BackupIDFV, opts, ctrlOpts.SkipReduce) + ds, err := runDetailsExchangeCmd( + ctx, + r, + flags.BackupIDFV, + opts, + rdao.Opts.SkipReduce) if err != nil { return Only(ctx, err) } diff --git 
a/src/cli/backup/groups.go b/src/cli/backup/groups.go index 84db6ebde..c8be220f3 100644 --- a/src/cli/backup/groups.go +++ b/src/cli/backup/groups.go @@ -228,17 +228,19 @@ func detailsGroupsCmd(cmd *cobra.Command, args []string) error { ctx := cmd.Context() opts := utils.MakeGroupsOpts(cmd) - r, _, _, ctrlOpts, err := utils.GetAccountAndConnectWithOverrides( - ctx, - cmd, - path.GroupsService) + r, rdao, err := utils.GetAccountAndConnect(ctx, cmd, path.GroupsService) if err != nil { return Only(ctx, err) } defer utils.CloseRepo(ctx, r) - ds, err := runDetailsGroupsCmd(ctx, r, flags.BackupIDFV, opts, ctrlOpts.SkipReduce) + ds, err := runDetailsGroupsCmd( + ctx, + r, + flags.BackupIDFV, + opts, + rdao.Opts.SkipReduce) if err != nil { return Only(ctx, err) } diff --git a/src/cli/backup/onedrive.go b/src/cli/backup/onedrive.go index 0eeb13308..fa8170f64 100644 --- a/src/cli/backup/onedrive.go +++ b/src/cli/backup/onedrive.go @@ -232,17 +232,19 @@ func detailsOneDriveCmd(cmd *cobra.Command, args []string) error { ctx := cmd.Context() opts := utils.MakeOneDriveOpts(cmd) - r, _, _, ctrlOpts, err := utils.GetAccountAndConnectWithOverrides( - ctx, - cmd, - path.OneDriveService) + r, rdao, err := utils.GetAccountAndConnect(ctx, cmd, path.OneDriveService) if err != nil { return Only(ctx, err) } defer utils.CloseRepo(ctx, r) - ds, err := runDetailsOneDriveCmd(ctx, r, flags.BackupIDFV, opts, ctrlOpts.SkipReduce) + ds, err := runDetailsOneDriveCmd( + ctx, + r, + flags.BackupIDFV, + opts, + rdao.Opts.SkipReduce) if err != nil { return Only(ctx, err) } diff --git a/src/cli/backup/sharepoint.go b/src/cli/backup/sharepoint.go index 71a5ca524..507a4a6d2 100644 --- a/src/cli/backup/sharepoint.go +++ b/src/cli/backup/sharepoint.go @@ -327,17 +327,19 @@ func detailsSharePointCmd(cmd *cobra.Command, args []string) error { ctx := cmd.Context() opts := utils.MakeSharePointOpts(cmd) - r, _, _, ctrlOpts, err := utils.GetAccountAndConnectWithOverrides( - ctx, - cmd, - path.SharePointService) 
+ r, rdao, err := utils.GetAccountAndConnect(ctx, cmd, path.SharePointService) if err != nil { return Only(ctx, err) } defer utils.CloseRepo(ctx, r) - ds, err := runDetailsSharePointCmd(ctx, r, flags.BackupIDFV, opts, ctrlOpts.SkipReduce) + ds, err := runDetailsSharePointCmd( + ctx, + r, + flags.BackupIDFV, + opts, + rdao.Opts.SkipReduce) if err != nil { return Only(ctx, err) } diff --git a/src/cli/export/export.go b/src/cli/export/export.go index f106f87bc..db48f466a 100644 --- a/src/cli/export/export.go +++ b/src/cli/export/export.go @@ -67,10 +67,7 @@ func runExport( return Only(ctx, err) } - r, _, _, _, err := utils.GetAccountAndConnectWithOverrides( - ctx, - cmd, - sel.PathService()) + r, _, err := utils.GetAccountAndConnect(ctx, cmd, sel.PathService()) if err != nil { return Only(ctx, err) } diff --git a/src/cli/restore/restore.go b/src/cli/restore/restore.go index f9da46416..9dad4ca1c 100644 --- a/src/cli/restore/restore.go +++ b/src/cli/restore/restore.go @@ -100,10 +100,7 @@ func runRestore( return Only(ctx, err) } - r, _, _, _, err := utils.GetAccountAndConnectWithOverrides( - ctx, - cmd, - sel.PathService()) + r, _, err := utils.GetAccountAndConnect(ctx, cmd, sel.PathService()) if err != nil { return Only(ctx, err) } diff --git a/src/cli/utils/utils.go b/src/cli/utils/utils.go index e573bdb0a..2a4e3de34 100644 --- a/src/cli/utils/utils.go +++ b/src/cli/utils/utils.go @@ -22,30 +22,35 @@ import ( "github.com/alcionai/corso/src/pkg/storage" ) +type RepoDetailsAndOpts struct { + Repo config.RepoDetails + Opts control.Options +} + var ErrNotYetImplemented = clues.New("not yet implemented") -// GetAccountAndConnectWithOverrides is a wrapper for GetAccountAndConnect -// that also gets the storage provider and any storage provider specific +// GetAccountAndConnect is a wrapper for GetAccountAndConnectWithOverrides +// that automatically gets the storage provider and any storage provider specific // flag overrides from the command line. 
-func GetAccountAndConnectWithOverrides( +func GetAccountAndConnect( ctx context.Context, cmd *cobra.Command, pst path.ServiceType, -) (repository.Repositoryer, *storage.Storage, *account.Account, *control.Options, error) { +) (repository.Repositoryer, RepoDetailsAndOpts, error) { provider, overrides, err := GetStorageProviderAndOverrides(ctx, cmd) if err != nil { - return nil, nil, nil, nil, clues.Stack(err) + return nil, RepoDetailsAndOpts{}, clues.Stack(err) } - return GetAccountAndConnect(ctx, pst, provider, overrides) + return GetAccountAndConnectWithOverrides(ctx, pst, provider, overrides) } -func GetAccountAndConnect( +func GetAccountAndConnectWithOverrides( ctx context.Context, pst path.ServiceType, provider storage.ProviderType, overrides map[string]string, -) (repository.Repositoryer, *storage.Storage, *account.Account, *control.Options, error) { +) (repository.Repositoryer, RepoDetailsAndOpts, error) { cfg, err := config.GetConfigRepoDetails( ctx, provider, @@ -53,7 +58,7 @@ func GetAccountAndConnect( true, overrides) if err != nil { - return nil, nil, nil, nil, err + return nil, RepoDetailsAndOpts{}, err } repoID := cfg.RepoID @@ -70,20 +75,25 @@ func GetAccountAndConnect( opts, repoID) if err != nil { - return nil, nil, nil, nil, clues.Wrap(err, "creating a repository controller") + return nil, RepoDetailsAndOpts{}, clues.Wrap(err, "creating a repository controller") } if err := r.Connect(ctx); err != nil { - return nil, nil, nil, nil, clues.Wrap(err, "connecting to the "+cfg.Storage.Provider.String()+" repository") + return nil, RepoDetailsAndOpts{}, clues.Wrap(err, "connecting to the "+cfg.Storage.Provider.String()+" repository") } // this initializes our graph api client configurations, // including control options such as concurency limitations. 
if _, err := r.ConnectToM365(ctx, pst); err != nil { - return nil, nil, nil, nil, clues.Wrap(err, "connecting to m365") + return nil, RepoDetailsAndOpts{}, clues.Wrap(err, "connecting to m365") } - return r, &cfg.Storage, &cfg.Account, &opts, nil + rdao := RepoDetailsAndOpts{ + Repo: cfg, + Opts: opts, + } + + return r, rdao, nil } func AccountConnectAndWriteRepoConfig( @@ -91,22 +101,19 @@ func AccountConnectAndWriteRepoConfig( cmd *cobra.Command, pst path.ServiceType, ) (repository.Repositoryer, *account.Account, error) { - r, stg, acc, opts, err := GetAccountAndConnectWithOverrides( - ctx, - cmd, - pst) + r, rdao, err := GetAccountAndConnect(ctx, cmd, pst) if err != nil { logger.CtxErr(ctx, err).Info("getting and connecting account") return nil, nil, err } - sc, err := stg.StorageConfig() + sc, err := rdao.Repo.Storage.StorageConfig() if err != nil { logger.CtxErr(ctx, err).Info("getting storage configuration") return nil, nil, err } - m365Config, err := acc.M365Config() + m365Config, err := rdao.Repo.Account.M365Config() if err != nil { logger.CtxErr(ctx, err).Info("getting m365 configuration") return nil, nil, err @@ -114,13 +121,13 @@ func AccountConnectAndWriteRepoConfig( // repo config gets set during repo connect and init. // This call confirms we have the correct values. - err = config.WriteRepoConfig(ctx, sc, m365Config, opts.Repo, r.GetID()) + err = config.WriteRepoConfig(ctx, sc, m365Config, rdao.Opts.Repo, r.GetID()) if err != nil { logger.CtxErr(ctx, err).Info("writing to repository configuration") return nil, nil, err } - return r, acc, nil + return r, &rdao.Repo.Account, nil } // CloseRepo handles closing a repo. 
diff --git a/src/cmd/longevity_test/longevity.go b/src/cmd/longevity_test/longevity.go index 0b47ea518..ec7862191 100644 --- a/src/cmd/longevity_test/longevity.go +++ b/src/cmd/longevity_test/longevity.go @@ -31,11 +31,7 @@ func deleteBackups( ) ([]string, error) { ctx = clues.Add(ctx, "cutoff_days", deletionDays) - r, _, _, _, err := utils.GetAccountAndConnect( - ctx, - service, - storage.ProviderS3, - nil) + r, _, err := utils.GetAccountAndConnectWithOverrides(ctx, service, storage.ProviderS3, nil) if err != nil { return nil, clues.Wrap(err, "connecting to account").WithClues(ctx) } From db9596d2236977f4e74ebf46e95ef8ef52ec5914 Mon Sep 17 00:00:00 2001 From: ashmrtn <3891298+ashmrtn@users.noreply.github.com> Date: Wed, 27 Sep 2023 16:07:59 -0700 Subject: [PATCH 12/14] Use non-zero time as fallback (#4390) Use a non-zero time as the fallback for the mod time of an item during enumeration. This should avoid an issue where the mod time in details differs from the mod time in kopia due to a serialization issue. --- #### Does this PR need a docs update or release note? 
- [ ] :white_check_mark: Yes, it's included - [ ] :clock1: Yes, but in a later PR - [x] :no_entry: No #### Type of change - [ ] :sunflower: Feature - [x] :bug: Bugfix - [ ] :world_map: Documentation - [ ] :robot: Supportability/Tests - [ ] :computer: CI/Deployment - [ ] :broom: Tech Debt/Cleanup #### Test Plan - [ ] :muscle: Manual - [ ] :zap: Unit test - [ ] :green_heart: E2E --- src/pkg/services/m365/api/item_pager.go | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/src/pkg/services/m365/api/item_pager.go b/src/pkg/services/m365/api/item_pager.go index 3cd1deb53..5effcb7a6 100644 --- a/src/pkg/services/m365/api/item_pager.go +++ b/src/pkg/services/m365/api/item_pager.go @@ -289,7 +289,15 @@ func addedAndRemovedByDeletedDateTime[T any]( var modTime time.Time if mt, ok := giaddt.(getModTimer); ok { - modTime = ptr.Val(mt.GetLastModifiedDateTime()) + // Make sure to get a non-zero mod time if the item doesn't have one for + // some reason. Otherwise we can hit an issue where kopia has a + // different mod time for the file than the details does. This occurs + // due to a conversion kopia does on the time from + // time.Time -> nanoseconds for serialization. During incremental + // backups, kopia goes from nanoseconds -> time.Time but there's an + // overflow which yields a different timestamp. + // https://github.com/gohugoio/hugo/issues/6161#issuecomment-725915786 + modTime = ptr.OrNow(mt.GetLastModifiedDateTime()) } added[ptr.Val(giaddt.GetId())] = modTime From 7964364e619ceec7c72aa79be67049a33046ef02 Mon Sep 17 00:00:00 2001 From: Abin Simon Date: Thu, 28 Sep 2023 11:55:14 +0530 Subject: [PATCH 13/14] Fix nightly tests for exchange, onedrive and groups (#4384) We were trying to delete a single backupOp multiple times across different tests. Added another backupOp to for use in the new test. --- #### Does this PR need a docs update or release note? 
- [ ] :white_check_mark: Yes, it's included - [ ] :clock1: Yes, but in a later PR - [x] :no_entry: No #### Type of change - [ ] :sunflower: Feature - [ ] :bug: Bugfix - [ ] :world_map: Documentation - [x] :robot: Supportability/Tests - [ ] :computer: CI/Deployment - [ ] :broom: Tech Debt/Cleanup #### Issue(s) * # #### Test Plan - [ ] :muscle: Manual - [ ] :zap: Unit test - [x] :green_heart: E2E --- src/cli/backup/exchange_e2e_test.go | 37 +++++++++++-------------- src/cli/backup/groups_e2e_test.go | 36 ++++++++++--------------- src/cli/backup/onedrive_e2e_test.go | 42 ++++++++++++----------------- 3 files changed, 46 insertions(+), 69 deletions(-) diff --git a/src/cli/backup/exchange_e2e_test.go b/src/cli/backup/exchange_e2e_test.go index 9965652d6..2175a50e6 100644 --- a/src/cli/backup/exchange_e2e_test.go +++ b/src/cli/backup/exchange_e2e_test.go @@ -561,9 +561,8 @@ func runExchangeDetailsCmdTest(suite *PreparedBackupExchangeE2ESuite, category p type BackupDeleteExchangeE2ESuite struct { tester.Suite - dpnd dependencies - backupOp operations.BackupOperation - secondaryBackupOp operations.BackupOperation + dpnd dependencies + backupOps [3]operations.BackupOperation } func TestBackupDeleteExchangeE2ESuite(t *testing.T) { @@ -589,21 +588,15 @@ func (suite *BackupDeleteExchangeE2ESuite) SetupSuite() { sel := selectors.NewExchangeBackup(users) sel.Include(sel.MailFolders([]string{api.MailInbox}, selectors.PrefixMatch())) - backupOp, err := suite.dpnd.repo.NewBackup(ctx, sel.Selector) - require.NoError(t, err, clues.ToCore(err)) + for i := 0; i < cap(suite.backupOps); i++ { + backupOp, err := suite.dpnd.repo.NewBackup(ctx, sel.Selector) + require.NoError(t, err, clues.ToCore(err)) - suite.backupOp = backupOp + suite.backupOps[i] = backupOp - err = suite.backupOp.Run(ctx) - require.NoError(t, err, clues.ToCore(err)) - - backupOp2, err := suite.dpnd.repo.NewBackup(ctx, sel.Selector) - require.NoError(t, err, clues.ToCore(err)) - - suite.secondaryBackupOp = backupOp2 - - 
err = suite.secondaryBackupOp.Run(ctx) - require.NoError(t, err, clues.ToCore(err)) + err = suite.backupOps[i].Run(ctx) + require.NoError(t, err, clues.ToCore(err)) + } } func (suite *BackupDeleteExchangeE2ESuite) TestExchangeBackupDeleteCmd() { @@ -619,8 +612,8 @@ func (suite *BackupDeleteExchangeE2ESuite) TestExchangeBackupDeleteCmd() { "--config-file", suite.dpnd.configFilePath, "--"+flags.BackupIDsFN, fmt.Sprintf("%s,%s", - string(suite.backupOp.Results.BackupID), - string(suite.secondaryBackupOp.Results.BackupID))) + string(suite.backupOps[0].Results.BackupID), + string(suite.backupOps[1].Results.BackupID))) cli.BuildCommandTree(cmd) // run the command @@ -631,7 +624,7 @@ func (suite *BackupDeleteExchangeE2ESuite) TestExchangeBackupDeleteCmd() { cmd = cliTD.StubRootCmd( "backup", "details", "exchange", "--config-file", suite.dpnd.configFilePath, - "--backup", string(suite.backupOp.Results.BackupID)) + "--backup", string(suite.backupOps[0].Results.BackupID)) cli.BuildCommandTree(cmd) err = cmd.ExecuteContext(ctx) @@ -641,7 +634,7 @@ func (suite *BackupDeleteExchangeE2ESuite) TestExchangeBackupDeleteCmd() { cmd = cliTD.StubRootCmd( "backup", "details", "exchange", "--config-file", suite.dpnd.configFilePath, - "--backup", string(suite.secondaryBackupOp.Results.BackupID)) + "--backup", string(suite.backupOps[1].Results.BackupID)) cli.BuildCommandTree(cmd) err = cmd.ExecuteContext(ctx) @@ -660,7 +653,7 @@ func (suite *BackupDeleteExchangeE2ESuite) TestExchangeBackupDeleteCmd_SingleID( "backup", "delete", "exchange", "--config-file", suite.dpnd.configFilePath, "--"+flags.BackupFN, - string(suite.backupOp.Results.BackupID)) + string(suite.backupOps[2].Results.BackupID)) cli.BuildCommandTree(cmd) // run the command @@ -671,7 +664,7 @@ func (suite *BackupDeleteExchangeE2ESuite) TestExchangeBackupDeleteCmd_SingleID( cmd = cliTD.StubRootCmd( "backup", "details", "exchange", "--config-file", suite.dpnd.configFilePath, - "--backup", 
string(suite.secondaryBackupOp.Results.BackupID)) + "--backup", string(suite.backupOps[2].Results.BackupID)) cli.BuildCommandTree(cmd) err = cmd.ExecuteContext(ctx) diff --git a/src/cli/backup/groups_e2e_test.go b/src/cli/backup/groups_e2e_test.go index 66de86eb4..986979a4f 100644 --- a/src/cli/backup/groups_e2e_test.go +++ b/src/cli/backup/groups_e2e_test.go @@ -497,9 +497,8 @@ func runGroupsDetailsCmdTest(suite *PreparedBackupGroupsE2ESuite, category path. type BackupDeleteGroupsE2ESuite struct { tester.Suite - dpnd dependencies - backupOp operations.BackupOperation - secondaryBackupOp operations.BackupOperation + dpnd dependencies + backupOps [3]operations.BackupOperation } func TestBackupDeleteGroupsE2ESuite(t *testing.T) { @@ -525,22 +524,15 @@ func (suite *BackupDeleteGroupsE2ESuite) SetupSuite() { sel := selectors.NewGroupsBackup(groups) sel.Include(selTD.GroupsBackupChannelScope(sel)) - backupOp, err := suite.dpnd.repo.NewBackup(ctx, sel.Selector) - require.NoError(t, err, clues.ToCore(err)) + for i := 0; i < cap(suite.backupOps); i++ { + backupOp, err := suite.dpnd.repo.NewBackup(ctx, sel.Selector) + require.NoError(t, err, clues.ToCore(err)) - suite.backupOp = backupOp + suite.backupOps[i] = backupOp - err = suite.backupOp.Run(ctx) - require.NoError(t, err, clues.ToCore(err)) - - // secondary backup - secondaryBackupOp, err := suite.dpnd.repo.NewBackup(ctx, sel.Selector) - require.NoError(t, err, clues.ToCore(err)) - - suite.secondaryBackupOp = secondaryBackupOp - - err = suite.secondaryBackupOp.Run(ctx) - require.NoError(t, err, clues.ToCore(err)) + err = suite.backupOps[i].Run(ctx) + require.NoError(t, err, clues.ToCore(err)) + } } func (suite *BackupDeleteGroupsE2ESuite) TestGroupsBackupDeleteCmd() { @@ -556,8 +548,8 @@ func (suite *BackupDeleteGroupsE2ESuite) TestGroupsBackupDeleteCmd() { "--config-file", suite.dpnd.configFilePath, "--"+flags.BackupIDsFN, fmt.Sprintf("%s,%s", - string(suite.backupOp.Results.BackupID), - 
string(suite.secondaryBackupOp.Results.BackupID))) + string(suite.backupOps[0].Results.BackupID), + string(suite.backupOps[1].Results.BackupID))) cli.BuildCommandTree(cmd) // run the command @@ -568,7 +560,7 @@ func (suite *BackupDeleteGroupsE2ESuite) TestGroupsBackupDeleteCmd() { cmd = cliTD.StubRootCmd( "backup", "details", "groups", "--config-file", suite.dpnd.configFilePath, - "--backups", string(suite.backupOp.Results.BackupID)) + "--backups", string(suite.backupOps[0].Results.BackupID)) cli.BuildCommandTree(cmd) err = cmd.ExecuteContext(ctx) @@ -587,7 +579,7 @@ func (suite *BackupDeleteGroupsE2ESuite) TestGroupsBackupDeleteCmd_SingleID() { "backup", "delete", "groups", "--config-file", suite.dpnd.configFilePath, "--"+flags.BackupFN, - string(suite.backupOp.Results.BackupID)) + string(suite.backupOps[2].Results.BackupID)) cli.BuildCommandTree(cmd) // run the command @@ -598,7 +590,7 @@ func (suite *BackupDeleteGroupsE2ESuite) TestGroupsBackupDeleteCmd_SingleID() { cmd = cliTD.StubRootCmd( "backup", "details", "groups", "--config-file", suite.dpnd.configFilePath, - "--backup", string(suite.backupOp.Results.BackupID)) + "--backup", string(suite.backupOps[2].Results.BackupID)) cli.BuildCommandTree(cmd) err = cmd.ExecuteContext(ctx) diff --git a/src/cli/backup/onedrive_e2e_test.go b/src/cli/backup/onedrive_e2e_test.go index 77d599862..f4b2c0bdc 100644 --- a/src/cli/backup/onedrive_e2e_test.go +++ b/src/cli/backup/onedrive_e2e_test.go @@ -121,9 +121,8 @@ func (suite *NoBackupOneDriveE2ESuite) TestOneDriveBackupCmd_userNotInTenant() { type BackupDeleteOneDriveE2ESuite struct { tester.Suite - dpnd dependencies - backupOp operations.BackupOperation - secondaryBackupOp operations.BackupOperation + dpnd dependencies + backupOps [3]operations.BackupOperation } func TestBackupDeleteOneDriveE2ESuite(t *testing.T) { @@ -152,22 +151,15 @@ func (suite *BackupDeleteOneDriveE2ESuite) SetupSuite() { sel := selectors.NewOneDriveBackup(users) 
sel.Include(selTD.OneDriveBackupFolderScope(sel)) - backupOp, err := suite.dpnd.repo.NewBackupWithLookup(ctx, sel.Selector, ins) - require.NoError(t, err, clues.ToCore(err)) + for i := 0; i < cap(suite.backupOps); i++ { + backupOp, err := suite.dpnd.repo.NewBackupWithLookup(ctx, sel.Selector, ins) + require.NoError(t, err, clues.ToCore(err)) - suite.backupOp = backupOp + suite.backupOps[i] = backupOp - err = suite.backupOp.Run(ctx) - require.NoError(t, err, clues.ToCore(err)) - - // secondary backup - secondaryBackupOp, err := suite.dpnd.repo.NewBackupWithLookup(ctx, sel.Selector, ins) - require.NoError(t, err, clues.ToCore(err)) - - suite.secondaryBackupOp = secondaryBackupOp - - err = suite.secondaryBackupOp.Run(ctx) - require.NoError(t, err, clues.ToCore(err)) + err = suite.backupOps[i].Run(ctx) + require.NoError(t, err, clues.ToCore(err)) + } } func (suite *BackupDeleteOneDriveE2ESuite) TestOneDriveBackupDeleteCmd() { @@ -185,8 +177,8 @@ func (suite *BackupDeleteOneDriveE2ESuite) TestOneDriveBackupDeleteCmd() { "--config-file", suite.dpnd.configFilePath, "--"+flags.BackupIDsFN, fmt.Sprintf("%s,%s", - string(suite.backupOp.Results.BackupID), - string(suite.secondaryBackupOp.Results.BackupID))) + string(suite.backupOps[0].Results.BackupID), + string(suite.backupOps[1].Results.BackupID))) cli.BuildCommandTree(cmd) cmd.SetErr(&suite.dpnd.recorder) @@ -201,14 +193,14 @@ func (suite *BackupDeleteOneDriveE2ESuite) TestOneDriveBackupDeleteCmd() { strings.HasSuffix( result, fmt.Sprintf("Deleted OneDrive backup [%s %s]\n", - string(suite.backupOp.Results.BackupID), - string(suite.secondaryBackupOp.Results.BackupID)))) + string(suite.backupOps[0].Results.BackupID), + string(suite.backupOps[1].Results.BackupID)))) // a follow-up details call should fail, due to the backup ID being deleted cmd = cliTD.StubRootCmd( "backup", "details", "onedrive", "--config-file", suite.dpnd.configFilePath, - "--backups", string(suite.backupOp.Results.BackupID)) + "--backups", 
string(suite.backupOps[0].Results.BackupID)) cli.BuildCommandTree(cmd) err = cmd.ExecuteContext(ctx) @@ -229,7 +221,7 @@ func (suite *BackupDeleteOneDriveE2ESuite) TestOneDriveBackupDeleteCmd_SingleID( "backup", "delete", "onedrive", "--config-file", suite.dpnd.configFilePath, "--"+flags.BackupFN, - string(suite.backupOp.Results.BackupID)) + string(suite.backupOps[2].Results.BackupID)) cli.BuildCommandTree(cmd) cmd.SetErr(&suite.dpnd.recorder) @@ -244,13 +236,13 @@ func (suite *BackupDeleteOneDriveE2ESuite) TestOneDriveBackupDeleteCmd_SingleID( strings.HasSuffix( result, fmt.Sprintf("Deleted OneDrive backup [%s]\n", - string(suite.backupOp.Results.BackupID)))) + string(suite.backupOps[2].Results.BackupID)))) // a follow-up details call should fail, due to the backup ID being deleted cmd = cliTD.StubRootCmd( "backup", "details", "onedrive", "--config-file", suite.dpnd.configFilePath, - "--backup", string(suite.backupOp.Results.BackupID)) + "--backup", string(suite.backupOps[0].Results.BackupID)) cli.BuildCommandTree(cmd) err = cmd.ExecuteContext(ctx) From 61de865660043f9e9d131da73356a7348facebf7 Mon Sep 17 00:00:00 2001 From: Abin Simon Date: Thu, 28 Sep 2023 19:06:15 +0530 Subject: [PATCH 14/14] Revert sanity tests for Groups incrementals and restore (#4392) This reverts commit bc25869173df9052e358a055867539f32af8935c. #4373 We need more work for groups restore and export sanity tests to be functional. Reverting this to make the CI pass for now. Example successful run: https://github.com/alcionai/corso/actions/runs/6336809043/job/17210633457 --- #### Does this PR need a docs update or release note? 
- [ ] :white_check_mark: Yes, it's included - [ ] :clock1: Yes, but in a later PR - [x] :no_entry: No #### Type of change - [ ] :sunflower: Feature - [ ] :bug: Bugfix - [ ] :world_map: Documentation - [ ] :robot: Supportability/Tests - [x] :computer: CI/Deployment - [ ] :broom: Tech Debt/Cleanup #### Issue(s) * # #### Test Plan - [x] :muscle: Manual - [ ] :zap: Unit test - [ ] :green_heart: E2E --- .github/workflows/sanity-test.yaml | 26 +------------------------- 1 file changed, 1 insertion(+), 25 deletions(-) diff --git a/.github/workflows/sanity-test.yaml b/.github/workflows/sanity-test.yaml index c24dc1141..096924dba 100644 --- a/.github/workflows/sanity-test.yaml +++ b/.github/workflows/sanity-test.yaml @@ -364,34 +364,10 @@ jobs: service: groups kind: initial backup-args: '--group "${{ vars.CORSO_M365_TEST_TEAM_ID }}"' - restore-args: '--folder ${{ env.RESTORE_DEST_PFX }}${{ steps.new-data-creation-sharepoint.outputs.result }}' test-folder: '${{ env.RESTORE_DEST_PFX }}${{ steps.new-data-creation-groups.outputs.result }}' log-dir: ${{ env.CORSO_LOG_DIR }} - with-export: true - # generate some more enteries for incremental check - - name: Groups - Create new data (for incremental) - working-directory: ./src/cmd/factory - run: | - go run . 
sharepoint files \ - --site ${{ secrets.CORSO_M365_TEST_SITE_URL }} \ - --user ${{ env.TEST_USER }} \ - --secondaryuser ${{ env.CORSO_SECONDARY_M365_TEST_USER_ID }} \ - --tenant ${{ secrets.TENANT_ID }} \ - --destination ${{ env.RESTORE_DEST_PFX }}${{ steps.new-data-creation-groups.outputs.result }} \ - --count 4 - - - name: Groups - Incremental backup - id: groups-incremental - uses: ./.github/actions/backup-restore-test - with: - service: groups - kind: incremental - backup-args: '--site "${{ secrets.CORSO_M365_TEST_SITE_URL }}"' - restore-args: '--folder ${{ env.RESTORE_DEST_PFX }}${{ steps.new-data-creation-groups.outputs.result }}' - test-folder: '${{ env.RESTORE_DEST_PFX }}${{ steps.new-data-creation-groups.outputs.result }}' - log-dir: ${{ env.CORSO_LOG_DIR }} - with-export: true + # TODO: incrementals ##########################################################################################################################################