Merge branch 'main' into updateKopiaPassword

neha_gupta 2023-09-28 22:53:41 +05:30 committed by GitHub
commit a8001f2aa9
34 changed files with 650 additions and 240 deletions

View File

@@ -11,6 +11,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 - Enables local or network-attached storage for Corso repositories.
 - Reduce backup runtime for OneDrive and SharePoint incremental backups that have no file changes.
 - Increase Exchange backup performance by lazily fetching data only for items whose content changed.
+- Added `--backups` flag to delete multiple backups in `corso backup delete` command.
+
+## Fixed
+
+- Teams Channels that cannot support delta tokens (those without messages) fall back to non-delta enumeration and no longer fail a backup.
 ## [v0.13.0] (beta) - 2023-09-18

View File

@@ -252,8 +252,8 @@ func runBackups(
 func genericDeleteCommand(
     cmd *cobra.Command,
     pst path.ServiceType,
-    bID, designation string,
-    args []string,
+    designation string,
+    bID, args []string,
 ) error {
     if utils.HasNoFlagsAndShownHelp(cmd) {
         return nil
@@ -265,21 +265,18 @@ func genericDeleteCommand(
     ctx := clues.Add(cmd.Context(), "delete_backup_id", bID)
-    r, _, _, _, err := utils.GetAccountAndConnectWithOverrides(
-        ctx,
-        cmd,
-        pst)
+    r, _, err := utils.GetAccountAndConnect(ctx, cmd, pst)
     if err != nil {
         return Only(ctx, err)
     }
     defer utils.CloseRepo(ctx, r)
-    if err := r.DeleteBackups(ctx, true, bID); err != nil {
-        return Only(ctx, clues.Wrap(err, "Deleting backup "+bID))
+    if err := r.DeleteBackups(ctx, true, bID...); err != nil {
+        return Only(ctx, clues.Wrap(err, fmt.Sprintf("Deleting backup %v", bID)))
     }
-    Infof(ctx, "Deleted %s backup %s", designation, bID)
+    Infof(ctx, "Deleted %s backup %v", designation, bID)
     return nil
 }
@@ -298,10 +295,7 @@ func genericListCommand(
         return nil
     }
-    r, _, _, _, err := utils.GetAccountAndConnectWithOverrides(
-        ctx,
-        cmd,
-        service)
+    r, _, err := utils.GetAccountAndConnect(ctx, cmd, service)
     if err != nil {
         return Only(ctx, err)
     }

View File

@@ -32,7 +32,7 @@ const (
 const (
     exchangeServiceCommand = "exchange"
     exchangeServiceCommandCreateUseSuffix = "--mailbox <email> | '" + flags.Wildcard + "'"
-    exchangeServiceCommandDeleteUseSuffix = "--backup <backupId>"
+    exchangeServiceCommandDeleteUseSuffix = "--backups <backupId>"
     exchangeServiceCommandDetailsUseSuffix = "--backup <backupId>"
 )
@@ -46,8 +46,9 @@ corso backup create exchange --mailbox alice@example.com,bob@example.com --data
 # Backup all Exchange data for all M365 users
 corso backup create exchange --mailbox '*'`
-    exchangeServiceCommandDeleteExamples = `# Delete Exchange backup with ID 1234abcd-12ab-cd34-56de-1234abcd
-corso backup delete exchange --backup 1234abcd-12ab-cd34-56de-1234abcd`
+    exchangeServiceCommandDeleteExamples = `# Delete Exchange backup with IDs 1234abcd-12ab-cd34-56de-1234abcd \
+and 1234abcd-12ab-cd34-56de-1234abce
+corso backup delete exchange --backups 1234abcd-12ab-cd34-56de-1234abcd,1234abcd-12ab-cd34-56de-1234abce`
     exchangeServiceCommandDetailsExamples = `# Explore items in Alice's latest backup (1234abcd...)
 corso backup details exchange --backup 1234abcd-12ab-cd34-56de-1234abcd
@@ -121,7 +122,8 @@ func addExchangeCommands(cmd *cobra.Command) *cobra.Command {
         c.Use = c.Use + " " + exchangeServiceCommandDeleteUseSuffix
         c.Example = exchangeServiceCommandDeleteExamples
-        flags.AddBackupIDFlag(c, true)
+        flags.AddMultipleBackupIDsFlag(c, false)
+        flags.AddBackupIDFlag(c, false)
     }
     return c
@@ -273,17 +275,19 @@ func detailsExchangeCmd(cmd *cobra.Command, args []string) error {
     ctx := cmd.Context()
     opts := utils.MakeExchangeOpts(cmd)
-    r, _, _, ctrlOpts, err := utils.GetAccountAndConnectWithOverrides(
-        ctx,
-        cmd,
-        path.ExchangeService)
+    r, rdao, err := utils.GetAccountAndConnect(ctx, cmd, path.ExchangeService)
     if err != nil {
         return Only(ctx, err)
     }
     defer utils.CloseRepo(ctx, r)
-    ds, err := runDetailsExchangeCmd(ctx, r, flags.BackupIDFV, opts, ctrlOpts.SkipReduce)
+    ds, err := runDetailsExchangeCmd(
+        ctx,
+        r,
+        flags.BackupIDFV,
+        opts,
+        rdao.Opts.SkipReduce)
     if err != nil {
         return Only(ctx, err)
     }
@@ -352,5 +356,15 @@ func exchangeDeleteCmd() *cobra.Command {
 // deletes an exchange service backup.
 func deleteExchangeCmd(cmd *cobra.Command, args []string) error {
-    return genericDeleteCommand(cmd, path.ExchangeService, flags.BackupIDFV, "Exchange", args)
+    var backupIDValue []string
+    if len(flags.BackupIDsFV) > 0 {
+        backupIDValue = flags.BackupIDsFV
+    } else if len(flags.BackupIDFV) > 0 {
+        backupIDValue = append(backupIDValue, flags.BackupIDFV)
+    } else {
+        return clues.New("either --backup or --backups flag is required")
+    }
+    return genericDeleteCommand(cmd, path.ExchangeService, "Exchange", backupIDValue, args)
 }

View File

@@ -561,8 +561,8 @@ func runExchangeDetailsCmdTest(suite *PreparedBackupExchangeE2ESuite, category p
 type BackupDeleteExchangeE2ESuite struct {
     tester.Suite
     dpnd dependencies
-    backupOp operations.BackupOperation
+    backupOps [3]operations.BackupOperation
 }
 func TestBackupDeleteExchangeE2ESuite(t *testing.T) {
@@ -588,13 +588,15 @@ func (suite *BackupDeleteExchangeE2ESuite) SetupSuite() {
     sel := selectors.NewExchangeBackup(users)
     sel.Include(sel.MailFolders([]string{api.MailInbox}, selectors.PrefixMatch()))
-    backupOp, err := suite.dpnd.repo.NewBackup(ctx, sel.Selector)
-    require.NoError(t, err, clues.ToCore(err))
-    suite.backupOp = backupOp
-    err = suite.backupOp.Run(ctx)
-    require.NoError(t, err, clues.ToCore(err))
+    for i := 0; i < cap(suite.backupOps); i++ {
+        backupOp, err := suite.dpnd.repo.NewBackup(ctx, sel.Selector)
+        require.NoError(t, err, clues.ToCore(err))
+        suite.backupOps[i] = backupOp
+        err = suite.backupOps[i].Run(ctx)
+        require.NoError(t, err, clues.ToCore(err))
+    }
 }
 func (suite *BackupDeleteExchangeE2ESuite) TestExchangeBackupDeleteCmd() {
@@ -608,7 +610,10 @@ func (suite *BackupDeleteExchangeE2ESuite) TestExchangeBackupDeleteCmd() {
     cmd := cliTD.StubRootCmd(
         "backup", "delete", "exchange",
         "--config-file", suite.dpnd.configFilePath,
-        "--"+flags.BackupFN, string(suite.backupOp.Results.BackupID))
+        "--"+flags.BackupIDsFN,
+        fmt.Sprintf("%s,%s",
+            string(suite.backupOps[0].Results.BackupID),
+            string(suite.backupOps[1].Results.BackupID)))
     cli.BuildCommandTree(cmd)
     // run the command
@@ -619,7 +624,47 @@ func (suite *BackupDeleteExchangeE2ESuite) TestExchangeBackupDeleteCmd() {
     cmd = cliTD.StubRootCmd(
         "backup", "details", "exchange",
         "--config-file", suite.dpnd.configFilePath,
-        "--backup", string(suite.backupOp.Results.BackupID))
+        "--backup", string(suite.backupOps[0].Results.BackupID))
+    cli.BuildCommandTree(cmd)
+    err = cmd.ExecuteContext(ctx)
+    require.Error(t, err, clues.ToCore(err))
+    // a follow-up details call should fail, due to the backup ID being deleted
+    cmd = cliTD.StubRootCmd(
+        "backup", "details", "exchange",
+        "--config-file", suite.dpnd.configFilePath,
+        "--backup", string(suite.backupOps[1].Results.BackupID))
+    cli.BuildCommandTree(cmd)
+    err = cmd.ExecuteContext(ctx)
+    require.Error(t, err, clues.ToCore(err))
+}
+func (suite *BackupDeleteExchangeE2ESuite) TestExchangeBackupDeleteCmd_SingleID() {
+    t := suite.T()
+    ctx, flush := tester.NewContext(t)
+    ctx = config.SetViper(ctx, suite.dpnd.vpr)
+    defer flush()
+    cmd := cliTD.StubRootCmd(
+        "backup", "delete", "exchange",
+        "--config-file", suite.dpnd.configFilePath,
+        "--"+flags.BackupFN,
+        string(suite.backupOps[2].Results.BackupID))
+    cli.BuildCommandTree(cmd)
+    // run the command
+    err := cmd.ExecuteContext(ctx)
+    require.NoError(t, err, clues.ToCore(err))
+    // a follow-up details call should fail, due to the backup ID being deleted
+    cmd = cliTD.StubRootCmd(
+        "backup", "details", "exchange",
+        "--config-file", suite.dpnd.configFilePath,
+        "--backup", string(suite.backupOps[2].Results.BackupID))
     cli.BuildCommandTree(cmd)
     err = cmd.ExecuteContext(ctx)
@@ -637,10 +682,28 @@ func (suite *BackupDeleteExchangeE2ESuite) TestExchangeBackupDeleteCmd_UnknownID
     cmd := cliTD.StubRootCmd(
         "backup", "delete", "exchange",
         "--config-file", suite.dpnd.configFilePath,
-        "--"+flags.BackupFN, uuid.NewString())
+        "--"+flags.BackupIDsFN, uuid.NewString())
     cli.BuildCommandTree(cmd)
     // unknown backupIDs should error since the modelStore can't find the backup
     err := cmd.ExecuteContext(ctx)
     require.Error(t, err, clues.ToCore(err))
 }
+func (suite *BackupDeleteExchangeE2ESuite) TestExchangeBackupDeleteCmd_NoBackupID() {
+    t := suite.T()
+    ctx, flush := tester.NewContext(t)
+    ctx = config.SetViper(ctx, suite.dpnd.vpr)
+    defer flush()
+    cmd := cliTD.StubRootCmd(
+        "backup", "delete", "exchange",
+        "--config-file", suite.dpnd.configFilePath)
+    cli.BuildCommandTree(cmd)
+    // empty backupIDs should error since no data provided
+    err := cmd.ExecuteContext(ctx)
+    require.Error(t, err, clues.ToCore(err))
+}

View File

@@ -32,7 +32,7 @@ const (
     groupsServiceCommand = "groups"
     teamsServiceCommand = "teams"
     groupsServiceCommandCreateUseSuffix = "--group <groupName> | '" + flags.Wildcard + "'"
-    groupsServiceCommandDeleteUseSuffix = "--backup <backupId>"
+    groupsServiceCommandDeleteUseSuffix = "--backups <backupId>"
     groupsServiceCommandDetailsUseSuffix = "--backup <backupId>"
 )
@@ -46,8 +46,9 @@ corso backup create groups --group Marketing --data messages
 # Backup all Groups and Teams data for all groups
 corso backup create groups --group '*'`
-    groupsServiceCommandDeleteExamples = `# Delete Groups backup with ID 1234abcd-12ab-cd34-56de-1234abcd
-corso backup delete groups --backup 1234abcd-12ab-cd34-56de-1234abcd`
+    groupsServiceCommandDeleteExamples = `# Delete Groups backup with ID 1234abcd-12ab-cd34-56de-1234abcd \
+and 1234abcd-12ab-cd34-56de-1234abce
+corso backup delete groups --backups 1234abcd-12ab-cd34-56de-1234abcd,1234abcd-12ab-cd34-56de-1234abce`
     groupsServiceCommandDetailsExamples = `# Explore items in Marketing's latest backup (1234abcd...)
 corso backup details groups --backup 1234abcd-12ab-cd34-56de-1234abcd
@@ -110,7 +111,8 @@ func addGroupsCommands(cmd *cobra.Command) *cobra.Command {
         c.Use = c.Use + " " + groupsServiceCommandDeleteUseSuffix
         c.Example = groupsServiceCommandDeleteExamples
-        flags.AddBackupIDFlag(c, true)
+        flags.AddMultipleBackupIDsFlag(c, false)
+        flags.AddBackupIDFlag(c, false)
     }
     return c
@@ -226,17 +228,19 @@ func detailsGroupsCmd(cmd *cobra.Command, args []string) error {
     ctx := cmd.Context()
     opts := utils.MakeGroupsOpts(cmd)
-    r, _, _, ctrlOpts, err := utils.GetAccountAndConnectWithOverrides(
-        ctx,
-        cmd,
-        path.GroupsService)
+    r, rdao, err := utils.GetAccountAndConnect(ctx, cmd, path.GroupsService)
     if err != nil {
         return Only(ctx, err)
     }
     defer utils.CloseRepo(ctx, r)
-    ds, err := runDetailsGroupsCmd(ctx, r, flags.BackupIDFV, opts, ctrlOpts.SkipReduce)
+    ds, err := runDetailsGroupsCmd(
+        ctx,
+        r,
+        flags.BackupIDFV,
+        opts,
+        rdao.Opts.SkipReduce)
     if err != nil {
         return Only(ctx, err)
     }
@@ -305,7 +309,17 @@ func groupsDeleteCmd() *cobra.Command {
 // deletes an groups service backup.
 func deleteGroupsCmd(cmd *cobra.Command, args []string) error {
-    return genericDeleteCommand(cmd, path.GroupsService, flags.BackupIDFV, "Groups", args)
+    backupIDValue := []string{}
+    if len(flags.BackupIDsFV) > 0 {
+        backupIDValue = flags.BackupIDsFV
+    } else if len(flags.BackupIDFV) > 0 {
+        backupIDValue = append(backupIDValue, flags.BackupIDFV)
+    } else {
+        return clues.New("either --backup or --backups flag is required")
+    }
+    return genericDeleteCommand(cmd, path.GroupsService, "Groups", backupIDValue, args)
 }
 // ---------------------------------------------------------------------------

View File

@@ -497,8 +497,8 @@ func runGroupsDetailsCmdTest(suite *PreparedBackupGroupsE2ESuite, category path.
 type BackupDeleteGroupsE2ESuite struct {
     tester.Suite
    dpnd dependencies
-    backupOp operations.BackupOperation
+    backupOps [3]operations.BackupOperation
 }
 func TestBackupDeleteGroupsE2ESuite(t *testing.T) {
@@ -524,13 +524,15 @@ func (suite *BackupDeleteGroupsE2ESuite) SetupSuite() {
     sel := selectors.NewGroupsBackup(groups)
     sel.Include(selTD.GroupsBackupChannelScope(sel))
-    backupOp, err := suite.dpnd.repo.NewBackup(ctx, sel.Selector)
-    require.NoError(t, err, clues.ToCore(err))
-    suite.backupOp = backupOp
-    err = suite.backupOp.Run(ctx)
-    require.NoError(t, err, clues.ToCore(err))
+    for i := 0; i < cap(suite.backupOps); i++ {
+        backupOp, err := suite.dpnd.repo.NewBackup(ctx, sel.Selector)
+        require.NoError(t, err, clues.ToCore(err))
+        suite.backupOps[i] = backupOp
+        err = suite.backupOps[i].Run(ctx)
+        require.NoError(t, err, clues.ToCore(err))
+    }
 }
 func (suite *BackupDeleteGroupsE2ESuite) TestGroupsBackupDeleteCmd() {
@@ -544,7 +546,10 @@ func (suite *BackupDeleteGroupsE2ESuite) TestGroupsBackupDeleteCmd() {
     cmd := cliTD.StubRootCmd(
         "backup", "delete", "groups",
         "--config-file", suite.dpnd.configFilePath,
-        "--"+flags.BackupFN, string(suite.backupOp.Results.BackupID))
+        "--"+flags.BackupIDsFN,
+        fmt.Sprintf("%s,%s",
+            string(suite.backupOps[0].Results.BackupID),
+            string(suite.backupOps[1].Results.BackupID)))
     cli.BuildCommandTree(cmd)
     // run the command
@@ -555,7 +560,37 @@ func (suite *BackupDeleteGroupsE2ESuite) TestGroupsBackupDeleteCmd() {
     cmd = cliTD.StubRootCmd(
         "backup", "details", "groups",
         "--config-file", suite.dpnd.configFilePath,
-        "--backup", string(suite.backupOp.Results.BackupID))
+        "--backups", string(suite.backupOps[0].Results.BackupID))
+    cli.BuildCommandTree(cmd)
+    err = cmd.ExecuteContext(ctx)
+    require.Error(t, err, clues.ToCore(err))
+}
+func (suite *BackupDeleteGroupsE2ESuite) TestGroupsBackupDeleteCmd_SingleID() {
+    t := suite.T()
+    ctx, flush := tester.NewContext(t)
+    ctx = config.SetViper(ctx, suite.dpnd.vpr)
+    defer flush()
+    cmd := cliTD.StubRootCmd(
+        "backup", "delete", "groups",
+        "--config-file", suite.dpnd.configFilePath,
+        "--"+flags.BackupFN,
+        string(suite.backupOps[2].Results.BackupID))
+    cli.BuildCommandTree(cmd)
+    // run the command
+    err := cmd.ExecuteContext(ctx)
+    require.NoError(t, err, clues.ToCore(err))
+    // a follow-up details call should fail, due to the backup ID being deleted
+    cmd = cliTD.StubRootCmd(
+        "backup", "details", "groups",
+        "--config-file", suite.dpnd.configFilePath,
+        "--backup", string(suite.backupOps[2].Results.BackupID))
     cli.BuildCommandTree(cmd)
     err = cmd.ExecuteContext(ctx)
@@ -573,7 +608,7 @@ func (suite *BackupDeleteGroupsE2ESuite) TestGroupsBackupDeleteCmd_UnknownID() {
     cmd := cliTD.StubRootCmd(
         "backup", "delete", "groups",
         "--config-file", suite.dpnd.configFilePath,
-        "--"+flags.BackupFN, uuid.NewString())
+        "--"+flags.BackupIDsFN, uuid.NewString())
     cli.BuildCommandTree(cmd)
     // unknown backupIDs should error since the modelStore can't find the backup
@@ -581,6 +616,24 @@ func (suite *BackupDeleteGroupsE2ESuite) TestGroupsBackupDeleteCmd_UnknownID() {
     require.Error(t, err, clues.ToCore(err))
 }
+func (suite *BackupDeleteGroupsE2ESuite) TestGroupsBackupDeleteCmd_NoBackupID() {
+    t := suite.T()
+    ctx, flush := tester.NewContext(t)
+    ctx = config.SetViper(ctx, suite.dpnd.vpr)
+    defer flush()
+    cmd := cliTD.StubRootCmd(
+        "backup", "delete", "groups",
+        "--config-file", suite.dpnd.configFilePath)
+    cli.BuildCommandTree(cmd)
+    // empty backupIDs should error since no data provided
+    err := cmd.ExecuteContext(ctx)
+    require.Error(t, err, clues.ToCore(err))
+}
 // ---------------------------------------------------------------------------
 // helpers
 // ---------------------------------------------------------------------------

View File

@@ -26,7 +26,7 @@ import (
 const (
     oneDriveServiceCommand = "onedrive"
     oneDriveServiceCommandCreateUseSuffix = "--user <email> | '" + flags.Wildcard + "'"
-    oneDriveServiceCommandDeleteUseSuffix = "--backup <backupId>"
+    oneDriveServiceCommandDeleteUseSuffix = "--backups <backupId>"
     oneDriveServiceCommandDetailsUseSuffix = "--backup <backupId>"
 )
@@ -40,8 +40,9 @@ corso backup create onedrive --user alice@example.com,bob@example.com
 # Backup all OneDrive data for all M365 users
 corso backup create onedrive --user '*'`
-    oneDriveServiceCommandDeleteExamples = `# Delete OneDrive backup with ID 1234abcd-12ab-cd34-56de-1234abcd
-corso backup delete onedrive --backup 1234abcd-12ab-cd34-56de-1234abcd`
+    oneDriveServiceCommandDeleteExamples = `# Delete OneDrive backup with ID 1234abcd-12ab-cd34-56de-1234abcd \
+and 1234abcd-12ab-cd34-56de-1234abce
+corso backup delete onedrive --backups 1234abcd-12ab-cd34-56de-1234abcd,1234abcd-12ab-cd34-56de-1234abce`
     oneDriveServiceCommandDetailsExamples = `# Explore items in Bob's latest backup (1234abcd...)
 corso backup details onedrive --backup 1234abcd-12ab-cd34-56de-1234abcd
@@ -100,7 +101,8 @@ func addOneDriveCommands(cmd *cobra.Command) *cobra.Command {
         c.Use = c.Use + " " + oneDriveServiceCommandDeleteUseSuffix
         c.Example = oneDriveServiceCommandDeleteExamples
-        flags.AddBackupIDFlag(c, true)
+        flags.AddMultipleBackupIDsFlag(c, false)
+        flags.AddBackupIDFlag(c, false)
     }
     return c
@@ -230,17 +232,19 @@ func detailsOneDriveCmd(cmd *cobra.Command, args []string) error {
     ctx := cmd.Context()
     opts := utils.MakeOneDriveOpts(cmd)
-    r, _, _, ctrlOpts, err := utils.GetAccountAndConnectWithOverrides(
-        ctx,
-        cmd,
-        path.OneDriveService)
+    r, rdao, err := utils.GetAccountAndConnect(ctx, cmd, path.OneDriveService)
     if err != nil {
         return Only(ctx, err)
     }
     defer utils.CloseRepo(ctx, r)
-    ds, err := runDetailsOneDriveCmd(ctx, r, flags.BackupIDFV, opts, ctrlOpts.SkipReduce)
+    ds, err := runDetailsOneDriveCmd(
+        ctx,
+        r,
+        flags.BackupIDFV,
+        opts,
+        rdao.Opts.SkipReduce)
     if err != nil {
         return Only(ctx, err)
     }
@@ -306,5 +310,15 @@ func oneDriveDeleteCmd() *cobra.Command {
 // deletes a oneDrive service backup.
 func deleteOneDriveCmd(cmd *cobra.Command, args []string) error {
-    return genericDeleteCommand(cmd, path.OneDriveService, flags.BackupIDFV, "OneDrive", args)
+    backupIDValue := []string{}
+    if len(flags.BackupIDsFV) > 0 {
+        backupIDValue = flags.BackupIDsFV
+    } else if len(flags.BackupIDFV) > 0 {
+        backupIDValue = append(backupIDValue, flags.BackupIDFV)
+    } else {
+        return clues.New("either --backup or --backups flag is required")
+    }
+    return genericDeleteCommand(cmd, path.OneDriveService, "OneDrive", backupIDValue, args)
 }

View File

@@ -121,8 +121,8 @@ func (suite *NoBackupOneDriveE2ESuite) TestOneDriveBackupCmd_userNotInTenant() {
 type BackupDeleteOneDriveE2ESuite struct {
     tester.Suite
     dpnd dependencies
-    backupOp operations.BackupOperation
+    backupOps [3]operations.BackupOperation
 }
 func TestBackupDeleteOneDriveE2ESuite(t *testing.T) {
@@ -151,13 +151,15 @@ func (suite *BackupDeleteOneDriveE2ESuite) SetupSuite() {
     sel := selectors.NewOneDriveBackup(users)
     sel.Include(selTD.OneDriveBackupFolderScope(sel))
-    backupOp, err := suite.dpnd.repo.NewBackupWithLookup(ctx, sel.Selector, ins)
-    require.NoError(t, err, clues.ToCore(err))
-    suite.backupOp = backupOp
-    err = suite.backupOp.Run(ctx)
-    require.NoError(t, err, clues.ToCore(err))
+    for i := 0; i < cap(suite.backupOps); i++ {
+        backupOp, err := suite.dpnd.repo.NewBackupWithLookup(ctx, sel.Selector, ins)
+        require.NoError(t, err, clues.ToCore(err))
+        suite.backupOps[i] = backupOp
+        err = suite.backupOps[i].Run(ctx)
+        require.NoError(t, err, clues.ToCore(err))
+    }
 }
 func (suite *BackupDeleteOneDriveE2ESuite) TestOneDriveBackupDeleteCmd() {
@@ -173,7 +175,10 @@ func (suite *BackupDeleteOneDriveE2ESuite) TestOneDriveBackupDeleteCmd() {
     cmd := cliTD.StubRootCmd(
         "backup", "delete", "onedrive",
         "--config-file", suite.dpnd.configFilePath,
-        "--"+flags.BackupFN, string(suite.backupOp.Results.BackupID))
+        "--"+flags.BackupIDsFN,
+        fmt.Sprintf("%s,%s",
+            string(suite.backupOps[0].Results.BackupID),
+            string(suite.backupOps[1].Results.BackupID)))
     cli.BuildCommandTree(cmd)
     cmd.SetErr(&suite.dpnd.recorder)
@@ -187,13 +192,57 @@ func (suite *BackupDeleteOneDriveE2ESuite) TestOneDriveBackupDeleteCmd() {
     assert.True(t,
         strings.HasSuffix(
             result,
-            fmt.Sprintf("Deleted OneDrive backup %s\n", string(suite.backupOp.Results.BackupID))))
+            fmt.Sprintf("Deleted OneDrive backup [%s %s]\n",
+                string(suite.backupOps[0].Results.BackupID),
+                string(suite.backupOps[1].Results.BackupID))))
     // a follow-up details call should fail, due to the backup ID being deleted
     cmd = cliTD.StubRootCmd(
         "backup", "details", "onedrive",
         "--config-file", suite.dpnd.configFilePath,
-        "--backup", string(suite.backupOp.Results.BackupID))
+        "--backups", string(suite.backupOps[0].Results.BackupID))
+    cli.BuildCommandTree(cmd)
+    err = cmd.ExecuteContext(ctx)
+    require.Error(t, err, clues.ToCore(err))
+}
+func (suite *BackupDeleteOneDriveE2ESuite) TestOneDriveBackupDeleteCmd_SingleID() {
+    t := suite.T()
+    ctx, flush := tester.NewContext(t)
+    ctx = config.SetViper(ctx, suite.dpnd.vpr)
+    defer flush()
+    suite.dpnd.recorder.Reset()
+    cmd := cliTD.StubRootCmd(
+        "backup", "delete", "onedrive",
+        "--config-file", suite.dpnd.configFilePath,
+        "--"+flags.BackupFN,
+        string(suite.backupOps[2].Results.BackupID))
+    cli.BuildCommandTree(cmd)
+    cmd.SetErr(&suite.dpnd.recorder)
+    ctx = print.SetRootCmd(ctx, cmd)
+    // run the command
+    err := cmd.ExecuteContext(ctx)
+    require.NoError(t, err, clues.ToCore(err))
+    result := suite.dpnd.recorder.String()
+    assert.True(t,
+        strings.HasSuffix(
+            result,
+            fmt.Sprintf("Deleted OneDrive backup [%s]\n",
+                string(suite.backupOps[2].Results.BackupID))))
+    // a follow-up details call should fail, due to the backup ID being deleted
+    cmd = cliTD.StubRootCmd(
+        "backup", "details", "onedrive",
+        "--config-file", suite.dpnd.configFilePath,
+        "--backup", string(suite.backupOps[0].Results.BackupID))
     cli.BuildCommandTree(cmd)
     err = cmd.ExecuteContext(ctx)
@@ -211,10 +260,28 @@ func (suite *BackupDeleteOneDriveE2ESuite) TestOneDriveBackupDeleteCmd_unknownID
     cmd := cliTD.StubRootCmd(
         "backup", "delete", "onedrive",
         "--config-file", suite.dpnd.configFilePath,
-        "--"+flags.BackupFN, uuid.NewString())
+        "--"+flags.BackupIDsFN, uuid.NewString())
     cli.BuildCommandTree(cmd)
     // unknown backupIDs should error since the modelStore can't find the backup
     err := cmd.ExecuteContext(ctx)
     require.Error(t, err, clues.ToCore(err))
 }
+func (suite *BackupDeleteOneDriveE2ESuite) TestOneDriveBackupDeleteCmd_NoBackupID() {
+    t := suite.T()
+    ctx, flush := tester.NewContext(t)
+    ctx = config.SetViper(ctx, suite.dpnd.vpr)
+    defer flush()
+    cmd := cliTD.StubRootCmd(
+        "backup", "delete", "onedrive",
+        "--config-file", suite.dpnd.configFilePath)
+    cli.BuildCommandTree(cmd)
+    // empty backupIDs should error since no data provided
+    err := cmd.ExecuteContext(ctx)
+    require.Error(t, err, clues.ToCore(err))
+}

View File

@@ -30,7 +30,7 @@ import (
 const (
     sharePointServiceCommand = "sharepoint"
     sharePointServiceCommandCreateUseSuffix = "--site <siteURL> | '" + flags.Wildcard + "'"
-    sharePointServiceCommandDeleteUseSuffix = "--backup <backupId>"
+    sharePointServiceCommandDeleteUseSuffix = "--backups <backupId>"
     sharePointServiceCommandDetailsUseSuffix = "--backup <backupId>"
 )
@@ -44,8 +44,9 @@ corso backup create sharepoint --site https://example.com/hr,https://example.com
 # Backup all SharePoint data for all Sites
 corso backup create sharepoint --site '*'`
-    sharePointServiceCommandDeleteExamples = `# Delete SharePoint backup with ID 1234abcd-12ab-cd34-56de-1234abcd
-corso backup delete sharepoint --backup 1234abcd-12ab-cd34-56de-1234abcd`
+    sharePointServiceCommandDeleteExamples = `# Delete SharePoint backup with ID 1234abcd-12ab-cd34-56de-1234abcd \
+and 1234abcd-12ab-cd34-56de-1234abce
+corso backup delete sharepoint --backups 1234abcd-12ab-cd34-56de-1234abcd,1234abcd-12ab-cd34-56de-1234abce`
     sharePointServiceCommandDetailsExamples = `# Explore items in the HR site's latest backup (1234abcd...)
 corso backup details sharepoint --backup 1234abcd-12ab-cd34-56de-1234abcd
@@ -111,7 +112,8 @@ func addSharePointCommands(cmd *cobra.Command) *cobra.Command {
         c.Use = c.Use + " " + sharePointServiceCommandDeleteUseSuffix
         c.Example = sharePointServiceCommandDeleteExamples
-        flags.AddBackupIDFlag(c, true)
+        flags.AddMultipleBackupIDsFlag(c, false)
+        flags.AddBackupIDFlag(c, false)
     }
     return c
@@ -284,7 +286,17 @@ func sharePointDeleteCmd() *cobra.Command {
 // deletes a sharePoint service backup.
 func deleteSharePointCmd(cmd *cobra.Command, args []string) error {
-    return genericDeleteCommand(cmd, path.SharePointService, flags.BackupIDFV, "SharePoint", args)
+    backupIDValue := []string{}
+    if len(flags.BackupIDsFV) > 0 {
+        backupIDValue = flags.BackupIDsFV
+    } else if len(flags.BackupIDFV) > 0 {
+        backupIDValue = append(backupIDValue, flags.BackupIDFV)
+    } else {
+        return clues.New("either --backup or --backups flag is required")
+    }
+    return genericDeleteCommand(cmd, path.SharePointService, "SharePoint", backupIDValue, args)
 }
 // ------------------------------------------------------------------------------------------------
@@ -315,17 +327,19 @@ func detailsSharePointCmd(cmd *cobra.Command, args []string) error {
     ctx := cmd.Context()
     opts := utils.MakeSharePointOpts(cmd)
-    r, _, _, ctrlOpts, err := utils.GetAccountAndConnectWithOverrides(
-        ctx,
-        cmd,
-        path.SharePointService)
+    r, rdao, err := utils.GetAccountAndConnect(ctx, cmd, path.SharePointService)
     if err != nil {
         return Only(ctx, err)
     }
     defer utils.CloseRepo(ctx, r)
-    ds, err := runDetailsSharePointCmd(ctx, r, flags.BackupIDFV, opts, ctrlOpts.SkipReduce)
+    ds, err := runDetailsSharePointCmd(
+        ctx,
+        r,
+        flags.BackupIDFV,
+        opts,
+        rdao.Opts.SkipReduce)
     if err != nil {
         return Only(ctx, err)
     }

View File

@@ -84,8 +84,9 @@ func (suite *NoBackupSharePointE2ESuite) TestSharePointBackupListCmd_empty() {
 type BackupDeleteSharePointE2ESuite struct {
     tester.Suite
     dpnd dependencies
     backupOp operations.BackupOperation
+    secondaryBackupOp operations.BackupOperation
 }
 func TestBackupDeleteSharePointE2ESuite(t *testing.T) {
@@ -121,6 +122,15 @@ func (suite *BackupDeleteSharePointE2ESuite) SetupSuite() {
     err = suite.backupOp.Run(ctx)
     require.NoError(t, err, clues.ToCore(err))
+    // secondary backup
+    secondaryBackupOp, err := suite.dpnd.repo.NewBackupWithLookup(ctx, sel.Selector, ins)
+    require.NoError(t, err, clues.ToCore(err))
+    suite.secondaryBackupOp = secondaryBackupOp
+    err = suite.secondaryBackupOp.Run(ctx)
+    require.NoError(t, err, clues.ToCore(err))
 }
 func (suite *BackupDeleteSharePointE2ESuite) TestSharePointBackupDeleteCmd() {
@@ -136,7 +146,10 @@ func (suite *BackupDeleteSharePointE2ESuite) TestSharePointBackupDeleteCmd() {
     cmd := cliTD.StubRootCmd(
         "backup", "delete", "sharepoint",
         "--config-file", suite.dpnd.configFilePath,
-        "--"+flags.BackupFN, string(suite.backupOp.Results.BackupID))
+        "--"+flags.BackupIDsFN,
+        fmt.Sprintf("%s,%s",
+            string(suite.backupOp.Results.BackupID),
+            string(suite.secondaryBackupOp.Results.BackupID)))
     cli.BuildCommandTree(cmd)
     cmd.SetErr(&suite.dpnd.recorder)
@@ -150,7 +163,9 @@ func (suite *BackupDeleteSharePointE2ESuite) TestSharePointBackupDeleteCmd() {
     assert.True(t,
         strings.HasSuffix(
             result,
-            fmt.Sprintf("Deleted SharePoint backup %s\n", string(suite.backupOp.Results.BackupID))))
+            fmt.Sprintf("Deleted SharePoint backup [%s %s]\n",
+                string(suite.backupOp.Results.BackupID),
+                string(suite.secondaryBackupOp.Results.BackupID))))
 }
 // moved out of the func above to make the linter happy
@@ -175,10 +190,28 @@ func (suite *BackupDeleteSharePointE2ESuite) TestSharePointBackupDeleteCmd_unkno
     cmd := cliTD.StubRootCmd(
         "backup", "delete", "sharepoint",
         "--config-file", suite.dpnd.configFilePath,
-        "--"+flags.BackupFN, uuid.NewString())
+        "--"+flags.BackupIDsFN, uuid.NewString())
     cli.BuildCommandTree(cmd)
     // unknown backupIDs should error since the modelStore can't find the backup
     err := cmd.ExecuteContext(ctx)
     require.Error(t, err, clues.ToCore(err))
 }
+func (suite *BackupDeleteSharePointE2ESuite) TestSharePointBackupDeleteCmd_NoBackupID() {
+    t := suite.T()
+    ctx, flush := tester.NewContext(t)
+    ctx = config.SetViper(ctx, suite.dpnd.vpr)
+    defer flush()
+    cmd := cliTD.StubRootCmd(
+        "backup", "delete", "groups",
+        "--config-file", suite.dpnd.configFilePath)
+    cli.BuildCommandTree(cmd)
+    // empty backupIDs should error since no data provided
+    err := cmd.ExecuteContext(ctx)
+    require.Error(t, err, clues.ToCore(err))
+}

View File

@@ -67,10 +67,7 @@ func runExport(
         return Only(ctx, err)
     }
-    r, _, _, _, err := utils.GetAccountAndConnectWithOverrides(
-        ctx,
-        cmd,
-        sel.PathService())
+    r, _, err := utils.GetAccountAndConnect(ctx, cmd, sel.PathService())
     if err != nil {
         return Only(ctx, err)
     }

View File

@@ -6,6 +6,7 @@ import (
 const (
     BackupFN = "backup"
+    BackupIDsFN = "backups"
     AWSAccessKeyFN = "aws-access-key"
     AWSSecretAccessKeyFN = "aws-secret-access-key"
     AWSSessionTokenFN = "aws-session-token"
@@ -18,6 +19,7 @@ const (
 var (
     BackupIDFV string
+    BackupIDsFV []string
     AWSAccessKeyFV string
     AWSSecretAccessKeyFV string
     AWSSessionTokenFV string
@@ -26,6 +28,18 @@ var (
     SucceedIfExistsFV bool
 )
+// AddMultipleBackupIDsFlag adds the --backups flag.
+func AddMultipleBackupIDsFlag(cmd *cobra.Command, require bool) {
+    cmd.Flags().StringSliceVar(
+        &BackupIDsFV,
+        BackupIDsFN, nil,
+        "',' separated IDs of the backup to retrieve")
+    if require {
+        cobra.CheckErr(cmd.MarkFlagRequired(BackupIDsFN))
+    }
+}
 // AddBackupIDFlag adds the --backup flag.
 func AddBackupIDFlag(cmd *cobra.Command, require bool) {
     cmd.Flags().StringVar(&BackupIDFV, BackupFN, "", "ID of the backup to retrieve.")

View File

@@ -86,7 +86,7 @@ func (suite *RestoreExchangeE2ESuite) SetupSuite() {
     )
     // init the repo first
-    r, err := repository.New(
+    suite.repo, err = repository.New(
         ctx,
         suite.acct,
         suite.st,
@@ -94,7 +94,7 @@ func (suite *RestoreExchangeE2ESuite) SetupSuite() {
         repository.NewRepoID)
     require.NoError(t, err, clues.ToCore(err))
-    err = r.Initialize(ctx, ctrlRepo.Retention{})
+    err = suite.repo.Initialize(ctx, ctrlRepo.Retention{})
     require.NoError(t, err, clues.ToCore(err))
     suite.backupOps = make(map[path.CategoryType]operations.BackupOperation)

View File

@@ -46,7 +46,7 @@ const (
 corso restore groups --backup 1234abcd-12ab-cd34-56de-1234abcd --file 98765abcdef
 # Restore the file with ID 98765abcdef without its associated permissions
-corso restore groups --backup 1234abcd-12ab-cd34-56de-1234abcd --file 98765abcdef --skip-permissions
+corso restore groups --backup 1234abcd-12ab-cd34-56de-1234abcd --file 98765abcdef --no-permissions
 # Restore all files named "FY2021 Planning.xlsx"
 corso restore groups --backup 1234abcd-12ab-cd34-56de-1234abcd --file "FY2021 Planning.xlsx"

View File

@@ -100,10 +100,7 @@ func runRestore(
         return Only(ctx, err)
     }
-    r, _, _, _, err := utils.GetAccountAndConnectWithOverrides(
-        ctx,
-        cmd,
-        sel.PathService())
+    r, _, err := utils.GetAccountAndConnect(ctx, cmd, sel.PathService())
     if err != nil {
         return Only(ctx, err)
     }

View File

@@ -22,30 +22,35 @@ import (
     "github.com/alcionai/corso/src/pkg/storage"
 )
+type RepoDetailsAndOpts struct {
+    Repo config.RepoDetails
+    Opts control.Options
+}
 var ErrNotYetImplemented = clues.New("not yet implemented")
-// GetAccountAndConnectWithOverrides is a wrapper for GetAccountAndConnect
-// that also gets the storage provider and any storage provider specific
+// GetAccountAndConnect is a wrapper for GetAccountAndConnectWithOverrides
+// that automatically gets the storage provider and any storage provider specific
 // flag overrides from the command line.
-func GetAccountAndConnectWithOverrides(
+func GetAccountAndConnect(
     ctx context.Context,
     cmd *cobra.Command,
     pst path.ServiceType,
-) (repository.Repositoryer, *storage.Storage, *account.Account, *control.Options, error) {
+) (repository.Repositoryer, RepoDetailsAndOpts, error) {
     provider, overrides, err := GetStorageProviderAndOverrides(ctx, cmd)
     if err != nil {
-        return nil, nil, nil, nil, clues.Stack(err)
+        return nil, RepoDetailsAndOpts{}, clues.Stack(err)
     }
-    return GetAccountAndConnect(ctx, pst, provider, overrides)
+    return GetAccountAndConnectWithOverrides(ctx, pst, provider, overrides)
 }
-func GetAccountAndConnect(
+func GetAccountAndConnectWithOverrides(
     ctx context.Context,
     pst path.ServiceType,
     provider storage.ProviderType,
     overrides map[string]string,
-) (repository.Repositoryer, *storage.Storage, *account.Account, *control.Options, error) {
+) (repository.Repositoryer, RepoDetailsAndOpts, error) {
     cfg, err := config.GetConfigRepoDetails(
         ctx,
         provider,
@@ -53,7 +58,7 @@ func GetAccountAndConnect(
         true,
         overrides)
     if err != nil {
-        return nil, nil, nil, nil, err
+        return nil, RepoDetailsAndOpts{}, err
     }
     repoID := cfg.RepoID
@@ -70,20 +75,25 @@ func GetAccountAndConnect(
         opts,
         repoID)
     if err != nil {
-        return nil, nil, nil, nil, clues.Wrap(err, "creating a repository controller")
+        return nil, RepoDetailsAndOpts{}, clues.Wrap(err, "creating a repository controller")
     }
     if err := r.Connect(ctx); err != nil {
-        return nil, nil, nil, nil, clues.Wrap(err, "connecting to the "+cfg.Storage.Provider.String()+" repository")
+        return nil, RepoDetailsAndOpts{}, clues.Wrap(err, "connecting to the "+cfg.Storage.Provider.String()+" repository")
     }
     // this initializes our graph api client configurations,
     // including control options such as concurency limitations.
     if _, err := r.ConnectToM365(ctx, pst); err != nil {
-        return nil, nil, nil, nil, clues.Wrap(err, "connecting to m365")
+        return nil, RepoDetailsAndOpts{}, clues.Wrap(err, "connecting to m365")
     }
-    return r, &cfg.Storage, &cfg.Account, &opts, nil
+    rdao := RepoDetailsAndOpts{
+        Repo: cfg,
+        Opts: opts,
+    }
+    return r, rdao, nil
 }
 func AccountConnectAndWriteRepoConfig(
@@ -91,22 +101,19 @@ func AccountConnectAndWriteRepoConfig(
     cmd *cobra.Command,
     pst path.ServiceType,
 ) (repository.Repositoryer, *account.Account, error) {
-    r, stg, acc, opts, err := GetAccountAndConnectWithOverrides(
-        ctx,
-        cmd,
-        pst)
+    r, rdao, err := GetAccountAndConnect(ctx, cmd, pst)
     if err != nil {
         logger.CtxErr(ctx, err).Info("getting and connecting account")
         return nil, nil, err
     }
-    sc, err := stg.StorageConfig()
+    sc, err := rdao.Repo.Storage.StorageConfig()
     if err != nil {
         logger.CtxErr(ctx, err).Info("getting storage configuration")
         return nil, nil, err
     }
-    m365Config, err := acc.M365Config()
+    m365Config, err := rdao.Repo.Account.M365Config()
     if err != nil {
         logger.CtxErr(ctx, err).Info("getting m365 configuration")
         return nil, nil, err
@@ -114,13 +121,13 @@ func AccountConnectAndWriteRepoConfig(
     // repo config gets set during repo connect and init.
     // This call confirms we have the correct values.
-    err = config.WriteRepoConfig(ctx, sc, m365Config, opts.Repo, r.GetID())
+    err = config.WriteRepoConfig(ctx, sc, m365Config, rdao.Opts.Repo, r.GetID())
     if err != nil {
         logger.CtxErr(ctx, err).Info("writing to repository configuration")
         return nil, nil, err
     }
-    return r, acc, nil
+    return r, &rdao.Repo.Account, nil
 }
 // CloseRepo handles closing a repo.

View File

@@ -31,11 +31,7 @@ func deleteBackups(
 ) ([]string, error) {
     ctx = clues.Add(ctx, "cutoff_days", deletionDays)
-    r, _, _, _, err := utils.GetAccountAndConnect(
-        ctx,
-        service,
-        storage.ProviderS3,
-        nil)
+    r, _, err := utils.GetAccountAndConnectWithOverrides(ctx, service, storage.ProviderS3, nil)
     if err != nil {
         return nil, clues.Wrap(err, "connecting to account").WithClues(ctx)
     }

View File

@@ -21,7 +21,7 @@ require (
     github.com/microsoftgraph/msgraph-sdk-go v1.19.0
     github.com/microsoftgraph/msgraph-sdk-go-core v1.0.0
     github.com/pkg/errors v0.9.1
-    github.com/puzpuzpuz/xsync/v2 v2.5.0
+    github.com/puzpuzpuz/xsync/v2 v2.5.1
     github.com/rudderlabs/analytics-go v3.3.3+incompatible
     github.com/spatialcurrent/go-lazy v0.0.0-20211115014721-47315cc003d1
     github.com/spf13/cast v1.5.1

View File

@@ -360,8 +360,8 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT
 github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
 github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI=
 github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY=
-github.com/puzpuzpuz/xsync/v2 v2.5.0 h1:2k4qrO/orvmEXZ3hmtHqIy9XaQtPTwzMZk1+iErpE8c=
-github.com/puzpuzpuz/xsync/v2 v2.5.0/go.mod h1:gD2H2krq/w52MfPLE+Uy64TzJDVY7lP2znR9qmR35kU=
+github.com/puzpuzpuz/xsync/v2 v2.5.1 h1:mVGYAvzDSu52+zaGyNjC+24Xw2bQi3kTr4QJ6N9pIIU=
+github.com/puzpuzpuz/xsync/v2 v2.5.1/go.mod h1:gD2H2krq/w52MfPLE+Uy64TzJDVY7lP2znR9qmR35kU=
 github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
 github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
 github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=

View File

@@ -95,6 +95,10 @@ var (
     // https://learn.microsoft.com/en-us/graph/errors#code-property
     ErrInvalidDelta = clues.New("invalid delta token")
+    // Not all systems support delta queries. This must be handled separately
+    // from invalid delta token cases.
+    ErrDeltaNotSupported = clues.New("delta not supported")
     // ErrItemAlreadyExistsConflict denotes that a post or put attempted to create
     // an item which already exists by some unique identifier. The identifier is
     // not always the id. For example, in onedrive, this error can be produced
@@ -122,8 +126,8 @@ var (
 )
 func IsErrApplicationThrottled(err error) bool {
-    return hasErrorCode(err, applicationThrottled) ||
-        errors.Is(err, ErrApplicationThrottled)
+    return errors.Is(err, ErrApplicationThrottled) ||
+        hasErrorCode(err, applicationThrottled)
 }
 func IsErrAuthenticationError(err error) bool {
@@ -151,9 +155,13 @@ func IsErrItemNotFound(err error) bool {
 }
 func IsErrInvalidDelta(err error) bool {
-    return hasErrorCode(err, syncStateNotFound, resyncRequired, syncStateInvalid) ||
-        hasErrorMessage(err, parameterDeltaTokenNotSupported) ||
-        errors.Is(err, ErrInvalidDelta)
+    return errors.Is(err, ErrInvalidDelta) ||
+        hasErrorCode(err, syncStateNotFound, resyncRequired, syncStateInvalid)
+}
+func IsErrDeltaNotSupported(err error) bool {
+    return errors.Is(err, ErrDeltaNotSupported) ||
+        hasErrorMessage(err, parameterDeltaTokenNotSupported)
 }
 func IsErrQuotaExceeded(err error) bool {
@@ -190,7 +198,8 @@ func IsErrCannotOpenFileAttachment(err error) bool {
 }
 func IsErrAccessDenied(err error) bool {
-    return hasErrorCode(err, ErrorAccessDenied) || clues.HasLabel(err, LabelStatus(http.StatusForbidden))
+    return hasErrorCode(err, ErrorAccessDenied) ||
+        clues.HasLabel(err, LabelStatus(http.StatusForbidden))
 }
 func IsErrTimeout(err error) bool {
@@ -218,8 +227,8 @@ func IsErrUnauthorized(err error) bool {
 }
 func IsErrItemAlreadyExistsConflict(err error) bool {
-    return hasErrorCode(err, nameAlreadyExists) ||
-        errors.Is(err, ErrItemAlreadyExistsConflict)
+    return errors.Is(err, ErrItemAlreadyExistsConflict) ||
+        hasErrorCode(err, nameAlreadyExists)
 }
 // LabelStatus transforms the provided statusCode into
@@ -298,7 +307,7 @@ func hasErrorMessage(err error, msgs ...errorMessage) bool {
         cs[i] = string(c)
     }
-    return filters.Contains(cs).Compare(msg)
+    return filters.In(cs).Compare(msg)
 }
 // Wrap is a helper function that extracts ODataError metadata from
// Wrap is a helper function that extracts ODataError metadata from // Wrap is a helper function that extracts ODataError metadata from

View File

@@ -247,11 +247,6 @@ func (suite *GraphErrorsUnitSuite) TestIsErrInvalidDelta() {
             err: odErr(string(syncStateInvalid)),
             expect: assert.True,
         },
-        {
-            name: "deltatoken not supported oDataErrMsg",
-            err: odErrMsg("fnords", string(parameterDeltaTokenNotSupported)),
-            expect: assert.True,
-        },
         // next two tests are to make sure the checks are case insensitive
         {
             name: "resync-required oDataErr camelcase",
@@ -271,6 +266,55 @@ func (suite *GraphErrorsUnitSuite) TestIsErrInvalidDelta() {
     }
 }
+func (suite *GraphErrorsUnitSuite) TestIsErrDeltaNotSupported() {
+    table := []struct {
+        name string
+        err error
+        expect assert.BoolAssertionFunc
+    }{
+        {
+            name: "nil",
+            err: nil,
+            expect: assert.False,
+        },
+        {
+            name: "non-matching",
+            err: assert.AnError,
+            expect: assert.False,
+        },
+        {
+            name: "as",
+            err: ErrDeltaNotSupported,
+            expect: assert.True,
+        },
+        {
+            name: "non-matching oDataErr",
+            err: odErr("fnords"),
+            expect: assert.False,
+        },
+        {
+            name: "non-matching oDataErrMsg",
+            err: odErrMsg("fnords", "deltatoken not supported"),
+            expect: assert.False,
+        },
+        {
+            name: "deltatoken not supported oDataErrMsg",
+            err: odErrMsg("fnords", string(parameterDeltaTokenNotSupported)),
+            expect: assert.True,
+        },
+        {
+            name: "deltatoken not supported oDataErrMsg with punctuation",
+            err: odErrMsg("fnords", string(parameterDeltaTokenNotSupported)+"."),
+            expect: assert.True,
+        },
+    }
+    for _, test := range table {
+        suite.Run(test.name, func() {
+            test.expect(suite.T(), IsErrDeltaNotSupported(test.err))
+        })
+    }
+}
 func (suite *GraphErrorsUnitSuite) TestIsErrQuotaExceeded() {
     table := []struct {
         name string

View File

@@ -105,7 +105,7 @@ func (hw httpWrapper) Request(
     // retry in the event of a `stream error`, which is not
     // a common expectation.
     for i := 0; i < hw.config.maxConnectionRetries+1; i++ {
-        ictx := clues.Add(ctx, "request_retry_iter", i)
+        ctx = clues.Add(ctx, "request_retry_iter", i)
         resp, err = hw.client.Do(req)
@@ -114,15 +114,15 @@ func (hw httpWrapper) Request(
         }
         if IsErrApplicationThrottled(err) {
-            return nil, Stack(ictx, clues.Stack(ErrApplicationThrottled, err))
+            return nil, Stack(ctx, clues.Stack(ErrApplicationThrottled, err))
         }
         var http2StreamErr http2.StreamError
         if !errors.As(err, &http2StreamErr) {
-            return nil, Stack(ictx, err)
+            return nil, Stack(ctx, err)
         }
-        logger.Ctx(ictx).Debug("http2 stream error")
+        logger.Ctx(ctx).Debug("http2 stream error")
         events.Inc(events.APICall, "streamerror")
         time.Sleep(3 * time.Second)
@@ -132,6 +132,8 @@ func (hw httpWrapper) Request(
         return nil, Stack(ctx, err)
     }
+    logResp(ctx, resp)
     return resp, nil
 }

View File

@@ -0,0 +1,72 @@
+package graph
+import (
+    "context"
+    "net/http"
+    "net/http/httputil"
+    "os"
+    "github.com/alcionai/corso/src/pkg/logger"
+)
+const (
+    // 1 MB
+    logMBLimit = 1 * 1024 * 1024
+    logGraphRequestsEnvKey = "LOG_GRAPH_REQUESTS"
+    log2xxGraphRequestsEnvKey = "LOG_2XX_GRAPH_REQUESTS"
+    log2xxGraphResponseEnvKey = "LOG_2XX_GRAPH_RESPONSES"
+)
+// special cases where we always dump the response body, since the response
+// details might be critical to understanding the response when debugging.
+func shouldLogRespBody(resp *http.Response) bool {
+    return logger.DebugAPIFV ||
+        os.Getenv(logGraphRequestsEnvKey) != "" ||
+        resp.StatusCode == http.StatusBadRequest ||
+        resp.StatusCode == http.StatusForbidden ||
+        resp.StatusCode == http.StatusConflict
+}
+func logResp(ctx context.Context, resp *http.Response) {
+    var (
+        log = logger.Ctx(ctx)
+        respClass = resp.StatusCode / 100
+        // special cases where we always dump the response body, since the response
+        // details might be critical to understanding the response when debugging.
+        logBody = shouldLogRespBody(resp)
+    )
+    // special case: always info-level status 429 logs
+    if resp.StatusCode == http.StatusTooManyRequests {
+        log.With("response", getRespDump(ctx, resp, logBody)).
+            Info("graph api throttling")
+        return
+    }
+    // Log api calls according to api debugging configurations.
+    switch respClass {
+    case 2:
+        if logBody {
+            // only dump the body if it's under a size limit. We don't want to copy gigs into memory for a log.
+            dump := getRespDump(ctx, resp, os.Getenv(log2xxGraphResponseEnvKey) != "" && resp.ContentLength < logMBLimit)
+            log.Infow("2xx graph api resp", "response", dump)
+        }
+    case 3:
+        log.With("redirect_location", LoggableURL(resp.Header.Get(locationHeader))).
+            With("response", getRespDump(ctx, resp, false)).
+            Info("graph api redirect: " + resp.Status)
+    default:
+        log.With("response", getRespDump(ctx, resp, logBody)).
+            Error("graph api error: " + resp.Status)
+    }
+}
+func getRespDump(ctx context.Context, resp *http.Response, getBody bool) string {
+    respDump, err := httputil.DumpResponse(resp, getBody)
+    if err != nil {
+        logger.CtxErr(ctx, err).Error("dumping http response")
+    }
+    return string(respDump)
+}

View File

@@ -4,8 +4,6 @@ import (
     "context"
     "io"
     "net/http"
-    "net/http/httputil"
-    "os"
     "strconv"
     "strings"
     "time"
@@ -115,9 +113,6 @@ func LoggableURL(url string) pii.SafeURL {
     }
 }
-// 1 MB
-const logMBLimit = 1 * 1048576
 func (mw *LoggingMiddleware) Intercept(
     pipeline khttp.Pipeline,
     middlewareIndex int,
@@ -138,56 +133,11 @@ func (mw *LoggingMiddleware) Intercept(
         "resp_status_code", resp.StatusCode,
         "resp_content_len", resp.ContentLength)
-    var (
-        log = logger.Ctx(ctx)
-        respClass = resp.StatusCode / 100
-        // special cases where we always dump the response body, since the response
-        // details might be critical to understanding the response when debugging.
-        logBody = logger.DebugAPIFV ||
-            os.Getenv(logGraphRequestsEnvKey) != "" ||
-            resp.StatusCode == http.StatusBadRequest ||
-            resp.StatusCode == http.StatusForbidden ||
-            resp.StatusCode == http.StatusConflict
-    )
-    // special case: always info-level status 429 logs
-    if resp.StatusCode == http.StatusTooManyRequests {
-        log.With("response", getRespDump(ctx, resp, logBody)).
-            Info("graph api throttling")
-        return resp, err
-    }
-    // Log api calls according to api debugging configurations.
-    switch respClass {
-    case 2:
-        if logBody {
-            // only dump the body if it's under a size limit. We don't want to copy gigs into memory for a log.
-            dump := getRespDump(ctx, resp, os.Getenv(log2xxGraphResponseEnvKey) != "" && resp.ContentLength < logMBLimit)
-            log.Infow("2xx graph api resp", "response", dump)
-        }
-    case 3:
-        log.With("redirect_location", LoggableURL(resp.Header.Get(locationHeader))).
-            With("response", getRespDump(ctx, resp, false)).
-            Info("graph api redirect: " + resp.Status)
-    default:
-        log.With("response", getRespDump(ctx, resp, logBody)).
-            Error("graph api error: " + resp.Status)
-    }
+    logResp(ctx, resp)
     return resp, err
 }
-func getRespDump(ctx context.Context, resp *http.Response, getBody bool) string {
-    respDump, err := httputil.DumpResponse(resp, getBody)
-    if err != nil {
-        logger.CtxErr(ctx, err).Error("dumping http response")
-    }
-    return string(respDump)
-}
 // ---------------------------------------------------------------------------
 // Retry & Backoff
 // ---------------------------------------------------------------------------

View File

@ -23,18 +23,15 @@ import (
)

const (
-	logGraphRequestsEnvKey = "LOG_GRAPH_REQUESTS"
-	log2xxGraphRequestsEnvKey = "LOG_2XX_GRAPH_REQUESTS"
-	log2xxGraphResponseEnvKey = "LOG_2XX_GRAPH_RESPONSES"
	defaultMaxRetries = 3
	defaultDelay = 3 * time.Second
	locationHeader = "Location"
	rateLimitHeader = "RateLimit-Limit"
	rateRemainingHeader = "RateLimit-Remaining"
	rateResetHeader = "RateLimit-Reset"
	retryAfterHeader = "Retry-After"
	retryAttemptHeader = "Retry-Attempt"
	defaultHTTPClientTimeout = 1 * time.Hour
)

type QueryParams struct {

View File

@ -7,10 +7,11 @@ import (
	"github.com/microsoftgraph/msgraph-sdk-go/models"
	"github.com/alcionai/corso/src/internal/m365/graph"
+	"github.com/alcionai/corso/src/pkg/services/m365/api"
)

type getSiteRooter interface {
-	GetRoot(ctx context.Context) (models.Siteable, error)
+	GetRoot(ctx context.Context, cc api.CallConfig) (models.Siteable, error)
}

func IsServiceEnabled(
@ -18,7 +19,7 @@ func IsServiceEnabled(
	gsr getSiteRooter,
	resource string,
) (bool, error) {
-	_, err := gsr.GetRoot(ctx)
+	_, err := gsr.GetRoot(ctx, api.CallConfig{})
	if err != nil {
		if clues.HasLabel(err, graph.LabelsNoSharePointLicense) {
			return false, nil

View File

@ -12,6 +12,7 @@ import (
	"github.com/alcionai/corso/src/internal/m365/graph"
	"github.com/alcionai/corso/src/internal/tester"
+	"github.com/alcionai/corso/src/pkg/services/m365/api"
)

type EnabledUnitSuite struct {
@ -29,7 +30,10 @@ type mockGSR struct {
	err error
}

-func (m mockGSR) GetRoot(context.Context) (models.Siteable, error) {
+func (m mockGSR) GetRoot(
+	context.Context,
+	api.CallConfig,
+) (models.Siteable, error) {
	return m.response, m.err
}

View File

@ -138,6 +138,11 @@ func deltaEnumerateItems[T any](
	// Loop through all pages returned by Graph API.
	for len(nextLink) > 0 {
		page, err := pager.GetPage(graph.ConsumeNTokens(ctx, graph.SingleGetOrDeltaLC))
+		if graph.IsErrDeltaNotSupported(err) {
+			logger.Ctx(ctx).Infow("delta queries not supported")
+			return nil, DeltaUpdate{}, clues.Stack(graph.ErrDeltaNotSupported, err)
+		}
		if graph.IsErrInvalidDelta(err) {
			logger.Ctx(ctx).Infow("invalid previous delta", "delta_link", prevDeltaLink)
@ -191,7 +196,7 @@ func getAddedAndRemovedItemIDs[T any](
) (map[string]time.Time, bool, []string, DeltaUpdate, error) {
	if canMakeDeltaQueries {
		ts, du, err := deltaEnumerateItems[T](ctx, deltaPager, prevDeltaLink)
-		if err != nil && (!graph.IsErrInvalidDelta(err) || len(prevDeltaLink) == 0) {
+		if err != nil && !graph.IsErrInvalidDelta(err) && !graph.IsErrDeltaNotSupported(err) {
			return nil, false, nil, DeltaUpdate{}, graph.Stack(ctx, err)
		}
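The net effect is a plain sentinel-error fallback. The standalone sketch below (all names are stand-ins, not the corso API) shows the same shape: when the delta pass reports that delta queries aren't supported, the caller retries with a full, non-delta enumeration instead of failing the backup.

package main

import (
	"errors"
	"fmt"
)

// Stand-ins for graph.ErrDeltaNotSupported and the two enumeration paths in
// this file; every name here is illustrative.
var errDeltaNotSupported = errors.New("delta queries not supported")

func deltaEnumerate() ([]string, error) {
	// e.g. a Teams channel with no messages can't produce a delta token.
	return nil, errDeltaNotSupported
}

func fullEnumerate() ([]string, error) {
	return []string{"item-1", "item-2"}, nil
}

func main() {
	items, err := deltaEnumerate()
	if errors.Is(err, errDeltaNotSupported) {
		// Mirrors the new branch above: instead of failing the backup, drop
		// back to a non-delta enumeration of the container.
		items, err = fullEnumerate()
	}

	fmt.Println(items, err)
}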
@ -284,7 +289,15 @@ func addedAndRemovedByDeletedDateTime[T any](
		var modTime time.Time

		if mt, ok := giaddt.(getModTimer); ok {
-			modTime = ptr.Val(mt.GetLastModifiedDateTime())
+			// Make sure to get a non-zero mod time if the item doesn't have one for
+			// some reason. Otherwise we can hit an issue where kopia has a
+			// different mod time for the file than the details does. This occurs
+			// due to a conversion kopia does on the time from
+			// time.Time -> nanoseconds for serialization. During incremental
+			// backups, kopia goes from nanoseconds -> time.Time but there's an
+			// overflow which yields a different timestamp.
+			// https://github.com/gohugoio/hugo/issues/6161#issuecomment-725915786
+			modTime = ptr.OrNow(mt.GetLastModifiedDateTime())
		}

		added[ptr.Val(giaddt.GetId())] = modTime
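A zero mod time is what triggers the mismatch described above, since a time.Time -> nanoseconds -> time.Time round trip can't reproduce it. The standalone sketch below uses a local orNow helper as a stand-in for ptr.OrNow to show the guard and the round-trip problem it avoids.

package main

import (
	"fmt"
	"time"
)

// orNow is a stand-in for the ptr.OrNow helper used above: dereference the
// pointer when it's set, otherwise fall back to the current time so a zero
// timestamp never gets recorded.
func orNow(t *time.Time) time.Time {
	if t != nil {
		return *t
	}

	return time.Now()
}

func main() {
	// The zero time.Time sits outside the int64 nanosecond range, so a
	// nanosecond round trip (like the one kopia performs when serializing)
	// does not reproduce it.
	var zero time.Time
	roundTripped := time.Unix(0, zero.UnixNano())
	fmt.Println(zero.Equal(roundTripped)) // false

	var missing *time.Time
	fmt.Println(orNow(missing)) // substitutes the current time instead
}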

View File

@ -35,11 +35,16 @@ type Sites struct {
// api calls
// ---------------------------------------------------------------------------

-func (c Sites) GetRoot(ctx context.Context) (models.Siteable, error) {
+func (c Sites) GetRoot(
+	ctx context.Context,
+	cc CallConfig,
+) (models.Siteable, error) {
	options := &sites.SiteItemRequestBuilderGetRequestConfiguration{
-		QueryParameters: &sites.SiteItemRequestBuilderGetQueryParameters{
-			Expand: []string{"drive"},
-		},
+		QueryParameters: &sites.SiteItemRequestBuilderGetQueryParameters{},
	}
+
+	if len(cc.Expand) > 0 {
+		options.QueryParameters.Expand = cc.Expand
+	}

	resp, err := c.Stable.
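Expansion is now driven by the caller through CallConfig. A minimal sketch of the new call shape, assuming it sits in this api package alongside Sites, CallConfig, and ptr (the rootSiteID name is made up):

// Sketch only; Sites, CallConfig, and ptr.Val come from the surrounding diff.
func rootSiteID(ctx context.Context, sites Sites) (string, error) {
	// Expansion is opt-in; pass an empty CallConfig to skip the drive expand.
	root, err := sites.GetRoot(ctx, CallConfig{Expand: []string{"drive"}})
	if err != nil {
		return "", err
	}

	return ptr.Val(root.GetId()), nil
}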

View File

@ -256,7 +256,7 @@ func (suite *SitesIntgSuite) TestGetRoot() {
	ctx, flush := tester.NewContext(t)
	defer flush()

-	result, err := suite.its.ac.Sites().GetRoot(ctx)
+	result, err := suite.its.ac.Sites().GetRoot(ctx, api.CallConfig{Expand: []string{"drive"}})
	require.NoError(t, err)
	require.NotNil(t, result, "must find the root site")
	require.NotEmpty(t, ptr.Val(result.GetId()), "must have an id")

View File

@ -114,22 +114,26 @@ func ParseSite(item models.Siteable) *Site {
	if item.GetDrive() != nil &&
		item.GetDrive().GetOwner() != nil &&
-		item.GetDrive().GetOwner().GetUser() != nil {
-		s.OwnerType = SiteOwnerUser
-		s.OwnerID = ptr.Val(item.GetDrive().GetOwner().GetUser().GetId())
-	}
-
-	if _, ok := item.GetAdditionalData()["group"]; ok {
-		s.OwnerType = SiteOwnerGroup
-
-		group, err := tform.AnyValueToT[map[string]any]("group", item.GetAdditionalData())
-		if err != nil {
-			return s
-		}
-
-		s.OwnerID, err = str.AnyValueToString("id", group)
-		if err != nil {
-			return s
-		}
-	}
+		item.GetDrive().GetOwner().GetUser() != nil &&
+		// some users might come back with a nil ID
+		// most likely in the case of deleted users
+		item.GetDrive().GetOwner().GetUser().GetId() != nil {
+		s.OwnerType = SiteOwnerUser
+		s.OwnerID = ptr.Val(item.GetDrive().GetOwner().GetUser().GetId())
+	} else if item.GetDrive() != nil && item.GetDrive().GetOwner() != nil {
+		ownerItem := item.GetDrive().GetOwner()
+		if _, ok := ownerItem.GetAdditionalData()["group"]; ok {
+			s.OwnerType = SiteOwnerGroup
+
+			group, err := tform.AnyValueToT[map[string]any]("group", ownerItem.GetAdditionalData())
+			if err != nil {
+				return s
+			}
+
+			s.OwnerID, err = str.AnyValueToString("id", group)
+			if err != nil {
+				return s
+			}
+		}
+	}
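When the drive owner isn't a user with a usable ID, the owner's additional data is probed for a group entry. The standalone sketch below mirrors that lookup with plain type assertions and made-up values; the real code goes through tform.AnyValueToT and str.AnyValueToString.

package main

import "fmt"

func main() {
	// Hypothetical shape of the owner's additional data for a group-owned
	// drive; only the "group" and "id" keys come from the change above, the
	// values are invented.
	additionalData := map[string]any{
		"group": map[string]any{
			"id": "11111111-2222-3333-4444-555555555555",
		},
	}

	ownerType, ownerID := "unknown", ""

	if raw, ok := additionalData["group"]; ok {
		ownerType = "group"

		// Mirrors tform.AnyValueToT[map[string]any]("group", ...) followed by
		// str.AnyValueToString("id", group) in the real code.
		if group, ok := raw.(map[string]any); ok {
			if id, ok := group["id"].(string); ok {
				ownerID = id
			}
		}
	}

	fmt.Println(ownerType, ownerID)
}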

View File

@ -61,6 +61,34 @@ func (suite *siteIntegrationSuite) TestSites() {
	}
}
func (suite *siteIntegrationSuite) TestSites_GetByID() {
t := suite.T()
ctx, flush := tester.NewContext(t)
defer flush()
acct := tconfig.NewM365Account(t)
sites, err := Sites(ctx, acct, fault.New(true))
assert.NoError(t, err, clues.ToCore(err))
assert.NotEmpty(t, sites)
for _, s := range sites {
suite.Run("site_"+s.ID, func() {
t := suite.T()
site, err := SiteByID(ctx, acct, s.ID)
assert.NoError(t, err, clues.ToCore(err))
assert.NotEmpty(t, site.WebURL)
assert.NotEmpty(t, site.ID)
assert.NotEmpty(t, site.DisplayName)
if site.OwnerType != SiteOwnerUnknown {
assert.NotEmpty(t, site.OwnerID)
assert.NotEmpty(t, site.OwnerType)
}
})
}
}
func (suite *siteIntegrationSuite) TestSites_InvalidCredentials() {
	table := []struct {
		name string

View File

@ -24,7 +24,7 @@
    "prism-react-renderer": "^1.3.5",
    "react": "^17.0.2",
    "react-dom": "^17.0.2",
-   "sass": "^1.67.0",
+   "sass": "^1.68.0",
    "tiny-slider": "^2.9.4",
    "tw-elements": "^1.0.0-alpha13",
    "wow.js": "^1.2.2"
@ -12658,9 +12658,9 @@
      "license": "MIT"
    },
    "node_modules/sass": {
-     "version": "1.67.0",
-     "resolved": "https://registry.npmjs.org/sass/-/sass-1.67.0.tgz",
-     "integrity": "sha512-SVrO9ZeX/QQyEGtuZYCVxoeAL5vGlYjJ9p4i4HFuekWl8y/LtJ7tJc10Z+ck1c8xOuoBm2MYzcLfTAffD0pl/A==",
+     "version": "1.68.0",
+     "resolved": "https://registry.npmjs.org/sass/-/sass-1.68.0.tgz",
+     "integrity": "sha512-Lmj9lM/fef0nQswm1J2HJcEsBUba4wgNx2fea6yJHODREoMFnwRpZydBnX/RjyXw2REIwdkbqE4hrTo4qfDBUA==",
      "dependencies": {
        "chokidar": ">=3.0.0 <4.0.0",
        "immutable": "^4.0.0",
@ -23971,9 +23971,9 @@
      "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg=="
    },
    "sass": {
-     "version": "1.67.0",
-     "resolved": "https://registry.npmjs.org/sass/-/sass-1.67.0.tgz",
-     "integrity": "sha512-SVrO9ZeX/QQyEGtuZYCVxoeAL5vGlYjJ9p4i4HFuekWl8y/LtJ7tJc10Z+ck1c8xOuoBm2MYzcLfTAffD0pl/A==",
+     "version": "1.68.0",
+     "resolved": "https://registry.npmjs.org/sass/-/sass-1.68.0.tgz",
+     "integrity": "sha512-Lmj9lM/fef0nQswm1J2HJcEsBUba4wgNx2fea6yJHODREoMFnwRpZydBnX/RjyXw2REIwdkbqE4hrTo4qfDBUA==",
      "requires": {
        "chokidar": ">=3.0.0 <4.0.0",
        "immutable": "^4.0.0",

View File

@ -30,7 +30,7 @@
    "prism-react-renderer": "^1.3.5",
    "react": "^17.0.2",
    "react-dom": "^17.0.2",
-   "sass": "^1.67.0",
+   "sass": "^1.68.0",
    "tiny-slider": "^2.9.4",
    "tw-elements": "^1.0.0-alpha13",
    "wow.js": "^1.2.2"