diff --git a/CHANGELOG.md b/CHANGELOG.md index 5e4fca312..1628256ec 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,11 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] (beta) +### Added +- Skips graph calls for expired item download URLs. + +## [v0.14.0] (beta) - 2023-10-09 + ### Added - Enables local or network-attached storage for Corso repositories. - Reduce backup runtime for OneDrive and SharePoint incremental backups that have no file changes. @@ -14,17 +19,23 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Added `--backups` flag to delete multiple backups in `corso backup delete` command. - Backup now includes all sites that belong to a team, not just the root site. -## Fixed +### Fixed - Teams Channels that cannot support delta tokens (those without messages) fall back to non-delta enumeration and no longer fail a backup. +### Known issues +- Restoring data to a different Group than the one it was backed up from is not currently supported. + +### Other +- Groups and Teams service support is still in feature preview. + ## [v0.13.0] (beta) - 2023-09-18 ### Added - Groups and Teams service support available as a feature preview! Channel messages and Files are now available for backup and restore in the CLI: `corso backup create groups --group '*'` - * The cli commands for "groups" and "teams" can be used interchangably, and will operate on the same backup data. - * New permissions are required to backup Channel messages. See the [Corso Documentation](https://corsobackup.io/docs/setup/m365-access/#configure-required-permissions) for complete details. + - The CLI commands for "groups" and "teams" can be used interchangeably, and will operate on the same backup data. + - New permissions are required to back up Channel messages. See the [Corso Documentation](https://corsobackup.io/docs/setup/m365-access/#configure-required-permissions) for complete details. Even though Channel message restoration is not available, message write permissions are included to cover future integration. - * This is a feature preview, and may be subject to breaking changes based on feedback and testing.
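The "Skips graph calls for expired item download URLs" entry above is backed by two helpers added later in this diff: `common.GetQueryParamFromURL` and `jwt.IsJWTExpired`. Below is a minimal sketch of how they might compose, assuming the cached download URL carries its auth token in a `tempauth` query parameter (the parameter name used in the new tests); the function name, its package placement, and the refresh-on-any-error policy are hypothetical, not part of this patch:

```go
package drive // hypothetical call site; the real one is not shown in this hunk

import (
	"github.com/alcionai/corso/src/internal/common"
	"github.com/alcionai/corso/src/internal/common/jwt"
)

// canReuseDownloadURL reports whether a cached item download URL is still
// usable, letting the caller skip the graph call that refreshes it.
// GetQueryParamFromURL and IsJWTExpired are the helpers added in this diff;
// "tempauth" is the parameter name taken from their tests.
func canReuseDownloadURL(cachedURL string) bool {
	token, err := common.GetQueryParamFromURL(cachedURL, "tempauth")
	if err != nil {
		return false // no auth token in the URL; fetch a fresh URL
	}

	expired, err := jwt.IsJWTExpired(token)
	if err != nil {
		return false // malformed token; treat the cached URL as unusable
	}

	return !expired
}
```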
### Changed - Switched to Go 1.21 @@ -379,7 +390,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Miscellaneous - Optional usage statistics reporting ([RM-35](https://github.com/alcionai/corso-roadmap/issues/35)) -[Unreleased]: https://github.com/alcionai/corso/compare/v0.11.1...HEAD +[Unreleased]: https://github.com/alcionai/corso/compare/v0.14.0...HEAD +[v0.14.0]: https://github.com/alcionai/corso/compare/v0.13.0...v0.14.0 +[v0.13.0]: https://github.com/alcionai/corso/compare/v0.12.0...v0.13.0 +[v0.12.0]: https://github.com/alcionai/corso/compare/v0.11.1...v0.12.0 [v0.11.1]: https://github.com/alcionai/corso/compare/v0.11.0...v0.11.1 [v0.11.0]: https://github.com/alcionai/corso/compare/v0.10.0...v0.11.0 [v0.10.0]: https://github.com/alcionai/corso/compare/v0.9.0...v0.10.0 diff --git a/src/cli/backup/backup.go b/src/cli/backup/backup.go index 5d885e059..2d3db4597 100644 --- a/src/cli/backup/backup.go +++ b/src/cli/backup/backup.go @@ -317,6 +317,7 @@ func genericListCommand( b.Print(ctx) fe.PrintItems( ctx, + !ifShow(flags.ListAlertsFV), !ifShow(flags.ListFailedItemsFV), !ifShow(flags.ListSkippedItemsFV), !ifShow(flags.ListRecoveredErrorsFV)) diff --git a/src/cli/cli.go b/src/cli/cli.go index 8fb768c11..9b6eae05c 100644 --- a/src/cli/cli.go +++ b/src/cli/cli.go @@ -17,7 +17,6 @@ import ( "github.com/alcionai/corso/src/cli/print" "github.com/alcionai/corso/src/cli/repo" "github.com/alcionai/corso/src/cli/restore" - "github.com/alcionai/corso/src/cli/utils" "github.com/alcionai/corso/src/internal/observe" "github.com/alcionai/corso/src/internal/version" "github.com/alcionai/corso/src/pkg/logger" @@ -61,43 +60,6 @@ func preRun(cc *cobra.Command, args []string) error { print.Infof(ctx, "Logging to file: %s", logger.ResolvedLogFile) } - avoidTheseDescription := []string{ - "Initialize a repository.", - "Initialize a S3 repository", - "Connect to a S3 repository", - "Initialize a repository on local or network storage.", - "Connect to a repository on local or network storage.", - "Help about any command", - "Free, Secure, Open-Source Backup for M365.", - "env var guide", - } - - if !slices.Contains(avoidTheseDescription, cc.Short) { - provider, overrides, err := utils.GetStorageProviderAndOverrides(ctx, cc) - if err != nil { - return err - } - - cfg, err := config.GetConfigRepoDetails( - ctx, - provider, - true, - false, - overrides) - if err != nil { - log.Error("Error while getting config info to run command: ", cc.Use) - return err - } - - utils.SendStartCorsoEvent( - ctx, - cfg.Storage, - cfg.Account.ID(), - map[string]any{"command": cc.CommandPath()}, - cfg.RepoID, - utils.Control()) - } - // handle deprecated user flag in Backup exchange command if cc.CommandPath() == "corso backup create exchange" { handleMailBoxFlag(ctx, cc, flagSl) diff --git a/src/cli/flags/backup_list.go b/src/cli/flags/backup_list.go index 495120dac..3bfb5833f 100644 --- a/src/cli/flags/backup_list.go +++ b/src/cli/flags/backup_list.go @@ -8,6 +8,7 @@ func AddAllBackupListFlags(cmd *cobra.Command) { AddFailedItemsFN(cmd) AddSkippedItemsFN(cmd) AddRecoveredErrorsFN(cmd) + AddAlertsFN(cmd) } func AddFailedItemsFN(cmd *cobra.Command) { @@ -27,3 +28,9 @@ func AddRecoveredErrorsFN(cmd *cobra.Command) { &ListRecoveredErrorsFV, RecoveredErrorsFN, Show, "Toggles showing or hiding the list of errors which Corso recovered from.") } + +func AddAlertsFN(cmd *cobra.Command) { + cmd.Flags().StringVar( + &ListAlertsFV, AlertsFN, Show, + "Toggles showing or hiding the list of alerts produced 
during the operation.") +} diff --git a/src/cli/flags/options.go b/src/cli/flags/options.go index ba127092c..841e13169 100644 --- a/src/cli/flags/options.go +++ b/src/cli/flags/options.go @@ -5,6 +5,7 @@ import ( ) const ( + AlertsFN = "alerts" DeltaPageSizeFN = "delta-page-size" DisableConcurrencyLimiterFN = "disable-concurrency-limiter" DisableDeltaFN = "disable-delta" @@ -31,6 +32,7 @@ var ( EnableImmutableIDFV bool FailFastFV bool FetchParallelismFV int + ListAlertsFV string ListFailedItemsFV string ListSkippedItemsFV string ListRecoveredErrorsFV string diff --git a/src/cli/flags/restore_config.go b/src/cli/flags/restore_config.go index 4a1868d01..36868aaa6 100644 --- a/src/cli/flags/restore_config.go +++ b/src/cli/flags/restore_config.go @@ -19,7 +19,7 @@ var ( ) // AddRestoreConfigFlags adds the restore config flag set. -func AddRestoreConfigFlags(cmd *cobra.Command) { +func AddRestoreConfigFlags(cmd *cobra.Command, canRestoreToAlternate bool) { fs := cmd.Flags() fs.StringVar( &CollisionsFV, CollisionsFN, string(control.Skip), @@ -28,7 +28,10 @@ func AddRestoreConfigFlags(cmd *cobra.Command) { fs.StringVar( &DestinationFV, DestinationFN, "", "Overrides the folder where items get restored; '/' places items into their original location") - fs.StringVar( - &ToResourceFV, ToResourceFN, "", - "Overrides the protected resource (mailbox, site, user, etc) where data gets restored") + + if canRestoreToAlternate { + fs.StringVar( + &ToResourceFV, ToResourceFN, "", + "Overrides the protected resource (mailbox, site, user, etc) where data gets restored") + } } diff --git a/src/cli/flags/testdata/backup_list.go b/src/cli/flags/testdata/backup_list.go index 82b08646f..c76091b11 100644 --- a/src/cli/flags/testdata/backup_list.go +++ b/src/cli/flags/testdata/backup_list.go @@ -11,6 +11,7 @@ import ( func PreparedBackupListFlags() []string { return []string{ + "--" + flags.AlertsFN, flags.Show, "--" + flags.FailedItemsFN, flags.Show, "--" + flags.SkippedItemsFN, flags.Show, "--" + flags.RecoveredErrorsFN, flags.Show, @@ -18,6 +19,7 @@ func PreparedBackupListFlags() []string { } func AssertBackupListFlags(t *testing.T, cmd *cobra.Command) { + assert.Equal(t, flags.Show, flags.ListAlertsFV) assert.Equal(t, flags.Show, flags.ListFailedItemsFV) assert.Equal(t, flags.Show, flags.ListSkippedItemsFV) assert.Equal(t, flags.Show, flags.ListRecoveredErrorsFV) diff --git a/src/cli/repo/filesystem.go b/src/cli/repo/filesystem.go index f6a495f21..7a012e503 100644 --- a/src/cli/repo/filesystem.go +++ b/src/cli/repo/filesystem.go @@ -87,15 +87,6 @@ func initFilesystemCmd(cmd *cobra.Command, args []string) error { // Retention is not supported for filesystem repos. 
retentionOpts := ctrlRepo.Retention{} - // SendStartCorsoEvent uses distict ID as tenant ID because repoID is still not generated - utils.SendStartCorsoEvent( - ctx, - cfg.Storage, - cfg.Account.ID(), - map[string]any{"command": "init repo"}, - cfg.Account.ID(), - opt) - storageCfg, err := cfg.Storage.ToFilesystemConfig() if err != nil { return Only(ctx, clues.Wrap(err, "Retrieving filesystem configuration")) diff --git a/src/cli/repo/s3.go b/src/cli/repo/s3.go index 3fb0833e6..000513671 100644 --- a/src/cli/repo/s3.go +++ b/src/cli/repo/s3.go @@ -102,15 +102,6 @@ func initS3Cmd(cmd *cobra.Command, args []string) error { return Only(ctx, err) } - // SendStartCorsoEvent uses distict ID as tenant ID because repoID is still not generated - utils.SendStartCorsoEvent( - ctx, - cfg.Storage, - cfg.Account.ID(), - map[string]any{"command": "init repo"}, - cfg.Account.ID(), - opt) - s3Cfg, err := cfg.Storage.ToS3Config() if err != nil { return Only(ctx, clues.Wrap(err, "Retrieving s3 configuration")) diff --git a/src/cli/restore/exchange.go b/src/cli/restore/exchange.go index b1115e5a9..a7ffdbb08 100644 --- a/src/cli/restore/exchange.go +++ b/src/cli/restore/exchange.go @@ -28,7 +28,7 @@ func addExchangeCommands(cmd *cobra.Command) *cobra.Command { flags.AddBackupIDFlag(c, true) flags.AddExchangeDetailsAndRestoreFlags(c) - flags.AddRestoreConfigFlags(c) + flags.AddRestoreConfigFlags(c, true) flags.AddFailFastFlag(c) } diff --git a/src/cli/restore/groups.go b/src/cli/restore/groups.go index 9e1f9cf5d..3d1f3df6e 100644 --- a/src/cli/restore/groups.go +++ b/src/cli/restore/groups.go @@ -30,7 +30,7 @@ func addGroupsCommands(cmd *cobra.Command) *cobra.Command { flags.AddNoPermissionsFlag(c) flags.AddSharePointDetailsAndRestoreFlags(c) // for sp restores flags.AddSiteIDFlag(c) - flags.AddRestoreConfigFlags(c) + flags.AddRestoreConfigFlags(c, false) flags.AddFailFastFlag(c) } diff --git a/src/cli/restore/groups_test.go b/src/cli/restore/groups_test.go index c6753170b..58af79e09 100644 --- a/src/cli/restore/groups_test.go +++ b/src/cli/restore/groups_test.go @@ -65,7 +65,7 @@ func (suite *GroupsUnitSuite) TestAddGroupsCommands() { "--" + flags.PageFolderFN, flagsTD.FlgInputs(flagsTD.PageFolderInput), "--" + flags.CollisionsFN, flagsTD.Collisions, "--" + flags.DestinationFN, flagsTD.Destination, - "--" + flags.ToResourceFN, flagsTD.ToResource, + // "--" + flags.ToResourceFN, flagsTD.ToResource, "--" + flags.NoPermissionsFN, }, flagsTD.PreparedProviderFlags(), @@ -91,7 +91,7 @@ func (suite *GroupsUnitSuite) TestAddGroupsCommands() { assert.Equal(t, flagsTD.FileModifiedBeforeInput, opts.FileModifiedBefore) assert.Equal(t, flagsTD.Collisions, opts.RestoreCfg.Collisions) assert.Equal(t, flagsTD.Destination, opts.RestoreCfg.Destination) - assert.Equal(t, flagsTD.ToResource, opts.RestoreCfg.ProtectedResource) + // assert.Equal(t, flagsTD.ToResource, opts.RestoreCfg.ProtectedResource) assert.True(t, flags.NoPermissionsFV) flagsTD.AssertProviderFlags(t, cmd) flagsTD.AssertStorageFlags(t, cmd) diff --git a/src/cli/restore/onedrive.go b/src/cli/restore/onedrive.go index 6efbd4831..8b44d3758 100644 --- a/src/cli/restore/onedrive.go +++ b/src/cli/restore/onedrive.go @@ -29,7 +29,7 @@ func addOneDriveCommands(cmd *cobra.Command) *cobra.Command { flags.AddBackupIDFlag(c, true) flags.AddOneDriveDetailsAndRestoreFlags(c) flags.AddNoPermissionsFlag(c) - flags.AddRestoreConfigFlags(c) + flags.AddRestoreConfigFlags(c, true) flags.AddFailFastFlag(c) } diff --git a/src/cli/restore/sharepoint.go b/src/cli/restore/sharepoint.go 
index 56459aa19..c79756e7a 100644 --- a/src/cli/restore/sharepoint.go +++ b/src/cli/restore/sharepoint.go @@ -29,7 +29,7 @@ func addSharePointCommands(cmd *cobra.Command) *cobra.Command { flags.AddBackupIDFlag(c, true) flags.AddSharePointDetailsAndRestoreFlags(c) flags.AddNoPermissionsFlag(c) - flags.AddRestoreConfigFlags(c) + flags.AddRestoreConfigFlags(c, true) flags.AddFailFastFlag(c) } diff --git a/src/cli/utils/utils.go b/src/cli/utils/utils.go index 2ee9ac090..c27d7d8c2 100644 --- a/src/cli/utils/utils.go +++ b/src/cli/utils/utils.go @@ -239,24 +239,6 @@ func splitFoldersIntoContainsAndPrefix(folders []string) ([]string, []string) { return containsFolders, prefixFolders } -// SendStartCorsoEvent utility sends corso start event at start of each action -func SendStartCorsoEvent( - ctx context.Context, - s storage.Storage, - tenID string, - data map[string]any, - repoID string, - opts control.Options, -) { - bus, err := events.NewBus(ctx, s, tenID, opts) - if err != nil { - logger.CtxErr(ctx, err).Info("sending start event") - } - - bus.SetRepoID(repoID) - bus.Event(ctx, events.CorsoStart, data) -} - // GetStorageProviderAndOverrides returns the storage provider type and // any flags specified on the command line which are storage provider specific. func GetStorageProviderAndOverrides( diff --git a/src/cmd/purge/scripts/onedrivePurge.ps1 b/src/cmd/purge/scripts/onedrivePurge.ps1 index e8f258b95..4204d5596 100644 --- a/src/cmd/purge/scripts/onedrivePurge.ps1 +++ b/src/cmd/purge/scripts/onedrivePurge.ps1 @@ -229,7 +229,7 @@ elseif (![string]::IsNullOrEmpty($Site)) { } } else { - Write-Host "User (for OneDrvie) or Site (for Sharpeoint) is required" + Write-Host "User (for OneDrive) or Site (for Sharepoint) is required" Exit } diff --git a/src/go.mod b/src/go.mod index 146e144c6..4a5ca18c3 100644 --- a/src/go.mod +++ b/src/go.mod @@ -10,15 +10,16 @@ require ( github.com/armon/go-metrics v0.4.1 github.com/aws/aws-xray-sdk-go v1.8.2 github.com/cenkalti/backoff/v4 v4.2.1 + github.com/golang-jwt/jwt/v5 v5.0.0 github.com/google/uuid v1.3.1 github.com/h2non/gock v1.2.0 github.com/kopia/kopia v0.13.0 - github.com/microsoft/kiota-abstractions-go v1.2.1 + github.com/microsoft/kiota-abstractions-go v1.2.3 github.com/microsoft/kiota-authentication-azure-go v1.0.0 github.com/microsoft/kiota-http-go v1.1.0 github.com/microsoft/kiota-serialization-form-go v1.0.0 github.com/microsoft/kiota-serialization-json-go v1.0.4 - github.com/microsoftgraph/msgraph-sdk-go v1.19.0 + github.com/microsoftgraph/msgraph-sdk-go v1.20.0 github.com/microsoftgraph/msgraph-sdk-go-core v1.0.0 github.com/pkg/errors v0.9.1 github.com/puzpuzpuz/xsync/v2 v2.5.1 @@ -27,7 +28,7 @@ require ( github.com/spf13/cast v1.5.1 github.com/spf13/cobra v1.7.0 github.com/spf13/pflag v1.0.5 - github.com/spf13/viper v1.16.0 + github.com/spf13/viper v1.17.0 github.com/stretchr/testify v1.8.4 github.com/tidwall/pretty v1.2.1 github.com/tomlazar/table v0.1.2 @@ -35,7 +36,7 @@ require ( go.uber.org/zap v1.26.0 golang.org/x/exp v0.0.0-20230905200255-921286631fa9 golang.org/x/time v0.3.0 - golang.org/x/tools v0.13.0 + golang.org/x/tools v0.14.0 gotest.tools/v3 v3.5.1 ) @@ -46,7 +47,6 @@ require ( github.com/aws/aws-sdk-go v1.45.0 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/gofrs/flock v0.8.1 // indirect - github.com/golang-jwt/jwt/v5 v5.0.0 // indirect github.com/google/go-cmp v0.5.9 // indirect github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 // indirect github.com/hashicorp/cronexpr v1.1.2 // indirect @@ -55,14 
+55,17 @@ require ( github.com/magiconair/properties v1.8.7 // indirect github.com/microsoft/kiota-serialization-multipart-go v1.0.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect - github.com/pelletier/go-toml/v2 v2.0.9 // indirect - github.com/spf13/afero v1.9.5 // indirect - github.com/spf13/jwalterweatherman v1.1.0 // indirect - github.com/subosito/gotenv v1.4.2 // indirect + github.com/pelletier/go-toml/v2 v2.1.0 // indirect + github.com/sagikazarmark/locafero v0.3.0 // indirect + github.com/sagikazarmark/slog-shim v0.1.0 // indirect + github.com/sourcegraph/conc v0.3.0 // indirect + github.com/spf13/afero v1.10.0 // indirect + github.com/std-uritemplate/std-uritemplate/go v0.0.42 // indirect + github.com/subosito/gotenv v1.6.0 // indirect github.com/valyala/bytebufferpool v1.0.0 // indirect github.com/valyala/fasthttp v1.48.0 // indirect - go.opentelemetry.io/otel/metric v1.18.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230807174057-1744710a1577 // indirect + go.opentelemetry.io/otel/metric v1.19.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 // indirect ) require ( @@ -74,7 +77,7 @@ require ( github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/chmduquesne/rollinghash v4.0.0+incompatible // indirect github.com/cjlapao/common-go v0.0.39 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dustin/go-humanize v1.0.1 github.com/edsrzf/mmap-go v1.1.0 // indirect github.com/go-logr/logr v1.2.4 // indirect @@ -84,7 +87,7 @@ require ( github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.16.7 // indirect + github.com/klauspost/compress v1.17.0 // indirect github.com/klauspost/cpuid/v2 v2.2.5 // indirect github.com/klauspost/pgzip v1.2.6 // indirect github.com/klauspost/reedsolomon v1.11.8 // indirect @@ -103,7 +106,7 @@ require ( github.com/natefinch/atomic v1.0.1 // indirect github.com/pierrec/lz4 v2.6.1+incompatible // indirect github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/client_golang v1.16.0 // indirect github.com/prometheus/client_model v0.4.0 // indirect github.com/prometheus/common v0.44.0 // indirect @@ -115,18 +118,17 @@ require ( github.com/tidwall/gjson v1.15.0 // indirect github.com/tidwall/match v1.1.1 // indirect github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c // indirect - github.com/yosida95/uritemplate/v3 v3.0.2 // indirect github.com/zeebo/blake3 v0.2.3 // indirect - go.opentelemetry.io/otel v1.18.0 // indirect - go.opentelemetry.io/otel/trace v1.18.0 // indirect + go.opentelemetry.io/otel v1.19.0 // indirect + go.opentelemetry.io/otel/trace v1.19.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.13.0 // indirect - golang.org/x/mod v0.12.0 // indirect - golang.org/x/net v0.15.0 - golang.org/x/sync v0.3.0 // indirect - golang.org/x/sys v0.12.0 // indirect + golang.org/x/crypto v0.14.0 // indirect + golang.org/x/mod v0.13.0 // indirect + golang.org/x/net v0.16.0 + golang.org/x/sync v0.4.0 // indirect + golang.org/x/sys v0.13.0 // indirect golang.org/x/text v0.13.0 // indirect - google.golang.org/grpc v1.57.0 // indirect + 
google.golang.org/grpc v1.58.2 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/src/go.sum b/src/go.sum index d381c9e69..48a0729b6 100644 --- a/src/go.sum +++ b/src/go.sum @@ -102,8 +102,9 @@ github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46t github.com/danieljoos/wincred v1.2.0 h1:ozqKHaLK0W/ii4KVbbvluM91W2H3Sh0BncbUNPS7jLE= github.com/danieljoos/wincred v1.2.0/go.mod h1:FzQLLMKBFdvu+osBrnFODiv32YGwCfx0SkRa/eYHgec= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= @@ -246,8 +247,8 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1 github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I= -github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/compress v1.17.0 h1:Rnbp4K9EjcDuVuHtd0dgA4qNuv9yKDYKK1ulpJwgrqM= +github.com/klauspost/compress v1.17.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c= github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg= @@ -287,8 +288,8 @@ github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zk github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d h1:5PJl274Y63IEHC+7izoQE9x6ikvDFZS2mDVS3drnohI= github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= -github.com/microsoft/kiota-abstractions-go v1.2.1 h1:TnLF7rjy1GfhuGK2ra/a3Vuz6piFXTR1OfdNoqesagA= -github.com/microsoft/kiota-abstractions-go v1.2.1/go.mod h1:rEeeaytcnal/If3f1tz6/spFz4V+Hiqvz3rxF+oWQFA= +github.com/microsoft/kiota-abstractions-go v1.2.3 h1:ir+p5o/0ytcLunikHSylhYyCm2Ojvoq3pXWSYomOACc= +github.com/microsoft/kiota-abstractions-go v1.2.3/go.mod h1:yPSuzNSOIVQSFFe1iT+3Lu5zmis22E8Wg+bkyjhd+pY= github.com/microsoft/kiota-authentication-azure-go v1.0.0 h1:29FNZZ/4nnCOwFcGWlB/sxPvWz487HA2bXH8jR5k2Rk= github.com/microsoft/kiota-authentication-azure-go v1.0.0/go.mod h1:rnx3PRlkGdXDcA/0lZQTbBwyYGmc+3POt7HpE/e4jGw= github.com/microsoft/kiota-http-go v1.1.0 h1:L5I93EiNtlP/X6YzeTlhjWt7Q1DxzC9CmWSVtX3b0tE= @@ -301,8 +302,8 @@ github.com/microsoft/kiota-serialization-multipart-go v1.0.0 
h1:3O5sb5Zj+moLBiJy github.com/microsoft/kiota-serialization-multipart-go v1.0.0/go.mod h1:yauLeBTpANk4L03XD985akNysG24SnRJGaveZf+p4so= github.com/microsoft/kiota-serialization-text-go v1.0.0 h1:XOaRhAXy+g8ZVpcq7x7a0jlETWnWrEum0RhmbYrTFnA= github.com/microsoft/kiota-serialization-text-go v1.0.0/go.mod h1:sM1/C6ecnQ7IquQOGUrUldaO5wj+9+v7G2W3sQ3fy6M= -github.com/microsoftgraph/msgraph-sdk-go v1.19.0 h1:hx+SvDTm5ENYZFqmMIskF7tOn48zzT2Xv3OVFrxl2dc= -github.com/microsoftgraph/msgraph-sdk-go v1.19.0/go.mod h1:3DArbqPS7riix0VsJhdtYsgPaAFAH9Jer64psW55riI= +github.com/microsoftgraph/msgraph-sdk-go v1.20.0 h1:Hi8URs+Ll07+GojbY9lyuYUMj8rxI4mcYW+GISO7BTA= +github.com/microsoftgraph/msgraph-sdk-go v1.20.0/go.mod h1:UTUjxLPExc1K+YLmFeyEyep6vYd1GOj2bLMSd7/lPWE= github.com/microsoftgraph/msgraph-sdk-go-core v1.0.0 h1:7NWTfyXvOjoizW7PmxNp3+8wCKPgpODs/D1cUZ3fkAY= github.com/microsoftgraph/msgraph-sdk-go-core v1.0.0/go.mod h1:tQb4q3YMIj2dWhhXhQSJ4ELpol931ANKzHSYK5kX1qE= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= @@ -327,8 +328,8 @@ github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32 h1:W6apQkHrMkS0Muv8G/TipAy github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32/go.mod h1:9wM+0iRr9ahx58uYLpLIr5fm8diHn0JbqRycJi6w0Ms= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pelletier/go-toml/v2 v2.0.9 h1:uH2qQXheeefCCkuBBSLi7jCiSmj3VRh2+Goq2N7Xxu0= -github.com/pelletier/go-toml/v2 v2.0.9/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= +github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= +github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM= github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= @@ -338,8 +339,9 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= @@ -373,26 +375,32 @@ github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/rudderlabs/analytics-go v3.3.3+incompatible h1:OG0XlKoXfr539e2t1dXtTB+Gr89uFW+OUNQBVhHIIBY= github.com/rudderlabs/analytics-go v3.3.3+incompatible/go.mod h1:LF8/ty9kUX4PTY3l5c97K3nZZaX5Hwsvt+NBaRL/f30= github.com/russross/blackfriday/v2 v2.1.0/go.mod 
h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sagikazarmark/locafero v0.3.0 h1:zT7VEGWC2DTflmccN/5T1etyKvxSxpHsjb9cJvm4SvQ= +github.com/sagikazarmark/locafero v0.3.0/go.mod h1:w+v7UsPNFwzF1cHuOajOOzoq4U7v/ig1mpRjqV+Bu1U= +github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= +github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= github.com/segmentio/backo-go v1.0.1 h1:68RQccglxZeyURy93ASB/2kc9QudzgIDexJ927N++y4= github.com/segmentio/backo-go v1.0.1/go.mod h1:9/Rh6yILuLysoQnZ2oNooD2g7aBnvM7r/fNVxRNWfBc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= +github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= github.com/spatialcurrent/go-lazy v0.0.0-20211115014721-47315cc003d1 h1:lQ3JvmcVO1/AMFbabvUSJ4YtJRpEAX9Qza73p5j03sw= github.com/spatialcurrent/go-lazy v0.0.0-20211115014721-47315cc003d1/go.mod h1:4aKqcbhASNqjbrG0h9BmkzcWvPJGxbef4B+j0XfFrZo= -github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM= -github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= +github.com/spf13/afero v1.10.0 h1:EaGW2JJh15aKOejeuJ+wpFSHnbd7GE6Wvp3TsNhb6LY= +github.com/spf13/afero v1.10.0/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= -github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= -github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.16.0 h1:rGGH0XDZhdUOryiDWjmIvUSWpbNqisK8Wk0Vyefw8hc= -github.com/spf13/viper v1.16.0/go.mod h1:yg78JgCJcbrQOvV9YLXgkLaZqUidkY9K+Dd1FofRzQg= +github.com/spf13/viper v1.17.0 h1:I5txKw7MJasPL/BrfkbA0Jyo/oELqVmux4pR/UxOMfI= +github.com/spf13/viper v1.17.0/go.mod h1:BmMMMLQXSbcHK6KAOiFLz0l5JHrU89OdIRHvsk0+yVI= +github.com/std-uritemplate/std-uritemplate/go v0.0.42 h1:rG+XlE4drkVWs2NLfGS15N+vg+CUcjXElQKvJ0fctlI= +github.com/std-uritemplate/std-uritemplate/go v0.0.42/go.mod h1:Qov4Ay4U83j37XjgxMYevGJFLbnZ2o9cEOhGufBKgKY= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= @@ -406,8 +414,8 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= 
-github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= -github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/tg123/go-htpasswd v1.2.1 h1:i4wfsX1KvvkyoMiHZzjS0VzbAPWfxzI8INcZAKtutoU= github.com/tg123/go-htpasswd v1.2.1/go.mod h1:erHp1B86KXdwQf1X5ZrLb7erXZnWueEQezb2dql4q58= github.com/tidwall/gjson v1.15.0 h1:5n/pM+v3r5ujuNl4YLZLsQ+UE5jlkLVm7jMzT5Mpolw= @@ -428,8 +436,6 @@ github.com/vbauerster/mpb/v8 v8.1.6 h1:EswHDkAsy4OQ7QBAmU1MUPz4vHzl6KlINjlh7vJox github.com/vbauerster/mpb/v8 v8.1.6/go.mod h1:O9/Wl8X9dUbR63tZ41MLIAxrtNfwlpwUhGkeYugUPW8= github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c h1:3lbZUMbMiGUW/LMkfsEABsc5zNT9+b1CvsJx47JzJ8g= github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c/go.mod h1:UrdRz5enIKZ63MEE3IF9l2/ebyx59GyGgPi+tICQdmM= -github.com/yosida95/uritemplate/v3 v3.0.2 h1:Ed3Oyj9yrmi9087+NczuL5BwkIc4wvTb5zIM+UJPGz4= -github.com/yosida95/uritemplate/v3 v3.0.2/go.mod h1:ILOh0sOhIJR3+L/8afwt/kE++YT040gmv5BQTMR2HP4= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -449,12 +455,12 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opentelemetry.io/otel v1.18.0 h1:TgVozPGZ01nHyDZxK5WGPFB9QexeTMXEH7+tIClWfzs= -go.opentelemetry.io/otel v1.18.0/go.mod h1:9lWqYO0Db579XzVuCKFNPDl4s73Voa+zEck3wHaAYQI= -go.opentelemetry.io/otel/metric v1.18.0 h1:JwVzw94UYmbx3ej++CwLUQZxEODDj/pOuTCvzhtRrSQ= -go.opentelemetry.io/otel/metric v1.18.0/go.mod h1:nNSpsVDjWGfb7chbRLUNW+PBNdcSTHD4Uu5pfFMOI0k= -go.opentelemetry.io/otel/trace v1.18.0 h1:NY+czwbHbmndxojTEKiSMHkG2ClNH2PwmcHrdo0JY10= -go.opentelemetry.io/otel/trace v1.18.0/go.mod h1:T2+SGJGuYZY3bjj5rgh/hN7KIrlpWC5nS8Mjvzckz+0= +go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs= +go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= +go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE= +go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= +go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg= +go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= @@ -470,8 +476,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= 
-golang.org/x/crypto v0.13.0 h1:mvySKfSWJ+UKUii46M40LOvyWfN0s2U+46/jDd0e6Ck= -golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -508,8 +514,8 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= -golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY= +golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -546,8 +552,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.15.0 h1:ugBLEUaxABaB5AJqW9enI0ACdci2RUd4eP51NTBvuJ8= -golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= +golang.org/x/net v0.16.0 h1:7eBu7KsSvFDtSXUIDbh3aqlK4DPsZ1rByC8PFfBThos= +golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -568,8 +574,8 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= -golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ= +golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys 
v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -619,8 +625,8 @@ golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o= -golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -688,8 +694,8 @@ golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ= -golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= +golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc= +golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -756,8 +762,8 @@ google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230807174057-1744710a1577 h1:wukfNtZmZUurLN/atp2hiIeTKn7QJWIQdHzqmsOnAOk= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230807174057-1744710a1577/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 h1:N3bU/SQDCDyD6R528GJ/PwW9KjYcJA3dgyH+MovAkIM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:KSqppvjFjtoCI+KGd4PELB0qLNxdJHRGqRI09mB6pQA= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -774,8 +780,8 @@ google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM google.golang.org/grpc v1.33.2/go.mod 
h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.57.0 h1:kfzNeI/klCGD2YPMUlaGNT3pxvYfga7smW3Vth8Zsiw= -google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo= +google.golang.org/grpc v1.58.2 h1:SXUpjxeVF3FKrTYQI4f4KvbGD5u2xccdYdurwowix5I= +google.golang.org/grpc v1.58.2/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/src/internal/common/jwt/jwt.go b/src/internal/common/jwt/jwt.go new file mode 100644 index 000000000..5d2aa6d2a --- /dev/null +++ b/src/internal/common/jwt/jwt.go @@ -0,0 +1,39 @@ +package jwt + +import ( + "time" + + "github.com/alcionai/clues" + jwt "github.com/golang-jwt/jwt/v5" +) + +// IsJWTExpired checks whether the JWT is past expiry by analyzing the +// "exp" claim present in the token. The token is considered expired if the +// "exp" claim is earlier than the current time. A missing "exp" claim is +// treated as non-expired. An error is returned if the supplied token is +// malformed. +func IsJWTExpired( + rawToken string, +) (bool, error) { + p := jwt.NewParser() + + // Note: the call to ParseUnverified is intentional since token verification + // is not our objective. We only care about the embedded claims in the token. + // We assume the token signature is valid and has been verified by the + // calling stack. + token, _, err := p.ParseUnverified(rawToken, &jwt.RegisteredClaims{}) + if err != nil { + return false, clues.Wrap(err, "invalid jwt") + } + + t, err := token.Claims.GetExpirationTime() + if err != nil { + return false, clues.Wrap(err, "getting token expiry time") + } + + if t == nil { + return false, nil + } + + expired := t.Before(time.Now()) + + return expired, nil +} diff --git a/src/internal/common/jwt/jwt_test.go b/src/internal/common/jwt/jwt_test.go new file mode 100644 index 000000000..1b7f334f0 --- /dev/null +++ b/src/internal/common/jwt/jwt_test.go @@ -0,0 +1,115 @@ +package jwt + +import ( + "testing" + "time" + + jwt "github.com/golang-jwt/jwt/v5" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + "github.com/alcionai/corso/src/internal/tester" +) + +type JWTUnitSuite struct { + tester.Suite +} + +func TestJWTUnitSuite(t *testing.T) { + suite.Run(t, &JWTUnitSuite{Suite: tester.NewUnitSuite(t)}) +} + +// createJWTToken creates a signed JWT embedding the supplied claims. +func createJWTToken( + claims jwt.RegisteredClaims, +) (string, error) { + // embed the supplied claims in an HS256-signed token + token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims) + + return token.SignedString([]byte("")) +} + +const ( + // Raw test token valid for 100 years. + rawToken = "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9."
+ + "eyJuYmYiOiIxNjkxODE5NTc5IiwiZXhwIjoiMzk0NTUyOTE3OSIsImVuZHBvaW50dXJsTGVuZ3RoIjoiMTYw" + + "IiwiaXNsb29wYmFjayI6IlRydWUiLCJ2ZXIiOiJoYXNoZWRwcm9vZnRva2VuIiwicm9sZXMiOiJhbGxmaWxl" + + "cy53cml0ZSBhbGxzaXRlcy5mdWxsY29udHJvbCBhbGxwcm9maWxlcy5yZWFkIiwidHQiOiIxIiwiYWxnIjoi" + + "SFMyNTYifQ" + + ".signature" +) + +func (suite *JWTUnitSuite) TestIsJWTExpired() { + table := []struct { + name string + expect bool + getToken func() (string, error) + expectErr assert.ErrorAssertionFunc + }{ + { + name: "alive token", + getToken: func() (string, error) { + return createJWTToken( + jwt.RegisteredClaims{ + ExpiresAt: jwt.NewNumericDate(time.Now().Add(time.Hour)), + }) + }, + expect: false, + expectErr: assert.NoError, + }, + { + name: "expired token", + getToken: func() (string, error) { + return createJWTToken( + jwt.RegisteredClaims{ + ExpiresAt: jwt.NewNumericDate(time.Now().Add(-time.Hour)), + }) + }, + expect: true, + expectErr: assert.NoError, + }, + // Test with a raw token which is not generated with go-jwt lib. + { + name: "alive raw token", + getToken: func() (string, error) { + return rawToken, nil + }, + expect: false, + expectErr: assert.NoError, + }, + { + name: "alive token, missing exp claim", + getToken: func() (string, error) { + return createJWTToken(jwt.RegisteredClaims{}) + }, + expect: false, + expectErr: assert.NoError, + }, + { + name: "malformed token", + getToken: func() (string, error) { + return "header.claims.signature", nil + }, + expect: false, + expectErr: assert.Error, + }, + } + + for _, test := range table { + suite.Run(test.name, func() { + t := suite.T() + + _, flush := tester.NewContext(t) + defer flush() + + token, err := test.getToken() + require.NoError(t, err) + + expired, err := IsJWTExpired(token) + test.expectErr(t, err) + + assert.Equal(t, test.expect, expired) + }) + } +} diff --git a/src/internal/common/prefixmatcher/mock/mock.go b/src/internal/common/prefixmatcher/mock/mock.go index ad4568114..4516f8665 100644 --- a/src/internal/common/prefixmatcher/mock/mock.go +++ b/src/internal/common/prefixmatcher/mock/mock.go @@ -27,7 +27,7 @@ func NewPrefixMap(m map[string]map[string]struct{}) *PrefixMap { func (pm PrefixMap) AssertEqual(t *testing.T, r prefixmatcher.StringSetReader) { if pm.Empty() { - require.True(t, r.Empty(), "both prefix maps are empty") + require.True(t, r.Empty(), "result prefixMap should be empty but contains keys: %+v", r.Keys()) return } diff --git a/src/internal/common/url.go b/src/internal/common/url.go new file mode 100644 index 000000000..b9946f84a --- /dev/null +++ b/src/internal/common/url.go @@ -0,0 +1,27 @@ +package common + +import ( + "net/url" + + "github.com/alcionai/clues" +) + +// GetQueryParamFromURL parses an URL and returns value of the specified +// query parameter. In case of multiple occurrences, first one is returned. 
+func GetQueryParamFromURL( + rawURL, queryParam string, +) (string, error) { + u, err := url.Parse(rawURL) + if err != nil { + return "", clues.Wrap(err, "parsing url") + } + + qp := u.Query() + + val := qp.Get(queryParam) + if len(val) == 0 { + return "", clues.New("query param not found").With("query_param", queryParam) + } + + return val, nil +} diff --git a/src/internal/common/url_test.go b/src/internal/common/url_test.go new file mode 100644 index 000000000..fa1d1cc20 --- /dev/null +++ b/src/internal/common/url_test.go @@ -0,0 +1,72 @@ +package common_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" + + "github.com/alcionai/corso/src/internal/common" + "github.com/alcionai/corso/src/internal/tester" +) + +type URLUnitSuite struct { + tester.Suite +} + +func TestURLUnitSuite(t *testing.T) { + suite.Run(t, &URLUnitSuite{Suite: tester.NewUnitSuite(t)}) +} + +func (suite *URLUnitSuite) TestGetQueryParamFromURL() { + qp := "tempauth" + table := []struct { + name string + rawURL string + queryParam string + expectedResult string + expect assert.ErrorAssertionFunc + }{ + { + name: "valid", + rawURL: "http://localhost:8080?" + qp + "=h.c.s&other=val", + queryParam: qp, + expectedResult: "h.c.s", + expect: assert.NoError, + }, + { + name: "query param not found", + rawURL: "http://localhost:8080?other=val", + queryParam: qp, + expect: assert.Error, + }, + { + name: "empty query param", + rawURL: "http://localhost:8080?" + qp + "=h.c.s&other=val", + queryParam: "", + expect: assert.Error, + }, + // In case of multiple occurrences, the first occurrence of param is returned. + { + name: "multiple occurrences", + rawURL: "http://localhost:8080?" + qp + "=h.c.s&other=val&" + qp + "=h1.c1.s1", + queryParam: qp, + expectedResult: "h.c.s", + expect: assert.NoError, + }, + } + + for _, test := range table { + suite.Run(test.name, func() { + t := suite.T() + + _, flush := tester.NewContext(t) + defer flush() + + token, err := common.GetQueryParamFromURL(test.rawURL, test.queryParam) + test.expect(t, err) + + assert.Equal(t, test.expectedResult, token) + }) + } +} diff --git a/src/internal/data/item.go b/src/internal/data/item.go index c6cb064e7..8bbcaca8f 100644 --- a/src/internal/data/item.go +++ b/src/internal/data/item.go @@ -16,23 +16,23 @@ import ( ) var ( - _ Item = &unindexedPrefetchedItem{} - _ ItemModTime = &unindexedPrefetchedItem{} - _ Item = &prefetchedItem{} - _ ItemInfo = &prefetchedItem{} _ ItemModTime = &prefetchedItem{} - _ Item = &unindexedLazyItem{} - _ ItemModTime = &unindexedLazyItem{} + _ Item = &prefetchedItemWithInfo{} + _ ItemInfo = &prefetchedItemWithInfo{} + _ ItemModTime = &prefetchedItemWithInfo{} _ Item = &lazyItem{} - _ ItemInfo = &lazyItem{} _ ItemModTime = &lazyItem{} + + _ Item = &lazyItemWithInfo{} + _ ItemInfo = &lazyItemWithInfo{} + _ ItemModTime = &lazyItemWithInfo{} ) func NewDeletedItem(itemID string) Item { - return &unindexedPrefetchedItem{ + return &prefetchedItem{ id: itemID, deleted: true, // TODO(ashmrtn): This really doesn't need to be set since deleted items are @@ -42,11 +42,11 @@ func NewDeletedItem(itemID string) Item { } } -func NewUnindexedPrefetchedItem( +func NewPrefetchedItem( reader io.ReadCloser, itemID string, modTime time.Time, -) (*unindexedPrefetchedItem, error) { +) (*prefetchedItem, error) { r, err := readers.NewVersionedBackupReader( readers.SerializationFormat{Version: readers.DefaultSerializationVersion}, reader) @@ -54,19 +54,18 @@ func NewUnindexedPrefetchedItem( return nil, 
clues.Stack(err) } - return &unindexedPrefetchedItem{ + return &prefetchedItem{ id: itemID, reader: r, modTime: modTime, }, nil } -// unindexedPrefetchedItem represents a single item retrieved from the remote -// service. +// prefetchedItem represents a single item retrieved from the remote service. // // This item doesn't implement ItemInfo so it's safe to use for items like // metadata that shouldn't appear in backup details. -type unindexedPrefetchedItem struct { +type prefetchedItem struct { id string reader io.ReadCloser // modTime is the modified time of the item. It should match the modTime in @@ -79,48 +78,49 @@ type unindexedPrefetchedItem struct { deleted bool } -func (i unindexedPrefetchedItem) ID() string { +func (i prefetchedItem) ID() string { return i.id } -func (i *unindexedPrefetchedItem) ToReader() io.ReadCloser { +func (i *prefetchedItem) ToReader() io.ReadCloser { return i.reader } -func (i unindexedPrefetchedItem) Deleted() bool { +func (i prefetchedItem) Deleted() bool { return i.deleted } -func (i unindexedPrefetchedItem) ModTime() time.Time { +func (i prefetchedItem) ModTime() time.Time { return i.modTime } -func NewPrefetchedItem( +func NewPrefetchedItemWithInfo( reader io.ReadCloser, itemID string, info details.ItemInfo, -) (*prefetchedItem, error) { - inner, err := NewUnindexedPrefetchedItem(reader, itemID, info.Modified()) +) (*prefetchedItemWithInfo, error) { + inner, err := NewPrefetchedItem(reader, itemID, info.Modified()) if err != nil { return nil, clues.Stack(err) } - return &prefetchedItem{ - unindexedPrefetchedItem: inner, - info: info, + return &prefetchedItemWithInfo{ + prefetchedItem: inner, + info: info, }, nil } -// prefetchedItem represents a single item retrieved from the remote service. +// prefetchedItemWithInfo represents a single item retrieved from the remote +// service. // // This item implements ItemInfo so it should be used for things that need to // appear in backup details. -type prefetchedItem struct { - *unindexedPrefetchedItem +type prefetchedItemWithInfo struct { + *prefetchedItem info details.ItemInfo } -func (i prefetchedItem) Info() (details.ItemInfo, error) { +func (i prefetchedItemWithInfo) Info() (details.ItemInfo, error) { return i.info, nil } @@ -131,14 +131,14 @@ type ItemDataGetter interface { ) (io.ReadCloser, *details.ItemInfo, bool, error) } -func NewUnindexedLazyItem( +func NewLazyItem( ctx context.Context, itemGetter ItemDataGetter, itemID string, modTime time.Time, errs *fault.Bus, -) *unindexedLazyItem { - return &unindexedLazyItem{ +) *lazyItem { + return &lazyItem{ ctx: ctx, id: itemID, itemGetter: itemGetter, @@ -147,13 +147,13 @@ func NewUnindexedLazyItem( } } -// unindexedLazyItem represents a single item retrieved from the remote service. -// It lazily fetches the item's data when the first call to ToReader().Read() is +// lazyItem represents a single item retrieved from the remote service. It +// lazily fetches the item's data when the first call to ToReader().Read() is // made. // // This item doesn't implement ItemInfo so it's safe to use for items like // metadata that shouldn't appear in backup details. -type unindexedLazyItem struct { +type lazyItem struct { ctx context.Context mu sync.Mutex id string @@ -165,19 +165,19 @@ type unindexedLazyItem struct { // struct so we can tell if it's been set already or not. // // This also helps with garbage collection because now the golang garbage - // collector can collect the lazyItem struct once the storage engine is done - // with it. 
The ItemInfo struct needs to stick around until the end of the - // backup though as backup details is written last. + // collector can collect the lazyItemWithInfo struct once the storage engine + // is done with it. The ItemInfo struct needs to stick around until the end of + // the backup though as backup details is written last. info *details.ItemInfo delInFlight bool } -func (i *unindexedLazyItem) ID() string { +func (i *lazyItem) ID() string { return i.id } -func (i *unindexedLazyItem) ToReader() io.ReadCloser { +func (i *lazyItem) ToReader() io.ReadCloser { return lazy.NewLazyReadCloser(func() (io.ReadCloser, error) { // Don't allow getting Item info while trying to initialize said info. // GetData could be a long running call, but in theory nothing should happen @@ -219,23 +219,23 @@ func (i *unindexedLazyItem) ToReader() io.ReadCloser { }) } -func (i *unindexedLazyItem) Deleted() bool { +func (i *lazyItem) Deleted() bool { return false } -func (i *unindexedLazyItem) ModTime() time.Time { +func (i *lazyItem) ModTime() time.Time { return i.modTime } -func NewLazyItem( +func NewLazyItemWithInfo( ctx context.Context, itemGetter ItemDataGetter, itemID string, modTime time.Time, errs *fault.Bus, -) *lazyItem { - return &lazyItem{ - unindexedLazyItem: NewUnindexedLazyItem( +) *lazyItemWithInfo { + return &lazyItemWithInfo{ + lazyItem: NewLazyItem( ctx, itemGetter, itemID, @@ -244,17 +244,17 @@ func NewLazyItem( } } -// lazyItem represents a single item retrieved from the remote service. It -// lazily fetches the item's data when the first call to ToReader().Read() is +// lazyItemWithInfo represents a single item retrieved from the remote service. +// It lazily fetches the item's data when the first call to ToReader().Read() is // made. // // This item implements ItemInfo so it should be used for things that need to // appear in backup details. 
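+// Note: Info() only becomes available once the lazy reader has materialized +// the item data; calling it before the first ToReader().Read() completes +// returns an error (see TestLazyItem_InfoBeforeReadErrors in item_test.go below).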
-type lazyItem struct { - *unindexedLazyItem +type lazyItemWithInfo struct { + *lazyItem } -func (i *lazyItem) Info() (details.ItemInfo, error) { +func (i *lazyItemWithInfo) Info() (details.ItemInfo, error) { i.mu.Lock() defer i.mu.Unlock() diff --git a/src/internal/data/item_test.go b/src/internal/data/item_test.go index f0c7e9009..16dc8b117 100644 --- a/src/internal/data/item_test.go +++ b/src/internal/data/item_test.go @@ -51,7 +51,7 @@ func TestItemUnitSuite(t *testing.T) { } func (suite *ItemUnitSuite) TestUnindexedPrefetchedItem() { - prefetch, err := data.NewUnindexedPrefetchedItem( + prefetch, err := data.NewPrefetchedItem( io.NopCloser(bytes.NewReader([]byte{})), "foo", time.Time{}) @@ -69,7 +69,7 @@ func (suite *ItemUnitSuite) TestUnindexedLazyItem() { ctx, flush := tester.NewContext(t) defer flush() - lazy := data.NewUnindexedLazyItem( + lazy := data.NewLazyItem( ctx, nil, "foo", @@ -148,7 +148,7 @@ func (suite *ItemUnitSuite) TestPrefetchedItem() { suite.Run(test.name, func() { t := suite.T() - item, err := data.NewPrefetchedItem(test.reader, id, test.info) + item, err := data.NewPrefetchedItemWithInfo(test.reader, id, test.info) require.NoError(t, err, clues.ToCore(err)) assert.Equal(t, id, item.ID(), "ID") @@ -291,7 +291,7 @@ func (suite *ItemUnitSuite) TestLazyItem() { defer test.mid.check(t, true) - item := data.NewLazyItem( + item := data.NewLazyItemWithInfo( ctx, test.mid, id, @@ -354,7 +354,7 @@ func (suite *ItemUnitSuite) TestLazyItem_DeletedInFlight() { mid := &mockItemDataGetter{delInFlight: true} defer mid.check(t, true) - item := data.NewLazyItem(ctx, mid, id, now, errs) + item := data.NewLazyItemWithInfo(ctx, mid, id, now, errs) assert.Equal(t, id, item.ID(), "ID") assert.False(t, item.Deleted(), "deleted") @@ -400,7 +400,7 @@ func (suite *ItemUnitSuite) TestLazyItem_InfoBeforeReadErrors() { mid := &mockItemDataGetter{} defer mid.check(t, false) - item := data.NewLazyItem(ctx, mid, id, now, errs) + item := data.NewLazyItemWithInfo(ctx, mid, id, now, errs) assert.Equal(t, id, item.ID(), "ID") assert.False(t, item.Deleted(), "deleted") diff --git a/src/internal/events/events.go b/src/internal/events/events.go index 63eaf6cb3..1aa2fc942 100644 --- a/src/internal/events/events.go +++ b/src/internal/events/events.go @@ -28,7 +28,6 @@ const ( tenantIDDeprecated = "m365_tenant_hash_deprecated" // Event Keys - CorsoStart = "Corso Start" RepoInit = "Repo Init" RepoConnect = "Repo Connect" BackupStart = "Backup Start" diff --git a/src/internal/kopia/wrapper_scale_test.go b/src/internal/kopia/wrapper_scale_test.go new file mode 100644 index 000000000..d980afaa0 --- /dev/null +++ b/src/internal/kopia/wrapper_scale_test.go @@ -0,0 +1,165 @@ +package kopia + +import ( + "context" + "fmt" + "testing" + + "github.com/alcionai/clues" + "github.com/kopia/kopia/repo/manifest" + "github.com/kopia/kopia/snapshot" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/alcionai/corso/src/internal/data" + exchMock "github.com/alcionai/corso/src/internal/m365/service/exchange/mock" + "github.com/alcionai/corso/src/internal/tester" + "github.com/alcionai/corso/src/pkg/backup/identity" + "github.com/alcionai/corso/src/pkg/fault" + "github.com/alcionai/corso/src/pkg/path" +) + +func BenchmarkHierarchyMerge(b *testing.B) { + ctx, flush := tester.NewContext(b) + defer flush() + + c, err := openKopiaRepo(b, ctx) + require.NoError(b, err, clues.ToCore(err)) + + w := &Wrapper{c} + + defer func() { + err := w.Close(ctx) + assert.NoError(b, err, 
clues.ToCore(err)) + }() + + var ( + cols []data.BackupCollection + collectionLimit = 1000 + collectionItemsLimit = 3 + itemData = []byte("abcdefghijklmnopqrstuvwxyz") + ) + + baseStorePath, err := path.Build( + "a-tenant", + "a-user", + path.ExchangeService, + path.EmailCategory, + false, + "Inbox") + require.NoError(b, err, clues.ToCore(err)) + + for i := 0; i < collectionLimit; i++ { + folderName := fmt.Sprintf("folder%d", i) + + storePath, err := baseStorePath.Append(false, folderName) + require.NoError(b, err, clues.ToCore(err)) + + col := exchMock.NewCollection( + storePath, + storePath, + collectionItemsLimit) + + for j := 0; j < collectionItemsLimit; j++ { + itemName := fmt.Sprintf("item%d", j) + col.Names[j] = itemName + col.Data[j] = itemData + } + + cols = append(cols, col) + } + + reasons := []identity.Reasoner{ + NewReason( + testTenant, + baseStorePath.ProtectedResource(), + baseStorePath.Service(), + baseStorePath.Category()), + } + + type testCase struct { + name string + baseBackups func(base ManifestEntry) BackupBases + collections []data.BackupCollection + } + + // Initial backup. All files should be considered new by kopia. + baseBackupCase := testCase{ + name: "Setup", + baseBackups: func(ManifestEntry) BackupBases { + return NewMockBackupBases() + }, + collections: cols, + } + + runAndTestBackup := func( + t tester.TestT, + ctx context.Context, + test testCase, + base ManifestEntry, + ) ManifestEntry { + bbs := test.baseBackups(base) + + stats, _, _, err := w.ConsumeBackupCollections( + ctx, + reasons, + bbs, + test.collections, + nil, + nil, + true, + fault.New(true)) + require.NoError(t, err, clues.ToCore(err)) + + assert.Equal(t, 0, stats.IgnoredErrorCount) + assert.Equal(t, 0, stats.ErrorCount) + assert.False(t, stats.Incomplete) + + snap, err := snapshot.LoadSnapshot( + ctx, + w.c, + manifest.ID(stats.SnapshotID)) + require.NoError(t, err, clues.ToCore(err)) + + return ManifestEntry{ + Manifest: snap, + Reasons: reasons, + } + } + + b.Logf("setting up base backup\n") + + base := runAndTestBackup(b, ctx, baseBackupCase, ManifestEntry{}) + + table := []testCase{ + { + name: "Merge All", + baseBackups: func(base ManifestEntry) BackupBases { + return NewMockBackupBases().WithMergeBases(base) + }, + collections: func() []data.BackupCollection { + p, err := baseStorePath.Dir() + require.NoError(b, err, clues.ToCore(err)) + + col := exchMock.NewCollection(p, p, 0) + col.ColState = data.NotMovedState + col.PrevPath = p + + return []data.BackupCollection{col} + }(), + }, + } + + b.ResetTimer() + + for _, test := range table { + b.Run(fmt.Sprintf("num_dirs_%d", collectionLimit), func(b *testing.B) { + ctx, flush := tester.NewContext(b) + defer flush() + + for i := 0; i < b.N; i++ { + runAndTestBackup(b, ctx, test, base) + } + }) + } +} diff --git a/src/internal/m365/collection/drive/collection.go b/src/internal/m365/collection/drive/collection.go index b963cf6a7..8a632fe0c 100644 --- a/src/internal/m365/collection/drive/collection.go +++ b/src/internal/m365/collection/drive/collection.go @@ -575,7 +575,7 @@ func (oc *Collection) streamDriveItem( // This ensures that downloads won't be attempted unless that consumer // attempts to read bytes. Assumption is that kopia will check things // like file modtimes before attempting to read. 
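+			// (Put differently: if kopia's incremental check decides the cached
+			// entry is still valid, ToReader() is never read and no download is
+			// requested. Illustrative note; the precise checks live in kopia.)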
- oc.data <- data.NewLazyItem( + oc.data <- data.NewLazyItemWithInfo( ctx, &lazyItemGetter{ info: &itemInfo, @@ -600,7 +600,7 @@ func (oc *Collection) streamDriveItem( return progReader, nil }) - storeItem, err := data.NewUnindexedPrefetchedItem( + storeItem, err := data.NewPrefetchedItem( metaReader, metaFileName+metaSuffix, // Metadata file should always use the latest time as diff --git a/src/internal/m365/collection/drive/collections.go b/src/internal/m365/collection/drive/collections.go index 17aee6217..cc94a118c 100644 --- a/src/internal/m365/collection/drive/collections.go +++ b/src/internal/m365/collection/drive/collections.go @@ -228,16 +228,16 @@ func (c *Collections) Get( ssmb *prefixmatcher.StringSetMatchBuilder, errs *fault.Bus, ) ([]data.BackupCollection, bool, error) { - prevDeltas, oldPathsByDriveID, canUsePreviousBackup, err := deserializeMetadata(ctx, prevMetadata) + prevDriveIDToDelta, oldPrevPathsByDriveID, canUsePrevBackup, err := deserializeMetadata(ctx, prevMetadata) if err != nil { return nil, false, err } - ctx = clues.Add(ctx, "can_use_previous_backup", canUsePreviousBackup) + ctx = clues.Add(ctx, "can_use_previous_backup", canUsePrevBackup) driveTombstones := map[string]struct{}{} - for driveID := range oldPathsByDriveID { + for driveID := range oldPrevPathsByDriveID { driveTombstones[driveID] = struct{}{} } @@ -255,76 +255,88 @@ func (c *Collections) Get( } var ( - // Drive ID -> delta URL for drive - deltaURLs = map[string]string{} - // Drive ID -> folder ID -> folder path - folderPaths = map[string]map[string]string{} - numPrevItems = 0 + driveIDToDeltaLink = map[string]string{} + driveIDToPrevPaths = map[string]map[string]string{} + numPrevItems = 0 ) for _, d := range drives { var ( - driveID = ptr.Val(d.GetId()) - driveName = ptr.Val(d.GetName()) - prevDelta = prevDeltas[driveID] - oldPaths = oldPathsByDriveID[driveID] - numOldDelta = 0 - ictx = clues.Add(ctx, "drive_id", driveID, "drive_name", driveName) + driveID = ptr.Val(d.GetId()) + driveName = ptr.Val(d.GetName()) + ictx = clues.Add( + ctx, + "drive_id", driveID, + "drive_name", clues.Hide(driveName)) + + excludedItemIDs = map[string]struct{}{} + oldPrevPaths = oldPrevPathsByDriveID[driveID] + prevDeltaLink = prevDriveIDToDelta[driveID] + + // itemCollection is used to identify which collection a + // file belongs to. This is useful to delete a file from the + // collection it was previously in, in case it was moved to a + // different collection within the same delta query + // item ID -> item ID + itemCollection = map[string]string{} ) delete(driveTombstones, driveID) + if _, ok := driveIDToPrevPaths[driveID]; !ok { + driveIDToPrevPaths[driveID] = map[string]string{} + } + if _, ok := c.CollectionMap[driveID]; !ok { c.CollectionMap[driveID] = map[string]*Collection{} } - if len(prevDelta) > 0 { - numOldDelta++ - } - logger.Ctx(ictx).Infow( "previous metadata for drive", - "num_paths_entries", len(oldPaths), - "num_deltas_entries", numOldDelta) + "num_paths_entries", len(oldPrevPaths)) - delta, paths, excluded, err := collectItems( + items, du, err := c.handler.EnumerateDriveItemsDelta( ictx, - c.handler.NewItemPager(driveID, "", api.DriveItemSelectDefault()), driveID, - driveName, - c.UpdateCollections, - oldPaths, - prevDelta, - errs) + prevDeltaLink) if err != nil { return nil, false, err } - // Used for logging below. - numDeltas := 0 - // It's alright to have an empty folders map (i.e. no folders found) but not // an empty delta token. 
This is because when deserializing the metadata we // remove entries for which there is no corresponding delta token/folder. If // we leave empty delta tokens then we may end up setting the State field // for collections when not actually getting delta results. - if len(delta.URL) > 0 { - deltaURLs[driveID] = delta.URL - numDeltas++ + if len(du.URL) > 0 { + driveIDToDeltaLink[driveID] = du.URL + } + + newPrevPaths, err := c.UpdateCollections( + ctx, + driveID, + driveName, + items, + oldPrevPaths, + itemCollection, + excludedItemIDs, + du.Reset, + errs) + if err != nil { + return nil, false, clues.Stack(err) } // Avoid the edge case where there's no paths but we do have a valid delta // token. We can accomplish this by adding an empty paths map for this // drive. If we don't have this then the next backup won't use the delta // token because it thinks the folder paths weren't persisted. - folderPaths[driveID] = map[string]string{} - maps.Copy(folderPaths[driveID], paths) + driveIDToPrevPaths[driveID] = map[string]string{} + maps.Copy(driveIDToPrevPaths[driveID], newPrevPaths) logger.Ctx(ictx).Infow( "persisted metadata for drive", - "num_paths_entries", len(paths), - "num_deltas_entries", numDeltas, - "delta_reset", delta.Reset) + "num_new_paths_entries", len(newPrevPaths), + "delta_reset", du.Reset) numDriveItems := c.NumItems - numPrevItems numPrevItems = c.NumItems @@ -336,7 +348,7 @@ func (c *Collections) Get( err = c.addURLCacheToDriveCollections( ictx, driveID, - prevDelta, + prevDeltaLink, errs) if err != nil { return nil, false, err @@ -345,8 +357,8 @@ func (c *Collections) Get( // For both cases we don't need to do set difference on folder map if the // delta token was valid because we should see all the changes. - if !delta.Reset { - if len(excluded) == 0 { + if !du.Reset { + if len(excludedItemIDs) == 0 { continue } @@ -355,7 +367,7 @@ func (c *Collections) Get( return nil, false, clues.Wrap(err, "making exclude prefix").WithClues(ictx) } - ssmb.Add(p.String(), excluded) + ssmb.Add(p.String(), excludedItemIDs) continue } @@ -370,13 +382,11 @@ func (c *Collections) Get( foundFolders[id] = struct{}{} } - for fldID, p := range oldPaths { + for fldID, p := range oldPrevPaths { if _, ok := foundFolders[fldID]; ok { continue } - delete(paths, fldID) - prevPath, err := path.FromDataLayerPath(p, false) if err != nil { err = clues.Wrap(err, "invalid previous path").WithClues(ictx).With("deleted_path", p) @@ -446,14 +456,14 @@ func (c *Collections) Get( // empty/missing and default to a full backup. 
	logger.CtxErr(ctx, err).Info("making metadata collection path prefixes")

-		return collections, canUsePreviousBackup, nil
+		return collections, canUsePrevBackup, nil
	}

	md, err := graph.MakeMetadataCollection(
		pathPrefix,
		[]graph.MetadataCollectionEntry{
-			graph.NewMetadataEntry(bupMD.PreviousPathFileName, folderPaths),
-			graph.NewMetadataEntry(bupMD.DeltaURLsFileName, deltaURLs),
+			graph.NewMetadataEntry(bupMD.PreviousPathFileName, driveIDToPrevPaths),
+			graph.NewMetadataEntry(bupMD.DeltaURLsFileName, driveIDToDeltaLink),
		},
		c.statusUpdater)
@@ -466,7 +476,7 @@ func (c *Collections) Get(
		collections = append(collections, md)
	}

-	return collections, canUsePreviousBackup, nil
+	return collections, canUsePrevBackup, nil
}

// addURLCacheToDriveCollections adds a URL cache to all collections belonging to
@@ -480,7 +490,7 @@ func (c *Collections) addURLCacheToDriveCollections(
		driveID,
		prevDelta,
		urlCacheRefreshInterval,
-		c.handler.NewItemPager(driveID, "", api.DriveItemSelectURLCache()),
+		c.handler,
		errs)
	if err != nil {
		return err
@@ -536,22 +546,21 @@ func updateCollectionPaths(
func (c *Collections) handleDelete(
	itemID, driveID string,
-	oldPaths, newPaths map[string]string,
+	oldPrevPaths, currPrevPaths, newPrevPaths map[string]string,
	isFolder bool,
	excluded map[string]struct{},
-	itemCollection map[string]map[string]string,
	invalidPrevDelta bool,
) error {
	if !isFolder {
		// Try to remove the item from the Collection if an entry exists for this
		// item. This handles cases where an item was created and deleted during the
		// same delta query.
-		if parentID, ok := itemCollection[driveID][itemID]; ok {
+		if parentID, ok := currPrevPaths[itemID]; ok {
			if col := c.CollectionMap[driveID][parentID]; col != nil {
				col.Remove(itemID)
			}

-			delete(itemCollection[driveID], itemID)
+			delete(currPrevPaths, itemID)
		}

		// Don't need to add to exclude list if the delta is invalid since the
@@ -572,7 +581,7 @@ func (c *Collections) handleDelete(

	var prevPath path.Path

-	prevPathStr, ok := oldPaths[itemID]
+	prevPathStr, ok := oldPrevPaths[itemID]
	if ok {
		var err error

@@ -589,7 +598,7 @@ func (c *Collections) handleDelete(
	// Nested folders also return deleted delta results so we don't have to
	// worry about doing a prefix search in the map to remove the subtree of
	// the deleted folder/package.
-	delete(newPaths, itemID)
+	delete(newPrevPaths, itemID)

	if prevPath == nil || invalidPrevDelta {
		// It is possible that an item was created and deleted between two delta
@@ -680,21 +689,29 @@ func (c *Collections) getCollectionPath(
// UpdateCollections initializes and adds the provided drive items to Collections
// A new collection is created for every drive folder (or package).
-// oldPaths is the unchanged data that was loaded from the metadata file.
-// newPaths starts as a copy of oldPaths and is updated as changes are found in
-// the returned results.
+// oldPrevPaths is the unchanged data that was loaded from the metadata file.
+// This map is not modified during the call.
+// currPrevPaths starts as a copy of oldPrevPaths and is updated as changes are
+// found in the returned results. Items are added to this map throughout the call.
+// newPrevPaths, i.e. the set of previous paths updated during this call, is
+// returned as a map.
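+//
+// Illustrative sketch (hypothetical folder IDs and paths): if folder "f" moved
+// from "/a/f" to "/b/f" under a valid previous delta, then
+//
+//	oldPrevPaths: {"root": "/", "f": "/a/f"}   // input, left unchanged
+//	newPrevPaths: {"root": "/", "f": "/b/f"}   // returned to the caller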
func (c *Collections) UpdateCollections( ctx context.Context, driveID, driveName string, items []models.DriveItemable, - oldPaths map[string]string, - newPaths map[string]string, + oldPrevPaths map[string]string, + currPrevPaths map[string]string, excluded map[string]struct{}, - itemCollection map[string]map[string]string, invalidPrevDelta bool, errs *fault.Bus, -) error { - el := errs.Local() +) (map[string]string, error) { + var ( + el = errs.Local() + newPrevPaths = map[string]string{} + ) + + if !invalidPrevDelta { + maps.Copy(newPrevPaths, oldPrevPaths) + } for _, item := range items { if el.Failure() != nil { @@ -704,8 +721,12 @@ func (c *Collections) UpdateCollections( var ( itemID = ptr.Val(item.GetId()) itemName = ptr.Val(item.GetName()) - ictx = clues.Add(ctx, "item_id", itemID, "item_name", clues.Hide(itemName)) isFolder = item.GetFolder() != nil || item.GetPackageEscaped() != nil + ictx = clues.Add( + ctx, + "item_id", itemID, + "item_name", clues.Hide(itemName), + "item_is_folder", isFolder) ) if item.GetMalware() != nil { @@ -727,13 +748,13 @@ func (c *Collections) UpdateCollections( if err := c.handleDelete( itemID, driveID, - oldPaths, - newPaths, + oldPrevPaths, + currPrevPaths, + newPrevPaths, isFolder, excluded, - itemCollection, invalidPrevDelta); err != nil { - return clues.Stack(err).WithClues(ictx) + return nil, clues.Stack(err).WithClues(ictx) } continue @@ -759,13 +780,13 @@ func (c *Collections) UpdateCollections( // Deletions are handled above so this is just moves/renames. var prevPath path.Path - prevPathStr, ok := oldPaths[itemID] + prevPathStr, ok := oldPrevPaths[itemID] if ok { prevPath, err = path.FromDataLayerPath(prevPathStr, false) if err != nil { el.AddRecoverable(ctx, clues.Wrap(err, "invalid previous path"). WithClues(ictx). - With("path_string", prevPathStr)) + With("prev_path_string", path.LoggableDir(prevPathStr))) } } else if item.GetRoot() != nil { // Root doesn't move or get renamed. @@ -775,11 +796,11 @@ func (c *Collections) UpdateCollections( // Moved folders don't cause delta results for any subfolders nested in // them. We need to go through and update paths to handle that. We only // update newPaths so we don't accidentally clobber previous deletes. - updatePath(newPaths, itemID, collectionPath.String()) + updatePath(newPrevPaths, itemID, collectionPath.String()) found, err := updateCollectionPaths(driveID, itemID, c.CollectionMap, collectionPath) if err != nil { - return clues.Stack(err).WithClues(ictx) + return nil, clues.Stack(err).WithClues(ictx) } if found { @@ -803,7 +824,7 @@ func (c *Collections) UpdateCollections( invalidPrevDelta, nil) if err != nil { - return clues.Stack(err).WithClues(ictx) + return nil, clues.Stack(err).WithClues(ictx) } col.driveName = driveName @@ -825,35 +846,38 @@ func (c *Collections) UpdateCollections( case item.GetFile() != nil: // Deletions are handled above so this is just moves/renames. if len(ptr.Val(item.GetParentReference().GetId())) == 0 { - return clues.New("file without parent ID").WithClues(ictx) + return nil, clues.New("file without parent ID").WithClues(ictx) } // Get the collection for this item. 
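+			// (Delta enumeration is expected to emit a folder before the items
+			// nested inside it, so a missing parent collection here is treated
+			// as a hard error rather than a recoverable one.)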
parentID := ptr.Val(item.GetParentReference().GetId()) ictx = clues.Add(ictx, "parent_id", parentID) - collection, found := c.CollectionMap[driveID][parentID] - if !found { - return clues.New("item seen before parent folder").WithClues(ictx) + collection, ok := c.CollectionMap[driveID][parentID] + if !ok { + return nil, clues.New("item seen before parent folder").WithClues(ictx) } - // Delete the file from previous collection. This will - // only kick in if the file was moved multiple times - // within a single delta query - icID, found := itemCollection[driveID][itemID] - if found { - pcollection, found := c.CollectionMap[driveID][icID] + // This will only kick in if the file was moved multiple times + // within a single delta query. We delete the file from the previous + // collection so that it doesn't appear in two places. + prevParentContainerID, ok := currPrevPaths[itemID] + if ok { + prevColl, found := c.CollectionMap[driveID][prevParentContainerID] if !found { - return clues.New("previous collection not found").WithClues(ictx) + return nil, clues.New("previous collection not found"). + With("prev_parent_container_id", prevParentContainerID). + WithClues(ictx) } - removed := pcollection.Remove(itemID) - if !removed { - return clues.New("removing from prev collection").WithClues(ictx) + if ok := prevColl.Remove(itemID); !ok { + return nil, clues.New("removing item from prev collection"). + With("prev_parent_container_id", prevParentContainerID). + WithClues(ictx) } } - itemCollection[driveID][itemID] = parentID + currPrevPaths[itemID] = parentID if collection.Add(item) { c.NumItems++ @@ -874,11 +898,13 @@ func (c *Collections) UpdateCollections( } default: - return clues.New("item type not supported").WithClues(ictx) + el.AddRecoverable(ictx, clues.New("item is neither folder nor file"). + WithClues(ictx). 
+ Label(fault.LabelForceNoBackupCreation)) } } - return el.Failure() + return newPrevPaths, el.Failure() } type dirScopeChecker interface { diff --git a/src/internal/m365/collection/drive/collections_test.go b/src/internal/m365/collection/drive/collections_test.go index 2943447fe..bae8019a8 100644 --- a/src/internal/m365/collection/drive/collections_test.go +++ b/src/internal/m365/collection/drive/collections_test.go @@ -8,7 +8,6 @@ import ( "github.com/alcionai/clues" "github.com/google/uuid" "github.com/microsoftgraph/msgraph-sdk-go/models" - "github.com/microsoftgraph/msgraph-sdk-go/models/odataerrors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" @@ -138,7 +137,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedStatePath := getExpectedStatePathGenerator(suite.T(), bh, tenant, testBaseDrivePath) tests := []struct { - testCase string + name string items []models.DriveItemable inputFolderMap map[string]string scope selectors.OneDriveScope @@ -148,11 +147,11 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedContainerCount int expectedFileCount int expectedSkippedCount int - expectedMetadataPaths map[string]string + expectedPrevPaths map[string]string expectedExcludes map[string]struct{} }{ { - testCase: "Invalid item", + name: "Invalid item", items: []models.DriveItemable{ driveRootItem("root"), driveItem("item", "item", testBaseDrivePath, "root", false, false, false), @@ -164,13 +163,13 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { "root": expectedStatePath(data.NotMovedState, ""), }, expectedContainerCount: 1, - expectedMetadataPaths: map[string]string{ + expectedPrevPaths: map[string]string{ "root": expectedPath(""), }, expectedExcludes: map[string]struct{}{}, }, { - testCase: "Single File", + name: "Single File", items: []models.DriveItemable{ driveRootItem("root"), driveItem("file", "file", testBaseDrivePath, "root", true, false, false), @@ -185,13 +184,13 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedFileCount: 1, expectedContainerCount: 1, // Root folder is skipped since it's always present. 
- expectedMetadataPaths: map[string]string{ + expectedPrevPaths: map[string]string{ "root": expectedPath(""), }, expectedExcludes: getDelList("file"), }, { - testCase: "Single Folder", + name: "Single Folder", items: []models.DriveItemable{ driveRootItem("root"), driveItem("folder", "folder", testBaseDrivePath, "root", false, true, false), @@ -203,7 +202,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { "root": expectedStatePath(data.NotMovedState, ""), "folder": expectedStatePath(data.NewState, folder), }, - expectedMetadataPaths: map[string]string{ + expectedPrevPaths: map[string]string{ "root": expectedPath(""), "folder": expectedPath("/folder"), }, @@ -212,7 +211,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedExcludes: map[string]struct{}{}, }, { - testCase: "Single Package", + name: "Single Package", items: []models.DriveItemable{ driveRootItem("root"), driveItem("package", "package", testBaseDrivePath, "root", false, false, true), @@ -224,7 +223,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { "root": expectedStatePath(data.NotMovedState, ""), "package": expectedStatePath(data.NewState, pkg), }, - expectedMetadataPaths: map[string]string{ + expectedPrevPaths: map[string]string{ "root": expectedPath(""), "package": expectedPath("/package"), }, @@ -233,7 +232,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedExcludes: map[string]struct{}{}, }, { - testCase: "1 root file, 1 folder, 1 package, 2 files, 3 collections", + name: "1 root file, 1 folder, 1 package, 2 files, 3 collections", items: []models.DriveItemable{ driveRootItem("root"), driveItem("fileInRoot", "fileInRoot", testBaseDrivePath, "root", true, false, false), @@ -253,7 +252,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedItemCount: 5, expectedFileCount: 3, expectedContainerCount: 3, - expectedMetadataPaths: map[string]string{ + expectedPrevPaths: map[string]string{ "root": expectedPath(""), "folder": expectedPath("/folder"), "package": expectedPath("/package"), @@ -261,7 +260,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedExcludes: getDelList("fileInRoot", "fileInFolder", "fileInPackage"), }, { - testCase: "contains folder selector", + name: "contains folder selector", items: []models.DriveItemable{ driveRootItem("root"), driveItem("fileInRoot", "fileInRoot", testBaseDrivePath, "root", true, false, false), @@ -286,7 +285,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedContainerCount: 3, // just "folder" isn't added here because the include check is done on the // parent path since we only check later if something is a folder or not. 
- expectedMetadataPaths: map[string]string{ + expectedPrevPaths: map[string]string{ "folder": expectedPath(folder), "subfolder": expectedPath(folderSub), "folder2": expectedPath(folderSub + folder), @@ -294,7 +293,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedExcludes: getDelList("fileInFolder", "fileInFolder2"), }, { - testCase: "prefix subfolder selector", + name: "prefix subfolder selector", items: []models.DriveItemable{ driveRootItem("root"), driveItem("fileInRoot", "fileInRoot", testBaseDrivePath, "root", true, false, false), @@ -317,14 +316,14 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedItemCount: 3, expectedFileCount: 1, expectedContainerCount: 2, - expectedMetadataPaths: map[string]string{ + expectedPrevPaths: map[string]string{ "subfolder": expectedPath(folderSub), "folder2": expectedPath(folderSub + folder), }, expectedExcludes: getDelList("fileInFolder2"), }, { - testCase: "match subfolder selector", + name: "match subfolder selector", items: []models.DriveItemable{ driveRootItem("root"), driveItem("fileInRoot", "fileInRoot", testBaseDrivePath, "root", true, false, false), @@ -345,13 +344,13 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedFileCount: 1, expectedContainerCount: 1, // No child folders for subfolder so nothing here. - expectedMetadataPaths: map[string]string{ + expectedPrevPaths: map[string]string{ "subfolder": expectedPath(folderSub), }, expectedExcludes: getDelList("fileInSubfolder"), }, { - testCase: "not moved folder tree", + name: "not moved folder tree", items: []models.DriveItemable{ driveRootItem("root"), driveItem("folder", "folder", testBaseDrivePath, "root", false, true, false), @@ -369,7 +368,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedItemCount: 1, expectedFileCount: 0, expectedContainerCount: 2, - expectedMetadataPaths: map[string]string{ + expectedPrevPaths: map[string]string{ "root": expectedPath(""), "folder": expectedPath(folder), "subfolder": expectedPath(folderSub), @@ -377,7 +376,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedExcludes: map[string]struct{}{}, }, { - testCase: "moved folder tree", + name: "moved folder tree", items: []models.DriveItemable{ driveRootItem("root"), driveItem("folder", "folder", testBaseDrivePath, "root", false, true, false), @@ -395,7 +394,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedItemCount: 1, expectedFileCount: 0, expectedContainerCount: 2, - expectedMetadataPaths: map[string]string{ + expectedPrevPaths: map[string]string{ "root": expectedPath(""), "folder": expectedPath(folder), "subfolder": expectedPath(folderSub), @@ -403,7 +402,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedExcludes: map[string]struct{}{}, }, { - testCase: "moved folder tree with file no previous", + name: "moved folder tree with file no previous", items: []models.DriveItemable{ driveRootItem("root"), driveItem("folder", "folder", testBaseDrivePath, "root", false, true, false), @@ -420,14 +419,14 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedItemCount: 2, expectedFileCount: 1, expectedContainerCount: 2, - expectedMetadataPaths: map[string]string{ + expectedPrevPaths: map[string]string{ "root": expectedPath(""), "folder": expectedPath("/folder2"), }, expectedExcludes: getDelList("file"), }, { - testCase: "moved folder tree with file no previous 1", + name: "moved 
folder tree with file no previous 1", items: []models.DriveItemable{ driveRootItem("root"), driveItem("folder", "folder", testBaseDrivePath, "root", false, true, false), @@ -443,14 +442,14 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedItemCount: 2, expectedFileCount: 1, expectedContainerCount: 2, - expectedMetadataPaths: map[string]string{ + expectedPrevPaths: map[string]string{ "root": expectedPath(""), "folder": expectedPath(folder), }, expectedExcludes: getDelList("file"), }, { - testCase: "moved folder tree and subfolder 1", + name: "moved folder tree and subfolder 1", items: []models.DriveItemable{ driveRootItem("root"), driveItem("folder", "folder", testBaseDrivePath, "root", false, true, false), @@ -470,7 +469,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedItemCount: 2, expectedFileCount: 0, expectedContainerCount: 3, - expectedMetadataPaths: map[string]string{ + expectedPrevPaths: map[string]string{ "root": expectedPath(""), "folder": expectedPath(folder), "subfolder": expectedPath("/subfolder"), @@ -478,7 +477,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedExcludes: map[string]struct{}{}, }, { - testCase: "moved folder tree and subfolder 2", + name: "moved folder tree and subfolder 2", items: []models.DriveItemable{ driveRootItem("root"), driveItem("subfolder", "subfolder", testBaseDrivePath, "root", false, true, false), @@ -498,7 +497,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedItemCount: 2, expectedFileCount: 0, expectedContainerCount: 3, - expectedMetadataPaths: map[string]string{ + expectedPrevPaths: map[string]string{ "root": expectedPath(""), "folder": expectedPath(folder), "subfolder": expectedPath("/subfolder"), @@ -506,7 +505,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedExcludes: map[string]struct{}{}, }, { - testCase: "move subfolder when moving parent", + name: "move subfolder when moving parent", items: []models.DriveItemable{ driveRootItem("root"), driveItem("folder2", "folder2", testBaseDrivePath, "root", false, true, false), @@ -540,7 +539,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedItemCount: 5, expectedFileCount: 2, expectedContainerCount: 4, - expectedMetadataPaths: map[string]string{ + expectedPrevPaths: map[string]string{ "root": expectedPath(""), "folder": expectedPath("/folder"), "folder2": expectedPath("/folder2"), @@ -549,7 +548,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedExcludes: getDelList("itemInSubfolder", "itemInFolder2"), }, { - testCase: "moved folder tree multiple times", + name: "moved folder tree multiple times", items: []models.DriveItemable{ driveRootItem("root"), driveItem("folder", "folder", testBaseDrivePath, "root", false, true, false), @@ -569,7 +568,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedItemCount: 2, expectedFileCount: 1, expectedContainerCount: 2, - expectedMetadataPaths: map[string]string{ + expectedPrevPaths: map[string]string{ "root": expectedPath(""), "folder": expectedPath("/folder2"), "subfolder": expectedPath("/folder2/subfolder"), @@ -577,7 +576,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedExcludes: getDelList("file"), }, { - testCase: "deleted folder and package", + name: "deleted folder and package", items: []models.DriveItemable{ driveRootItem("root"), // root is always present, but not necessary 
here delItem("folder", testBaseDrivePath, "root", false, true, false), @@ -598,13 +597,13 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedItemCount: 0, expectedFileCount: 0, expectedContainerCount: 1, - expectedMetadataPaths: map[string]string{ + expectedPrevPaths: map[string]string{ "root": expectedPath(""), }, expectedExcludes: map[string]struct{}{}, }, { - testCase: "delete folder without previous", + name: "delete folder without previous", items: []models.DriveItemable{ driveRootItem("root"), delItem("folder", testBaseDrivePath, "root", false, true, false), @@ -620,13 +619,13 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedItemCount: 0, expectedFileCount: 0, expectedContainerCount: 1, - expectedMetadataPaths: map[string]string{ + expectedPrevPaths: map[string]string{ "root": expectedPath(""), }, expectedExcludes: map[string]struct{}{}, }, { - testCase: "delete folder tree move subfolder", + name: "delete folder tree move subfolder", items: []models.DriveItemable{ driveRootItem("root"), delItem("folder", testBaseDrivePath, "root", false, true, false), @@ -647,14 +646,14 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedItemCount: 1, expectedFileCount: 0, expectedContainerCount: 2, - expectedMetadataPaths: map[string]string{ + expectedPrevPaths: map[string]string{ "root": expectedPath(""), "subfolder": expectedPath("/subfolder"), }, expectedExcludes: map[string]struct{}{}, }, { - testCase: "delete file", + name: "delete file", items: []models.DriveItemable{ driveRootItem("root"), delItem("item", testBaseDrivePath, "root", true, false, false), @@ -670,13 +669,13 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedItemCount: 1, expectedFileCount: 1, expectedContainerCount: 1, - expectedMetadataPaths: map[string]string{ + expectedPrevPaths: map[string]string{ "root": expectedPath(""), }, expectedExcludes: getDelList("item"), }, { - testCase: "item before parent errors", + name: "item before parent errors", items: []models.DriveItemable{ driveRootItem("root"), driveItem("file", "file", testBaseDrivePath+"/folder", "folder", true, false, false), @@ -691,13 +690,11 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedItemCount: 0, expectedFileCount: 0, expectedContainerCount: 1, - expectedMetadataPaths: map[string]string{ - "root": expectedPath(""), - }, - expectedExcludes: map[string]struct{}{}, + expectedPrevPaths: nil, + expectedExcludes: map[string]struct{}{}, }, { - testCase: "1 root file, 1 folder, 1 package, 1 good file, 1 malware", + name: "1 root file, 1 folder, 1 package, 1 good file, 1 malware", items: []models.DriveItemable{ driveRootItem("root"), driveItem("fileInRoot", "fileInRoot", testBaseDrivePath, "root", true, false, false), @@ -718,7 +715,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { expectedFileCount: 2, expectedContainerCount: 3, expectedSkippedCount: 1, - expectedMetadataPaths: map[string]string{ + expectedPrevPaths: map[string]string{ "root": expectedPath(""), "folder": expectedPath("/folder"), "package": expectedPath("/package"), @@ -727,26 +724,23 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { }, } - for _, tt := range tests { - suite.Run(tt.testCase, func() { + for _, test := range tests { + suite.Run(test.name, func() { t := suite.T() ctx, flush := tester.NewContext(t) defer flush() var ( - excludes = map[string]struct{}{} - outputFolderMap = map[string]string{} - 
itemCollection = map[string]map[string]string{ - driveID: {}, - } - errs = fault.New(true) + excludes = map[string]struct{}{} + currPrevPaths = map[string]string{} + errs = fault.New(true) ) - maps.Copy(outputFolderMap, tt.inputFolderMap) + maps.Copy(currPrevPaths, test.inputFolderMap) c := NewCollections( - &itemBackupHandler{api.Drives{}, user, tt.scope}, + &itemBackupHandler{api.Drives{}, user, test.scope}, tenant, idname.NewProvider(user, user), nil, @@ -754,25 +748,24 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { c.CollectionMap[driveID] = map[string]*Collection{} - err := c.UpdateCollections( + newPrevPaths, err := c.UpdateCollections( ctx, driveID, "General", - tt.items, - tt.inputFolderMap, - outputFolderMap, + test.items, + test.inputFolderMap, + currPrevPaths, excludes, - itemCollection, false, errs) - tt.expect(t, err, clues.ToCore(err)) - assert.Equal(t, len(tt.expectedCollectionIDs), len(c.CollectionMap[driveID]), "total collections") - assert.Equal(t, tt.expectedItemCount, c.NumItems, "item count") - assert.Equal(t, tt.expectedFileCount, c.NumFiles, "file count") - assert.Equal(t, tt.expectedContainerCount, c.NumContainers, "container count") - assert.Equal(t, tt.expectedSkippedCount, len(errs.Skipped()), "skipped items") + test.expect(t, err, clues.ToCore(err)) + assert.Equal(t, len(test.expectedCollectionIDs), len(c.CollectionMap[driveID]), "total collections") + assert.Equal(t, test.expectedItemCount, c.NumItems, "item count") + assert.Equal(t, test.expectedFileCount, c.NumFiles, "file count") + assert.Equal(t, test.expectedContainerCount, c.NumContainers, "container count") + assert.Equal(t, test.expectedSkippedCount, len(errs.Skipped()), "skipped items") - for id, sp := range tt.expectedCollectionIDs { + for id, sp := range test.expectedCollectionIDs { if !assert.Containsf(t, c.CollectionMap[driveID], id, "missing collection with id %s", id) { // Skip collections we don't find so we don't get an NPE. 
continue @@ -783,8 +776,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() { assert.Equalf(t, sp.prevPath, c.CollectionMap[driveID][id].PreviousPath(), "prev path for collection %s", id) } - assert.Equal(t, tt.expectedMetadataPaths, outputFolderMap, "metadata paths") - assert.Equal(t, tt.expectedExcludes, excludes, "exclude list") + assert.Equal(t, test.expectedPrevPaths, newPrevPaths, "metadata paths") + assert.Equal(t, test.expectedExcludes, excludes, "exclude list") }) } } @@ -1306,7 +1299,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { driveItem("folder", "folder", driveBasePath1, "root", false, true, false), driveItem("file", "file", driveBasePath1+"/folder", "folder", true, false, false), }, - DeltaLink: &delta, + DeltaLink: &delta, + ResetDelta: true, }, }, }, @@ -1344,7 +1338,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { driveItem("file", "file", driveBasePath1+"/folder", "folder", true, false, false), driveItem("file", "file2", driveBasePath1+"/folder", "folder", true, false, false), }, - DeltaLink: &delta, + DeltaLink: &delta, + ResetDelta: true, }, }, }, @@ -1421,7 +1416,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { driveItem("folder", "folder", driveBasePath1, "root", false, true, false), driveItem("file", "file", driveBasePath1+"/folder", "folder", true, false, false), }, - DeltaLink: &empty, // probably will never happen with graph + DeltaLink: &empty, // probably will never happen with graph + ResetDelta: true, }, }, }, @@ -1458,7 +1454,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { driveItem("folder", "folder", driveBasePath1, "root", false, true, false), driveItem("file", "file", driveBasePath1+"/folder", "folder", true, false, false), }, - NextLink: &next, + NextLink: &next, + ResetDelta: true, }, { Values: []models.DriveItemable{ @@ -1466,7 +1463,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { driveItem("folder", "folder", driveBasePath1, "root", false, true, false), driveItem("file2", "file2", driveBasePath1+"/folder", "folder", true, false, false), }, - DeltaLink: &delta, + DeltaLink: &delta, + ResetDelta: true, }, }, }, @@ -1508,7 +1506,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { driveItem("folder", "folder", driveBasePath1, "root", false, true, false), driveItem("file", "file", driveBasePath1+"/folder", "folder", true, false, false), }, - DeltaLink: &delta, + DeltaLink: &delta, + ResetDelta: true, }, }, driveID2: { @@ -1518,7 +1517,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { driveItem("folder2", "folder", driveBasePath2, "root2", false, true, false), driveItem("file2", "file", driveBasePath2+"/folder", "folder2", true, false, false), }, - DeltaLink: &delta2, + DeltaLink: &delta2, + ResetDelta: true, }, }, }, @@ -1570,7 +1570,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { driveItem("folder", "folder", driveBasePath1, "root", false, true, false), driveItem("file", "file", driveBasePath1+"/folder", "folder", true, false, false), }, - DeltaLink: &delta, + DeltaLink: &delta, + ResetDelta: true, }, }, driveID2: { @@ -1580,7 +1581,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { driveItem("folder", "folder", driveBasePath2, "root", false, true, false), driveItem("file2", "file", driveBasePath2+"/folder", "folder", true, false, false), }, - DeltaLink: &delta2, + DeltaLink: &delta2, + ResetDelta: true, }, }, }, @@ -1638,87 +1640,6 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { expectedFolderPaths: nil, expectedDelList: nil, 
}, - { - name: "OneDrive_OneItemPage_DeltaError", - drives: []models.Driveable{drive1}, - items: map[string][]apiMock.PagerResult[models.DriveItemable]{ - driveID1: { - { - Err: getDeltaError(), - }, - { - Values: []models.DriveItemable{ - driveRootItem("root"), - driveItem("file", "file", driveBasePath1, "root", true, false, false), - }, - DeltaLink: &delta, - }, - }, - }, - canUsePreviousBackup: true, - errCheck: assert.NoError, - expectedCollections: map[string]map[data.CollectionState][]string{ - rootFolderPath1: {data.NotMovedState: {"file"}}, - }, - expectedDeltaURLs: map[string]string{ - driveID1: delta, - }, - expectedFolderPaths: map[string]map[string]string{ - driveID1: { - "root": rootFolderPath1, - }, - }, - expectedDelList: pmMock.NewPrefixMap(map[string]map[string]struct{}{}), - doNotMergeItems: map[string]bool{ - rootFolderPath1: true, - }, - }, - { - name: "OneDrive_TwoItemPage_DeltaError", - drives: []models.Driveable{drive1}, - items: map[string][]apiMock.PagerResult[models.DriveItemable]{ - driveID1: { - { - Err: getDeltaError(), - }, - { - Values: []models.DriveItemable{ - driveRootItem("root"), - driveItem("file", "file", driveBasePath1, "root", true, false, false), - }, - NextLink: &next, - }, - { - Values: []models.DriveItemable{ - driveRootItem("root"), - driveItem("folder", "folder", driveBasePath1, "root", false, true, false), - driveItem("file2", "file", driveBasePath1+"/folder", "folder", true, false, false), - }, - DeltaLink: &delta, - }, - }, - }, - canUsePreviousBackup: true, - errCheck: assert.NoError, - expectedCollections: map[string]map[data.CollectionState][]string{ - rootFolderPath1: {data.NotMovedState: {"file"}}, - expectedPath1("/folder"): {data.NewState: {"folder", "file2"}}, - }, - expectedDeltaURLs: map[string]string{ - driveID1: delta, - }, - expectedFolderPaths: map[string]map[string]string{ - driveID1: { - "root": rootFolderPath1, - "folder": folderPath1, - }, - }, - expectedDelList: pmMock.NewPrefixMap(map[string]map[string]struct{}{}), - doNotMergeItems: map[string]bool{ - rootFolderPath1: true, - folderPath1: true, - }, - }, { name: "OneDrive_TwoItemPage_NoDeltaError", drives: []models.Driveable{drive1}, @@ -1771,16 +1692,14 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { drives: []models.Driveable{drive1}, items: map[string][]apiMock.PagerResult[models.DriveItemable]{ driveID1: { - { - Err: getDeltaError(), - }, { Values: []models.DriveItemable{ driveRootItem("root"), driveItem("folder2", "folder2", driveBasePath1, "root", false, true, false), driveItem("file", "file", driveBasePath1+"/folder2", "folder2", true, false, false), }, - DeltaLink: &delta, + DeltaLink: &delta, + ResetDelta: true, }, }, }, @@ -1818,16 +1737,14 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { drives: []models.Driveable{drive1}, items: map[string][]apiMock.PagerResult[models.DriveItemable]{ driveID1: { - { - Err: getDeltaError(), - }, { Values: []models.DriveItemable{ driveRootItem("root"), driveItem("folder2", "folder", driveBasePath1, "root", false, true, false), driveItem("file", "file", driveBasePath1+"/folder", "folder2", true, false, false), }, - DeltaLink: &delta, + DeltaLink: &delta, + ResetDelta: true, }, }, }, @@ -1884,7 +1801,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { driveItem("file2", "file2", driveBasePath1+"/folder", "folder", true, false, false), malwareItem("malware2", "malware2", driveBasePath1+"/folder", "folder", true, false, false), }, - DeltaLink: &delta, + DeltaLink: &delta, + ResetDelta: true, }, }, }, @@ 
-1914,13 +1832,10 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { expectedSkippedCount: 2, }, { - name: "One Drive Delta Error Deleted Folder In New Results", + name: "One Drive Deleted Folder In New Results", drives: []models.Driveable{drive1}, items: map[string][]apiMock.PagerResult[models.DriveItemable]{ driveID1: { - { - Err: getDeltaError(), - }, { Values: []models.DriveItemable{ driveRootItem("root"), @@ -1937,7 +1852,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { delItem("folder2", driveBasePath1, "root", false, true, false), delItem("file2", driveBasePath1, "root", true, false, false), }, - DeltaLink: &delta2, + DeltaLink: &delta2, + ResetDelta: true, }, }, }, @@ -1972,19 +1888,17 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { }, }, { - name: "One Drive Delta Error Random Folder Delete", + name: "One Drive Random Folder Delete", drives: []models.Driveable{drive1}, items: map[string][]apiMock.PagerResult[models.DriveItemable]{ driveID1: { - { - Err: getDeltaError(), - }, { Values: []models.DriveItemable{ driveRootItem("root"), delItem("folder", driveBasePath1, "root", false, true, false), }, - DeltaLink: &delta, + DeltaLink: &delta, + ResetDelta: true, }, }, }, @@ -2015,19 +1929,17 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { }, }, { - name: "One Drive Delta Error Random Item Delete", + name: "One Drive Random Item Delete", drives: []models.Driveable{drive1}, items: map[string][]apiMock.PagerResult[models.DriveItemable]{ driveID1: { - { - Err: getDeltaError(), - }, { Values: []models.DriveItemable{ driveRootItem("root"), delItem("file", driveBasePath1, "root", true, false, false), }, - DeltaLink: &delta, + DeltaLink: &delta, + ResetDelta: true, }, }, }, @@ -2073,7 +1985,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { delItem("folder", driveBasePath1, "root", false, true, false), delItem("file", driveBasePath1, "root", true, false, false), }, - DeltaLink: &delta2, + DeltaLink: &delta2, + ResetDelta: true, }, }, }, @@ -2116,7 +2029,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { driveRootItem("root"), delItem("file", driveBasePath1, "root", true, false, false), }, - DeltaLink: &delta, + DeltaLink: &delta, + ResetDelta: true, }, }, }, @@ -2154,7 +2068,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { driveRootItem("root"), delItem("folder", driveBasePath1, "root", false, true, false), }, - DeltaLink: &delta, + DeltaLink: &delta, + ResetDelta: true, }, }, }, @@ -2189,7 +2104,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { driveRootItem("root"), delItem("file", driveBasePath1, "root", true, false, false), }, - DeltaLink: &delta, + DeltaLink: &delta, + ResetDelta: true, }, }, }, @@ -2271,6 +2187,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() { mbh := mock.DefaultOneDriveBH("a-user") mbh.DrivePagerV = mockDrivePager mbh.ItemPagerV = itemPagers + mbh.DriveItemEnumeration = mock.PagerResultToEDID(test.items) c := NewCollections( mbh, @@ -2501,121 +2418,6 @@ func delItem( return item } -func getDeltaError() error { - syncStateNotFound := "SyncStateNotFound" - me := odataerrors.NewMainError() - me.SetCode(&syncStateNotFound) - - deltaError := odataerrors.NewODataError() - deltaError.SetErrorEscaped(me) - - return deltaError -} - -func (suite *OneDriveCollectionsUnitSuite) TestCollectItems() { - next := "next" - delta := "delta" - prevDelta := "prev-delta" - - table := []struct { - name string - items []apiMock.PagerResult[models.DriveItemable] - deltaURL string - prevDeltaSuccess bool - 
prevDelta string - err error - }{ - { - name: "delta on first run", - deltaURL: delta, - items: []apiMock.PagerResult[models.DriveItemable]{ - {DeltaLink: &delta}, - }, - prevDeltaSuccess: true, - prevDelta: prevDelta, - }, - { - name: "empty prev delta", - deltaURL: delta, - items: []apiMock.PagerResult[models.DriveItemable]{ - {DeltaLink: &delta}, - }, - prevDeltaSuccess: false, - prevDelta: "", - }, - { - name: "next then delta", - deltaURL: delta, - items: []apiMock.PagerResult[models.DriveItemable]{ - {NextLink: &next}, - {DeltaLink: &delta}, - }, - prevDeltaSuccess: true, - prevDelta: prevDelta, - }, - { - name: "invalid prev delta", - deltaURL: delta, - items: []apiMock.PagerResult[models.DriveItemable]{ - {Err: getDeltaError()}, - {DeltaLink: &delta}, // works on retry - }, - prevDelta: prevDelta, - prevDeltaSuccess: false, - }, - { - name: "fail a normal delta query", - items: []apiMock.PagerResult[models.DriveItemable]{ - {NextLink: &next}, - {Err: assert.AnError}, - }, - prevDelta: prevDelta, - prevDeltaSuccess: true, - err: assert.AnError, - }, - } - for _, test := range table { - suite.Run(test.name, func() { - t := suite.T() - - ctx, flush := tester.NewContext(t) - defer flush() - - itemPager := &apiMock.DeltaPager[models.DriveItemable]{ - ToReturn: test.items, - } - - collectorFunc := func( - ctx context.Context, - driveID, driveName string, - driveItems []models.DriveItemable, - oldPaths map[string]string, - newPaths map[string]string, - excluded map[string]struct{}, - itemCollection map[string]map[string]string, - doNotMergeItems bool, - errs *fault.Bus, - ) error { - return nil - } - - delta, _, _, err := collectItems( - ctx, - itemPager, - "", - "General", - collectorFunc, - map[string]string{}, - test.prevDelta, - fault.New(true)) - - require.ErrorIs(t, err, test.err, "delta fetch err", clues.ToCore(err)) - require.Equal(t, test.deltaURL, delta.URL, "delta url") - require.Equal(t, !test.prevDeltaSuccess, delta.Reset, "delta reset") - }) - } -} - func (suite *OneDriveCollectionsUnitSuite) TestAddURLCacheToDriveCollections() { driveID := "test-drive" collCount := 3 diff --git a/src/internal/m365/collection/drive/handlers.go b/src/internal/m365/collection/drive/handlers.go index eaa27aebb..4e83bcc8f 100644 --- a/src/internal/m365/collection/drive/handlers.go +++ b/src/internal/m365/collection/drive/handlers.go @@ -38,6 +38,7 @@ type BackupHandler interface { GetItemPermissioner GetItemer NewDrivePagerer + EnumerateDriveItemsDeltaer // PathPrefix constructs the service and category specific path prefix for // the given values. @@ -52,7 +53,7 @@ type BackupHandler interface { // ServiceCat returns the service and category used by this implementation. ServiceCat() (path.ServiceType, path.CategoryType) - NewItemPager(driveID, link string, fields []string) api.DeltaPager[models.DriveItemable] + // FormatDisplayPath creates a human-readable string to represent the // provided path. 
	FormatDisplayPath(driveName string, parentPath *path.Builder) string
@@ -81,6 +82,17 @@ type GetItemer interface {
	) (models.DriveItemable, error)
}

+type EnumerateDriveItemsDeltaer interface {
+	EnumerateDriveItemsDelta(
+		ctx context.Context,
+		driveID, prevDeltaLink string,
+	) (
+		[]models.DriveItemable,
+		api.DeltaUpdate,
+		error,
+	)
+}
+
// ---------------------------------------------------------------------------
// restore
// ---------------------------------------------------------------------------
diff --git a/src/internal/m365/collection/drive/item.go b/src/internal/m365/collection/drive/item.go
index 19da4a30e..3756d0abd 100644
--- a/src/internal/m365/collection/drive/item.go
+++ b/src/internal/m365/collection/drive/item.go
@@ -10,17 +10,24 @@ import (
	"github.com/microsoftgraph/msgraph-sdk-go/models"
	"golang.org/x/exp/maps"

+	"github.com/alcionai/corso/src/internal/common"
+	jwt "github.com/alcionai/corso/src/internal/common/jwt"
	"github.com/alcionai/corso/src/internal/common/ptr"
	"github.com/alcionai/corso/src/internal/common/readers"
	"github.com/alcionai/corso/src/internal/common/str"
	"github.com/alcionai/corso/src/internal/m365/collection/drive/metadata"
	"github.com/alcionai/corso/src/internal/m365/graph"
+	"github.com/alcionai/corso/src/pkg/logger"
	"github.com/alcionai/corso/src/pkg/services/m365/api"
)

const (
	acceptHeaderKey   = "Accept"
	acceptHeaderValue = "*/*"
+
+	// JWTQueryParam is a query param embedded in graph download URLs that holds
+	// the JWT token.
+	JWTQueryParam = "tempauth"
)

// downloadUrlKeys is used to find the download URL in a DriveItem response.
@@ -121,6 +128,19 @@ func downloadFile(
		return nil, clues.New("empty file url").WithClues(ctx)
	}

+	// Precheck for url expiry before we make a call to graph to download the
+	// file. If the url is expired, we can return early and save a call to graph.
+	//
+	// Ignore all errors encountered during the check. We can rely on graph to
+	// return errors on malformed urls. Ignoring errors also future-proofs against
+	// any sudden graph changes, e.g. if graph decides to embed the token in a
+	// new query param.
+	expired, err := isURLExpired(ctx, url)
+	if err == nil && expired {
+		logger.Ctx(ctx).Debug("expired item download url")
+		return nil, graph.ErrTokenExpired
+	}
+
	rc, err := readers.NewResetRetryHandler(
		ctx,
		&downloadWithRetries{
@@ -193,3 +213,27 @@ func setName(orig models.ItemReferenceable, driveName string) models.ItemReferen

	return orig
}
+
+// isURLExpired inspects the jwt token embedded in the item download url
+// and returns true if it is expired.
+func isURLExpired(
+	ctx context.Context,
+	url string,
+) (bool, error) {
+	// Extract the raw JWT string from the download url.
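+	// Download URLs look roughly like
+	//   https://<host>/<path>?tempauth=<jwt>&...
+	// (illustrative shape only); the expiry claim inside that token is what
+	// gets checked below.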
+ rawJWT, err := common.GetQueryParamFromURL(url, JWTQueryParam) + if err != nil { + logger.CtxErr(ctx, err).Info("query param not found") + + return false, clues.Stack(err).WithClues(ctx) + } + + expired, err := jwt.IsJWTExpired(rawJWT) + if err != nil { + logger.CtxErr(ctx, err).Info("checking jwt expiry") + + return false, clues.Stack(err).WithClues(ctx) + } + + return expired, nil +} diff --git a/src/internal/m365/collection/drive/item_collector.go b/src/internal/m365/collection/drive/item_collector.go deleted file mode 100644 index b2ff41831..000000000 --- a/src/internal/m365/collection/drive/item_collector.go +++ /dev/null @@ -1,142 +0,0 @@ -package drive - -import ( - "context" - - "github.com/microsoftgraph/msgraph-sdk-go/models" - "golang.org/x/exp/maps" - - "github.com/alcionai/corso/src/internal/m365/graph" - "github.com/alcionai/corso/src/pkg/fault" - "github.com/alcionai/corso/src/pkg/logger" - "github.com/alcionai/corso/src/pkg/services/m365/api" -) - -// DeltaUpdate holds the results of a current delta token. It normally -// gets produced when aggregating the addition and removal of items in -// a delta-queryable folder. -// FIXME: This is same as exchange.api.DeltaUpdate -type DeltaUpdate struct { - // the deltaLink itself - URL string - // true if the old delta was marked as invalid - Reset bool -} - -// itemCollector functions collect the items found in a drive -type itemCollector func( - ctx context.Context, - driveID, driveName string, - driveItems []models.DriveItemable, - oldPaths map[string]string, - newPaths map[string]string, - excluded map[string]struct{}, - itemCollections map[string]map[string]string, - validPrevDelta bool, - errs *fault.Bus, -) error - -// collectItems will enumerate all items in the specified drive and hand them to the -// provided `collector` method -func collectItems( - ctx context.Context, - pager api.DeltaPager[models.DriveItemable], - driveID, driveName string, - collector itemCollector, - oldPaths map[string]string, - prevDelta string, - errs *fault.Bus, -) ( - DeltaUpdate, - map[string]string, // newPaths - map[string]struct{}, // excluded - error, -) { - var ( - newDeltaURL = "" - newPaths = map[string]string{} - excluded = map[string]struct{}{} - invalidPrevDelta = len(prevDelta) == 0 - - // itemCollection is used to identify which collection a - // file belongs to. 
This is useful to delete a file from the - // collection it was previously in, in case it was moved to a - // different collection within the same delta query - // drive ID -> item ID -> item ID - itemCollection = map[string]map[string]string{ - driveID: {}, - } - ) - - if !invalidPrevDelta { - maps.Copy(newPaths, oldPaths) - pager.SetNextLink(prevDelta) - } - - for { - // assume delta urls here, which allows single-token consumption - page, err := pager.GetPage(graph.ConsumeNTokens(ctx, graph.SingleGetOrDeltaLC)) - - if graph.IsErrInvalidDelta(err) { - logger.Ctx(ctx).Infow("Invalid previous delta link", "link", prevDelta) - - invalidPrevDelta = true - newPaths = map[string]string{} - - pager.Reset(ctx) - - continue - } - - if err != nil { - return DeltaUpdate{}, nil, nil, graph.Wrap(ctx, err, "getting page") - } - - vals := page.GetValue() - - err = collector( - ctx, - driveID, - driveName, - vals, - oldPaths, - newPaths, - excluded, - itemCollection, - invalidPrevDelta, - errs) - if err != nil { - return DeltaUpdate{}, nil, nil, err - } - - nextLink, deltaLink := api.NextAndDeltaLink(page) - - if len(deltaLink) > 0 { - newDeltaURL = deltaLink - } - - // Check if there are more items - if len(nextLink) == 0 { - break - } - - logger.Ctx(ctx).Debugw("Found nextLink", "link", nextLink) - pager.SetNextLink(nextLink) - } - - return DeltaUpdate{URL: newDeltaURL, Reset: invalidPrevDelta}, newPaths, excluded, nil -} - -// newItem initializes a `models.DriveItemable` that can be used as input to `createItem` -func newItem(name string, folder bool) *models.DriveItem { - itemToCreate := models.NewDriveItem() - itemToCreate.SetName(&name) - - if folder { - itemToCreate.SetFolder(models.NewFolder()) - } else { - itemToCreate.SetFile(models.NewFile()) - } - - return itemToCreate -} diff --git a/src/internal/m365/collection/drive/item_handler.go b/src/internal/m365/collection/drive/item_handler.go index 0e72ec55f..a6e7d7c46 100644 --- a/src/internal/m365/collection/drive/item_handler.go +++ b/src/internal/m365/collection/drive/item_handler.go @@ -88,13 +88,6 @@ func (h itemBackupHandler) NewDrivePager( return h.ac.NewUserDrivePager(resourceOwner, fields) } -func (h itemBackupHandler) NewItemPager( - driveID, link string, - fields []string, -) api.DeltaPager[models.DriveItemable] { - return h.ac.NewDriveItemDeltaPager(driveID, link, fields) -} - func (h itemBackupHandler) AugmentItemInfo( dii details.ItemInfo, resource idname.Provider, @@ -141,6 +134,13 @@ func (h itemBackupHandler) IncludesDir(dir string) bool { return h.scope.Matches(selectors.OneDriveFolder, dir) } +func (h itemBackupHandler) EnumerateDriveItemsDelta( + ctx context.Context, + driveID, prevDeltaLink string, +) ([]models.DriveItemable, api.DeltaUpdate, error) { + return h.ac.EnumerateDriveItemsDelta(ctx, driveID, prevDeltaLink) +} + // --------------------------------------------------------------------------- // Restore // --------------------------------------------------------------------------- diff --git a/src/internal/m365/collection/drive/item_test.go b/src/internal/m365/collection/drive/item_test.go index 05dcf9e5a..e3abfa60f 100644 --- a/src/internal/m365/collection/drive/item_test.go +++ b/src/internal/m365/collection/drive/item_test.go @@ -16,12 +16,11 @@ import ( "github.com/alcionai/corso/src/internal/common/dttm" "github.com/alcionai/corso/src/internal/common/ptr" + "github.com/alcionai/corso/src/internal/m365/graph" "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/internal/tester/tconfig" 
"github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/control/testdata" - "github.com/alcionai/corso/src/pkg/fault" - "github.com/alcionai/corso/src/pkg/selectors" "github.com/alcionai/corso/src/pkg/services/m365/api" ) @@ -49,6 +48,8 @@ func (suite *ItemIntegrationSuite) SetupSuite() { suite.service = loadTestService(t) suite.user = tconfig.SecondaryM365UserID(t) + graph.InitializeConcurrencyLimiter(ctx, true, 4) + pager := suite.service.ac.Drives().NewUserDrivePager(suite.user, nil) odDrives, err := api.GetAllDrives(ctx, pager) @@ -60,83 +61,6 @@ func (suite *ItemIntegrationSuite) SetupSuite() { suite.userDriveID = ptr.Val(odDrives[0].GetId()) } -// TestItemReader is an integration test that makes a few assumptions -// about the test environment -// 1) It assumes the test user has a drive -// 2) It assumes the drive has a file it can use to test `driveItemReader` -// The test checks these in below -func (suite *ItemIntegrationSuite) TestItemReader_oneDrive() { - t := suite.T() - - ctx, flush := tester.NewContext(t) - defer flush() - - var driveItem models.DriveItemable - // This item collector tries to find "a" drive item that is a non-empty - // file to test the reader function - itemCollector := func( - _ context.Context, - _, _ string, - items []models.DriveItemable, - _ map[string]string, - _ map[string]string, - _ map[string]struct{}, - _ map[string]map[string]string, - _ bool, - _ *fault.Bus, - ) error { - if driveItem != nil { - return nil - } - - for _, item := range items { - if item.GetFile() != nil && ptr.Val(item.GetSize()) > 0 { - driveItem = item - break - } - } - - return nil - } - - ip := suite.service.ac. - Drives(). - NewDriveItemDeltaPager(suite.userDriveID, "", api.DriveItemSelectDefault()) - - _, _, _, err := collectItems( - ctx, - ip, - suite.userDriveID, - "General", - itemCollector, - map[string]string{}, - "", - fault.New(true)) - require.NoError(t, err, clues.ToCore(err)) - - // Test Requirement 2: Need a file - require.NotEmpty( - t, - driveItem, - "no file item found for user %s drive %s", - suite.user, - suite.userDriveID) - - bh := itemBackupHandler{ - suite.service.ac.Drives(), - suite.user, - (&selectors.OneDriveBackup{}).Folders(selectors.Any())[0], - } - - // Read data for the file - itemData, err := downloadItem(ctx, bh, driveItem) - require.NoError(t, err, clues.ToCore(err)) - - size, err := io.Copy(io.Discard, itemData) - require.NoError(t, err, clues.ToCore(err)) - require.NotZero(t, size) -} - // TestItemWriter is an integration test for uploading data to OneDrive // It creates a new folder with a new item and writes data to it func (suite *ItemIntegrationSuite) TestItemWriter() { @@ -171,7 +95,7 @@ func (suite *ItemIntegrationSuite) TestItemWriter() { ctx, test.driveID, ptr.Val(root.GetId()), - newItem(newFolderName, true), + api.NewDriveItem(newFolderName, true), control.Copy) require.NoError(t, err, clues.ToCore(err)) require.NotNil(t, newFolder.GetId()) @@ -183,7 +107,7 @@ func (suite *ItemIntegrationSuite) TestItemWriter() { ctx, test.driveID, ptr.Val(newFolder.GetId()), - newItem(newItemName, false), + api.NewDriveItem(newItemName, false), control.Copy) require.NoError(t, err, clues.ToCore(err)) require.NotNil(t, newItem.GetId()) @@ -317,7 +241,7 @@ func (suite *ItemUnitTestSuite) TestDownloadItem() { { name: "success", itemFunc: func() models.DriveItemable { - di := newItem("test", false) + di := api.NewDriveItem("test", false) di.SetAdditionalData(map[string]any{ "@microsoft.graph.downloadUrl": url, }) @@ -336,7 
+260,7 @@ func (suite *ItemUnitTestSuite) TestDownloadItem() { { name: "success, content url set instead of download url", itemFunc: func() models.DriveItemable { - di := newItem("test", false) + di := api.NewDriveItem("test", false) di.SetAdditionalData(map[string]any{ "@content.downloadUrl": url, }) @@ -355,7 +279,7 @@ func (suite *ItemUnitTestSuite) TestDownloadItem() { { name: "api getter returns error", itemFunc: func() models.DriveItemable { - di := newItem("test", false) + di := api.NewDriveItem("test", false) di.SetAdditionalData(map[string]any{ "@microsoft.graph.downloadUrl": url, }) @@ -371,7 +295,7 @@ func (suite *ItemUnitTestSuite) TestDownloadItem() { { name: "download url is empty", itemFunc: func() models.DriveItemable { - di := newItem("test", false) + di := api.NewDriveItem("test", false) return di }, GetFunc: func(ctx context.Context, url string) (*http.Response, error) { @@ -386,7 +310,7 @@ func (suite *ItemUnitTestSuite) TestDownloadItem() { { name: "malware", itemFunc: func() models.DriveItemable { - di := newItem("test", false) + di := api.NewDriveItem("test", false) di.SetAdditionalData(map[string]any{ "@microsoft.graph.downloadUrl": url, }) @@ -408,7 +332,7 @@ func (suite *ItemUnitTestSuite) TestDownloadItem() { { name: "non-2xx http response", itemFunc: func() models.DriveItemable { - di := newItem("test", false) + di := api.NewDriveItem("test", false) di.SetAdditionalData(map[string]any{ "@microsoft.graph.downloadUrl": url, }) @@ -457,7 +381,7 @@ func (suite *ItemUnitTestSuite) TestDownloadItem_ConnectionResetErrorOnFirstRead url = "https://example.com" itemFunc = func() models.DriveItemable { - di := newItem("test", false) + di := api.NewDriveItem("test", false) di.SetAdditionalData(map[string]any{ "@microsoft.graph.downloadUrl": url, }) diff --git a/src/internal/m365/collection/drive/library_handler.go b/src/internal/m365/collection/drive/library_handler.go index 51a9e5bed..b9835dbb4 100644 --- a/src/internal/m365/collection/drive/library_handler.go +++ b/src/internal/m365/collection/drive/library_handler.go @@ -91,13 +91,6 @@ func (h libraryBackupHandler) NewDrivePager( return h.ac.NewSiteDrivePager(resourceOwner, fields) } -func (h libraryBackupHandler) NewItemPager( - driveID, link string, - fields []string, -) api.DeltaPager[models.DriveItemable] { - return h.ac.NewDriveItemDeltaPager(driveID, link, fields) -} - func (h libraryBackupHandler) AugmentItemInfo( dii details.ItemInfo, resource idname.Provider, @@ -144,6 +137,13 @@ func (h libraryBackupHandler) IncludesDir(dir string) bool { return h.scope.Matches(selectors.SharePointLibraryFolder, dir) } +func (h libraryBackupHandler) EnumerateDriveItemsDelta( + ctx context.Context, + driveID, prevDeltaLink string, +) ([]models.DriveItemable, api.DeltaUpdate, error) { + return h.ac.EnumerateDriveItemsDelta(ctx, driveID, prevDeltaLink) +} + // --------------------------------------------------------------------------- // Restore // --------------------------------------------------------------------------- diff --git a/src/internal/m365/collection/drive/restore.go b/src/internal/m365/collection/drive/restore.go index 106896faa..e5eb9c8b7 100644 --- a/src/internal/m365/collection/drive/restore.go +++ b/src/internal/m365/collection/drive/restore.go @@ -671,7 +671,7 @@ func createFolder( ctx, driveID, parentFolderID, - newItem(folderName, true), + api.NewDriveItem(folderName, true), control.Replace) // ErrItemAlreadyExistsConflict can only occur for folders if the @@ -692,7 +692,7 @@ func createFolder( ctx, driveID, 
parentFolderID, - newItem(folderName, true), + api.NewDriveItem(folderName, true), control.Copy) if err != nil { return nil, clues.Wrap(err, "creating folder") @@ -733,7 +733,7 @@ func restoreFile( } var ( - item = newItem(name, false) + item = api.NewDriveItem(name, false) collisionKey = api.DriveItemCollisionKey(item) collision api.DriveItemIDType shouldDeleteOriginal bool diff --git a/src/internal/m365/collection/drive/url_cache.go b/src/internal/m365/collection/drive/url_cache.go index 1a8cc7899..ef78d48f5 100644 --- a/src/internal/m365/collection/drive/url_cache.go +++ b/src/internal/m365/collection/drive/url_cache.go @@ -12,7 +12,6 @@ import ( "github.com/alcionai/corso/src/internal/common/str" "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/logger" - "github.com/alcionai/corso/src/pkg/services/m365/api" ) const ( @@ -47,7 +46,7 @@ type urlCache struct { refreshMu sync.Mutex deltaQueryCount int - itemPager api.DeltaPager[models.DriveItemable] + edid EnumerateDriveItemsDeltaer errs *fault.Bus } @@ -56,13 +55,10 @@ type urlCache struct { func newURLCache( driveID, prevDelta string, refreshInterval time.Duration, - itemPager api.DeltaPager[models.DriveItemable], + edid EnumerateDriveItemsDeltaer, errs *fault.Bus, ) (*urlCache, error) { - err := validateCacheParams( - driveID, - refreshInterval, - itemPager) + err := validateCacheParams(driveID, refreshInterval, edid) if err != nil { return nil, clues.Wrap(err, "cache params") } @@ -71,9 +67,9 @@ func newURLCache( idToProps: make(map[string]itemProps), lastRefreshTime: time.Time{}, driveID: driveID, + edid: edid, prevDelta: prevDelta, refreshInterval: refreshInterval, - itemPager: itemPager, errs: errs, }, nil @@ -83,7 +79,7 @@ func newURLCache( func validateCacheParams( driveID string, refreshInterval time.Duration, - itemPager api.DeltaPager[models.DriveItemable], + edid EnumerateDriveItemsDeltaer, ) error { if len(driveID) == 0 { return clues.New("drive id is empty") @@ -93,8 +89,8 @@ func validateCacheParams( return clues.New("invalid refresh interval") } - if itemPager == nil { - return clues.New("nil item pager") + if edid == nil { + return clues.New("nil item enumerator") } return nil @@ -160,44 +156,23 @@ func (uc *urlCache) refreshCache( // Issue a delta query to graph logger.Ctx(ctx).Info("refreshing url cache") - err := uc.deltaQuery(ctx) + items, du, err := uc.edid.EnumerateDriveItemsDelta(ctx, uc.driveID, uc.prevDelta) if err != nil { - // clear cache uc.idToProps = make(map[string]itemProps) + return clues.Stack(err) + } - return err + uc.deltaQueryCount++ + + if err := uc.updateCache(ctx, items, uc.errs); err != nil { + return clues.Stack(err) } logger.Ctx(ctx).Info("url cache refreshed") // Update last refresh time uc.lastRefreshTime = time.Now() - - return nil -} - -// deltaQuery performs a delta query on the drive and update the cache -func (uc *urlCache) deltaQuery( - ctx context.Context, -) error { - logger.Ctx(ctx).Debug("starting delta query") - // Reset item pager to remove any previous state - uc.itemPager.Reset(ctx) - - _, _, _, err := collectItems( - ctx, - uc.itemPager, - uc.driveID, - "", - uc.updateCache, - map[string]string{}, - uc.prevDelta, - uc.errs) - if err != nil { - return clues.Wrap(err, "delta query") - } - - uc.deltaQueryCount++ + uc.prevDelta = du.URL return nil } @@ -224,13 +199,7 @@ func (uc *urlCache) readCache( // It assumes that cacheMu is held by caller in write mode func (uc *urlCache) updateCache( ctx context.Context, - _, _ string, items 
[]models.DriveItemable, - _ map[string]string, - _ map[string]string, - _ map[string]struct{}, - _ map[string]map[string]string, - _ bool, errs *fault.Bus, ) error { el := errs.Local() diff --git a/src/internal/m365/collection/drive/url_cache_test.go b/src/internal/m365/collection/drive/url_cache_test.go index 5b35ddff2..c8e23864f 100644 --- a/src/internal/m365/collection/drive/url_cache_test.go +++ b/src/internal/m365/collection/drive/url_cache_test.go @@ -1,7 +1,6 @@ package drive import ( - "context" "errors" "io" "math/rand" @@ -18,15 +17,19 @@ import ( "github.com/alcionai/corso/src/internal/common/dttm" "github.com/alcionai/corso/src/internal/common/ptr" "github.com/alcionai/corso/src/internal/m365/graph" + "github.com/alcionai/corso/src/internal/m365/service/onedrive/mock" "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/internal/tester/tconfig" "github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/control/testdata" "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/services/m365/api" - apiMock "github.com/alcionai/corso/src/pkg/services/m365/api/mock" ) +// --------------------------------------------------------------------------- +// integration +// --------------------------------------------------------------------------- + type URLCacheIntegrationSuite struct { tester.Suite ac api.Client @@ -68,11 +71,10 @@ func (suite *URLCacheIntegrationSuite) SetupSuite() { // url cache func (suite *URLCacheIntegrationSuite) TestURLCacheBasic() { var ( - t = suite.T() - ac = suite.ac.Drives() - driveID = suite.driveID - newFolderName = testdata.DefaultRestoreConfig("folder").Location - driveItemPager = suite.ac.Drives().NewDriveItemDeltaPager(driveID, "", api.DriveItemSelectDefault()) + t = suite.T() + ac = suite.ac.Drives() + driveID = suite.driveID + newFolderName = testdata.DefaultRestoreConfig("folder").Location ) ctx, flush := tester.NewContext(t) @@ -82,11 +84,11 @@ func (suite *URLCacheIntegrationSuite) TestURLCacheBasic() { root, err := ac.GetRootFolder(ctx, driveID) require.NoError(t, err, clues.ToCore(err)) - newFolder, err := ac.Drives().PostItemInContainer( + newFolder, err := ac.PostItemInContainer( ctx, driveID, ptr.Val(root.GetId()), - newItem(newFolderName, true), + api.NewDriveItem(newFolderName, true), control.Copy) require.NoError(t, err, clues.ToCore(err)) @@ -94,33 +96,10 @@ func (suite *URLCacheIntegrationSuite) TestURLCacheBasic() { nfid := ptr.Val(newFolder.GetId()) - collectorFunc := func( - context.Context, - string, - string, - []models.DriveItemable, - map[string]string, - map[string]string, - map[string]struct{}, - map[string]map[string]string, - bool, - *fault.Bus, - ) error { - return nil - } - // Get the previous delta to feed into url cache - prevDelta, _, _, err := collectItems( - ctx, - suite.ac.Drives().NewDriveItemDeltaPager(driveID, "", api.DriveItemSelectURLCache()), - suite.driveID, - "drive-name", - collectorFunc, - map[string]string{}, - "", - fault.New(true)) + _, du, err := ac.EnumerateDriveItemsDelta(ctx, suite.driveID, "") require.NoError(t, err, clues.ToCore(err)) - require.NotNil(t, prevDelta.URL) + require.NotEmpty(t, du.URL) // Create a bunch of files in the new folder var items []models.DriveItemable @@ -128,11 +107,11 @@ func (suite *URLCacheIntegrationSuite) TestURLCacheBasic() { for i := 0; i < 5; i++ { newItemName := "test_url_cache_basic_" + dttm.FormatNow(dttm.SafeForTesting) - item, err := ac.Drives().PostItemInContainer( + item, err := 
ac.PostItemInContainer( ctx, driveID, nfid, - newItem(newItemName, false), + api.NewDriveItem(newItemName, false), control.Copy) require.NoError(t, err, clues.ToCore(err)) @@ -142,9 +121,9 @@ func (suite *URLCacheIntegrationSuite) TestURLCacheBasic() { // Create a new URL cache with a long TTL uc, err := newURLCache( suite.driveID, - prevDelta.URL, + du.URL, 1*time.Hour, - driveItemPager, + suite.ac.Drives(), fault.New(true)) require.NoError(t, err, clues.ToCore(err)) @@ -195,6 +174,10 @@ func (suite *URLCacheIntegrationSuite) TestURLCacheBasic() { require.Equal(t, 1, uc.deltaQueryCount) } +// --------------------------------------------------------------------------- +// unit +// --------------------------------------------------------------------------- + type URLCacheUnitSuite struct { tester.Suite } @@ -205,27 +188,20 @@ func TestURLCacheUnitSuite(t *testing.T) { func (suite *URLCacheUnitSuite) TestGetItemProperties() { deltaString := "delta" - next := "next" driveID := "drive1" table := []struct { name string - pagerResult map[string][]apiMock.PagerResult[models.DriveItemable] + pagerItems map[string][]models.DriveItemable + pagerErr map[string]error expectedItemProps map[string]itemProps expectedErr require.ErrorAssertionFunc cacheAssert func(*urlCache, time.Time) }{ { name: "single item in cache", - pagerResult: map[string][]apiMock.PagerResult[models.DriveItemable]{ - driveID: { - { - Values: []models.DriveItemable{ - fileItem("1", "file1", "root", "root", "https://dummy1.com", false), - }, - DeltaLink: &deltaString, - }, - }, + pagerItems: map[string][]models.DriveItemable{ + driveID: {fileItem("1", "file1", "root", "root", "https://dummy1.com", false)}, }, expectedItemProps: map[string]itemProps{ "1": { @@ -242,18 +218,13 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() { }, { name: "multiple items in cache", - pagerResult: map[string][]apiMock.PagerResult[models.DriveItemable]{ + pagerItems: map[string][]models.DriveItemable{ driveID: { - { - Values: []models.DriveItemable{ - fileItem("1", "file1", "root", "root", "https://dummy1.com", false), - fileItem("2", "file2", "root", "root", "https://dummy2.com", false), - fileItem("3", "file3", "root", "root", "https://dummy3.com", false), - fileItem("4", "file4", "root", "root", "https://dummy4.com", false), - fileItem("5", "file5", "root", "root", "https://dummy5.com", false), - }, - DeltaLink: &deltaString, - }, + fileItem("1", "file1", "root", "root", "https://dummy1.com", false), + fileItem("2", "file2", "root", "root", "https://dummy2.com", false), + fileItem("3", "file3", "root", "root", "https://dummy3.com", false), + fileItem("4", "file4", "root", "root", "https://dummy4.com", false), + fileItem("5", "file5", "root", "root", "https://dummy5.com", false), }, }, expectedItemProps: map[string]itemProps{ @@ -287,18 +258,13 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() { }, { name: "duplicate items with potentially new urls", - pagerResult: map[string][]apiMock.PagerResult[models.DriveItemable]{ + pagerItems: map[string][]models.DriveItemable{ driveID: { - { - Values: []models.DriveItemable{ - fileItem("1", "file1", "root", "root", "https://dummy1.com", false), - fileItem("2", "file2", "root", "root", "https://dummy2.com", false), - fileItem("3", "file3", "root", "root", "https://dummy3.com", false), - fileItem("1", "file1", "root", "root", "https://test1.com", false), - fileItem("2", "file2", "root", "root", "https://test2.com", false), - }, - DeltaLink: &deltaString, - }, + fileItem("1", "file1", "root", 
"root", "https://dummy1.com", false), + fileItem("2", "file2", "root", "root", "https://dummy2.com", false), + fileItem("3", "file3", "root", "root", "https://dummy3.com", false), + fileItem("1", "file1", "root", "root", "https://test1.com", false), + fileItem("2", "file2", "root", "root", "https://test2.com", false), }, }, expectedItemProps: map[string]itemProps{ @@ -324,16 +290,11 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() { }, { name: "deleted items", - pagerResult: map[string][]apiMock.PagerResult[models.DriveItemable]{ + pagerItems: map[string][]models.DriveItemable{ driveID: { - { - Values: []models.DriveItemable{ - fileItem("1", "file1", "root", "root", "https://dummy1.com", false), - fileItem("2", "file2", "root", "root", "https://dummy2.com", false), - fileItem("1", "file1", "root", "root", "https://dummy1.com", true), - }, - DeltaLink: &deltaString, - }, + fileItem("1", "file1", "root", "root", "https://dummy1.com", false), + fileItem("2", "file2", "root", "root", "https://dummy2.com", false), + fileItem("1", "file1", "root", "root", "https://dummy1.com", true), }, }, expectedItemProps: map[string]itemProps{ @@ -355,15 +316,8 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() { }, { name: "item not found in cache", - pagerResult: map[string][]apiMock.PagerResult[models.DriveItemable]{ - driveID: { - { - Values: []models.DriveItemable{ - fileItem("1", "file1", "root", "root", "https://dummy1.com", false), - }, - DeltaLink: &deltaString, - }, - }, + pagerItems: map[string][]models.DriveItemable{ + driveID: {fileItem("1", "file1", "root", "root", "https://dummy1.com", false)}, }, expectedItemProps: map[string]itemProps{ "2": {}, @@ -376,23 +330,10 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() { }, }, { - name: "multi-page delta query error", - pagerResult: map[string][]apiMock.PagerResult[models.DriveItemable]{ - driveID: { - { - Values: []models.DriveItemable{ - fileItem("1", "file1", "root", "root", "https://dummy1.com", false), - }, - NextLink: &next, - }, - { - Values: []models.DriveItemable{ - fileItem("2", "file2", "root", "root", "https://dummy2.com", false), - }, - DeltaLink: &deltaString, - Err: errors.New("delta query error"), - }, - }, + name: "delta query error", + pagerItems: map[string][]models.DriveItemable{}, + pagerErr: map[string]error{ + driveID: errors.New("delta query error"), }, expectedItemProps: map[string]itemProps{ "1": {}, @@ -408,15 +349,10 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() { { name: "folder item", - pagerResult: map[string][]apiMock.PagerResult[models.DriveItemable]{ + pagerItems: map[string][]models.DriveItemable{ driveID: { - { - Values: []models.DriveItemable{ - fileItem("1", "file1", "root", "root", "https://dummy1.com", false), - driveItem("2", "folder2", "root", "root", false, true, false), - }, - DeltaLink: &deltaString, - }, + fileItem("1", "file1", "root", "root", "https://dummy1.com", false), + driveItem("2", "folder2", "root", "root", false, true, false), }, }, expectedItemProps: map[string]itemProps{ @@ -437,15 +373,17 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() { ctx, flush := tester.NewContext(t) defer flush() - itemPager := &apiMock.DeltaPager[models.DriveItemable]{ - ToReturn: test.pagerResult[driveID], + medi := mock.EnumeratesDriveItemsDelta{ + Items: test.pagerItems, + Err: test.pagerErr, + DeltaUpdate: map[string]api.DeltaUpdate{driveID: {URL: deltaString}}, } cache, err := newURLCache( driveID, "", 1*time.Hour, - itemPager, + &medi, fault.New(true)) 
require.NoError(suite.T(), err, clues.ToCore(err)) @@ -480,15 +418,17 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() { // Test needsRefresh func (suite *URLCacheUnitSuite) TestNeedsRefresh() { - driveID := "drive1" - t := suite.T() - refreshInterval := 1 * time.Second + var ( + t = suite.T() + driveID = "drive1" + refreshInterval = 1 * time.Second + ) cache, err := newURLCache( driveID, "", refreshInterval, - &apiMock.DeltaPager[models.DriveItemable]{}, + &mock.EnumeratesDriveItemsDelta{}, fault.New(true)) require.NoError(t, err, clues.ToCore(err)) @@ -510,14 +450,12 @@ func (suite *URLCacheUnitSuite) TestNeedsRefresh() { require.False(t, cache.needsRefresh()) } -// Test newURLCache func (suite *URLCacheUnitSuite) TestNewURLCache() { - // table driven tests table := []struct { name string driveID string refreshInt time.Duration - itemPager api.DeltaPager[models.DriveItemable] + itemPager EnumerateDriveItemsDeltaer errors *fault.Bus expectedErr require.ErrorAssertionFunc }{ @@ -525,7 +463,7 @@ func (suite *URLCacheUnitSuite) TestNewURLCache() { name: "invalid driveID", driveID: "", refreshInt: 1 * time.Hour, - itemPager: &apiMock.DeltaPager[models.DriveItemable]{}, + itemPager: &mock.EnumeratesDriveItemsDelta{}, errors: fault.New(true), expectedErr: require.Error, }, @@ -533,12 +471,12 @@ func (suite *URLCacheUnitSuite) TestNewURLCache() { name: "invalid refresh interval", driveID: "drive1", refreshInt: 100 * time.Millisecond, - itemPager: &apiMock.DeltaPager[models.DriveItemable]{}, + itemPager: &mock.EnumeratesDriveItemsDelta{}, errors: fault.New(true), expectedErr: require.Error, }, { - name: "invalid itemPager", + name: "invalid item enumerator", driveID: "drive1", refreshInt: 1 * time.Hour, itemPager: nil, @@ -549,7 +487,7 @@ func (suite *URLCacheUnitSuite) TestNewURLCache() { name: "valid", driveID: "drive1", refreshInt: 1 * time.Hour, - itemPager: &apiMock.DeltaPager[models.DriveItemable]{}, + itemPager: &mock.EnumeratesDriveItemsDelta{}, errors: fault.New(true), expectedErr: require.NoError, }, diff --git a/src/internal/m365/collection/exchange/backup.go b/src/internal/m365/collection/exchange/backup.go index 48193f89b..87db96312 100644 --- a/src/internal/m365/collection/exchange/backup.go +++ b/src/internal/m365/collection/exchange/backup.go @@ -160,7 +160,7 @@ func populateCollections( ictx = clues.Add(ictx, "previous_path", prevPath) - added, _, removed, newDelta, err := bh.itemEnumerator(). + added, validModTimes, removed, newDelta, err := bh.itemEnumerator(). GetAddedAndRemovedItemIDs( ictx, qp.ProtectedResource.ID(), @@ -199,9 +199,7 @@ func populateCollections( bh.itemHandler(), added, removed, - // TODO(ashmrtn): Set to value returned by pager when we have deletion - // markers in files. 
- false, + validModTimes, statusUpdater) collections[cID] = edc diff --git a/src/internal/m365/collection/exchange/collection.go b/src/internal/m365/collection/exchange/collection.go index 71b9bb01b..74ac0e88f 100644 --- a/src/internal/m365/collection/exchange/collection.go +++ b/src/internal/m365/collection/exchange/collection.go @@ -278,7 +278,7 @@ func (col *prefetchCollection) streamItems( return } - item, err := data.NewPrefetchedItem( + item, err := data.NewPrefetchedItemWithInfo( io.NopCloser(bytes.NewReader(itemData)), id, details.ItemInfo{Exchange: info}) @@ -403,7 +403,7 @@ func (col *lazyFetchCollection) streamItems( "service", path.ExchangeService.String(), "category", col.Category().String()) - stream <- data.NewLazyItem( + stream <- data.NewLazyItemWithInfo( ictx, &lazyItemGetter{ userID: user, diff --git a/src/internal/m365/collection/exchange/collection_test.go b/src/internal/m365/collection/exchange/collection_test.go index f373bd1a5..c52a9eca0 100644 --- a/src/internal/m365/collection/exchange/collection_test.go +++ b/src/internal/m365/collection/exchange/collection_test.go @@ -56,7 +56,7 @@ func (suite *CollectionUnitSuite) TestPrefetchedItem_Reader() { suite.Run(test.name, func() { t := suite.T() - ed, err := data.NewPrefetchedItem( + ed, err := data.NewPrefetchedItemWithInfo( io.NopCloser(bytes.NewReader(test.readData)), "itemID", details.ItemInfo{}) @@ -494,7 +494,7 @@ func (suite *CollectionUnitSuite) TestLazyItem_NoRead_GetInfo_Errors() { ctx, flush := tester.NewContext(t) defer flush() - li := data.NewLazyItem( + li := data.NewLazyItemWithInfo( ctx, nil, "itemID", @@ -552,7 +552,7 @@ func (suite *CollectionUnitSuite) TestLazyItem_GetDataErrors() { SerializeErr: test.serializeErr, } - li := data.NewLazyItem( + li := data.NewLazyItemWithInfo( ctx, &lazyItemGetter{ userID: "userID", @@ -592,7 +592,7 @@ func (suite *CollectionUnitSuite) TestLazyItem_ReturnsEmptyReaderOnDeletedInFlig getter := &mock.ItemGetSerialize{GetErr: graph.ErrDeletedInFlight} - li := data.NewLazyItem( + li := data.NewLazyItemWithInfo( ctx, &lazyItemGetter{ userID: "userID", @@ -645,7 +645,7 @@ func (suite *CollectionUnitSuite) TestLazyItem() { getter := &mock.ItemGetSerialize{GetData: testData} - li := data.NewLazyItem( + li := data.NewLazyItemWithInfo( ctx, &lazyItemGetter{ userID: "userID", diff --git a/src/internal/m365/collection/groups/backup_test.go b/src/internal/m365/collection/groups/backup_test.go index 899b6ceea..a372922ba 100644 --- a/src/internal/m365/collection/groups/backup_test.go +++ b/src/internal/m365/collection/groups/backup_test.go @@ -2,7 +2,6 @@ package groups import ( "context" - "fmt" "testing" "time" @@ -527,8 +526,6 @@ func (suite *BackupIntgSuite) TestCreateCollections() { require.NotEmpty(t, c.FullPath().Folder(false)) - fmt.Printf("\n-----\nfolder %+v\n-----\n", c.FullPath().Folder(false)) - // TODO(ashmrtn): Remove when LocationPath is made part of BackupCollection // interface. 
if !assert.Implements(t, (*data.LocationPather)(nil), c) { @@ -537,8 +534,6 @@ func (suite *BackupIntgSuite) TestCreateCollections() { loc := c.(data.LocationPather).LocationPath().String() - fmt.Printf("\n-----\nloc %+v\n-----\n", c.(data.LocationPather).LocationPath().String()) - require.NotEmpty(t, loc) delete(test.channelNames, loc) diff --git a/src/internal/m365/collection/groups/collection.go b/src/internal/m365/collection/groups/collection.go index 0a1ca7212..c9d0854b6 100644 --- a/src/internal/m365/collection/groups/collection.go +++ b/src/internal/m365/collection/groups/collection.go @@ -176,7 +176,7 @@ func (col *Collection) streamItems(ctx context.Context, errs *fault.Bus) { info.ParentPath = col.LocationPath().String() - storeItem, err := data.NewPrefetchedItem( + storeItem, err := data.NewPrefetchedItemWithInfo( io.NopCloser(bytes.NewReader(itemData)), id, details.ItemInfo{Groups: info}) diff --git a/src/internal/m365/collection/groups/collection_test.go b/src/internal/m365/collection/groups/collection_test.go index e0bf19d19..1f0c17d25 100644 --- a/src/internal/m365/collection/groups/collection_test.go +++ b/src/internal/m365/collection/groups/collection_test.go @@ -49,7 +49,7 @@ func (suite *CollectionUnitSuite) TestPrefetchedItem_Reader() { suite.Run(test.name, func() { t := suite.T() - ed, err := data.NewPrefetchedItem( + ed, err := data.NewPrefetchedItemWithInfo( io.NopCloser(bytes.NewReader(test.readData)), "itemID", details.ItemInfo{}) diff --git a/src/internal/m365/collection/site/collection.go b/src/internal/m365/collection/site/collection.go index 8af643d4b..43676b954 100644 --- a/src/internal/m365/collection/site/collection.go +++ b/src/internal/m365/collection/site/collection.go @@ -212,7 +212,7 @@ func (sc *Collection) retrieveLists( metrics.Successes++ - item, err := data.NewPrefetchedItem( + item, err := data.NewPrefetchedItemWithInfo( io.NopCloser(bytes.NewReader(byteArray)), ptr.Val(lst.GetId()), details.ItemInfo{SharePoint: ListToSPInfo(lst, size)}) @@ -279,7 +279,7 @@ func (sc *Collection) retrievePages( metrics.Bytes += size metrics.Successes++ - item, err := data.NewPrefetchedItem( + item, err := data.NewPrefetchedItemWithInfo( io.NopCloser(bytes.NewReader(byteArray)), ptr.Val(pg.GetId()), details.ItemInfo{SharePoint: pageToSPInfo(pg, root, size)}) diff --git a/src/internal/m365/collection/site/collection_test.go b/src/internal/m365/collection/site/collection_test.go index 5b53513f0..5e0420c63 100644 --- a/src/internal/m365/collection/site/collection_test.go +++ b/src/internal/m365/collection/site/collection_test.go @@ -103,7 +103,7 @@ func (suite *SharePointCollectionSuite) TestCollection_Items() { byteArray, err := ow.GetSerializedContent() require.NoError(t, err, clues.ToCore(err)) - data, err := data.NewPrefetchedItem( + data, err := data.NewPrefetchedItemWithInfo( io.NopCloser(bytes.NewReader(byteArray)), name, details.ItemInfo{SharePoint: ListToSPInfo(listing, int64(len(byteArray)))}) @@ -133,7 +133,7 @@ func (suite *SharePointCollectionSuite) TestCollection_Items() { page, err := betaAPI.CreatePageFromBytes(byteArray) require.NoError(t, err, clues.ToCore(err)) - data, err := data.NewPrefetchedItem( + data, err := data.NewPrefetchedItemWithInfo( io.NopCloser(bytes.NewReader(byteArray)), itemName, details.ItemInfo{SharePoint: betaAPI.PageInfo(page, int64(len(byteArray)))}) @@ -196,7 +196,7 @@ func (suite *SharePointCollectionSuite) TestListCollection_Restore() { byteArray, err := service.Serialize(listing) require.NoError(t, err, clues.ToCore(err)) - 
listData, err := data.NewPrefetchedItem( + listData, err := data.NewPrefetchedItemWithInfo( io.NopCloser(bytes.NewReader(byteArray)), testName, details.ItemInfo{SharePoint: ListToSPInfo(listing, int64(len(byteArray)))}) diff --git a/src/internal/m365/graph/errors.go b/src/internal/m365/graph/errors.go index 6a758977e..b15ccc417 100644 --- a/src/internal/m365/graph/errors.go +++ b/src/internal/m365/graph/errors.go @@ -124,6 +124,8 @@ var ( ErrTimeout = clues.New("communication timeout") ErrResourceOwnerNotFound = clues.New("resource owner not found in tenant") + + ErrTokenExpired = clues.New("jwt token expired") ) func IsErrApplicationThrottled(err error) bool { @@ -224,7 +226,8 @@ func IsErrUnauthorized(err error) bool { // TODO: refine this investigation. We don't currently know if // a specific item download url expired, or if the full connection // auth expired. - return clues.HasLabel(err, LabelStatus(http.StatusUnauthorized)) + return clues.HasLabel(err, LabelStatus(http.StatusUnauthorized)) || + errors.Is(err, ErrTokenExpired) } func IsErrItemAlreadyExistsConflict(err error) bool { diff --git a/src/internal/m365/graph/errors_test.go b/src/internal/m365/graph/errors_test.go index cd0057fda..7921b2b64 100644 --- a/src/internal/m365/graph/errors_test.go +++ b/src/internal/m365/graph/errors_test.go @@ -478,11 +478,16 @@ func (suite *GraphErrorsUnitSuite) TestIsErrUnauthorized() { expect: assert.False, }, { - name: "as", + name: "graph 401", err: clues.Stack(assert.AnError). Label(LabelStatus(http.StatusUnauthorized)), expect: assert.True, }, + { + name: "token expired", + err: clues.Stack(assert.AnError, ErrTokenExpired), + expect: assert.True, + }, } for _, test := range table { suite.Run(test.name, func() { diff --git a/src/internal/m365/graph/metadata_collection.go b/src/internal/m365/graph/metadata_collection.go index 1c3d1f766..9d9534c1e 100644 --- a/src/internal/m365/graph/metadata_collection.go +++ b/src/internal/m365/graph/metadata_collection.go @@ -57,7 +57,7 @@ func (mce MetadataCollectionEntry) toMetadataItem() (metadataItem, error) { return metadataItem{}, clues.Wrap(err, "serializing metadata") } - item, err := data.NewUnindexedPrefetchedItem( + item, err := data.NewPrefetchedItem( io.NopCloser(buf), mce.fileName, time.Now()) diff --git a/src/internal/m365/graph/metadata_collection_test.go b/src/internal/m365/graph/metadata_collection_test.go index ee9ca6b5c..1e4a087ae 100644 --- a/src/internal/m365/graph/metadata_collection_test.go +++ b/src/internal/m365/graph/metadata_collection_test.go @@ -70,7 +70,7 @@ func (suite *MetadataCollectionUnitSuite) TestItems() { items := []metadataItem{} for i := 0; i < len(itemNames); i++ { - item, err := data.NewUnindexedPrefetchedItem( + item, err := data.NewPrefetchedItem( io.NopCloser(bytes.NewReader(itemData[i])), itemNames[i], time.Time{}) diff --git a/src/internal/m365/service/onedrive/mock/handlers.go b/src/internal/m365/service/onedrive/mock/handlers.go index 5d1b603b2..6678e4c57 100644 --- a/src/internal/m365/service/onedrive/mock/handlers.go +++ b/src/internal/m365/service/onedrive/mock/handlers.go @@ -9,11 +9,13 @@ import ( "github.com/microsoftgraph/msgraph-sdk-go/models" "github.com/alcionai/corso/src/internal/common/idname" + "github.com/alcionai/corso/src/internal/common/ptr" odConsts "github.com/alcionai/corso/src/internal/m365/service/onedrive/consts" "github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/path" 
"github.com/alcionai/corso/src/pkg/services/m365/api" + apiMock "github.com/alcionai/corso/src/pkg/services/m365/api/mock" ) // --------------------------------------------------------------------------- @@ -23,6 +25,8 @@ import ( type BackupHandler struct { ItemInfo details.ItemInfo + DriveItemEnumeration EnumeratesDriveItemsDelta + GI GetsItem GIP GetsItemPermission @@ -56,6 +60,7 @@ func DefaultOneDriveBH(resourceOwner string) *BackupHandler { OneDrive: &details.OneDriveInfo{}, Extension: &details.ExtensionData{}, }, + DriveItemEnumeration: EnumeratesDriveItemsDelta{}, GI: GetsItem{Err: clues.New("not defined")}, GIP: GetsItemPermission{Err: clues.New("not defined")}, PathPrefixFn: defaultOneDrivePathPrefixer, @@ -125,10 +130,6 @@ func (h BackupHandler) NewDrivePager(string, []string) api.Pager[models.Driveabl return h.DrivePagerV } -func (h BackupHandler) NewItemPager(driveID string, _ string, _ []string) api.DeltaPager[models.DriveItemable] { - return h.ItemPagerV[driveID] -} - func (h BackupHandler) FormatDisplayPath(_ string, pb *path.Builder) string { return "/" + pb.String() } @@ -159,6 +160,13 @@ func (h *BackupHandler) Get(context.Context, string, map[string]string) (*http.R return h.GetResps[c], h.GetErrs[c] } +func (h BackupHandler) EnumerateDriveItemsDelta( + ctx context.Context, + driveID, prevDeltaLink string, +) ([]models.DriveItemable, api.DeltaUpdate, error) { + return h.DriveItemEnumeration.EnumerateDriveItemsDelta(ctx, driveID, prevDeltaLink) +} + func (h BackupHandler) GetItem(ctx context.Context, _, _ string) (models.DriveItemable, error) { return h.GI.GetItem(ctx, "", "") } @@ -261,6 +269,65 @@ func (m GetsItem) GetItem( return m.Item, m.Err } +// --------------------------------------------------------------------------- +// Enumerates Drive Items +// --------------------------------------------------------------------------- + +type EnumeratesDriveItemsDelta struct { + Items map[string][]models.DriveItemable + DeltaUpdate map[string]api.DeltaUpdate + Err map[string]error +} + +func (edi EnumeratesDriveItemsDelta) EnumerateDriveItemsDelta( + _ context.Context, + driveID, _ string, +) ( + []models.DriveItemable, + api.DeltaUpdate, + error, +) { + return edi.Items[driveID], edi.DeltaUpdate[driveID], edi.Err[driveID] +} + +func PagerResultToEDID( + m map[string][]apiMock.PagerResult[models.DriveItemable], +) EnumeratesDriveItemsDelta { + edi := EnumeratesDriveItemsDelta{ + Items: map[string][]models.DriveItemable{}, + DeltaUpdate: map[string]api.DeltaUpdate{}, + Err: map[string]error{}, + } + + for driveID, results := range m { + var ( + err error + items = []models.DriveItemable{} + deltaUpdate api.DeltaUpdate + ) + + for _, pr := range results { + items = append(items, pr.Values...) 
+			if pr.DeltaLink != nil {
+				deltaUpdate = api.DeltaUpdate{URL: ptr.Val(pr.DeltaLink)}
+			}
+
+			if pr.Err != nil {
+				err = pr.Err
+			}
+
+			deltaUpdate.Reset = deltaUpdate.Reset || pr.ResetDelta
+		}
+
+		edi.Items[driveID] = items
+		edi.Err[driveID] = err
+		edi.DeltaUpdate[driveID] = deltaUpdate
+	}
+
+	return edi
+}
+
 // ---------------------------------------------------------------------------
 // Get Item Permissioner
 // ---------------------------------------------------------------------------
diff --git a/src/internal/m365/service/sharepoint/api/pages_test.go b/src/internal/m365/service/sharepoint/api/pages_test.go
index 792e3eda0..0de4e3a4e 100644
--- a/src/internal/m365/service/sharepoint/api/pages_test.go
+++ b/src/internal/m365/service/sharepoint/api/pages_test.go
@@ -109,7 +109,7 @@ func (suite *SharePointPageSuite) TestRestoreSinglePage() { //nolint:lll
 	byteArray := spMock.Page("Byte Test")
 
-	pageData, err := data.NewUnindexedPrefetchedItem(
+	pageData, err := data.NewPrefetchedItem(
 		io.NopCloser(bytes.NewReader(byteArray)),
 		testName,
 		time.Now())
diff --git a/src/internal/m365/service/sharepoint/backup_test.go b/src/internal/m365/service/sharepoint/backup_test.go
index cfed30567..6edcfd067 100644
--- a/src/internal/m365/service/sharepoint/backup_test.go
+++ b/src/internal/m365/service/sharepoint/backup_test.go
@@ -91,12 +91,9 @@ func (suite *LibrariesBackupUnitSuite) TestUpdateCollections() {
 
 	var (
 		paths     = map[string]string{}
-		newPaths  = map[string]string{}
+		currPaths = map[string]string{}
 		excluded  = map[string]struct{}{}
-		itemColls = map[string]map[string]string{
-			driveID: {},
-		}
-		collMap = map[string]map[string]*drive.Collection{
+		collMap   = map[string]map[string]*drive.Collection{
 			driveID: {},
 		}
 	)
@@ -110,15 +107,14 @@ func (suite *LibrariesBackupUnitSuite) TestUpdateCollections() {
 
 	c.CollectionMap = collMap
 
-	err := c.UpdateCollections(
+	_, err := c.UpdateCollections(
 		ctx,
 		driveID,
 		"General",
 		test.items,
 		paths,
-		newPaths,
+		currPaths,
 		excluded,
-		itemColls,
 		true,
 		fault.New(true))
diff --git a/src/internal/operations/helpers.go b/src/internal/operations/helpers.go
index cdce0fdec..06e457909 100644
--- a/src/internal/operations/helpers.go
+++ b/src/internal/operations/helpers.go
@@ -48,9 +48,9 @@ func LogFaultErrors(ctx context.Context, fe *fault.Errors, prefix string) {
 	}
 
 	var (
-		log        = logger.Ctx(ctx)
-		pfxMsg     = prefix + ":"
-		li, ls, lr = len(fe.Items), len(fe.Skipped), len(fe.Recovered)
+		log            = logger.Ctx(ctx)
+		pfxMsg         = prefix + ":"
+		li, ls, lr, la = len(fe.Items), len(fe.Skipped), len(fe.Recovered), len(fe.Alerts)
 	)
 
-	if fe.Failure == nil && li+ls+lr == 0 {
+	if fe.Failure == nil && li+ls+lr+la == 0 {
@@ -73,4 +73,8 @@ func LogFaultErrors(ctx context.Context, fe *fault.Errors, prefix string) {
 	for i, err := range fe.Recovered {
 		log.With("recovered_error", err).Errorf("%s recoverable error %d of %d: %s", pfxMsg, i+1, lr, err.Msg)
 	}
+
+	for i, alert := range fe.Alerts {
+		log.With("alert", alert).Infof("%s alert %d of %d: %s", pfxMsg, i+1, la, alert.Message)
+	}
 }
diff --git a/src/internal/operations/test/exchange_test.go b/src/internal/operations/test/exchange_test.go
index 38f875f3a..fedcc5e89 100644
--- a/src/internal/operations/test/exchange_test.go
+++ b/src/internal/operations/test/exchange_test.go
@@ -76,28 +76,28 @@ func (suite *ExchangeBackupIntgSuite) TestBackup_Run_exchange() {
 		category      path.CategoryType
 		metadataFiles [][]string
 	}{
-		// {
-		// 	name: "Mail",
-		// 	selector: func() *selectors.ExchangeBackup {
-		// 		sel := selectors.NewExchangeBackup([]string{suite.its.user.ID})
-		// 
sel.Include(sel.MailFolders([]string{api.MailInbox}, selectors.PrefixMatch())) - // sel.DiscreteOwner = suite.its.user.ID + { + name: "Mail", + selector: func() *selectors.ExchangeBackup { + sel := selectors.NewExchangeBackup([]string{suite.its.user.ID}) + sel.Include(sel.MailFolders([]string{api.MailInbox}, selectors.PrefixMatch())) + sel.DiscreteOwner = suite.its.user.ID - // return sel - // }, - // category: path.EmailCategory, - // metadataFiles: exchange.MetadataFileNames(path.EmailCategory), - // }, - // { - // name: "Contacts", - // selector: func() *selectors.ExchangeBackup { - // sel := selectors.NewExchangeBackup([]string{suite.its.user.ID}) - // sel.Include(sel.ContactFolders([]string{api.DefaultContacts}, selectors.PrefixMatch())) - // return sel - // }, - // category: path.ContactsCategory, - // metadataFiles: exchange.MetadataFileNames(path.ContactsCategory), - // }, + return sel + }, + category: path.EmailCategory, + metadataFiles: MetadataFileNames(path.EmailCategory), + }, + { + name: "Contacts", + selector: func() *selectors.ExchangeBackup { + sel := selectors.NewExchangeBackup([]string{suite.its.user.ID}) + sel.Include(sel.ContactFolders([]string{api.DefaultContacts}, selectors.PrefixMatch())) + return sel + }, + category: path.ContactsCategory, + metadataFiles: MetadataFileNames(path.ContactsCategory), + }, { name: "Calendar Events", selector: func() *selectors.ExchangeBackup { diff --git a/src/internal/operations/test/group_test.go b/src/internal/operations/test/group_test.go index 770267e68..9f60a2274 100644 --- a/src/internal/operations/test/group_test.go +++ b/src/internal/operations/test/group_test.go @@ -226,18 +226,18 @@ func (suite *GroupsRestoreNightlyIntgSuite) TestRestore_Run_groupsWithAdvancedOp suite.its.group.RootSite.DriveRootFolderID) } -func (suite *GroupsRestoreNightlyIntgSuite) TestRestore_Run_groupsAlternateProtectedResource() { - sel := selectors.NewGroupsBackup([]string{suite.its.group.ID}) - sel.Include(selTD.GroupsBackupLibraryFolderScope(sel)) - sel.Filter(sel.Library("documents")) - sel.DiscreteOwner = suite.its.group.ID +// func (suite *GroupsRestoreNightlyIntgSuite) TestRestore_Run_groupsAlternateProtectedResource() { +// sel := selectors.NewGroupsBackup([]string{suite.its.group.ID}) +// sel.Include(selTD.GroupsBackupLibraryFolderScope(sel)) +// sel.Filter(sel.Library("documents")) +// sel.DiscreteOwner = suite.its.group.ID - runDriveRestoreToAlternateProtectedResource( - suite.T(), - suite, - suite.its.ac, - sel.Selector, - suite.its.group.RootSite, - suite.its.secondaryGroup.RootSite, - suite.its.secondaryGroup.ID) -} +// runDriveRestoreToAlternateProtectedResource( +// suite.T(), +// suite, +// suite.its.ac, +// sel.Selector, +// suite.its.group.RootSite, +// suite.its.secondaryGroup.RootSite, +// suite.its.secondaryGroup.ID) +// } diff --git a/src/internal/streamstore/streamstore.go b/src/internal/streamstore/streamstore.go index 9246a9325..5e704f9ff 100644 --- a/src/internal/streamstore/streamstore.go +++ b/src/internal/streamstore/streamstore.go @@ -182,7 +182,7 @@ func collect( return nil, clues.Wrap(err, "marshalling body").WithClues(ctx) } - item, err := data.NewUnindexedPrefetchedItem( + item, err := data.NewPrefetchedItem( io.NopCloser(bytes.NewReader(bs)), col.itemName, time.Now()) diff --git a/src/pkg/fault/alert.go b/src/pkg/fault/alert.go new file mode 100644 index 000000000..5d4c97cea --- /dev/null +++ b/src/pkg/fault/alert.go @@ -0,0 +1,70 @@ +package fault + +import ( + "github.com/alcionai/corso/src/cli/print" +) + +var _ 
print.Printable = &Alert{}
+
+// Alerts are informational-only notifications. The purpose of alerts is to
+// provide a means of end-user communication about important events without
+// needing to generate runtime failures or recoverable errors. When generating
+// an alert, no other fault feature (failure, recoverable, skip, etc) should
+// be in use. IE: Errors do not also get alerts, since the error itself is a
+// form of end-user communication already.
+type Alert struct {
+	Item    Item   `json:"item"`
+	Message string `json:"message"`
+}
+
+// String complies with the stringer interface.
+func (a *Alert) String() string {
+	msg := "<nil>"
+
+	if a != nil {
+		msg = a.Message
+	}
+
+	if len(msg) == 0 {
+		msg = "<missing>"
+	}
+
+	return "Alert: " + msg
+}
+
+func (a Alert) MinimumPrintable() any {
+	return a
+}
+
+// Headers returns the human-readable names of properties of an Alert
+// for printing out to a terminal.
+func (a Alert) Headers() []string {
+	return []string{"Action", "Message", "Container", "Name", "ID"}
+}
+
+// Values populates the printable values matching the Headers list.
+func (a Alert) Values() []string {
+	var cn string
+
+	acn, ok := a.Item.Additional[AddtlContainerName]
+	if ok {
+		str, ok := acn.(string)
+		if ok {
+			cn = str
+		}
+	}
+
+	return []string{"Alert", a.Message, cn, a.Item.Name, a.Item.ID}
+}
+
+func NewAlert(message, namespace, itemID, name string, addtl map[string]any) *Alert {
+	return &Alert{
+		Message: message,
+		Item: Item{
+			Namespace:  namespace,
+			ID:         itemID,
+			Name:       name,
+			Additional: addtl,
+		},
+	}
+}
diff --git a/src/pkg/fault/alert_test.go b/src/pkg/fault/alert_test.go
new file mode 100644
index 000000000..c45ec2e70
--- /dev/null
+++ b/src/pkg/fault/alert_test.go
@@ -0,0 +1,88 @@
+package fault_test
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/suite"
+
+	"github.com/alcionai/corso/src/internal/tester"
+	"github.com/alcionai/corso/src/pkg/fault"
+)
+
+type AlertUnitSuite struct {
+	tester.Suite
+}
+
+func TestAlertUnitSuite(t *testing.T) {
+	suite.Run(t, &AlertUnitSuite{Suite: tester.NewUnitSuite(t)})
+}
+
+func (suite *AlertUnitSuite) TestAlert_String() {
+	var (
+		t = suite.T()
+		a fault.Alert
+	)
+
+	assert.Contains(t, a.String(), "Alert: ")
+
+	a = fault.Alert{
+		Item:    fault.Item{},
+		Message: "",
+	}
+	assert.Contains(t, a.String(), "Alert: ")
+
+	a = fault.Alert{
+		Item: fault.Item{
+			ID: "item_id",
+		},
+		Message: "msg",
+	}
+	assert.NotContains(t, a.String(), "item_id")
+	assert.Contains(t, a.String(), "Alert: msg")
+}
+
+func (suite *AlertUnitSuite) TestNewAlert() {
+	t := suite.T()
+	addtl := map[string]any{"foo": "bar"}
+	a := fault.NewAlert("message-to-show", "ns", "item_id", "item_name", addtl)
+
+	expect := fault.Alert{
+		Item: fault.Item{
+			Namespace:  "ns",
+			ID:         "item_id",
+			Name:       "item_name",
+			Additional: addtl,
+		},
+		Message: "message-to-show",
+	}
+
+	assert.Equal(t, expect, *a)
+}
+
+func (suite *AlertUnitSuite) TestAlert_HeadersValues() {
+	addtl := map[string]any{
+		fault.AddtlContainerID:   "cid",
+		fault.AddtlContainerName: "cname",
+	}
+
+	table := []struct {
+		name   string
+		alert  *fault.Alert
+		expect []string
+	}{
+		{
+			name:   "new alert",
+			alert:  fault.NewAlert("message-to-show", "ns", "id", "name", addtl),
+			expect: []string{"Alert", "message-to-show", "cname", "name", "id"},
+		},
+	}
+	for _, test := range table {
+		suite.Run(test.name, func() {
+			t := suite.T()
+
+			assert.Equal(t, []string{"Action", "Message", "Container", "Name", "ID"}, test.alert.Headers())
+			assert.Equal(t, test.expect, test.alert.Values())
+		})
+	}
+}
diff --git a/src/pkg/fault/example_fault_test.go b/src/pkg/fault/example_fault_test.go
index e272655b6..90c93e083 100644
--- a/src/pkg/fault/example_fault_test.go
+++ b/src/pkg/fault/example_fault_test.go
@@ -441,3 +441,28 @@ func ExampleBus_AddSkip() {
 
 	// Output: skipped processing file: malware_detected
 }
+
+// ExampleBus_AddAlert showcases when to use AddAlert.
+func ExampleBus_AddAlert() {
+	errs := fault.New(false)
+
+	// Some events should be communicated to the end user without recording an
+	// error to the operation. Logs aren't sufficient because we don't promote
+	// log messages to the terminal. But errors and skips are too heavy and hacky
+	// to use. In these cases, we can create informational Alerts.
+	//
+	// Only the message gets shown to the user. But since we're persisting this
+	// data along with the backup details and other fault info, we have the option
+	// of packing any other contextual data that we want.
+	errs.AddAlert(ctx, fault.NewAlert(
+		"something important happened!",
+		"deduplication-namespace",
+		"file-id",
+		"file-name",
+		map[string]any{"foo": "bar"}))
+
+	// later on, after processing, end users can scrutinize the alerts.
+	fmt.Println(errs.Alerts()[0].String())
+
+	// Output: Alert: something important happened!
+}
diff --git a/src/pkg/fault/fault.go b/src/pkg/fault/fault.go
index 488656fa4..e6ea1bcd9 100644
--- a/src/pkg/fault/fault.go
+++ b/src/pkg/fault/fault.go
@@ -15,11 +15,24 @@ import (
 	"github.com/alcionai/corso/src/pkg/logger"
 )
 
+// temporary hack identifier
+// see: https://github.com/alcionai/corso/pull/2510#discussion_r1113532530
+// TODO: https://github.com/alcionai/corso/issues/4003
+const LabelForceNoBackupCreation = "label_forces_no_backup_creations"
+
 type Bus struct {
 	mu *sync.Mutex
 
+	// When creating a local bus, the parent property retains a pointer
+	// to the root Bus. Even in the case of multiple chained creations of
+	// local busses, the parent reference remains the original root bus,
+	// and does not create a linked list of lineage. Any errors and failures
+	// created by a local instance will get fielded to the parent. But only
+	// local errors will be returned by property getter funcs.
+	parent *Bus
+
 	// Failure probably identifies errors that were added to the bus
-	// or localBus via AddRecoverable, but which were promoted
+	// or a local Bus via AddRecoverable, but which were promoted
 	// to the failure position due to failFast=true configuration.
 	// Alternatively, the process controller might have set failure
 	// by calling Fail(err).
@@ -36,6 +49,12 @@ type Bus struct {
 	// inability to process an item, due to a well-known cause.
 	skipped []Skipped
 
+	// alerts contain purely informational messages and data. They
+	// represent situations where the end user should be aware of some
+	// occurrence that is not an error, exception, skipped data, or
+	// other runtime/persistence impacting issue.
+	alerts []Alert
+
 	// if failFast is true, the first errs addition will
 	// get promoted to the err value. This signifies a
 	// non-recoverable processing state, causing any running
@@ -52,62 +71,61 @@ func New(failFast bool) *Bus {
 	}
 }
 
+// Local constructs a new bus with a local reference to handle error aggregation
+// in a constrained scope. This allows the caller to review recoverable errors and
+// failures within only the current codespace, as opposed to the global set of errors.
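+// Anything added to a local bus is also promoted to the root bus, so
+// local aggregation never hides entries from the global record.
+//
+// A minimal usage sketch (errs, el, items, and process are hypothetical
+// placeholders for the caller's own names):
+//
+//	el := errs.Local()
+//
+//	for _, item := range items {
+//		if err := process(item); err != nil {
+//			el.AddRecoverable(ctx, err)
+//		}
+//	}
+//
+//	return el.Failure()
+//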
+// The function that spawned the local bus should always return `bus.Failure()` to
+// ensure that hard failures are propagated back upstream.
+func (e *Bus) Local() *Bus {
+	parent := e.parent
+
+	// only use e if it is already the root instance
+	if parent == nil {
+		parent = e
+	}
+
+	return &Bus{
+		mu:       &sync.Mutex{},
+		parent:   parent,
+		failFast: parent.failFast,
+	}
+}
+
 // FailFast returns the failFast flag in the bus.
 func (e *Bus) FailFast() bool {
 	return e.failFast
 }
 
-// Failure returns the primary error. If not nil, this
-// indicates the operation exited prior to completion.
-func (e *Bus) Failure() error {
-	return e.failure
-}
-
-// Recovered returns the slice of errors that occurred in
-// recoverable points of processing. This is often during
-// iteration where a single failure (ex: retrieving an item),
-// doesn't require the entire process to end.
-func (e *Bus) Recovered() []error {
-	return slices.Clone(e.recoverable)
-}
-
-// Skipped returns the slice of items that were permanently
-// skipped during processing.
-func (e *Bus) Skipped() []Skipped {
-	return slices.Clone(e.skipped)
-}
-
 // Fail sets the non-recoverable error (ie: bus.failure)
 // in the bus. If a failure error is already present,
 // the error gets added to the recoverable slice for
 // purposes of tracking.
-//
-// TODO: Return Data, not Bus. The consumers of a failure
-// should care about the state of data, not the communication
-// pattern.
 func (e *Bus) Fail(err error) *Bus {
 	if err == nil {
 		return e
 	}
 
-	e.mu.Lock()
-	defer e.mu.Unlock()
-
 	return e.setFailure(err)
 }
 
-// setErr handles setting bus.failure. Sync locking gets
-// handled upstream of this call.
+// setFailure handles setting bus.failure, promoting the failure
+// to the parent bus when one exists. Sync locking is handled
+// internally.
 func (e *Bus) setFailure(err error) *Bus {
+	e.mu.Lock()
+	defer e.mu.Unlock()
+
 	if e.failure == nil {
 		e.failure = err
-		return e
+	} else {
+		// technically not a recoverable error: we're using the
+		// recoverable slice as an overflow container here to
+		// ensure everything is tracked.
+		e.recoverable = append(e.recoverable, err)
 	}
 
-	// technically not a recoverable error: we're using the
-	// recoverable slice as an overflow container here to
-	// ensure everything is tracked.
-	e.recoverable = append(e.recoverable, err)
+	if e.parent != nil {
+		e.parent.setFailure(err)
+	}
 
 	return e
 }
@@ -116,17 +134,11 @@ func (e *Bus) setFailure(err error) *Bus {
 // errors (ie: bus.recoverable). If failFast is true, the first
 // added error will get copied to bus.failure, causing the bus
 // to identify as non-recoverably failed.
-//
-// TODO: nil return, not Bus, since we don't want people to return
-// from errors.AddRecoverable().
 func (e *Bus) AddRecoverable(ctx context.Context, err error) {
 	if err == nil {
 		return
 	}
 
-	e.mu.Lock()
-	defer e.mu.Unlock()
-
 	e.logAndAddRecoverable(ctx, err, 1)
 }
 
@@ -147,19 +159,77 @@ func (e *Bus) logAndAddRecoverable(ctx context.Context, err error, skip int) {
-// gets handled upstream of this call. Returns true if the
-// error is a failure, false otherwise.
+// is handled internally. Returns true if the
+// error is a failure, false otherwise.
 func (e *Bus) addRecoverableErr(err error) bool {
+	e.mu.Lock()
+	defer e.mu.Unlock()
+
 	var isFail bool
 
 	if e.failure == nil && e.failFast {
-		e.setFailure(err)
+		// the failure slot is guaranteed empty here, so set it
+		// directly; calling setFailure would re-acquire the lock.
+		e.failure = err
+
+		if e.parent != nil {
+			e.parent.setFailure(err)
+		}
 
 		isFail = true
 	}
 
 	e.recoverable = append(e.recoverable, err)
 
+	// local bus instances must promote errors to the root bus.
+	if e.parent != nil {
+		e.parent.addRecoverableErr(err)
+	}
+
 	return isFail
 }
 
+// ---------------------------------------------------------------------------
+// Non-error adders
+// ---------------------------------------------------------------------------
+
+// AddAlert appends a record of an Alert message to the fault bus.
+// Importantly, alerts are not errors, exceptions, or skipped items.
+// An alert should only be generated when no other fault functionality
+// is in use, but we still want the end user to clearly and plainly
+// receive a notification about a runtime event.
+func (e *Bus) AddAlert(ctx context.Context, a *Alert) {
+	if a == nil {
+		return
+	}
+
+	e.logAndAddAlert(ctx, a, 1)
+}
+
+// logs and adds an alert.
+func (e *Bus) logAndAddAlert(ctx context.Context, a *Alert, trace int) {
+	logger.CtxStack(ctx, trace+1).
+		With("alert", a).
+		Info("alert: " + a.Message)
+	e.addAlert(a)
+}
+
+func (e *Bus) addAlert(a *Alert) *Bus {
+	e.mu.Lock()
+	defer e.mu.Unlock()
+
+	e.alerts = append(e.alerts, *a)
+
+	// local bus instances must promote alerts to the root bus.
+	if e.parent != nil {
+		e.parent.addAlert(a)
+	}
+
+	return e
+}
+
 // AddSkip appends a record of a Skipped item to the fault bus.
 // Importantly, skipped items are not the same as recoverable
 // errors. An item should only be skipped under the following
@@ -175,25 +245,35 @@ func (e *Bus) AddSkip(ctx context.Context, s *Skipped) {
 		return
 	}
 
-	e.mu.Lock()
-	defer e.mu.Unlock()
-
 	e.logAndAddSkip(ctx, s, 1)
 }
 
-// logs the error and adds a skipped item.
-func (e *Bus) logAndAddSkip(ctx context.Context, s *Skipped, skip int) {
-	logger.CtxStack(ctx, skip+1).
+// logs and adds a skipped item.
+func (e *Bus) logAndAddSkip(ctx context.Context, s *Skipped, trace int) {
+	logger.CtxStack(ctx, trace+1).
 		With("skipped", s).
-		Info("recoverable error")
+		Info("skipped an item")
 	e.addSkip(s)
 }
 
 func (e *Bus) addSkip(s *Skipped) *Bus {
+	e.mu.Lock()
+	defer e.mu.Unlock()
+
 	e.skipped = append(e.skipped, *s)
+
+	// local bus instances must promote skipped items to the root bus.
+	if e.parent != nil {
+		e.parent.addSkip(s)
+	}
+
 	return e
 }
 
+// ---------------------------------------------------------------------------
+// Results
+// ---------------------------------------------------------------------------
+
 // Errors returns the plain record of errors that were aggregated
 // within a fault Bus.
 func (e *Bus) Errors() *Errors {
@@ -204,10 +284,44 @@ func (e *Bus) Errors() *Errors {
 		Recovered: nonItems,
 		Items:     items,
 		Skipped:   slices.Clone(e.skipped),
+		Alerts:    slices.Clone(e.alerts),
 		FailFast:  e.failFast,
 	}
 }
 
+// Failure returns the primary error. If not nil, this
+// indicates the operation exited prior to completion.
+// If the bus is a local instance, this only returns the
+// local failure, and will not return parent data.
+func (e *Bus) Failure() error {
+	return e.failure
+}
+
+// Recovered returns the slice of errors that occurred in
+// recoverable points of processing. This is often during
+// iteration where a single failure (ex: retrieving an item),
+// doesn't require the entire process to end.
+// If the bus is a local instance, this only returns the
+// local recovered errors, and will not return parent data.
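+// Check the root bus for the full record across local instances.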
+func (e *Bus) Recovered() []error {
+	return slices.Clone(e.recoverable)
+}
+
+// Skipped returns the slice of items that were permanently
+// skipped during processing.
+// If the bus is a local instance, this only returns the
+// local skipped items, and will not return parent data.
+func (e *Bus) Skipped() []Skipped {
+	return slices.Clone(e.skipped)
+}
+
+// Alerts returns the slice of alerts generated during runtime.
+// If the bus is a local instance, this only returns the
+// local alerts, and will not return parent data.
+func (e *Bus) Alerts() []Alert {
+	return slices.Clone(e.alerts)
+}
+
 // ItemsAndRecovered returns the items that failed along with other
 // recoverable errors
 func (e *Bus) ItemsAndRecovered() ([]Item, []error) {
@@ -234,10 +348,6 @@ func (e *Bus) ItemsAndRecovered() ([]Item, []error) {
 	return maps.Values(is), non
 }
 
-// ---------------------------------------------------------------------------
-// Errors Data
-// ---------------------------------------------------------------------------
-
 // Errors provides the errors data alone, without sync controls
 // or adders/setters. Expected to get called at the end of processing,
 // as a way to aggregate results.
@@ -265,6 +375,12 @@ type Errors struct {
 	// inability to process an item, due to a well-known cause.
 	Skipped []Skipped `json:"skipped"`
 
+	// Alerts contain purely informational messages and data. They
+	// represent situations where the end user should be aware of some
+	// occurrence that is not an error, exception, skipped data, or
+	// other runtime/persistence impacting issue.
+	Alerts []Alert `json:"alerts"`
+
 	// If FailFast is true, then the first Recoverable error will
 	// promote to the Failure spot, causing processing to exit.
 	FailFast bool `json:"failFast"`
@@ -313,16 +429,29 @@
 	}
 }
 
+// ---------------------------------------------------------------------------
+// Print compatibility
+// ---------------------------------------------------------------------------
+
-// Print writes the DetailModel Entries to StdOut, in the format
-// requested by the caller.
-func (e *Errors) PrintItems(ctx context.Context, ignoreErrors, ignoreSkips, ignoreRecovered bool) {
-	if len(e.Items)+len(e.Skipped)+len(e.Recovered) == 0 ||
-		ignoreErrors && ignoreSkips && ignoreRecovered {
+// PrintItems writes the aggregated alerts, errors, skipped items, and
+// recovered errors to StdOut, in the format requested by the caller.
+func (e *Errors) PrintItems(
+	ctx context.Context,
+	ignoreAlerts, ignoreErrors, ignoreSkips, ignoreRecovered bool,
+) {
+	if len(e.Alerts)+len(e.Items)+len(e.Skipped)+len(e.Recovered) == 0 ||
+		(ignoreAlerts && ignoreErrors && ignoreSkips && ignoreRecovered) {
 		return
 	}
 
 	sl := make([]print.Printable, 0)
 
+	if !ignoreAlerts {
+		for _, a := range e.Alerts {
+			sl = append(sl, print.Printable(a))
+		}
+	}
+
 	if !ignoreSkips {
 		for _, s := range e.Skipped {
 			sl = append(sl, print.Printable(s))
@@ -374,73 +503,3 @@ func (pec printableErrCore) Values() []string {
 	return []string{pec.Msg}
 }
-
-// ---------------------------------------------------------------------------
-// Local aggregator
-// ---------------------------------------------------------------------------
-
-// Local constructs a new local bus to handle error aggregation in a
-// constrained scope. Local busses shouldn't be passed down to other
-// funcs, and the function that spawned the local bus should always
-// return `local.Failure()` to ensure that hard failures are propagated
-// back upstream.
-func (e *Bus) Local() *localBus { - return &localBus{ - mu: &sync.Mutex{}, - bus: e, - } -} - -type localBus struct { - mu *sync.Mutex - bus *Bus - current error -} - -func (e *localBus) AddRecoverable(ctx context.Context, err error) { - if err == nil { - return - } - - e.mu.Lock() - defer e.mu.Unlock() - - if e.current == nil && e.bus.failFast { - e.current = err - } - - e.bus.logAndAddRecoverable(ctx, err, 1) -} - -// AddSkip appends a record of a Skipped item to the local bus. -// Importantly, skipped items are not the same as recoverable -// errors. An item should only be skipped under the following -// conditions. All other cases should be handled as errors. -// 1. The conditions for skipping the item are well-known and -// well-documented. End users need to be able to understand -// both the conditions and identifications of skips. -// 2. Skipping avoids a permanent and consistent failure. If -// the underlying reason is transient or otherwise recoverable, -// the item should not be skipped. -func (e *localBus) AddSkip(ctx context.Context, s *Skipped) { - if s == nil { - return - } - - e.mu.Lock() - defer e.mu.Unlock() - - e.bus.logAndAddSkip(ctx, s, 1) -} - -// Failure returns the failure that happened within the local bus. -// It does not return the underlying bus.Failure(), only the failure -// that was recorded within the local bus instance. This error should -// get returned by any func which created a local bus. -func (e *localBus) Failure() error { - return e.current -} - -// temporary hack identifier -// see: https://github.com/alcionai/corso/pull/2510#discussion_r1113532530 -const LabelForceNoBackupCreation = "label_forces_no_backup_creations" diff --git a/src/pkg/fault/fault_test.go b/src/pkg/fault/fault_test.go index c4166456b..d7fd79f28 100644 --- a/src/pkg/fault/fault_test.go +++ b/src/pkg/fault/fault_test.go @@ -189,25 +189,6 @@ func (suite *FaultErrorsUnitSuite) TestAdd() { assert.Len(t, n.Recovered(), 2) } -func (suite *FaultErrorsUnitSuite) TestAddSkip() { - t := suite.T() - - ctx, flush := tester.NewContext(t) - defer flush() - - n := fault.New(true) - require.NotNil(t, n) - - n.Fail(assert.AnError) - assert.Len(t, n.Skipped(), 0) - - n.AddRecoverable(ctx, assert.AnError) - assert.Len(t, n.Skipped(), 0) - - n.AddSkip(ctx, fault.OwnerSkip(fault.SkipMalware, "ns", "id", "name", nil)) - assert.Len(t, n.Skipped(), 1) -} - func (suite *FaultErrorsUnitSuite) TestErrors() { t := suite.T() diff --git a/src/pkg/fault/item.go b/src/pkg/fault/item.go index 166a914a7..e43070ebe 100644 --- a/src/pkg/fault/item.go +++ b/src/pkg/fault/item.go @@ -11,15 +11,15 @@ const ( AddtlMalwareDesc = "malware_description" ) -type itemType string +type ItemType string const ( - FileType itemType = "file" - ContainerType itemType = "container" - ResourceOwnerType itemType = "resource_owner" + FileType ItemType = "file" + ContainerType ItemType = "container" + ResourceOwnerType ItemType = "resource_owner" ) -func (it itemType) Printable() string { +func (it ItemType) Printable() string { switch it { case FileType: return "File" @@ -62,7 +62,7 @@ type Item struct { Name string `json:"name"` // tracks the type of item represented by this entry. - Type itemType `json:"type"` + Type ItemType `json:"type"` // Error() of the causal error, or a sentinel if this is the // source of the error. 
In case of ID collisions, the first @@ -138,7 +138,7 @@ func OwnerErr(cause error, namespace, id, name string, addtl map[string]any) *It } // itemErr produces a Item of the provided type for tracking erroneous items. -func itemErr(t itemType, cause error, namespace, id, name string, addtl map[string]any) *Item { +func itemErr(t ItemType, cause error, namespace, id, name string, addtl map[string]any) *Item { return &Item{ Namespace: namespace, ID: id, @@ -148,119 +148,3 @@ func itemErr(t itemType, cause error, namespace, id, name string, addtl map[stri Additional: addtl, } } - -// --------------------------------------------------------------------------- -// Skipped Items -// --------------------------------------------------------------------------- - -// skipCause identifies the well-known conditions to Skip an item. It is -// important that skip cause enumerations do not overlap with general error -// handling. Skips must be well known, well documented, and consistent. -// Transient failures, undocumented or unknown conditions, and arbitrary -// handling should never produce a skipped item. Those cases should get -// handled as normal errors. -type skipCause string - -const ( - // SkipMalware identifies a malware detection case. Files that graph - // api identifies as malware cannot be downloaded or uploaded, and will - // permanently fail any attempts to backup or restore. - SkipMalware skipCause = "malware_detected" - - // SkipBigOneNote identifies that a file was skipped because it - // was big OneNote file and we can only download OneNote files which - // are less that 2GB in size. - //nolint:lll - // https://support.microsoft.com/en-us/office/restrictions-and-limitations-in-onedrive-and-sharepoint-64883a5d-228e-48f5-b3d2-eb39e07630fa#onenotenotebooks - SkipBigOneNote skipCause = "big_one_note_file" -) - -var _ print.Printable = &Skipped{} - -// Skipped items are permanently unprocessable due to well-known conditions. -// In order to skip an item, the following conditions should be met: -// 1. The conditions for skipping the item are well-known and -// well-documented. End users need to be able to understand -// both the conditions and identifications of skips. -// 2. Skipping avoids a permanent and consistent failure. If -// the underlying reason is transient or otherwise recoverable, -// the item should not be skipped. -// -// Skipped wraps Item primarily to minimize confusion when sharing the -// fault interface. Skipped items are not errors, and Item{} errors are -// not the basis for a Skip. -type Skipped struct { - Item Item `json:"item"` -} - -// String complies with the stringer interface. -func (s *Skipped) String() string { - if s == nil { - return "" - } - - return "skipped " + s.Item.Error() + ": " + s.Item.Cause -} - -// HasCause compares the underlying cause against the parameter. -func (s *Skipped) HasCause(c skipCause) bool { - if s == nil { - return false - } - - return s.Item.Cause == string(c) -} - -func (s Skipped) MinimumPrintable() any { - return s -} - -// Headers returns the human-readable names of properties of a skipped Item -// for printing out to a terminal. -func (s Skipped) Headers() []string { - return []string{"Action", "Type", "Name", "Container", "Cause"} -} - -// Values populates the printable values matching the Headers list. 
-func (s Skipped) Values() []string { - var cn string - - acn, ok := s.Item.Additional[AddtlContainerName] - if ok { - str, ok := acn.(string) - if ok { - cn = str - } - } - - return []string{"Skip", s.Item.Type.Printable(), s.Item.Name, cn, s.Item.Cause} -} - -// ContainerSkip produces a Container-kind Item for tracking skipped items. -func ContainerSkip(cause skipCause, namespace, id, name string, addtl map[string]any) *Skipped { - return itemSkip(ContainerType, cause, namespace, id, name, addtl) -} - -// FileSkip produces a File-kind Item for tracking skipped items. -func FileSkip(cause skipCause, namespace, id, name string, addtl map[string]any) *Skipped { - return itemSkip(FileType, cause, namespace, id, name, addtl) -} - -// OnwerSkip produces a ResourceOwner-kind Item for tracking skipped items. -func OwnerSkip(cause skipCause, namespace, id, name string, addtl map[string]any) *Skipped { - return itemSkip(ResourceOwnerType, cause, namespace, id, name, addtl) -} - -// itemSkip produces a Item of the provided type for tracking skipped items. -func itemSkip(t itemType, cause skipCause, namespace, id, name string, addtl map[string]any) *Skipped { - return &Skipped{ - Item: Item{ - Namespace: namespace, - ID: id, - Name: name, - Type: t, - Cause: string(cause), - Additional: addtl, - }, - } -} diff --git a/src/pkg/fault/item_test.go b/src/pkg/fault/item_test.go index b597121ee..bdb2ca482 100644 --- a/src/pkg/fault/item_test.go +++ b/src/pkg/fault/item_test.go @@ -1,4 +1,4 @@ -package fault +package fault_test import ( "testing" @@ -8,6 +8,7 @@ import ( "github.com/stretchr/testify/suite" "github.com/alcionai/corso/src/internal/tester" + "github.com/alcionai/corso/src/pkg/fault" ) type ItemUnitSuite struct { @@ -21,28 +22,28 @@ func TestItemUnitSuite(t *testing.T) { func (suite *ItemUnitSuite) TestItem_Error() { var ( t = suite.T() - i *Item + i *fault.Item ) assert.Contains(t, i.Error(), "nil") - i = &Item{} + i = &fault.Item{} assert.Contains(t, i.Error(), "unknown type") - i = &Item{Type: FileType} - assert.Contains(t, i.Error(), FileType) + i = &fault.Item{Type: fault.FileType} + assert.Contains(t, i.Error(), fault.FileType) } func (suite *ItemUnitSuite) TestContainerErr() { t := suite.T() addtl := map[string]any{"foo": "bar"} - i := ContainerErr(clues.New("foo"), "ns", "id", "name", addtl) + i := fault.ContainerErr(clues.New("foo"), "ns", "id", "name", addtl) - expect := Item{ + expect := fault.Item{ Namespace: "ns", ID: "id", Name: "name", - Type: ContainerType, + Type: fault.ContainerType, Cause: "foo", Additional: addtl, } @@ -53,13 +54,13 @@ func (suite *ItemUnitSuite) TestContainerErr() { func (suite *ItemUnitSuite) TestFileErr() { t := suite.T() addtl := map[string]any{"foo": "bar"} - i := FileErr(clues.New("foo"), "ns", "id", "name", addtl) + i := fault.FileErr(clues.New("foo"), "ns", "id", "name", addtl) - expect := Item{ + expect := fault.Item{ Namespace: "ns", ID: "id", Name: "name", - Type: FileType, + Type: fault.FileType, Cause: "foo", Additional: addtl, } @@ -70,13 +71,13 @@ func (suite *ItemUnitSuite) TestFileErr() { func (suite *ItemUnitSuite) TestOwnerErr() { t := suite.T() addtl := map[string]any{"foo": "bar"} - i := OwnerErr(clues.New("foo"), "ns", "id", "name", addtl) + i := fault.OwnerErr(clues.New("foo"), "ns", "id", "name", addtl) - expect := Item{ + expect := fault.Item{ Namespace: "ns", ID: "id", Name: "name", - Type: ResourceOwnerType, + Type: fault.ResourceOwnerType, Cause: "foo", Additional: addtl, } @@ -86,23 +87,23 @@ func (suite *ItemUnitSuite) 
TestOwnerErr() { func (suite *ItemUnitSuite) TestItemType_Printable() { table := []struct { - t itemType + t fault.ItemType expect string }{ { - t: FileType, + t: fault.FileType, expect: "File", }, { - t: ContainerType, + t: fault.ContainerType, expect: "Container", }, { - t: ResourceOwnerType, + t: fault.ResourceOwnerType, expect: "Resource Owner", }, { - t: itemType("foo"), + t: fault.ItemType("foo"), expect: "Unknown", }, } @@ -118,30 +119,30 @@ func (suite *ItemUnitSuite) TestItem_HeadersValues() { err = assert.AnError cause = err.Error() addtl = map[string]any{ - AddtlContainerID: "cid", - AddtlContainerName: "cname", + fault.AddtlContainerID: "cid", + fault.AddtlContainerName: "cname", } ) table := []struct { name string - item *Item + item *fault.Item expect []string }{ { name: "file", - item: FileErr(assert.AnError, "ns", "id", "name", addtl), - expect: []string{"Error", FileType.Printable(), "name", "cname", cause}, + item: fault.FileErr(assert.AnError, "ns", "id", "name", addtl), + expect: []string{"Error", fault.FileType.Printable(), "name", "cname", cause}, }, { name: "container", - item: ContainerErr(assert.AnError, "ns", "id", "name", addtl), - expect: []string{"Error", ContainerType.Printable(), "name", "cname", cause}, + item: fault.ContainerErr(assert.AnError, "ns", "id", "name", addtl), + expect: []string{"Error", fault.ContainerType.Printable(), "name", "cname", cause}, }, { name: "owner", - item: OwnerErr(assert.AnError, "ns", "id", "name", nil), - expect: []string{"Error", ResourceOwnerType.Printable(), "name", "", cause}, + item: fault.OwnerErr(assert.AnError, "ns", "id", "name", nil), + expect: []string{"Error", fault.ResourceOwnerType.Printable(), "name", "", cause}, }, } for _, test := range table { @@ -153,106 +154,3 @@ func (suite *ItemUnitSuite) TestItem_HeadersValues() { }) } } - -func (suite *ItemUnitSuite) TestSkipped_String() { - var ( - t = suite.T() - i *Skipped - ) - - assert.Contains(t, i.String(), "nil") - - i = &Skipped{Item{}} - assert.Contains(t, i.String(), "unknown type") - - i = &Skipped{Item{Type: FileType}} - assert.Contains(t, i.Item.Error(), FileType) -} - -func (suite *ItemUnitSuite) TestContainerSkip() { - t := suite.T() - addtl := map[string]any{"foo": "bar"} - i := ContainerSkip(SkipMalware, "ns", "id", "name", addtl) - - expect := Item{ - Namespace: "ns", - ID: "id", - Name: "name", - Type: ContainerType, - Cause: string(SkipMalware), - Additional: addtl, - } - - assert.Equal(t, Skipped{expect}, *i) -} - -func (suite *ItemUnitSuite) TestFileSkip() { - t := suite.T() - addtl := map[string]any{"foo": "bar"} - i := FileSkip(SkipMalware, "ns", "id", "name", addtl) - - expect := Item{ - Namespace: "ns", - ID: "id", - Name: "name", - Type: FileType, - Cause: string(SkipMalware), - Additional: addtl, - } - - assert.Equal(t, Skipped{expect}, *i) -} - -func (suite *ItemUnitSuite) TestOwnerSkip() { - t := suite.T() - addtl := map[string]any{"foo": "bar"} - i := OwnerSkip(SkipMalware, "ns", "id", "name", addtl) - - expect := Item{ - Namespace: "ns", - ID: "id", - Name: "name", - Type: ResourceOwnerType, - Cause: string(SkipMalware), - Additional: addtl, - } - - assert.Equal(t, Skipped{expect}, *i) -} - -func (suite *ItemUnitSuite) TestSkipped_HeadersValues() { - addtl := map[string]any{ - AddtlContainerID: "cid", - AddtlContainerName: "cname", - } - - table := []struct { - name string - skip *Skipped - expect []string - }{ - { - name: "file", - skip: FileSkip(SkipMalware, "ns", "id", "name", addtl), - expect: []string{"Skip", FileType.Printable(), 
"name", "cname", string(SkipMalware)}, - }, - { - name: "container", - skip: ContainerSkip(SkipMalware, "ns", "id", "name", addtl), - expect: []string{"Skip", ContainerType.Printable(), "name", "cname", string(SkipMalware)}, - }, - { - name: "owner", - skip: OwnerSkip(SkipMalware, "ns", "id", "name", nil), - expect: []string{"Skip", ResourceOwnerType.Printable(), "name", "", string(SkipMalware)}, - }, - } - for _, test := range table { - suite.Run(test.name, func() { - t := suite.T() - - assert.Equal(t, []string{"Action", "Type", "Name", "Container", "Cause"}, test.skip.Headers()) - assert.Equal(t, test.expect, test.skip.Values()) - }) - } -} diff --git a/src/pkg/fault/skipped.go b/src/pkg/fault/skipped.go new file mode 100644 index 000000000..b836fc129 --- /dev/null +++ b/src/pkg/fault/skipped.go @@ -0,0 +1,117 @@ +package fault + +import ( + "github.com/alcionai/corso/src/cli/print" +) + +// skipCause identifies the well-known conditions to Skip an item. It is +// important that skip cause enumerations do not overlap with general error +// handling. Skips must be well known, well documented, and consistent. +// Transient failures, undocumented or unknown conditions, and arbitrary +// handling should never produce a skipped item. Those cases should get +// handled as normal errors. +type skipCause string + +const ( + // SkipMalware identifies a malware detection case. Files that graph + // api identifies as malware cannot be downloaded or uploaded, and will + // permanently fail any attempts to backup or restore. + SkipMalware skipCause = "malware_detected" + + // SkipBigOneNote identifies that a file was skipped because it + // was big OneNote file and we can only download OneNote files which + // are less that 2GB in size. + //nolint:lll + // https://support.microsoft.com/en-us/office/restrictions-and-limitations-in-onedrive-and-sharepoint-64883a5d-228e-48f5-b3d2-eb39e07630fa#onenotenotebooks + SkipBigOneNote skipCause = "big_one_note_file" +) + +var _ print.Printable = &Skipped{} + +// Skipped items are permanently unprocessable due to well-known conditions. +// In order to skip an item, the following conditions should be met: +// 1. The conditions for skipping the item are well-known and +// well-documented. End users need to be able to understand +// both the conditions and identifications of skips. +// 2. Skipping avoids a permanent and consistent failure. If +// the underlying reason is transient or otherwise recoverable, +// the item should not be skipped. +// +// Skipped wraps Item primarily to minimize confusion when sharing the +// fault interface. Skipped items are not errors, and Item{} errors are +// not the basis for a Skip. +type Skipped struct { + Item Item `json:"item"` +} + +// String complies with the stringer interface. +func (s *Skipped) String() string { + if s == nil { + return "" + } + + return "skipped " + s.Item.Error() + ": " + s.Item.Cause +} + +// HasCause compares the underlying cause against the parameter. +func (s *Skipped) HasCause(c skipCause) bool { + if s == nil { + return false + } + + return s.Item.Cause == string(c) +} + +func (s Skipped) MinimumPrintable() any { + return s +} + +// Headers returns the human-readable names of properties of a skipped Item +// for printing out to a terminal. +func (s Skipped) Headers() []string { + return []string{"Action", "Type", "Name", "Container", "Cause"} +} + +// Values populates the printable values matching the Headers list. 
+func (s Skipped) Values() []string {
+	var cn string
+
+	acn, ok := s.Item.Additional[AddtlContainerName]
+	if ok {
+		str, ok := acn.(string)
+		if ok {
+			cn = str
+		}
+	}
+
+	return []string{"Skip", s.Item.Type.Printable(), s.Item.Name, cn, s.Item.Cause}
+}
+
+// ContainerSkip produces a Container-kind Item for tracking skipped items.
+func ContainerSkip(cause skipCause, namespace, id, name string, addtl map[string]any) *Skipped {
+	return itemSkip(ContainerType, cause, namespace, id, name, addtl)
+}
+
+// FileSkip produces a File-kind Item for tracking skipped items.
+func FileSkip(cause skipCause, namespace, id, name string, addtl map[string]any) *Skipped {
+	return itemSkip(FileType, cause, namespace, id, name, addtl)
+}
+
+// OwnerSkip produces a ResourceOwner-kind Item for tracking skipped items.
+func OwnerSkip(cause skipCause, namespace, id, name string, addtl map[string]any) *Skipped {
+	return itemSkip(ResourceOwnerType, cause, namespace, id, name, addtl)
+}
+
+// itemSkip produces an Item of the provided type for tracking skipped items.
+func itemSkip(t ItemType, cause skipCause, namespace, id, name string, addtl map[string]any) *Skipped {
+	return &Skipped{
+		Item: Item{
+			Namespace:  namespace,
+			ID:         id,
+			Name:       name,
+			Type:       t,
+			Cause:      string(cause),
+			Additional: addtl,
+		},
+	}
+}
diff --git a/src/pkg/fault/skipped_test.go b/src/pkg/fault/skipped_test.go
new file mode 100644
index 000000000..22d8cddf4
--- /dev/null
+++ b/src/pkg/fault/skipped_test.go
@@ -0,0 +1,146 @@
+package fault_test
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"github.com/stretchr/testify/suite"
+
+	"github.com/alcionai/corso/src/internal/tester"
+	"github.com/alcionai/corso/src/pkg/fault"
+)
+
+type SkippedUnitSuite struct {
+	tester.Suite
+}
+
+func TestSkippedUnitSuite(t *testing.T) {
+	suite.Run(t, &SkippedUnitSuite{Suite: tester.NewUnitSuite(t)})
+}
+
+func (suite *SkippedUnitSuite) TestSkipped_String() {
+	var (
+		t = suite.T()
+		i *fault.Skipped
+	)
+
+	assert.Contains(t, i.String(), "nil")
+
+	i = &fault.Skipped{fault.Item{}}
+	assert.Contains(t, i.String(), "unknown type")
+
+	i = &fault.Skipped{
+		fault.Item{
+			Type: fault.FileType,
+		},
+	}
+	assert.Contains(t, i.Item.Error(), fault.FileType)
+}
+
+func (suite *SkippedUnitSuite) TestContainerSkip() {
+	t := suite.T()
+	addtl := map[string]any{"foo": "bar"}
+	i := fault.ContainerSkip(fault.SkipMalware, "ns", "id", "name", addtl)
+
+	expect := fault.Item{
+		Namespace:  "ns",
+		ID:         "id",
+		Name:       "name",
+		Type:       fault.ContainerType,
+		Cause:      string(fault.SkipMalware),
+		Additional: addtl,
+	}
+
+	assert.Equal(t, fault.Skipped{expect}, *i)
+}
+
+func (suite *SkippedUnitSuite) TestFileSkip() {
+	t := suite.T()
+	addtl := map[string]any{"foo": "bar"}
+	i := fault.FileSkip(fault.SkipMalware, "ns", "id", "name", addtl)
+
+	expect := fault.Item{
+		Namespace:  "ns",
+		ID:         "id",
+		Name:       "name",
+		Type:       fault.FileType,
+		Cause:      string(fault.SkipMalware),
+		Additional: addtl,
+	}
+
+	assert.Equal(t, fault.Skipped{expect}, *i)
+}
+
+func (suite *SkippedUnitSuite) TestOwnerSkip() {
+	t := suite.T()
+	addtl := map[string]any{"foo": "bar"}
+	i := fault.OwnerSkip(fault.SkipMalware, "ns", "id", "name", addtl)
+
+	expect := fault.Item{
+		Namespace:  "ns",
+		ID:         "id",
+		Name:       "name",
+		Type:       fault.ResourceOwnerType,
+		Cause:      string(fault.SkipMalware),
+		Additional: addtl,
+	}
+
+	assert.Equal(t, fault.Skipped{expect}, *i)
+}
+
+func (suite *SkippedUnitSuite) TestSkipped_HeadersValues() {
+	addtl := 
map[string]any{ + fault.AddtlContainerID: "cid", + fault.AddtlContainerName: "cname", + } + + table := []struct { + name string + skip *fault.Skipped + expect []string + }{ + { + name: "file", + skip: fault.FileSkip(fault.SkipMalware, "ns", "id", "name", addtl), + expect: []string{"Skip", fault.FileType.Printable(), "name", "cname", string(fault.SkipMalware)}, + }, + { + name: "container", + skip: fault.ContainerSkip(fault.SkipMalware, "ns", "id", "name", addtl), + expect: []string{"Skip", fault.ContainerType.Printable(), "name", "cname", string(fault.SkipMalware)}, + }, + { + name: "owner", + skip: fault.OwnerSkip(fault.SkipMalware, "ns", "id", "name", nil), + expect: []string{"Skip", fault.ResourceOwnerType.Printable(), "name", "", string(fault.SkipMalware)}, + }, + } + for _, test := range table { + suite.Run(test.name, func() { + t := suite.T() + + assert.Equal(t, []string{"Action", "Type", "Name", "Container", "Cause"}, test.skip.Headers()) + assert.Equal(t, test.expect, test.skip.Values()) + }) + } +} + +func (suite *SkippedUnitSuite) TestBus_AddSkip() { + t := suite.T() + + ctx, flush := tester.NewContext(t) + defer flush() + + n := fault.New(true) + require.NotNil(t, n) + + n.Fail(assert.AnError) + assert.Len(t, n.Skipped(), 0) + + n.AddRecoverable(ctx, assert.AnError) + assert.Len(t, n.Skipped(), 0) + + n.AddSkip(ctx, fault.OwnerSkip(fault.SkipMalware, "ns", "id", "name", nil)) + assert.Len(t, n.Skipped(), 1) +} diff --git a/src/pkg/selectors/exchange.go b/src/pkg/selectors/exchange.go index 68f45263c..987165199 100644 --- a/src/pkg/selectors/exchange.go +++ b/src/pkg/selectors/exchange.go @@ -697,7 +697,7 @@ func (s ExchangeScope) IncludesCategory(cat exchangeCategory) bool { // returns true if the category is included in the scope's data type, // and the value is set to Any(). func (s ExchangeScope) IsAny(cat exchangeCategory) bool { - return isAnyTarget(s, cat) + return IsAnyTarget(s, cat) } // Get returns the data category in the scope. If the scope diff --git a/src/pkg/selectors/groups.go b/src/pkg/selectors/groups.go index 584887bfb..e6399fbf1 100644 --- a/src/pkg/selectors/groups.go +++ b/src/pkg/selectors/groups.go @@ -699,7 +699,7 @@ func (s GroupsScope) IncludesCategory(cat groupsCategory) bool { // returns true if the category is included in the scope's data type, // and the value is set to Any(). func (s GroupsScope) IsAny(cat groupsCategory) bool { - return isAnyTarget(s, cat) + return IsAnyTarget(s, cat) } // Get returns the data category in the scope. If the scope diff --git a/src/pkg/selectors/onedrive.go b/src/pkg/selectors/onedrive.go index 5d1538a89..f97ceccaf 100644 --- a/src/pkg/selectors/onedrive.go +++ b/src/pkg/selectors/onedrive.go @@ -484,7 +484,7 @@ func (s OneDriveScope) Matches(cat oneDriveCategory, target string) bool { // returns true if the category is included in the scope's data type, // and the value is set to Any(). func (s OneDriveScope) IsAny(cat oneDriveCategory) bool { - return isAnyTarget(s, cat) + return IsAnyTarget(s, cat) } // Get returns the data category in the scope. 
If the scope diff --git a/src/pkg/selectors/scopes.go b/src/pkg/selectors/scopes.go index aec624486..6e2eb86e9 100644 --- a/src/pkg/selectors/scopes.go +++ b/src/pkg/selectors/scopes.go @@ -694,7 +694,7 @@ func matchesPathValues[T scopeT, C categoryT]( return false } - if isAnyTarget(sc, cc) { + if IsAnyTarget(sc, cc) { // continue, not return: all path keys must match the entry to succeed continue } @@ -795,7 +795,7 @@ func isNoneTarget[T scopeT, C categoryT](s T, cat C) bool { // returns true if the category is included in the scope's category type, // and the value is set to Any(). -func isAnyTarget[T scopeT, C categoryT](s T, cat C) bool { +func IsAnyTarget[T scopeT, C categoryT](s T, cat C) bool { if !typeAndCategoryMatches(cat, s.categorizer()) { return false } diff --git a/src/pkg/selectors/scopes_test.go b/src/pkg/selectors/scopes_test.go index 6bf1e3ad9..0a44df160 100644 --- a/src/pkg/selectors/scopes_test.go +++ b/src/pkg/selectors/scopes_test.go @@ -125,14 +125,14 @@ func (suite *SelectorScopesSuite) TestGetCatValue() { func (suite *SelectorScopesSuite) TestIsAnyTarget() { t := suite.T() stub := stubScope("") - assert.True(t, isAnyTarget(stub, rootCatStub)) - assert.True(t, isAnyTarget(stub, leafCatStub)) - assert.False(t, isAnyTarget(stub, mockCategorizer("smarf"))) + assert.True(t, IsAnyTarget(stub, rootCatStub)) + assert.True(t, IsAnyTarget(stub, leafCatStub)) + assert.False(t, IsAnyTarget(stub, mockCategorizer("smarf"))) stub = stubScope("none") - assert.False(t, isAnyTarget(stub, rootCatStub)) - assert.False(t, isAnyTarget(stub, leafCatStub)) - assert.False(t, isAnyTarget(stub, mockCategorizer("smarf"))) + assert.False(t, IsAnyTarget(stub, rootCatStub)) + assert.False(t, IsAnyTarget(stub, leafCatStub)) + assert.False(t, IsAnyTarget(stub, mockCategorizer("smarf"))) } var reduceTestTable = []struct { diff --git a/src/pkg/selectors/sharepoint.go b/src/pkg/selectors/sharepoint.go index f35aa10b5..68f6655e5 100644 --- a/src/pkg/selectors/sharepoint.go +++ b/src/pkg/selectors/sharepoint.go @@ -625,7 +625,7 @@ func (s SharePointScope) IncludesCategory(cat sharePointCategory) bool { // returns true if the category is included in the scope's data type, // and the value is set to Any(). func (s SharePointScope) IsAny(cat sharePointCategory) bool { - return isAnyTarget(s, cat) + return IsAnyTarget(s, cat) } // Get returns the data category in the scope. If the scope diff --git a/src/pkg/services/m365/api/config.go b/src/pkg/services/m365/api/config.go index 0a0bb913d..8a5be9d23 100644 --- a/src/pkg/services/m365/api/config.go +++ b/src/pkg/services/m365/api/config.go @@ -101,7 +101,7 @@ func idAnd(ss ...string) []string { // exported // --------------------------------------------------------------------------- -func DriveItemSelectDefault() []string { +func DefaultDriveItemProps() []string { return idAnd( "content.downloadUrl", "createdBy", diff --git a/src/pkg/services/m365/api/delta.go b/src/pkg/services/m365/api/delta.go deleted file mode 100644 index dc24961f0..000000000 --- a/src/pkg/services/m365/api/delta.go +++ /dev/null @@ -1,11 +0,0 @@ -package api - -// DeltaUpdate holds the results of a current delta token. It normally -// gets produced when aggregating the addition and removal of items in -// a delta-queryable folder. 
-type DeltaUpdate struct { - // the deltaLink itself - URL string - // true if the old delta was marked as invalid - Reset bool -} diff --git a/src/pkg/services/m365/api/drive.go b/src/pkg/services/m365/api/drive.go index 4c3b9b312..374fa545c 100644 --- a/src/pkg/services/m365/api/drive.go +++ b/src/pkg/services/m365/api/drive.go @@ -351,6 +351,10 @@ func (c Drives) PostItemLinkShareUpdate( return itm, nil } +// --------------------------------------------------------------------------- +// helper funcs +// --------------------------------------------------------------------------- + // DriveItemCollisionKeyy constructs a key from the item name. // collision keys are used to identify duplicate item conflicts for handling advanced restoration config. func DriveItemCollisionKey(item models.DriveItemable) string { @@ -360,3 +364,17 @@ func DriveItemCollisionKey(item models.DriveItemable) string { return ptr.Val(item.GetName()) } + +// NewDriveItem initializes a `models.DriveItemable` with either a folder or file entry. +func NewDriveItem(name string, folder bool) *models.DriveItem { + itemToCreate := models.NewDriveItem() + itemToCreate.SetName(&name) + + if folder { + itemToCreate.SetFolder(models.NewFolder()) + } else { + itemToCreate.SetFile(models.NewFile()) + } + + return itemToCreate +} diff --git a/src/pkg/services/m365/api/drive_pager.go b/src/pkg/services/m365/api/drive_pager.go index c592fa656..e5523d35f 100644 --- a/src/pkg/services/m365/api/drive_pager.go +++ b/src/pkg/services/m365/api/drive_pager.go @@ -15,6 +15,11 @@ import ( "github.com/alcionai/corso/src/pkg/logger" ) +type DriveItemIDType struct { + ItemID string + IsFolder bool +} + // --------------------------------------------------------------------------- // non-delta item pager // --------------------------------------------------------------------------- @@ -65,11 +70,6 @@ func (p *driveItemPageCtrl) ValidModTimes() bool { return true } -type DriveItemIDType struct { - ItemID string - IsFolder bool -} - func (c Drives) GetItemsInContainerByCollisionKey( ctx context.Context, driveID, containerID string, @@ -131,9 +131,9 @@ type DriveItemDeltaPageCtrl struct { options *drives.ItemItemsItemDeltaRequestBuilderGetRequestConfiguration } -func (c Drives) NewDriveItemDeltaPager( - driveID, link string, - selectFields []string, +func (c Drives) newDriveItemDeltaPager( + driveID, prevDeltaLink string, + selectProps ...string, ) *DriveItemDeltaPageCtrl { preferHeaderItems := []string{ "deltashowremovedasdeleted", @@ -142,28 +142,32 @@ func (c Drives) NewDriveItemDeltaPager( "hierarchicalsharing", } - requestConfig := &drives.ItemItemsItemDeltaRequestBuilderGetRequestConfiguration{ - Headers: newPreferHeaders(preferHeaderItems...), - QueryParameters: &drives.ItemItemsItemDeltaRequestBuilderGetQueryParameters{ - Select: selectFields, - }, + options := &drives.ItemItemsItemDeltaRequestBuilderGetRequestConfiguration{ + Headers: newPreferHeaders(preferHeaderItems...), + QueryParameters: &drives.ItemItemsItemDeltaRequestBuilderGetQueryParameters{}, + } + + if len(selectProps) > 0 { + options.QueryParameters.Select = selectProps + } + + builder := c.Stable. + Client(). + Drives(). + ByDriveId(driveID). + Items(). + ByDriveItemId(onedrive.RootID). + Delta() + + if len(prevDeltaLink) > 0 { + builder = drives.NewItemItemsItemDeltaRequestBuilder(prevDeltaLink, c.Stable.Adapter()) } res := &DriveItemDeltaPageCtrl{ gs: c.Stable, driveID: driveID, - options: requestConfig, - builder: c.Stable. - Client(). - Drives(). - ByDriveId(driveID). 
-			Items().
-			ByDriveItemId(onedrive.RootID).
-			Delta(),
-	}
-
-	if len(link) > 0 {
-		res.builder = drives.NewItemItemsItemDeltaRequestBuilder(link, c.Stable.Adapter())
+		options: options,
+		builder: builder,
 	}
 
 	return res
@@ -193,6 +197,27 @@ func (p *DriveItemDeltaPageCtrl) ValidModTimes() bool {
 	return true
 }
 
+// EnumerateDriveItemsDelta enumerates all items in the specified drive
+// via delta queries, returning the accumulated items along with the
+// resulting DeltaUpdate.
+func (c Drives) EnumerateDriveItemsDelta(
+	ctx context.Context,
+	driveID string,
+	prevDeltaLink string,
+) (
+	[]models.DriveItemable,
+	DeltaUpdate,
+	error,
+) {
+	pager := c.newDriveItemDeltaPager(driveID, prevDeltaLink, DefaultDriveItemProps()...)
+
+	items, du, err := deltaEnumerateItems[models.DriveItemable](ctx, pager, prevDeltaLink)
+	if err != nil {
+		return nil, du, clues.Stack(err)
+	}
+
+	return items, du, nil
+}
+
 // ---------------------------------------------------------------------------
 // user's drives pager
 // ---------------------------------------------------------------------------
diff --git a/src/pkg/services/m365/api/drive_pager_test.go b/src/pkg/services/m365/api/drive_pager_test.go
index f28277eee..b75c3d320 100644
--- a/src/pkg/services/m365/api/drive_pager_test.go
+++ b/src/pkg/services/m365/api/drive_pager_test.go
@@ -178,3 +178,18 @@ func (suite *DrivePagerIntgSuite) TestDrives_GetItemIDsInContainer() {
 		})
 	}
 }
+
+func (suite *DrivePagerIntgSuite) TestEnumerateDriveItems() {
+	t := suite.T()
+
+	ctx, flush := tester.NewContext(t)
+	defer flush()
+
+	items, du, err := suite.its.
+		ac.
+		Drives().
+		EnumerateDriveItemsDelta(ctx, suite.its.user.driveID, "")
+	require.NoError(t, err, clues.ToCore(err))
+	require.NotEmpty(t, items, "no items found in user's drive")
+	assert.NotEmpty(t, du.URL, "should have a delta link")
+}
diff --git a/src/pkg/services/m365/api/drive_test.go b/src/pkg/services/m365/api/drive_test.go
index 28173c27a..1f9ccadca 100644
--- a/src/pkg/services/m365/api/drive_test.go
+++ b/src/pkg/services/m365/api/drive_test.go
@@ -17,6 +17,7 @@ import (
 	"github.com/alcionai/corso/src/internal/tester/tconfig"
 	"github.com/alcionai/corso/src/pkg/control"
 	"github.com/alcionai/corso/src/pkg/control/testdata"
+	"github.com/alcionai/corso/src/pkg/services/m365/api"
 )
 
 type DriveAPIIntgSuite struct {
@@ -50,20 +51,6 @@ func (suite *DriveAPIIntgSuite) TestDrives_CreatePagerAndGetPage() {
 	assert.NotNil(t, a)
 }
 
-// newItem initializes a `models.DriveItemable` that can be used as input to `createItem`
-func newItem(name string, folder bool) *models.DriveItem {
-	itemToCreate := models.NewDriveItem()
-	itemToCreate.SetName(&name)
-
-	if folder {
-		itemToCreate.SetFolder(models.NewFolder())
-	} else {
-		itemToCreate.SetFile(models.NewFile())
-	}
-
-	return itemToCreate
-}
-
 func (suite *DriveAPIIntgSuite) TestDrives_PostItemInContainer() {
 	t := suite.T()
 
@@ -78,12 +65,12 @@ func (suite *DriveAPIIntgSuite) TestDrives_PostItemInContainer() {
 		ctx,
 		suite.its.user.driveID,
 		suite.its.user.driveRootFolderID,
-		newItem(rc.Location, true),
+		api.NewDriveItem(rc.Location, true),
 		control.Replace)
 	require.NoError(t, err, clues.ToCore(err))
 
 	// generate a folder to use for collision testing
-	folder := newItem("collision", true)
+	folder := api.NewDriveItem("collision", true)
 	origFolder, err := acd.PostItemInContainer(
 		ctx,
 		suite.its.user.driveID,
@@ -93,7 +80,7 @@ func (suite *DriveAPIIntgSuite) TestDrives_PostItemInContainer() {
 	require.NoError(t, err, clues.ToCore(err))
 
 	// generate an item to use for collision testing
-	file := 
newItem("collision.txt", false) + file := api.NewDriveItem("collision.txt", false) origFile, err := acd.PostItemInContainer( ctx, suite.its.user.driveID, @@ -241,7 +228,7 @@ func (suite *DriveAPIIntgSuite) TestDrives_PostItemInContainer_replaceFolderRegr ctx, suite.its.user.driveID, suite.its.user.driveRootFolderID, - newItem(rc.Location, true), + api.NewDriveItem(rc.Location, true), // skip instead of replace here to get // an ErrItemAlreadyExistsConflict, just in case. control.Skip) @@ -249,7 +236,7 @@ func (suite *DriveAPIIntgSuite) TestDrives_PostItemInContainer_replaceFolderRegr // generate items within that folder for i := 0; i < 5; i++ { - file := newItem(fmt.Sprintf("collision_%d.txt", i), false) + file := api.NewDriveItem(fmt.Sprintf("collision_%d.txt", i), false) f, err := acd.PostItemInContainer( ctx, suite.its.user.driveID, @@ -265,7 +252,7 @@ func (suite *DriveAPIIntgSuite) TestDrives_PostItemInContainer_replaceFolderRegr ctx, suite.its.user.driveID, ptr.Val(folder.GetParentReference().GetId()), - newItem(rc.Location, true), + api.NewDriveItem(rc.Location, true), control.Replace) require.NoError(t, err, clues.ToCore(err)) require.NotEmpty(t, ptr.Val(resultFolder.GetId())) diff --git a/src/pkg/services/m365/api/item_pager.go b/src/pkg/services/m365/api/item_pager.go index 5effcb7a6..f991f2345 100644 --- a/src/pkg/services/m365/api/item_pager.go +++ b/src/pkg/services/m365/api/item_pager.go @@ -13,6 +13,20 @@ import ( "github.com/alcionai/corso/src/pkg/logger" ) +// --------------------------------------------------------------------------- +// common structs +// --------------------------------------------------------------------------- + +// DeltaUpdate holds the results of a current delta token. It normally +// gets produced when aggregating the addition and removal of items in +// a delta-queryable folder. +type DeltaUpdate struct { + // the deltaLink itself + URL string + // true if the old delta was marked as invalid + Reset bool +} + // --------------------------------------------------------------------------- // common interfaces // --------------------------------------------------------------------------- diff --git a/src/pkg/services/m365/api/mock/pager.go b/src/pkg/services/m365/api/mock/pager.go index b1818ac17..bccf5b428 100644 --- a/src/pkg/services/m365/api/mock/pager.go +++ b/src/pkg/services/m365/api/mock/pager.go @@ -32,10 +32,11 @@ func (dnl *DeltaNextLinkValues[T]) GetOdataDeltaLink() *string { } type PagerResult[T any] struct { - Values []T - NextLink *string - DeltaLink *string - Err error + Values []T + NextLink *string + DeltaLink *string + ResetDelta bool + Err error } // --------------------------------------------------------------------------- diff --git a/website/docs/support/known-issues.md b/website/docs/support/known-issues.md index 5655a82f3..ae56f8db4 100644 --- a/website/docs/support/known-issues.md +++ b/website/docs/support/known-issues.md @@ -33,3 +33,5 @@ Below is a list of known Corso issues and limitations: * Teams messages don't support Restore due to limited Graph API support for message creation. * Groups and Teams support is available in an early-access status, and may be subject to breaking changes. 
+ +* Restoring the data into a different Group from the one it was backed up from isn't currently supported diff --git a/website/package-lock.json b/website/package-lock.json index 6718a2196..decb98489 100644 --- a/website/package-lock.json +++ b/website/package-lock.json @@ -20,7 +20,7 @@ "feather-icons": "^4.29.1", "jarallax": "^2.1.4", "mdx-mermaid": "^1.3.2", - "mermaid": "^10.4.0", + "mermaid": "^10.5.0", "prism-react-renderer": "^1.3.5", "react": "^17.0.2", "react-dom": "^17.0.2", @@ -9363,9 +9363,9 @@ } }, "node_modules/mermaid": { - "version": "10.4.0", - "resolved": "https://registry.npmjs.org/mermaid/-/mermaid-10.4.0.tgz", - "integrity": "sha512-4QCQLp79lvz7UZxow5HUX7uWTPJOaQBVExduo91tliXC7v78i6kssZOPHxLL+Xs30KU72cpPn3g3imw/xm/gaw==", + "version": "10.5.0", + "resolved": "https://registry.npmjs.org/mermaid/-/mermaid-10.5.0.tgz", + "integrity": "sha512-9l0o1uUod78D3/FVYPGSsgV+Z0tSnzLBDiC9rVzvelPxuO80HbN1oDr9ofpPETQy9XpypPQa26fr09VzEPfvWA==", "dependencies": { "@braintree/sanitize-url": "^6.0.1", "@types/d3-scale": "^4.0.3", @@ -21895,9 +21895,9 @@ "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==" }, "mermaid": { - "version": "10.4.0", - "resolved": "https://registry.npmjs.org/mermaid/-/mermaid-10.4.0.tgz", - "integrity": "sha512-4QCQLp79lvz7UZxow5HUX7uWTPJOaQBVExduo91tliXC7v78i6kssZOPHxLL+Xs30KU72cpPn3g3imw/xm/gaw==", + "version": "10.5.0", + "resolved": "https://registry.npmjs.org/mermaid/-/mermaid-10.5.0.tgz", + "integrity": "sha512-9l0o1uUod78D3/FVYPGSsgV+Z0tSnzLBDiC9rVzvelPxuO80HbN1oDr9ofpPETQy9XpypPQa26fr09VzEPfvWA==", "requires": { "@braintree/sanitize-url": "^6.0.1", "@types/d3-scale": "^4.0.3", diff --git a/website/package.json b/website/package.json index ab05f0a2c..f53dbaa83 100644 --- a/website/package.json +++ b/website/package.json @@ -26,7 +26,7 @@ "feather-icons": "^4.29.1", "jarallax": "^2.1.4", "mdx-mermaid": "^1.3.2", - "mermaid": "^10.4.0", + "mermaid": "^10.5.0", "prism-react-renderer": "^1.3.5", "react": "^17.0.2", "react-dom": "^17.0.2",
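
For reviewers who want to see the reworked fault flow end to end, here is a minimal usage sketch (not part of the diff). It only leans on calls visible in this change — `fault.New`, `Bus.Local`, `AddRecoverable`, `Failure`, and `Recovered` — while `processItem`, `enumerate`, and the item IDs are hypothetical stand-ins.

```go
package main

import (
	"context"
	"errors"
	"fmt"

	"github.com/alcionai/corso/src/pkg/fault"
)

// processItem is a hypothetical worker: any error it returns is
// aggregated on the bus rather than aborting the whole run.
func processItem(id string) error {
	if id == "bad" {
		return errors.New("cannot process " + id)
	}

	return nil
}

// enumerate mirrors the pattern described in the Local() docs: record
// recoverable errors on a local bus, and return the local failure.
func enumerate(ctx context.Context, ids []string, errs *fault.Bus) error {
	el := errs.Local()

	for _, id := range ids {
		// a hard failure (set directly, or by failFast promotion)
		// ends the iteration early.
		if el.Failure() != nil {
			break
		}

		if err := processItem(id); err != nil {
			el.AddRecoverable(ctx, err)
		}
	}

	return el.Failure()
}

func main() {
	ctx := context.Background()
	errs := fault.New(false) // failFast disabled: keep going on errors

	if err := enumerate(ctx, []string{"ok", "bad"}, errs); err != nil {
		fmt.Println("hard failure:", err)
	}

	// recoverable errors recorded on the local bus were promoted here.
	fmt.Println("recovered:", len(errs.Recovered()))
}
```

With failFast disabled, the "bad" item lands in the root bus's `Recovered()` slice and the run completes; with `fault.New(true)` the same error would promote to `Failure()` on both busses and stop the loop after the first hit.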