Merge branch 'main' into addErrorEvent

commit ec9c72be10

CHANGELOG.md (24 changes)
@@ -7,6 +7,11 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

 ## [Unreleased] (beta)

+### Added
+- Skips graph calls for expired item download URLs.
+
+## [v0.14.0] (beta) - 2023-10-09
+
 ### Added
 - Enables local or network-attached storage for Corso repositories.
 - Reduce backup runtime for OneDrive and SharePoint incremental backups that have no file changes.
@@ -14,17 +19,23 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 - Added `--backups` flag to delete multiple backups in `corso backup delete` command.
 - Backup now includes all sites that belong to a team, not just the root site.

-## Fixed
+### Fixed
 - Teams Channels that cannot support delta tokens (those without messages) fall back to non-delta enumeration and no longer fail a backup.

+### Known issues
+- Restoring the data into a different Group from the one it was backed up from is not currently supported.
+
+### Other
+- Groups and Teams service support is still in feature preview.
+
 ## [v0.13.0] (beta) - 2023-09-18

 ### Added
 - Groups and Teams service support available as a feature preview! Channel messages and Files are now available for backup and restore in the CLI: `corso backup create groups --group '*'`
-  * The cli commands for "groups" and "teams" can be used interchangably, and will operate on the same backup data.
-  * New permissions are required to backup Channel messages. See the [Corso Documentation](https://corsobackup.io/docs/setup/m365-access/#configure-required-permissions) for complete details.
+  - The cli commands for "groups" and "teams" can be used interchangeably, and will operate on the same backup data.
+  - New permissions are required to backup Channel messages. See the [Corso Documentation](https://corsobackup.io/docs/setup/m365-access/#configure-required-permissions) for complete details.
     Even though Channel message restoration is not available, message write permissions are included to cover future integration.
-  * This is a feature preview, and may be subject to breaking changes based on feedback and testing.
+  - This is a feature preview, and may be subject to breaking changes based on feedback and testing.

 ### Changed
 - Switched to Go 1.21
@@ -379,7 +390,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 - Miscellaneous
   - Optional usage statistics reporting ([RM-35](https://github.com/alcionai/corso-roadmap/issues/35))

-[Unreleased]: https://github.com/alcionai/corso/compare/v0.11.1...HEAD
+[Unreleased]: https://github.com/alcionai/corso/compare/v0.14.0...HEAD
+[v0.14.0]: https://github.com/alcionai/corso/compare/v0.13.0...v0.14.0
+[v0.13.0]: https://github.com/alcionai/corso/compare/v0.12.0...v0.13.0
+[v0.12.0]: https://github.com/alcionai/corso/compare/v0.11.1...v0.12.0
 [v0.11.1]: https://github.com/alcionai/corso/compare/v0.11.0...v0.11.1
 [v0.11.0]: https://github.com/alcionai/corso/compare/v0.10.0...v0.11.0
 [v0.10.0]: https://github.com/alcionai/corso/compare/v0.9.0...v0.10.0
@@ -317,6 +317,7 @@ func genericListCommand(
    b.Print(ctx)
    fe.PrintItems(
        ctx,
+       !ifShow(flags.ListAlertsFV),
        !ifShow(flags.ListFailedItemsFV),
        !ifShow(flags.ListSkippedItemsFV),
        !ifShow(flags.ListRecoveredErrorsFV))
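The added argument threads the new alerts toggle through to the printed output, alongside the existing failed/skipped/recovered toggles. `PrintItems` apparently takes hide-booleans, so the call site negates `ifShow`. A minimal sketch of that convention — the body of `ifShow` is not shown in this diff, so the implementation below is an assumption:

```go
package main

import (
	"fmt"
	"strings"
)

// ifShow reports whether a toggle flag value requests display. The real
// helper's body is not visible in this diff; this assumed implementation
// just matches the call sites above, where each flag holds "show" or "hide".
func ifShow(flagValue string) bool {
	return strings.EqualFold(flagValue, "show")
}

func main() {
	// PrintItems receives hide-booleans, hence the negation at the call site.
	fmt.Println("hide alerts when --alerts=show:", !ifShow("show")) // false
	fmt.Println("hide alerts when --alerts=hide:", !ifShow("hide")) // true
}
```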
@@ -17,7 +17,6 @@ import (
    "github.com/alcionai/corso/src/cli/print"
    "github.com/alcionai/corso/src/cli/repo"
    "github.com/alcionai/corso/src/cli/restore"
-   "github.com/alcionai/corso/src/cli/utils"
    "github.com/alcionai/corso/src/internal/observe"
    "github.com/alcionai/corso/src/internal/version"
    "github.com/alcionai/corso/src/pkg/logger"
@@ -61,43 +60,6 @@ func preRun(cc *cobra.Command, args []string) error {
        print.Infof(ctx, "Logging to file: %s", logger.ResolvedLogFile)
    }

-   avoidTheseDescription := []string{
-       "Initialize a repository.",
-       "Initialize a S3 repository",
-       "Connect to a S3 repository",
-       "Initialize a repository on local or network storage.",
-       "Connect to a repository on local or network storage.",
-       "Help about any command",
-       "Free, Secure, Open-Source Backup for M365.",
-       "env var guide",
-   }
-
-   if !slices.Contains(avoidTheseDescription, cc.Short) {
-       provider, overrides, err := utils.GetStorageProviderAndOverrides(ctx, cc)
-       if err != nil {
-           return err
-       }
-
-       cfg, err := config.GetConfigRepoDetails(
-           ctx,
-           provider,
-           true,
-           false,
-           overrides)
-       if err != nil {
-           log.Error("Error while getting config info to run command: ", cc.Use)
-           return err
-       }
-
-       utils.SendStartCorsoEvent(
-           ctx,
-           cfg.Storage,
-           cfg.Account.ID(),
-           map[string]any{"command": cc.CommandPath()},
-           cfg.RepoID,
-           utils.Control())
-   }
-
    // handle deprecated user flag in Backup exchange command
    if cc.CommandPath() == "corso backup create exchange" {
        handleMailBoxFlag(ctx, cc, flagSl)
@@ -8,6 +8,7 @@ func AddAllBackupListFlags(cmd *cobra.Command) {
    AddFailedItemsFN(cmd)
    AddSkippedItemsFN(cmd)
    AddRecoveredErrorsFN(cmd)
+   AddAlertsFN(cmd)
 }

 func AddFailedItemsFN(cmd *cobra.Command) {
@@ -27,3 +28,9 @@ func AddRecoveredErrorsFN(cmd *cobra.Command) {
        &ListRecoveredErrorsFV, RecoveredErrorsFN, Show,
        "Toggles showing or hiding the list of errors which Corso recovered from.")
 }
+
+func AddAlertsFN(cmd *cobra.Command) {
+   cmd.Flags().StringVar(
+       &ListAlertsFV, AlertsFN, Show,
+       "Toggles showing or hiding the list of alerts produced during the operation.")
+}
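Together, these hunks register a string-valued `--alerts` toggle in the shared backup-list flag set. A self-contained sketch of the same cobra pattern; the constant and variable names mirror the diff, while the command scaffolding around them is illustrative only:

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

// Constants and flag variable mirroring the names introduced in the diff.
const (
	AlertsFN = "alerts"
	Show     = "show"
)

var ListAlertsFV string

// AddAlertsFN registers the --alerts toggle, defaulting to "show".
func AddAlertsFN(cmd *cobra.Command) {
	cmd.Flags().StringVar(
		&ListAlertsFV, AlertsFN, Show,
		"Toggles showing or hiding the list of alerts produced during the operation.")
}

func main() {
	// Illustrative command; the real flag set is attached to corso's
	// backup list commands.
	cmd := &cobra.Command{
		Use: "list",
		RunE: func(cc *cobra.Command, args []string) error {
			fmt.Println("alerts toggle:", ListAlertsFV)
			return nil
		},
	}

	AddAlertsFN(cmd)
	cmd.SetArgs([]string{"--alerts", "hide"})
	_ = cmd.Execute() // prints: alerts toggle: hide
}
```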
@@ -5,6 +5,7 @@ import (
 )

 const (
+   AlertsFN                    = "alerts"
    DeltaPageSizeFN             = "delta-page-size"
    DisableConcurrencyLimiterFN = "disable-concurrency-limiter"
    DisableDeltaFN              = "disable-delta"
@@ -31,6 +32,7 @@ var (
    EnableImmutableIDFV   bool
    FailFastFV            bool
    FetchParallelismFV    int
+   ListAlertsFV          string
    ListFailedItemsFV     string
    ListSkippedItemsFV    string
    ListRecoveredErrorsFV string
@@ -19,7 +19,7 @@ var (
 )

 // AddRestoreConfigFlags adds the restore config flag set.
-func AddRestoreConfigFlags(cmd *cobra.Command) {
+func AddRestoreConfigFlags(cmd *cobra.Command, canRestoreToAlternate bool) {
    fs := cmd.Flags()
    fs.StringVar(
        &CollisionsFV, CollisionsFN, string(control.Skip),
@@ -28,7 +28,10 @@ func AddRestoreConfigFlags(cmd *cobra.Command) {
    fs.StringVar(
        &DestinationFV, DestinationFN, "",
        "Overrides the folder where items get restored; '/' places items into their original location")
-   fs.StringVar(
-       &ToResourceFV, ToResourceFN, "",
-       "Overrides the protected resource (mailbox, site, user, etc) where data gets restored")
+
+   if canRestoreToAlternate {
+       fs.StringVar(
+           &ToResourceFV, ToResourceFN, "",
+           "Overrides the protected resource (mailbox, site, user, etc) where data gets restored")
+   }
 }
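The new `canRestoreToAlternate` parameter lets each service decide whether to expose `--to-resource` at all; the later hunks pass `true` for Exchange, OneDrive, and SharePoint and `false` for Groups, which matches the Known issues entry in the changelog. A condensed sketch of the gate, with illustrative command construction:

```go
package main

import "github.com/spf13/cobra"

var ToResourceFV string

// AddRestoreConfigFlags registers restore flags; --to-resource is only
// exposed when the service supports restoring into an alternate resource.
func AddRestoreConfigFlags(cmd *cobra.Command, canRestoreToAlternate bool) {
	if canRestoreToAlternate {
		cmd.Flags().StringVar(
			&ToResourceFV, "to-resource", "",
			"Overrides the protected resource (mailbox, site, user, etc) where data gets restored")
	}
}

func main() {
	exchange := &cobra.Command{Use: "exchange"}
	groups := &cobra.Command{Use: "groups"}

	AddRestoreConfigFlags(exchange, true) // Exchange can restore to an alternate mailbox
	AddRestoreConfigFlags(groups, false)  // Groups cannot (see the Known issues entry above)
}
```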
src/cli/flags/testdata/backup_list.go (2 changes, vendored)

@@ -11,6 +11,7 @@ import (

 func PreparedBackupListFlags() []string {
    return []string{
+       "--" + flags.AlertsFN, flags.Show,
        "--" + flags.FailedItemsFN, flags.Show,
        "--" + flags.SkippedItemsFN, flags.Show,
        "--" + flags.RecoveredErrorsFN, flags.Show,
@@ -18,6 +19,7 @@ func PreparedBackupListFlags() []string {
 }

 func AssertBackupListFlags(t *testing.T, cmd *cobra.Command) {
+   assert.Equal(t, flags.Show, flags.ListAlertsFV)
    assert.Equal(t, flags.Show, flags.ListFailedItemsFV)
    assert.Equal(t, flags.Show, flags.ListSkippedItemsFV)
    assert.Equal(t, flags.Show, flags.ListRecoveredErrorsFV)
@@ -87,15 +87,6 @@ func initFilesystemCmd(cmd *cobra.Command, args []string) error {
    // Retention is not supported for filesystem repos.
    retentionOpts := ctrlRepo.Retention{}

-   // SendStartCorsoEvent uses distict ID as tenant ID because repoID is still not generated
-   utils.SendStartCorsoEvent(
-       ctx,
-       cfg.Storage,
-       cfg.Account.ID(),
-       map[string]any{"command": "init repo"},
-       cfg.Account.ID(),
-       opt)
-
    storageCfg, err := cfg.Storage.ToFilesystemConfig()
    if err != nil {
        return Only(ctx, clues.Wrap(err, "Retrieving filesystem configuration"))
@@ -102,15 +102,6 @@ func initS3Cmd(cmd *cobra.Command, args []string) error {
        return Only(ctx, err)
    }

-   // SendStartCorsoEvent uses distict ID as tenant ID because repoID is still not generated
-   utils.SendStartCorsoEvent(
-       ctx,
-       cfg.Storage,
-       cfg.Account.ID(),
-       map[string]any{"command": "init repo"},
-       cfg.Account.ID(),
-       opt)
-
    s3Cfg, err := cfg.Storage.ToS3Config()
    if err != nil {
        return Only(ctx, clues.Wrap(err, "Retrieving s3 configuration"))
@@ -28,7 +28,7 @@ func addExchangeCommands(cmd *cobra.Command) *cobra.Command {

        flags.AddBackupIDFlag(c, true)
        flags.AddExchangeDetailsAndRestoreFlags(c)
-       flags.AddRestoreConfigFlags(c)
+       flags.AddRestoreConfigFlags(c, true)
        flags.AddFailFastFlag(c)
    }

@@ -30,7 +30,7 @@ func addGroupsCommands(cmd *cobra.Command) *cobra.Command {
        flags.AddNoPermissionsFlag(c)
        flags.AddSharePointDetailsAndRestoreFlags(c) // for sp restores
        flags.AddSiteIDFlag(c)
-       flags.AddRestoreConfigFlags(c)
+       flags.AddRestoreConfigFlags(c, false)
        flags.AddFailFastFlag(c)
    }

@@ -65,7 +65,7 @@ func (suite *GroupsUnitSuite) TestAddGroupsCommands() {
                "--" + flags.PageFolderFN, flagsTD.FlgInputs(flagsTD.PageFolderInput),
                "--" + flags.CollisionsFN, flagsTD.Collisions,
                "--" + flags.DestinationFN, flagsTD.Destination,
-               "--" + flags.ToResourceFN, flagsTD.ToResource,
+               // "--" + flags.ToResourceFN, flagsTD.ToResource,
                "--" + flags.NoPermissionsFN,
            },
            flagsTD.PreparedProviderFlags(),
@@ -91,7 +91,7 @@ func (suite *GroupsUnitSuite) TestAddGroupsCommands() {
            assert.Equal(t, flagsTD.FileModifiedBeforeInput, opts.FileModifiedBefore)
            assert.Equal(t, flagsTD.Collisions, opts.RestoreCfg.Collisions)
            assert.Equal(t, flagsTD.Destination, opts.RestoreCfg.Destination)
-           assert.Equal(t, flagsTD.ToResource, opts.RestoreCfg.ProtectedResource)
+           // assert.Equal(t, flagsTD.ToResource, opts.RestoreCfg.ProtectedResource)
            assert.True(t, flags.NoPermissionsFV)
            flagsTD.AssertProviderFlags(t, cmd)
            flagsTD.AssertStorageFlags(t, cmd)
@@ -29,7 +29,7 @@ func addOneDriveCommands(cmd *cobra.Command) *cobra.Command {
        flags.AddBackupIDFlag(c, true)
        flags.AddOneDriveDetailsAndRestoreFlags(c)
        flags.AddNoPermissionsFlag(c)
-       flags.AddRestoreConfigFlags(c)
+       flags.AddRestoreConfigFlags(c, true)
        flags.AddFailFastFlag(c)
    }

@@ -29,7 +29,7 @@ func addSharePointCommands(cmd *cobra.Command) *cobra.Command {
        flags.AddBackupIDFlag(c, true)
        flags.AddSharePointDetailsAndRestoreFlags(c)
        flags.AddNoPermissionsFlag(c)
-       flags.AddRestoreConfigFlags(c)
+       flags.AddRestoreConfigFlags(c, true)
        flags.AddFailFastFlag(c)
    }

@@ -239,24 +239,6 @@ func splitFoldersIntoContainsAndPrefix(folders []string) ([]string, []string) {
    return containsFolders, prefixFolders
 }

-// SendStartCorsoEvent utility sends corso start event at start of each action
-func SendStartCorsoEvent(
-   ctx context.Context,
-   s storage.Storage,
-   tenID string,
-   data map[string]any,
-   repoID string,
-   opts control.Options,
-) {
-   bus, err := events.NewBus(ctx, s, tenID, opts)
-   if err != nil {
-       logger.CtxErr(ctx, err).Info("sending start event")
-   }
-
-   bus.SetRepoID(repoID)
-   bus.Event(ctx, events.CorsoStart, data)
-}
-
 // GetStorageProviderAndOverrides returns the storage provider type and
 // any flags specified on the command line which are storage provider specific.
 func GetStorageProviderAndOverrides(
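With the call sites in preRun and the repo-init commands gone, the `SendStartCorsoEvent` helper itself is deleted here. For reference, a condensed, self-contained sketch of the shape it implemented — the `Bus` type below is a stand-in for Corso's internal `events` package, whose API is only partially visible in the removed lines:

```go
package main

import (
	"context"
	"log"
)

// Bus stands in for Corso's internal events bus; only SetRepoID and Event
// appear in the removed helper, and this implementation is illustrative.
// Note the removed code logged a NewBus failure but still used the bus.
type Bus struct{ repoID string }

func (b *Bus) SetRepoID(id string) { b.repoID = id }

func (b *Bus) Event(ctx context.Context, key string, data map[string]any) {
	log.Printf("event=%s repo=%s data=%v", key, b.repoID, data)
}

// sendStartEvent condenses the deleted SendStartCorsoEvent: construct a bus,
// tag it with the repository ID, and emit a CorsoStart event carrying
// per-command metadata.
func sendStartEvent(ctx context.Context, repoID string, data map[string]any) {
	bus := &Bus{}
	bus.SetRepoID(repoID)
	bus.Event(ctx, "CorsoStart", data)
}

func main() {
	sendStartEvent(context.Background(), "repo-1234", map[string]any{"command": "init repo"})
}
```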
@@ -229,7 +229,7 @@ elseif (![string]::IsNullOrEmpty($Site)) {
    }
 }
 else {
-   Write-Host "User (for OneDrvie) or Site (for Sharpeoint) is required"
+   Write-Host "User (for OneDrive) or Site (for Sharepoint) is required"
    Exit
 }

src/go.mod (48 changes)

@@ -10,15 +10,16 @@ require (
    github.com/armon/go-metrics v0.4.1
    github.com/aws/aws-xray-sdk-go v1.8.2
    github.com/cenkalti/backoff/v4 v4.2.1
+   github.com/golang-jwt/jwt/v5 v5.0.0
    github.com/google/uuid v1.3.1
    github.com/h2non/gock v1.2.0
    github.com/kopia/kopia v0.13.0
-   github.com/microsoft/kiota-abstractions-go v1.2.1
+   github.com/microsoft/kiota-abstractions-go v1.2.3
    github.com/microsoft/kiota-authentication-azure-go v1.0.0
    github.com/microsoft/kiota-http-go v1.1.0
    github.com/microsoft/kiota-serialization-form-go v1.0.0
    github.com/microsoft/kiota-serialization-json-go v1.0.4
-   github.com/microsoftgraph/msgraph-sdk-go v1.19.0
+   github.com/microsoftgraph/msgraph-sdk-go v1.20.0
    github.com/microsoftgraph/msgraph-sdk-go-core v1.0.0
    github.com/pkg/errors v0.9.1
    github.com/puzpuzpuz/xsync/v2 v2.5.1
@@ -27,7 +28,7 @@ require (
    github.com/spf13/cast v1.5.1
    github.com/spf13/cobra v1.7.0
    github.com/spf13/pflag v1.0.5
-   github.com/spf13/viper v1.16.0
+   github.com/spf13/viper v1.17.0
    github.com/stretchr/testify v1.8.4
    github.com/tidwall/pretty v1.2.1
    github.com/tomlazar/table v0.1.2
@@ -35,7 +36,7 @@ require (
    go.uber.org/zap v1.26.0
    golang.org/x/exp v0.0.0-20230905200255-921286631fa9
    golang.org/x/time v0.3.0
-   golang.org/x/tools v0.13.0
+   golang.org/x/tools v0.14.0
    gotest.tools/v3 v3.5.1
 )

@@ -46,7 +47,6 @@ require (
    github.com/aws/aws-sdk-go v1.45.0 // indirect
    github.com/fsnotify/fsnotify v1.6.0 // indirect
    github.com/gofrs/flock v0.8.1 // indirect
-   github.com/golang-jwt/jwt/v5 v5.0.0 // indirect
    github.com/google/go-cmp v0.5.9 // indirect
    github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 // indirect
    github.com/hashicorp/cronexpr v1.1.2 // indirect
@@ -55,14 +55,17 @@ require (
    github.com/magiconair/properties v1.8.7 // indirect
    github.com/microsoft/kiota-serialization-multipart-go v1.0.0 // indirect
    github.com/mitchellh/mapstructure v1.5.0 // indirect
-   github.com/pelletier/go-toml/v2 v2.0.9 // indirect
-   github.com/spf13/afero v1.9.5 // indirect
-   github.com/spf13/jwalterweatherman v1.1.0 // indirect
-   github.com/subosito/gotenv v1.4.2 // indirect
+   github.com/pelletier/go-toml/v2 v2.1.0 // indirect
+   github.com/sagikazarmark/locafero v0.3.0 // indirect
+   github.com/sagikazarmark/slog-shim v0.1.0 // indirect
+   github.com/sourcegraph/conc v0.3.0 // indirect
+   github.com/spf13/afero v1.10.0 // indirect
+   github.com/std-uritemplate/std-uritemplate/go v0.0.42 // indirect
+   github.com/subosito/gotenv v1.6.0 // indirect
    github.com/valyala/bytebufferpool v1.0.0 // indirect
    github.com/valyala/fasthttp v1.48.0 // indirect
-   go.opentelemetry.io/otel/metric v1.18.0 // indirect
-   google.golang.org/genproto/googleapis/rpc v0.0.0-20230807174057-1744710a1577 // indirect
+   go.opentelemetry.io/otel/metric v1.19.0 // indirect
+   google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 // indirect
 )

 require (
@@ -74,7 +77,7 @@ require (
    github.com/cespare/xxhash/v2 v2.2.0 // indirect
    github.com/chmduquesne/rollinghash v4.0.0+incompatible // indirect
    github.com/cjlapao/common-go v0.0.39 // indirect
-   github.com/davecgh/go-spew v1.1.1 // indirect
+   github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
    github.com/dustin/go-humanize v1.0.1
    github.com/edsrzf/mmap-go v1.1.0 // indirect
    github.com/go-logr/logr v1.2.4 // indirect
@@ -84,7 +87,7 @@ require (
    github.com/inconshreveable/mousetrap v1.1.0 // indirect
    github.com/jmespath/go-jmespath v0.4.0 // indirect
    github.com/json-iterator/go v1.1.12 // indirect
-   github.com/klauspost/compress v1.16.7 // indirect
+   github.com/klauspost/compress v1.17.0 // indirect
    github.com/klauspost/cpuid/v2 v2.2.5 // indirect
    github.com/klauspost/pgzip v1.2.6 // indirect
    github.com/klauspost/reedsolomon v1.11.8 // indirect
@@ -103,7 +106,7 @@ require (
    github.com/natefinch/atomic v1.0.1 // indirect
    github.com/pierrec/lz4 v2.6.1+incompatible // indirect
    github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect
-   github.com/pmezard/go-difflib v1.0.0 // indirect
+   github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
    github.com/prometheus/client_golang v1.16.0 // indirect
    github.com/prometheus/client_model v0.4.0 // indirect
    github.com/prometheus/common v0.44.0 // indirect
@@ -115,18 +118,17 @@ require (
    github.com/tidwall/gjson v1.15.0 // indirect
    github.com/tidwall/match v1.1.1 // indirect
    github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c // indirect
-   github.com/yosida95/uritemplate/v3 v3.0.2 // indirect
    github.com/zeebo/blake3 v0.2.3 // indirect
-   go.opentelemetry.io/otel v1.18.0 // indirect
-   go.opentelemetry.io/otel/trace v1.18.0 // indirect
+   go.opentelemetry.io/otel v1.19.0 // indirect
+   go.opentelemetry.io/otel/trace v1.19.0 // indirect
    go.uber.org/multierr v1.11.0 // indirect
-   golang.org/x/crypto v0.13.0 // indirect
-   golang.org/x/mod v0.12.0 // indirect
-   golang.org/x/net v0.15.0
-   golang.org/x/sync v0.3.0 // indirect
-   golang.org/x/sys v0.12.0 // indirect
+   golang.org/x/crypto v0.14.0 // indirect
+   golang.org/x/mod v0.13.0 // indirect
+   golang.org/x/net v0.16.0
+   golang.org/x/sync v0.4.0 // indirect
+   golang.org/x/sys v0.13.0 // indirect
    golang.org/x/text v0.13.0 // indirect
-   google.golang.org/grpc v1.57.0 // indirect
+   google.golang.org/grpc v1.58.2 // indirect
    google.golang.org/protobuf v1.31.0 // indirect
    gopkg.in/ini.v1 v1.67.0 // indirect
    gopkg.in/yaml.v3 v3.0.1 // indirect
src/go.sum (90 changes)

@@ -102,8 +102,9 @@ github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46t
 github.com/danieljoos/wincred v1.2.0 h1:ozqKHaLK0W/ii4KVbbvluM91W2H3Sh0BncbUNPS7jLE=
 github.com/danieljoos/wincred v1.2.0/go.mod h1:FzQLLMKBFdvu+osBrnFODiv32YGwCfx0SkRa/eYHgec=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
 github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
 github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
@@ -246,8 +247,8 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1
 github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
 github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I=
-github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
+github.com/klauspost/compress v1.17.0 h1:Rnbp4K9EjcDuVuHtd0dgA4qNuv9yKDYKK1ulpJwgrqM=
+github.com/klauspost/compress v1.17.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
 github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
 github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c=
 github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
@@ -287,8 +288,8 @@ github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zk
 github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
 github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d h1:5PJl274Y63IEHC+7izoQE9x6ikvDFZS2mDVS3drnohI=
 github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
-github.com/microsoft/kiota-abstractions-go v1.2.1 h1:TnLF7rjy1GfhuGK2ra/a3Vuz6piFXTR1OfdNoqesagA=
-github.com/microsoft/kiota-abstractions-go v1.2.1/go.mod h1:rEeeaytcnal/If3f1tz6/spFz4V+Hiqvz3rxF+oWQFA=
+github.com/microsoft/kiota-abstractions-go v1.2.3 h1:ir+p5o/0ytcLunikHSylhYyCm2Ojvoq3pXWSYomOACc=
+github.com/microsoft/kiota-abstractions-go v1.2.3/go.mod h1:yPSuzNSOIVQSFFe1iT+3Lu5zmis22E8Wg+bkyjhd+pY=
 github.com/microsoft/kiota-authentication-azure-go v1.0.0 h1:29FNZZ/4nnCOwFcGWlB/sxPvWz487HA2bXH8jR5k2Rk=
 github.com/microsoft/kiota-authentication-azure-go v1.0.0/go.mod h1:rnx3PRlkGdXDcA/0lZQTbBwyYGmc+3POt7HpE/e4jGw=
 github.com/microsoft/kiota-http-go v1.1.0 h1:L5I93EiNtlP/X6YzeTlhjWt7Q1DxzC9CmWSVtX3b0tE=
@@ -301,8 +302,8 @@ github.com/microsoft/kiota-serialization-multipart-go v1.0.0 h1:3O5sb5Zj+moLBiJy
 github.com/microsoft/kiota-serialization-multipart-go v1.0.0/go.mod h1:yauLeBTpANk4L03XD985akNysG24SnRJGaveZf+p4so=
 github.com/microsoft/kiota-serialization-text-go v1.0.0 h1:XOaRhAXy+g8ZVpcq7x7a0jlETWnWrEum0RhmbYrTFnA=
 github.com/microsoft/kiota-serialization-text-go v1.0.0/go.mod h1:sM1/C6ecnQ7IquQOGUrUldaO5wj+9+v7G2W3sQ3fy6M=
-github.com/microsoftgraph/msgraph-sdk-go v1.19.0 h1:hx+SvDTm5ENYZFqmMIskF7tOn48zzT2Xv3OVFrxl2dc=
-github.com/microsoftgraph/msgraph-sdk-go v1.19.0/go.mod h1:3DArbqPS7riix0VsJhdtYsgPaAFAH9Jer64psW55riI=
+github.com/microsoftgraph/msgraph-sdk-go v1.20.0 h1:Hi8URs+Ll07+GojbY9lyuYUMj8rxI4mcYW+GISO7BTA=
+github.com/microsoftgraph/msgraph-sdk-go v1.20.0/go.mod h1:UTUjxLPExc1K+YLmFeyEyep6vYd1GOj2bLMSd7/lPWE=
 github.com/microsoftgraph/msgraph-sdk-go-core v1.0.0 h1:7NWTfyXvOjoizW7PmxNp3+8wCKPgpODs/D1cUZ3fkAY=
 github.com/microsoftgraph/msgraph-sdk-go-core v1.0.0/go.mod h1:tQb4q3YMIj2dWhhXhQSJ4ELpol931ANKzHSYK5kX1qE=
 github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
@@ -327,8 +328,8 @@ github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32 h1:W6apQkHrMkS0Muv8G/TipAy
 github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32/go.mod h1:9wM+0iRr9ahx58uYLpLIr5fm8diHn0JbqRycJi6w0Ms=
 github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
 github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
-github.com/pelletier/go-toml/v2 v2.0.9 h1:uH2qQXheeefCCkuBBSLi7jCiSmj3VRh2+Goq2N7Xxu0=
-github.com/pelletier/go-toml/v2 v2.0.9/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
+github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4=
+github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
 github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM=
 github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
 github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU=
@@ -338,8 +339,9 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
 github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
 github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
@@ -373,26 +375,32 @@ github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
 github.com/rudderlabs/analytics-go v3.3.3+incompatible h1:OG0XlKoXfr539e2t1dXtTB+Gr89uFW+OUNQBVhHIIBY=
 github.com/rudderlabs/analytics-go v3.3.3+incompatible/go.mod h1:LF8/ty9kUX4PTY3l5c97K3nZZaX5Hwsvt+NBaRL/f30=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/sagikazarmark/locafero v0.3.0 h1:zT7VEGWC2DTflmccN/5T1etyKvxSxpHsjb9cJvm4SvQ=
+github.com/sagikazarmark/locafero v0.3.0/go.mod h1:w+v7UsPNFwzF1cHuOajOOzoq4U7v/ig1mpRjqV+Bu1U=
+github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
+github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
 github.com/segmentio/backo-go v1.0.1 h1:68RQccglxZeyURy93ASB/2kc9QudzgIDexJ927N++y4=
 github.com/segmentio/backo-go v1.0.1/go.mod h1:9/Rh6yILuLysoQnZ2oNooD2g7aBnvM7r/fNVxRNWfBc=
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
 github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
 github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
 github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
+github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
 github.com/spatialcurrent/go-lazy v0.0.0-20211115014721-47315cc003d1 h1:lQ3JvmcVO1/AMFbabvUSJ4YtJRpEAX9Qza73p5j03sw=
 github.com/spatialcurrent/go-lazy v0.0.0-20211115014721-47315cc003d1/go.mod h1:4aKqcbhASNqjbrG0h9BmkzcWvPJGxbef4B+j0XfFrZo=
-github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM=
-github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ=
+github.com/spf13/afero v1.10.0 h1:EaGW2JJh15aKOejeuJ+wpFSHnbd7GE6Wvp3TsNhb6LY=
+github.com/spf13/afero v1.10.0/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ=
 github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA=
 github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48=
 github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I=
 github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0=
-github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
-github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
 github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
 github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/viper v1.16.0 h1:rGGH0XDZhdUOryiDWjmIvUSWpbNqisK8Wk0Vyefw8hc=
-github.com/spf13/viper v1.16.0/go.mod h1:yg78JgCJcbrQOvV9YLXgkLaZqUidkY9K+Dd1FofRzQg=
+github.com/spf13/viper v1.17.0 h1:I5txKw7MJasPL/BrfkbA0Jyo/oELqVmux4pR/UxOMfI=
+github.com/spf13/viper v1.17.0/go.mod h1:BmMMMLQXSbcHK6KAOiFLz0l5JHrU89OdIRHvsk0+yVI=
+github.com/std-uritemplate/std-uritemplate/go v0.0.42 h1:rG+XlE4drkVWs2NLfGS15N+vg+CUcjXElQKvJ0fctlI=
+github.com/std-uritemplate/std-uritemplate/go v0.0.42/go.mod h1:Qov4Ay4U83j37XjgxMYevGJFLbnZ2o9cEOhGufBKgKY=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
@@ -406,8 +414,8 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
 github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
 github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
 github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
-github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8=
-github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0=
+github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
+github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
 github.com/tg123/go-htpasswd v1.2.1 h1:i4wfsX1KvvkyoMiHZzjS0VzbAPWfxzI8INcZAKtutoU=
 github.com/tg123/go-htpasswd v1.2.1/go.mod h1:erHp1B86KXdwQf1X5ZrLb7erXZnWueEQezb2dql4q58=
 github.com/tidwall/gjson v1.15.0 h1:5n/pM+v3r5ujuNl4YLZLsQ+UE5jlkLVm7jMzT5Mpolw=
@@ -428,8 +436,6 @@ github.com/vbauerster/mpb/v8 v8.1.6 h1:EswHDkAsy4OQ7QBAmU1MUPz4vHzl6KlINjlh7vJox
 github.com/vbauerster/mpb/v8 v8.1.6/go.mod h1:O9/Wl8X9dUbR63tZ41MLIAxrtNfwlpwUhGkeYugUPW8=
 github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c h1:3lbZUMbMiGUW/LMkfsEABsc5zNT9+b1CvsJx47JzJ8g=
 github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c/go.mod h1:UrdRz5enIKZ63MEE3IF9l2/ebyx59GyGgPi+tICQdmM=
-github.com/yosida95/uritemplate/v3 v3.0.2 h1:Ed3Oyj9yrmi9087+NczuL5BwkIc4wvTb5zIM+UJPGz4=
-github.com/yosida95/uritemplate/v3 v3.0.2/go.mod h1:ILOh0sOhIJR3+L/8afwt/kE++YT040gmv5BQTMR2HP4=
 github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@@ -449,12 +455,12 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
-go.opentelemetry.io/otel v1.18.0 h1:TgVozPGZ01nHyDZxK5WGPFB9QexeTMXEH7+tIClWfzs=
-go.opentelemetry.io/otel v1.18.0/go.mod h1:9lWqYO0Db579XzVuCKFNPDl4s73Voa+zEck3wHaAYQI=
-go.opentelemetry.io/otel/metric v1.18.0 h1:JwVzw94UYmbx3ej++CwLUQZxEODDj/pOuTCvzhtRrSQ=
-go.opentelemetry.io/otel/metric v1.18.0/go.mod h1:nNSpsVDjWGfb7chbRLUNW+PBNdcSTHD4Uu5pfFMOI0k=
-go.opentelemetry.io/otel/trace v1.18.0 h1:NY+czwbHbmndxojTEKiSMHkG2ClNH2PwmcHrdo0JY10=
-go.opentelemetry.io/otel/trace v1.18.0/go.mod h1:T2+SGJGuYZY3bjj5rgh/hN7KIrlpWC5nS8Mjvzckz+0=
+go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs=
+go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY=
+go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE=
+go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8=
+go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg=
+go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo=
 go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk=
 go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo=
 go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
@@ -470,8 +476,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
 golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
 golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
 golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.13.0 h1:mvySKfSWJ+UKUii46M40LOvyWfN0s2U+46/jDd0e6Ck=
-golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
+golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc=
+golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -508,8 +514,8 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
-golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc=
-golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY=
+golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -546,8 +552,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
 golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
 golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
-golang.org/x/net v0.15.0 h1:ugBLEUaxABaB5AJqW9enI0ACdci2RUd4eP51NTBvuJ8=
-golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
+golang.org/x/net v0.16.0 h1:7eBu7KsSvFDtSXUIDbh3aqlK4DPsZ1rByC8PFfBThos=
+golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -568,8 +574,8 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
-golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
+golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ=
+golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -619,8 +625,8 @@ golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o=
-golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
+golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -688,8 +694,8 @@ golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4f
 golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
 golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
-golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ=
-golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
+golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc=
+golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -756,8 +762,8 @@ google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6D
|
|||||||
google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||||
google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||||
google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230807174057-1744710a1577 h1:wukfNtZmZUurLN/atp2hiIeTKn7QJWIQdHzqmsOnAOk=
|
google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 h1:N3bU/SQDCDyD6R528GJ/PwW9KjYcJA3dgyH+MovAkIM=
|
||||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230807174057-1744710a1577/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M=
|
google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:KSqppvjFjtoCI+KGd4PELB0qLNxdJHRGqRI09mB6pQA=
|
||||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||||
@ -774,8 +780,8 @@ google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM
|
|||||||
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
|
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
|
||||||
google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
|
google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
|
||||||
google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
||||||
google.golang.org/grpc v1.57.0 h1:kfzNeI/klCGD2YPMUlaGNT3pxvYfga7smW3Vth8Zsiw=
|
google.golang.org/grpc v1.58.2 h1:SXUpjxeVF3FKrTYQI4f4KvbGD5u2xccdYdurwowix5I=
|
||||||
google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo=
|
google.golang.org/grpc v1.58.2/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0=
|
||||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||||
src/internal/common/jwt/jwt.go (new file, 39 lines)
@@ -0,0 +1,39 @@
+package jwt
+
+import (
+    "time"
+
+    "github.com/alcionai/clues"
+    jwt "github.com/golang-jwt/jwt/v5"
+)
+
+// IsJWTExpired checks if the JWT token is past expiry by analyzing the
+// "exp" claim present in the token. Token is considered expired if "exp"
+// claim < current time. Missing "exp" claim is considered as non-expired.
+// An error is returned if the supplied token is malformed.
+func IsJWTExpired(
+    rawToken string,
+) (bool, error) {
+    p := jwt.NewParser()
+
+    // Note: Call to ParseUnverified is intentional since token verification is
+    // not our objective. We only care about the embed claims in the token.
+    // We assume the token signature is valid & verified by caller stack.
+    token, _, err := p.ParseUnverified(rawToken, &jwt.RegisteredClaims{})
+    if err != nil {
+        return false, clues.Wrap(err, "invalid jwt")
+    }
+
+    t, err := token.Claims.GetExpirationTime()
+    if err != nil {
+        return false, clues.Wrap(err, "getting token expiry time")
+    }
+
+    if t == nil {
+        return false, nil
+    }
+
+    expired := t.Before(time.Now())
+
+    return expired, nil
+}
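
A quick sketch of how a caller might drive IsJWTExpired end to end. The token construction mirrors the unit tests below; the placement of this snippet inside the corso module tree is an assumption, since `internal` packages cannot be imported from outside the module.

package main

import (
    "fmt"
    "time"

    gojwt "github.com/golang-jwt/jwt/v5"

    "github.com/alcionai/corso/src/internal/common/jwt"
)

func main() {
    // Build a token that expired an hour ago, the same way the unit
    // tests below construct theirs.
    token, err := gojwt.NewWithClaims(
        gojwt.SigningMethodHS256,
        gojwt.RegisteredClaims{
            ExpiresAt: gojwt.NewNumericDate(time.Now().Add(-time.Hour)),
        }).SignedString([]byte(""))
    if err != nil {
        panic(err)
    }

    expired, err := jwt.IsJWTExpired(token)
    if err != nil {
        panic(err)
    }

    // Prints "expired: true"; a caller would refresh its download URL
    // here instead of spending a request on a dead token.
    fmt.Println("expired:", expired)
}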
src/internal/common/jwt/jwt_test.go (new file, 115 lines)
@@ -0,0 +1,115 @@
+package jwt
+
+import (
+    "testing"
+    "time"
+
+    jwt "github.com/golang-jwt/jwt/v5"
+    "github.com/stretchr/testify/assert"
+    "github.com/stretchr/testify/require"
+    "github.com/stretchr/testify/suite"
+
+    "github.com/alcionai/corso/src/internal/tester"
+)
+
+type JWTUnitSuite struct {
+    tester.Suite
+}
+
+func TestJWTUnitSuite(t *testing.T) {
+    suite.Run(t, &JWTUnitSuite{Suite: tester.NewUnitSuite(t)})
+}
+
+// createJWTToken creates a JWT token with the specified expiration time.
+func createJWTToken(
+    claims jwt.RegisteredClaims,
+) (string, error) {
+    // build claims from map
+    token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
+
+    return token.SignedString([]byte(""))
+}
+
+const (
+    // Raw test token valid for 100 years.
+    rawToken = "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9." +
+        "eyJuYmYiOiIxNjkxODE5NTc5IiwiZXhwIjoiMzk0NTUyOTE3OSIsImVuZHBvaW50dXJsTGVuZ3RoIjoiMTYw" +
+        "IiwiaXNsb29wYmFjayI6IlRydWUiLCJ2ZXIiOiJoYXNoZWRwcm9vZnRva2VuIiwicm9sZXMiOiJhbGxmaWxl" +
+        "cy53cml0ZSBhbGxzaXRlcy5mdWxsY29udHJvbCBhbGxwcm9maWxlcy5yZWFkIiwidHQiOiIxIiwiYWxnIjoi" +
+        "SFMyNTYifQ" +
+        ".signature"
+)
+
+func (suite *JWTUnitSuite) TestIsJWTExpired() {
+    table := []struct {
+        name      string
+        expect    bool
+        getToken  func() (string, error)
+        expectErr assert.ErrorAssertionFunc
+    }{
+        {
+            name: "alive token",
+            getToken: func() (string, error) {
+                return createJWTToken(
+                    jwt.RegisteredClaims{
+                        ExpiresAt: jwt.NewNumericDate(time.Now().Add(time.Hour)),
+                    })
+            },
+            expect:    false,
+            expectErr: assert.NoError,
+        },
+        {
+            name: "expired token",
+            getToken: func() (string, error) {
+                return createJWTToken(
+                    jwt.RegisteredClaims{
+                        ExpiresAt: jwt.NewNumericDate(time.Now().Add(-time.Hour)),
+                    })
+            },
+            expect:    true,
+            expectErr: assert.NoError,
+        },
+        // Test with a raw token which is not generated with go-jwt lib.
+        {
+            name: "alive raw token",
+            getToken: func() (string, error) {
+                return rawToken, nil
+            },
+            expect:    false,
+            expectErr: assert.NoError,
+        },
+        {
+            name: "alive token, missing exp claim",
+            getToken: func() (string, error) {
+                return createJWTToken(jwt.RegisteredClaims{})
+            },
+            expect:    false,
+            expectErr: assert.NoError,
+        },
+        {
+            name: "malformed token",
+            getToken: func() (string, error) {
+                return "header.claims.signature", nil
+            },
+            expect:    false,
+            expectErr: assert.Error,
+        },
+    }
+
+    for _, test := range table {
+        suite.Run(test.name, func() {
+            t := suite.T()
+
+            _, flush := tester.NewContext(t)
+            defer flush()
+
+            token, err := test.getToken()
+            require.NoError(t, err)
+
+            expired, err := IsJWTExpired(token)
+            test.expectErr(t, err)
+
+            assert.Equal(t, test.expect, expired)
+        })
+    }
+}
@@ -27,7 +27,7 @@ func NewPrefixMap(m map[string]map[string]struct{}) *PrefixMap {

 func (pm PrefixMap) AssertEqual(t *testing.T, r prefixmatcher.StringSetReader) {
     if pm.Empty() {
-        require.True(t, r.Empty(), "both prefix maps are empty")
+        require.True(t, r.Empty(), "result prefixMap should be empty but contains keys: %+v", r.Keys())
         return
     }

src/internal/common/url.go (new file, 27 lines)
@@ -0,0 +1,27 @@
+package common
+
+import (
+    "net/url"
+
+    "github.com/alcionai/clues"
+)
+
+// GetQueryParamFromURL parses an URL and returns value of the specified
+// query parameter. In case of multiple occurrences, first one is returned.
+func GetQueryParamFromURL(
+    rawURL, queryParam string,
+) (string, error) {
+    u, err := url.Parse(rawURL)
+    if err != nil {
+        return "", clues.Wrap(err, "parsing url")
+    }
+
+    qp := u.Query()
+
+    val := qp.Get(queryParam)
+    if len(val) == 0 {
+        return "", clues.New("query param not found").With("query_param", queryParam)
+    }
+
+    return val, nil
+}
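
The two helpers compose naturally: pull the token out of a download URL's query string, then check its expiry. A minimal sketch, assuming the token rides in a `tempauth` parameter as the tests below do; the real parameter name on any given URL is not guaranteed by this diff.

package main

import (
    "fmt"

    "github.com/alcionai/corso/src/internal/common"
    "github.com/alcionai/corso/src/internal/common/jwt"
)

// isDownloadURLExpired chains the two new helpers: extract the embedded
// token, then inspect its "exp" claim.
func isDownloadURLExpired(rawURL string) (bool, error) {
    token, err := common.GetQueryParamFromURL(rawURL, "tempauth")
    if err != nil {
        return false, err
    }

    return jwt.IsJWTExpired(token)
}

func main() {
    // "h.c.s" is a placeholder, so this prints an "invalid jwt" error;
    // with a real token it reports the expiry state instead.
    expired, err := isDownloadURLExpired("http://localhost:8080?tempauth=h.c.s")
    fmt.Println(expired, err)
}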
src/internal/common/url_test.go (new file, 72 lines)
@@ -0,0 +1,72 @@
+package common_test
+
+import (
+    "testing"
+
+    "github.com/stretchr/testify/assert"
+    "github.com/stretchr/testify/suite"
+
+    "github.com/alcionai/corso/src/internal/common"
+    "github.com/alcionai/corso/src/internal/tester"
+)
+
+type URLUnitSuite struct {
+    tester.Suite
+}
+
+func TestURLUnitSuite(t *testing.T) {
+    suite.Run(t, &URLUnitSuite{Suite: tester.NewUnitSuite(t)})
+}
+
+func (suite *URLUnitSuite) TestGetQueryParamFromURL() {
+    qp := "tempauth"
+    table := []struct {
+        name           string
+        rawURL         string
+        queryParam     string
+        expectedResult string
+        expect         assert.ErrorAssertionFunc
+    }{
+        {
+            name:           "valid",
+            rawURL:         "http://localhost:8080?" + qp + "=h.c.s&other=val",
+            queryParam:     qp,
+            expectedResult: "h.c.s",
+            expect:         assert.NoError,
+        },
+        {
+            name:       "query param not found",
+            rawURL:     "http://localhost:8080?other=val",
+            queryParam: qp,
+            expect:     assert.Error,
+        },
+        {
+            name:       "empty query param",
+            rawURL:     "http://localhost:8080?" + qp + "=h.c.s&other=val",
+            queryParam: "",
+            expect:     assert.Error,
+        },
+        // In case of multiple occurrences, the first occurrence of param is returned.
+        {
+            name:           "multiple occurrences",
+            rawURL:         "http://localhost:8080?" + qp + "=h.c.s&other=val&" + qp + "=h1.c1.s1",
+            queryParam:     qp,
+            expectedResult: "h.c.s",
+            expect:         assert.NoError,
+        },
+    }
+
+    for _, test := range table {
+        suite.Run(test.name, func() {
+            t := suite.T()
+
+            _, flush := tester.NewContext(t)
+            defer flush()
+
+            token, err := common.GetQueryParamFromURL(test.rawURL, test.queryParam)
+            test.expect(t, err)
+
+            assert.Equal(t, test.expectedResult, token)
+        })
+    }
+}
@@ -16,23 +16,23 @@ import (
 )

 var (
-    _ Item        = &unindexedPrefetchedItem{}
-    _ ItemModTime = &unindexedPrefetchedItem{}
-
-    _ Item        = &prefetchedItem{}
-    _ ItemInfo    = &prefetchedItem{}
-    _ ItemModTime = &prefetchedItem{}
-
-    _ Item        = &unindexedLazyItem{}
-    _ ItemModTime = &unindexedLazyItem{}
-
-    _ Item        = &lazyItem{}
-    _ ItemInfo    = &lazyItem{}
-    _ ItemModTime = &lazyItem{}
+    _ Item        = &prefetchedItem{}
+    _ ItemModTime = &prefetchedItem{}
+
+    _ Item        = &prefetchedItemWithInfo{}
+    _ ItemInfo    = &prefetchedItemWithInfo{}
+    _ ItemModTime = &prefetchedItemWithInfo{}
+
+    _ Item        = &lazyItem{}
+    _ ItemModTime = &lazyItem{}
+
+    _ Item        = &lazyItemWithInfo{}
+    _ ItemInfo    = &lazyItemWithInfo{}
+    _ ItemModTime = &lazyItemWithInfo{}
 )

 func NewDeletedItem(itemID string) Item {
-    return &unindexedPrefetchedItem{
+    return &prefetchedItem{
         id:      itemID,
         deleted: true,
         // TODO(ashmrtn): This really doesn't need to be set since deleted items are
@@ -42,11 +42,11 @@ func NewDeletedItem(itemID string) Item {
     }
 }

-func NewUnindexedPrefetchedItem(
+func NewPrefetchedItem(
     reader io.ReadCloser,
     itemID string,
     modTime time.Time,
-) (*unindexedPrefetchedItem, error) {
+) (*prefetchedItem, error) {
     r, err := readers.NewVersionedBackupReader(
         readers.SerializationFormat{Version: readers.DefaultSerializationVersion},
         reader)
@@ -54,19 +54,18 @@ func NewUnindexedPrefetchedItem(
         return nil, clues.Stack(err)
     }

-    return &unindexedPrefetchedItem{
+    return &prefetchedItem{
         id:      itemID,
         reader:  r,
         modTime: modTime,
     }, nil
 }

-// unindexedPrefetchedItem represents a single item retrieved from the remote
-// service.
+// prefetchedItem represents a single item retrieved from the remote service.
 //
 // This item doesn't implement ItemInfo so it's safe to use for items like
 // metadata that shouldn't appear in backup details.
-type unindexedPrefetchedItem struct {
+type prefetchedItem struct {
     id     string
     reader io.ReadCloser
     // modTime is the modified time of the item. It should match the modTime in
@@ -79,48 +78,49 @@ type unindexedPrefetchedItem struct {
     deleted bool
 }

-func (i unindexedPrefetchedItem) ID() string {
+func (i prefetchedItem) ID() string {
     return i.id
 }

-func (i *unindexedPrefetchedItem) ToReader() io.ReadCloser {
+func (i *prefetchedItem) ToReader() io.ReadCloser {
     return i.reader
 }

-func (i unindexedPrefetchedItem) Deleted() bool {
+func (i prefetchedItem) Deleted() bool {
     return i.deleted
 }

-func (i unindexedPrefetchedItem) ModTime() time.Time {
+func (i prefetchedItem) ModTime() time.Time {
     return i.modTime
 }

-func NewPrefetchedItem(
+func NewPrefetchedItemWithInfo(
     reader io.ReadCloser,
     itemID string,
     info details.ItemInfo,
-) (*prefetchedItem, error) {
+) (*prefetchedItemWithInfo, error) {
-    inner, err := NewUnindexedPrefetchedItem(reader, itemID, info.Modified())
+    inner, err := NewPrefetchedItem(reader, itemID, info.Modified())
     if err != nil {
         return nil, clues.Stack(err)
     }

-    return &prefetchedItem{
-        unindexedPrefetchedItem: inner,
+    return &prefetchedItemWithInfo{
+        prefetchedItem: inner,
         info: info,
     }, nil
 }

-// prefetchedItem represents a single item retrieved from the remote service.
+// prefetchedItemWithInfo represents a single item retrieved from the remote
+// service.
 //
 // This item implements ItemInfo so it should be used for things that need to
 // appear in backup details.
-type prefetchedItem struct {
-    *unindexedPrefetchedItem
+type prefetchedItemWithInfo struct {
+    *prefetchedItem
     info details.ItemInfo
 }

-func (i prefetchedItem) Info() (details.ItemInfo, error) {
+func (i prefetchedItemWithInfo) Info() (details.ItemInfo, error) {
     return i.info, nil
 }

@@ -131,14 +131,14 @@ type ItemDataGetter interface {
 ) (io.ReadCloser, *details.ItemInfo, bool, error)
 }

-func NewUnindexedLazyItem(
+func NewLazyItem(
     ctx context.Context,
     itemGetter ItemDataGetter,
     itemID string,
     modTime time.Time,
     errs *fault.Bus,
-) *unindexedLazyItem {
+) *lazyItem {
-    return &unindexedLazyItem{
+    return &lazyItem{
         ctx:        ctx,
         id:         itemID,
         itemGetter: itemGetter,
@@ -147,13 +147,13 @@ func NewUnindexedLazyItem(
     }
 }

-// unindexedLazyItem represents a single item retrieved from the remote service.
-// It lazily fetches the item's data when the first call to ToReader().Read() is
+// lazyItem represents a single item retrieved from the remote service. It
+// lazily fetches the item's data when the first call to ToReader().Read() is
 // made.
 //
 // This item doesn't implement ItemInfo so it's safe to use for items like
 // metadata that shouldn't appear in backup details.
-type unindexedLazyItem struct {
+type lazyItem struct {
     ctx context.Context
     mu  sync.Mutex
     id  string
@@ -165,19 +165,19 @@ type unindexedLazyItem struct {
     // struct so we can tell if it's been set already or not.
     //
     // This also helps with garbage collection because now the golang garbage
-    // collector can collect the lazyItem struct once the storage engine is done
-    // with it. The ItemInfo struct needs to stick around until the end of the
-    // backup though as backup details is written last.
+    // collector can collect the lazyItemWithInfo struct once the storage engine
+    // is done with it. The ItemInfo struct needs to stick around until the end of
+    // the backup though as backup details is written last.
     info *details.ItemInfo

     delInFlight bool
 }

-func (i *unindexedLazyItem) ID() string {
+func (i *lazyItem) ID() string {
     return i.id
 }

-func (i *unindexedLazyItem) ToReader() io.ReadCloser {
+func (i *lazyItem) ToReader() io.ReadCloser {
     return lazy.NewLazyReadCloser(func() (io.ReadCloser, error) {
         // Don't allow getting Item info while trying to initialize said info.
         // GetData could be a long running call, but in theory nothing should happen
@@ -219,23 +219,23 @@ func (i *unindexedLazyItem) ToReader() io.ReadCloser {
     })
 }

-func (i *unindexedLazyItem) Deleted() bool {
+func (i *lazyItem) Deleted() bool {
     return false
 }

-func (i *unindexedLazyItem) ModTime() time.Time {
+func (i *lazyItem) ModTime() time.Time {
     return i.modTime
 }

-func NewLazyItem(
+func NewLazyItemWithInfo(
     ctx context.Context,
     itemGetter ItemDataGetter,
     itemID string,
     modTime time.Time,
     errs *fault.Bus,
-) *lazyItem {
+) *lazyItemWithInfo {
-    return &lazyItem{
-        unindexedLazyItem: NewUnindexedLazyItem(
+    return &lazyItemWithInfo{
+        lazyItem: NewLazyItem(
             ctx,
             itemGetter,
             itemID,
@@ -244,17 +244,17 @@ func NewLazyItem(
     }
 }

-// lazyItem represents a single item retrieved from the remote service. It
-// lazily fetches the item's data when the first call to ToReader().Read() is
+// lazyItemWithInfo represents a single item retrieved from the remote service.
+// It lazily fetches the item's data when the first call to ToReader().Read() is
 // made.
 //
 // This item implements ItemInfo so it should be used for things that need to
 // appear in backup details.
-type lazyItem struct {
-    *unindexedLazyItem
+type lazyItemWithInfo struct {
+    *lazyItem
 }

-func (i *lazyItem) Info() (details.ItemInfo, error) {
+func (i *lazyItemWithInfo) Info() (details.ItemInfo, error) {
     i.mu.Lock()
     defer i.mu.Unlock()

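
Read as a pair, the renames leave a simple rule: the bare types stay out of backup details, the WithInfo variants land in them. A hypothetical helper in the same package, sketching where each constructor fits (the reader and info values here are placeholders, not real service data):

package data

import (
    "bytes"
    "io"
    "time"

    "github.com/alcionai/corso/src/pkg/backup/details"
)

// exampleItems sketches the constructor split: metadata sidecars use
// NewPrefetchedItem (no ItemInfo, so never indexed in backup details),
// while user-visible files use NewPrefetchedItemWithInfo.
func exampleItems(info details.ItemInfo) (Item, Item, error) {
    payload := func() io.ReadCloser {
        return io.NopCloser(bytes.NewReader([]byte("payload")))
    }

    meta, err := NewPrefetchedItem(payload(), "item-id.meta", time.Now())
    if err != nil {
        return nil, nil, err
    }

    file, err := NewPrefetchedItemWithInfo(payload(), "item-id.data", info)
    if err != nil {
        return nil, nil, err
    }

    return meta, file, nil
}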
@@ -51,7 +51,7 @@ func TestItemUnitSuite(t *testing.T) {
 }

 func (suite *ItemUnitSuite) TestUnindexedPrefetchedItem() {
-    prefetch, err := data.NewUnindexedPrefetchedItem(
+    prefetch, err := data.NewPrefetchedItem(
         io.NopCloser(bytes.NewReader([]byte{})),
         "foo",
         time.Time{})
@@ -69,7 +69,7 @@ func (suite *ItemUnitSuite) TestUnindexedLazyItem() {
     ctx, flush := tester.NewContext(t)
     defer flush()

-    lazy := data.NewUnindexedLazyItem(
+    lazy := data.NewLazyItem(
         ctx,
         nil,
         "foo",
@@ -148,7 +148,7 @@ func (suite *ItemUnitSuite) TestPrefetchedItem() {
         suite.Run(test.name, func() {
             t := suite.T()

-            item, err := data.NewPrefetchedItem(test.reader, id, test.info)
+            item, err := data.NewPrefetchedItemWithInfo(test.reader, id, test.info)
             require.NoError(t, err, clues.ToCore(err))

             assert.Equal(t, id, item.ID(), "ID")
@@ -291,7 +291,7 @@ func (suite *ItemUnitSuite) TestLazyItem() {

         defer test.mid.check(t, true)

-        item := data.NewLazyItem(
+        item := data.NewLazyItemWithInfo(
             ctx,
             test.mid,
             id,
@@ -354,7 +354,7 @@ func (suite *ItemUnitSuite) TestLazyItem_DeletedInFlight() {
     mid := &mockItemDataGetter{delInFlight: true}
     defer mid.check(t, true)

-    item := data.NewLazyItem(ctx, mid, id, now, errs)
+    item := data.NewLazyItemWithInfo(ctx, mid, id, now, errs)

     assert.Equal(t, id, item.ID(), "ID")
     assert.False(t, item.Deleted(), "deleted")
@@ -400,7 +400,7 @@ func (suite *ItemUnitSuite) TestLazyItem_InfoBeforeReadErrors() {
     mid := &mockItemDataGetter{}
     defer mid.check(t, false)

-    item := data.NewLazyItem(ctx, mid, id, now, errs)
+    item := data.NewLazyItemWithInfo(ctx, mid, id, now, errs)

     assert.Equal(t, id, item.ID(), "ID")
     assert.False(t, item.Deleted(), "deleted")
@@ -28,7 +28,6 @@ const (
     tenantIDDeprecated = "m365_tenant_hash_deprecated"

     // Event Keys
-    CorsoStart  = "Corso Start"
     RepoInit    = "Repo Init"
     RepoConnect = "Repo Connect"
     BackupStart = "Backup Start"
165
src/internal/kopia/wrapper_scale_test.go
Normal file
165
src/internal/kopia/wrapper_scale_test.go
Normal file
@ -0,0 +1,165 @@
|
|||||||
|
package kopia
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/alcionai/clues"
|
||||||
|
"github.com/kopia/kopia/repo/manifest"
|
||||||
|
"github.com/kopia/kopia/snapshot"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
|
"github.com/alcionai/corso/src/internal/data"
|
||||||
|
exchMock "github.com/alcionai/corso/src/internal/m365/service/exchange/mock"
|
||||||
|
"github.com/alcionai/corso/src/internal/tester"
|
||||||
|
"github.com/alcionai/corso/src/pkg/backup/identity"
|
||||||
|
"github.com/alcionai/corso/src/pkg/fault"
|
||||||
|
"github.com/alcionai/corso/src/pkg/path"
|
||||||
|
)
|
||||||
|
|
||||||
|
func BenchmarkHierarchyMerge(b *testing.B) {
|
||||||
|
ctx, flush := tester.NewContext(b)
|
||||||
|
defer flush()
|
||||||
|
|
||||||
|
c, err := openKopiaRepo(b, ctx)
|
||||||
|
require.NoError(b, err, clues.ToCore(err))
|
||||||
|
|
||||||
|
w := &Wrapper{c}
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
err := w.Close(ctx)
|
||||||
|
assert.NoError(b, err, clues.ToCore(err))
|
||||||
|
}()
|
||||||
|
|
||||||
|
var (
|
||||||
|
cols []data.BackupCollection
|
||||||
|
collectionLimit = 1000
|
||||||
|
collectionItemsLimit = 3
|
||||||
|
itemData = []byte("abcdefghijklmnopqrstuvwxyz")
|
||||||
|
)
|
||||||
|
|
||||||
|
baseStorePath, err := path.Build(
|
||||||
|
"a-tenant",
|
||||||
|
"a-user",
|
||||||
|
path.ExchangeService,
|
||||||
|
path.EmailCategory,
|
||||||
|
false,
|
||||||
|
"Inbox")
|
||||||
|
require.NoError(b, err, clues.ToCore(err))
|
||||||
|
|
||||||
|
for i := 0; i < collectionLimit; i++ {
|
||||||
|
folderName := fmt.Sprintf("folder%d", i)
|
||||||
|
|
||||||
|
storePath, err := baseStorePath.Append(false, folderName)
|
||||||
|
require.NoError(b, err, clues.ToCore(err))
|
||||||
|
|
||||||
|
col := exchMock.NewCollection(
|
||||||
|
storePath,
|
||||||
|
storePath,
|
||||||
|
collectionItemsLimit)
|
||||||
|
|
||||||
|
for j := 0; j < collectionItemsLimit; j++ {
|
||||||
|
itemName := fmt.Sprintf("item%d", j)
|
||||||
|
col.Names[j] = itemName
|
||||||
|
col.Data[j] = itemData
|
||||||
|
}
|
||||||
|
|
||||||
|
cols = append(cols, col)
|
||||||
|
}
|
||||||
|
|
||||||
|
reasons := []identity.Reasoner{
|
||||||
|
NewReason(
|
||||||
|
testTenant,
|
||||||
|
baseStorePath.ProtectedResource(),
|
||||||
|
baseStorePath.Service(),
|
||||||
|
baseStorePath.Category()),
|
||||||
|
}
|
||||||
|
|
||||||
|
type testCase struct {
|
||||||
|
name string
|
||||||
|
baseBackups func(base ManifestEntry) BackupBases
|
||||||
|
collections []data.BackupCollection
|
||||||
|
}
|
||||||
|
|
||||||
|
// Initial backup. All files should be considered new by kopia.
|
||||||
|
baseBackupCase := testCase{
|
||||||
|
name: "Setup",
|
||||||
|
baseBackups: func(ManifestEntry) BackupBases {
|
||||||
|
return NewMockBackupBases()
|
||||||
|
},
|
||||||
|
collections: cols,
|
||||||
|
}
|
||||||
|
|
||||||
|
runAndTestBackup := func(
|
||||||
|
t tester.TestT,
|
||||||
|
ctx context.Context,
|
||||||
|
test testCase,
|
||||||
|
base ManifestEntry,
|
||||||
|
) ManifestEntry {
|
||||||
|
bbs := test.baseBackups(base)
|
||||||
|
|
||||||
|
stats, _, _, err := w.ConsumeBackupCollections(
|
||||||
|
ctx,
|
||||||
|
reasons,
|
||||||
|
bbs,
|
||||||
|
test.collections,
|
||||||
|
nil,
|
||||||
|
nil,
|
||||||
|
true,
|
||||||
|
fault.New(true))
|
||||||
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
|
assert.Equal(t, 0, stats.IgnoredErrorCount)
|
||||||
|
assert.Equal(t, 0, stats.ErrorCount)
|
||||||
|
assert.False(t, stats.Incomplete)
|
||||||
|
|
||||||
|
snap, err := snapshot.LoadSnapshot(
|
||||||
|
ctx,
|
||||||
|
w.c,
|
||||||
|
manifest.ID(stats.SnapshotID))
|
||||||
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
|
return ManifestEntry{
|
||||||
|
Manifest: snap,
|
||||||
|
Reasons: reasons,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
b.Logf("setting up base backup\n")
|
||||||
|
|
||||||
|
base := runAndTestBackup(b, ctx, baseBackupCase, ManifestEntry{})
|
||||||
|
|
||||||
|
table := []testCase{
|
||||||
|
{
|
||||||
|
name: "Merge All",
|
||||||
|
baseBackups: func(base ManifestEntry) BackupBases {
|
||||||
|
return NewMockBackupBases().WithMergeBases(base)
|
||||||
|
},
|
||||||
|
collections: func() []data.BackupCollection {
|
||||||
|
p, err := baseStorePath.Dir()
|
||||||
|
require.NoError(b, err, clues.ToCore(err))
|
||||||
|
|
||||||
|
col := exchMock.NewCollection(p, p, 0)
|
||||||
|
col.ColState = data.NotMovedState
|
||||||
|
col.PrevPath = p
|
||||||
|
|
||||||
|
return []data.BackupCollection{col}
|
||||||
|
}(),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
b.ResetTimer()
|
||||||
|
|
||||||
|
for _, test := range table {
|
||||||
|
b.Run(fmt.Sprintf("num_dirs_%d", collectionLimit), func(b *testing.B) {
|
||||||
|
ctx, flush := tester.NewContext(b)
|
||||||
|
defer flush()
|
||||||
|
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
runAndTestBackup(b, ctx, test, base)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
@ -575,7 +575,7 @@ func (oc *Collection) streamDriveItem(
|
|||||||
// This ensures that downloads won't be attempted unless that consumer
|
// This ensures that downloads won't be attempted unless that consumer
|
||||||
// attempts to read bytes. Assumption is that kopia will check things
|
// attempts to read bytes. Assumption is that kopia will check things
|
||||||
// like file modtimes before attempting to read.
|
// like file modtimes before attempting to read.
|
||||||
oc.data <- data.NewLazyItem(
|
oc.data <- data.NewLazyItemWithInfo(
|
||||||
ctx,
|
ctx,
|
||||||
&lazyItemGetter{
|
&lazyItemGetter{
|
||||||
info: &itemInfo,
|
info: &itemInfo,
|
||||||
@ -600,7 +600,7 @@ func (oc *Collection) streamDriveItem(
|
|||||||
return progReader, nil
|
return progReader, nil
|
||||||
})
|
})
|
||||||
|
|
||||||
storeItem, err := data.NewUnindexedPrefetchedItem(
|
storeItem, err := data.NewPrefetchedItem(
|
||||||
metaReader,
|
metaReader,
|
||||||
metaFileName+metaSuffix,
|
metaFileName+metaSuffix,
|
||||||
// Metadata file should always use the latest time as
|
// Metadata file should always use the latest time as
|
||||||
|
|||||||
@ -228,16 +228,16 @@ func (c *Collections) Get(
|
|||||||
ssmb *prefixmatcher.StringSetMatchBuilder,
|
ssmb *prefixmatcher.StringSetMatchBuilder,
|
||||||
errs *fault.Bus,
|
errs *fault.Bus,
|
||||||
) ([]data.BackupCollection, bool, error) {
|
) ([]data.BackupCollection, bool, error) {
|
||||||
prevDeltas, oldPathsByDriveID, canUsePreviousBackup, err := deserializeMetadata(ctx, prevMetadata)
|
prevDriveIDToDelta, oldPrevPathsByDriveID, canUsePrevBackup, err := deserializeMetadata(ctx, prevMetadata)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, false, err
|
return nil, false, err
|
||||||
}
|
}
|
||||||
|
|
||||||
ctx = clues.Add(ctx, "can_use_previous_backup", canUsePreviousBackup)
|
ctx = clues.Add(ctx, "can_use_previous_backup", canUsePrevBackup)
|
||||||
|
|
||||||
driveTombstones := map[string]struct{}{}
|
driveTombstones := map[string]struct{}{}
|
||||||
|
|
||||||
for driveID := range oldPathsByDriveID {
|
for driveID := range oldPrevPathsByDriveID {
|
||||||
driveTombstones[driveID] = struct{}{}
|
driveTombstones[driveID] = struct{}{}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -255,76 +255,88 @@ func (c *Collections) Get(
|
|||||||
}
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
// Drive ID -> delta URL for drive
|
driveIDToDeltaLink = map[string]string{}
|
||||||
deltaURLs = map[string]string{}
|
driveIDToPrevPaths = map[string]map[string]string{}
|
||||||
// Drive ID -> folder ID -> folder path
|
numPrevItems = 0
|
||||||
folderPaths = map[string]map[string]string{}
|
|
||||||
numPrevItems = 0
|
|
||||||
)
|
)
|
||||||
|
|
||||||
for _, d := range drives {
|
for _, d := range drives {
|
||||||
var (
|
var (
|
||||||
driveID = ptr.Val(d.GetId())
|
driveID = ptr.Val(d.GetId())
|
||||||
driveName = ptr.Val(d.GetName())
|
driveName = ptr.Val(d.GetName())
|
||||||
prevDelta = prevDeltas[driveID]
|
ictx = clues.Add(
|
||||||
oldPaths = oldPathsByDriveID[driveID]
|
ctx,
|
||||||
numOldDelta = 0
|
"drive_id", driveID,
|
||||||
ictx = clues.Add(ctx, "drive_id", driveID, "drive_name", driveName)
|
"drive_name", clues.Hide(driveName))
|
||||||
|
|
||||||
|
excludedItemIDs = map[string]struct{}{}
|
||||||
|
oldPrevPaths = oldPrevPathsByDriveID[driveID]
|
||||||
|
prevDeltaLink = prevDriveIDToDelta[driveID]
|
||||||
|
|
||||||
|
// itemCollection is used to identify which collection a
|
||||||
|
// file belongs to. This is useful to delete a file from the
|
||||||
|
// collection it was previously in, in case it was moved to a
|
||||||
|
// different collection within the same delta query
|
||||||
|
// item ID -> item ID
|
||||||
|
itemCollection = map[string]string{}
|
||||||
)
|
)
|
||||||
|
|
||||||
delete(driveTombstones, driveID)
|
delete(driveTombstones, driveID)
|
||||||
|
|
||||||
|
if _, ok := driveIDToPrevPaths[driveID]; !ok {
|
||||||
|
driveIDToPrevPaths[driveID] = map[string]string{}
|
||||||
|
}
|
||||||
|
|
||||||
if _, ok := c.CollectionMap[driveID]; !ok {
|
if _, ok := c.CollectionMap[driveID]; !ok {
|
||||||
c.CollectionMap[driveID] = map[string]*Collection{}
|
c.CollectionMap[driveID] = map[string]*Collection{}
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(prevDelta) > 0 {
|
|
||||||
numOldDelta++
|
|
||||||
}
|
|
||||||
|
|
||||||
logger.Ctx(ictx).Infow(
|
logger.Ctx(ictx).Infow(
|
||||||
"previous metadata for drive",
|
"previous metadata for drive",
|
||||||
"num_paths_entries", len(oldPaths),
|
"num_paths_entries", len(oldPrevPaths))
|
||||||
"num_deltas_entries", numOldDelta)
|
|
||||||
|
|
||||||
delta, paths, excluded, err := collectItems(
|
items, du, err := c.handler.EnumerateDriveItemsDelta(
|
||||||
ictx,
|
ictx,
|
||||||
c.handler.NewItemPager(driveID, "", api.DriveItemSelectDefault()),
|
|
||||||
driveID,
|
driveID,
|
||||||
driveName,
|
prevDeltaLink)
|
||||||
c.UpdateCollections,
|
|
||||||
oldPaths,
|
|
||||||
prevDelta,
|
|
||||||
errs)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, false, err
|
return nil, false, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Used for logging below.
|
|
||||||
numDeltas := 0
|
|
||||||
|
|
||||||
// It's alright to have an empty folders map (i.e. no folders found) but not
|
// It's alright to have an empty folders map (i.e. no folders found) but not
|
||||||
// an empty delta token. This is because when deserializing the metadata we
|
// an empty delta token. This is because when deserializing the metadata we
|
||||||
// remove entries for which there is no corresponding delta token/folder. If
|
// remove entries for which there is no corresponding delta token/folder. If
|
||||||
// we leave empty delta tokens then we may end up setting the State field
|
// we leave empty delta tokens then we may end up setting the State field
|
||||||
// for collections when not actually getting delta results.
|
// for collections when not actually getting delta results.
|
||||||
if len(delta.URL) > 0 {
|
if len(du.URL) > 0 {
|
||||||
deltaURLs[driveID] = delta.URL
|
driveIDToDeltaLink[driveID] = du.URL
|
||||||
numDeltas++
|
}
|
||||||
|
|
||||||
|
newPrevPaths, err := c.UpdateCollections(
|
||||||
|
ctx,
|
||||||
|
driveID,
|
||||||
|
driveName,
|
||||||
|
items,
|
||||||
|
oldPrevPaths,
|
||||||
|
itemCollection,
|
||||||
|
excludedItemIDs,
|
||||||
|
du.Reset,
|
||||||
|
errs)
|
||||||
|
if err != nil {
|
||||||
|
return nil, false, clues.Stack(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Avoid the edge case where there's no paths but we do have a valid delta
|
// Avoid the edge case where there's no paths but we do have a valid delta
|
||||||
// token. We can accomplish this by adding an empty paths map for this
|
// token. We can accomplish this by adding an empty paths map for this
|
||||||
// drive. If we don't have this then the next backup won't use the delta
|
// drive. If we don't have this then the next backup won't use the delta
|
||||||
// token because it thinks the folder paths weren't persisted.
|
// token because it thinks the folder paths weren't persisted.
|
||||||
folderPaths[driveID] = map[string]string{}
|
driveIDToPrevPaths[driveID] = map[string]string{}
|
||||||
maps.Copy(folderPaths[driveID], paths)
|
maps.Copy(driveIDToPrevPaths[driveID], newPrevPaths)
|
||||||
|
|
||||||
logger.Ctx(ictx).Infow(
|
logger.Ctx(ictx).Infow(
|
||||||
"persisted metadata for drive",
|
"persisted metadata for drive",
|
||||||
"num_paths_entries", len(paths),
|
"num_new_paths_entries", len(newPrevPaths),
|
||||||
"num_deltas_entries", numDeltas,
|
"delta_reset", du.Reset)
|
||||||
"delta_reset", delta.Reset)
|
|
||||||
|
|
||||||
numDriveItems := c.NumItems - numPrevItems
|
numDriveItems := c.NumItems - numPrevItems
|
||||||
numPrevItems = c.NumItems
|
numPrevItems = c.NumItems
|
||||||
@ -336,7 +348,7 @@ func (c *Collections) Get(
|
|||||||
err = c.addURLCacheToDriveCollections(
|
err = c.addURLCacheToDriveCollections(
|
||||||
ictx,
|
ictx,
|
||||||
driveID,
|
driveID,
|
||||||
prevDelta,
|
prevDeltaLink,
|
||||||
errs)
|
errs)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, false, err
|
return nil, false, err
|
||||||
@ -345,8 +357,8 @@ func (c *Collections) Get(
|
|||||||
|
|
||||||
// For both cases we don't need to do set difference on folder map if the
|
// For both cases we don't need to do set difference on folder map if the
|
||||||
// delta token was valid because we should see all the changes.
|
// delta token was valid because we should see all the changes.
|
||||||
if !delta.Reset {
|
if !du.Reset {
|
||||||
if len(excluded) == 0 {
|
if len(excludedItemIDs) == 0 {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -355,7 +367,7 @@ func (c *Collections) Get(
|
|||||||
return nil, false, clues.Wrap(err, "making exclude prefix").WithClues(ictx)
|
return nil, false, clues.Wrap(err, "making exclude prefix").WithClues(ictx)
|
||||||
}
|
}
|
||||||
|
|
||||||
ssmb.Add(p.String(), excluded)
|
ssmb.Add(p.String(), excludedItemIDs)
|
||||||
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
@ -370,13 +382,11 @@ func (c *Collections) Get(
|
|||||||
foundFolders[id] = struct{}{}
|
foundFolders[id] = struct{}{}
|
||||||
}
|
}
|
||||||
|
|
||||||
for fldID, p := range oldPaths {
|
for fldID, p := range oldPrevPaths {
|
||||||
if _, ok := foundFolders[fldID]; ok {
|
if _, ok := foundFolders[fldID]; ok {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
delete(paths, fldID)
|
|
||||||
|
|
||||||
prevPath, err := path.FromDataLayerPath(p, false)
|
prevPath, err := path.FromDataLayerPath(p, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
err = clues.Wrap(err, "invalid previous path").WithClues(ictx).With("deleted_path", p)
|
err = clues.Wrap(err, "invalid previous path").WithClues(ictx).With("deleted_path", p)
|
||||||
@ -446,14 +456,14 @@ func (c *Collections) Get(
|
|||||||
// empty/missing and default to a full backup.
|
// empty/missing and default to a full backup.
|
||||||
logger.CtxErr(ctx, err).Info("making metadata collection path prefixes")
|
logger.CtxErr(ctx, err).Info("making metadata collection path prefixes")
|
||||||
|
|
||||||
return collections, canUsePreviousBackup, nil
|
return collections, canUsePrevBackup, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
md, err := graph.MakeMetadataCollection(
|
md, err := graph.MakeMetadataCollection(
|
||||||
pathPrefix,
|
pathPrefix,
|
||||||
[]graph.MetadataCollectionEntry{
|
[]graph.MetadataCollectionEntry{
|
||||||
graph.NewMetadataEntry(bupMD.PreviousPathFileName, folderPaths),
|
graph.NewMetadataEntry(bupMD.PreviousPathFileName, driveIDToPrevPaths),
|
||||||
graph.NewMetadataEntry(bupMD.DeltaURLsFileName, deltaURLs),
|
graph.NewMetadataEntry(bupMD.DeltaURLsFileName, driveIDToDeltaLink),
|
||||||
},
|
},
|
||||||
c.statusUpdater)
|
c.statusUpdater)
|
||||||
|
|
||||||
@ -466,7 +476,7 @@ func (c *Collections) Get(
|
|||||||
collections = append(collections, md)
|
collections = append(collections, md)
|
||||||
}
|
}
|
||||||
|
|
||||||
return collections, canUsePreviousBackup, nil
|
return collections, canUsePrevBackup, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// addURLCacheToDriveCollections adds an URL cache to all collections belonging to
|
// addURLCacheToDriveCollections adds an URL cache to all collections belonging to
|
||||||
@ -480,7 +490,7 @@ func (c *Collections) addURLCacheToDriveCollections(
|
|||||||
driveID,
|
driveID,
|
||||||
prevDelta,
|
prevDelta,
|
||||||
urlCacheRefreshInterval,
|
urlCacheRefreshInterval,
|
||||||
c.handler.NewItemPager(driveID, "", api.DriveItemSelectURLCache()),
|
c.handler,
|
||||||
errs)
|
errs)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@ -536,22 +546,21 @@ func updateCollectionPaths(
|
|||||||
|
|
||||||
func (c *Collections) handleDelete(
|
func (c *Collections) handleDelete(
|
||||||
itemID, driveID string,
|
itemID, driveID string,
|
||||||
oldPaths, newPaths map[string]string,
|
oldPrevPaths, currPrevPaths, newPrevPaths map[string]string,
|
||||||
isFolder bool,
|
isFolder bool,
|
||||||
excluded map[string]struct{},
|
excluded map[string]struct{},
|
||||||
itemCollection map[string]map[string]string,
|
|
||||||
invalidPrevDelta bool,
|
invalidPrevDelta bool,
|
||||||
) error {
|
) error {
|
||||||
if !isFolder {
|
if !isFolder {
|
||||||
// Try to remove the item from the Collection if an entry exists for this
|
// Try to remove the item from the Collection if an entry exists for this
|
||||||
// item. This handles cases where an item was created and deleted during the
|
// item. This handles cases where an item was created and deleted during the
|
||||||
// same delta query.
|
// same delta query.
|
||||||
if parentID, ok := itemCollection[driveID][itemID]; ok {
|
if parentID, ok := currPrevPaths[itemID]; ok {
|
||||||
if col := c.CollectionMap[driveID][parentID]; col != nil {
|
if col := c.CollectionMap[driveID][parentID]; col != nil {
|
||||||
col.Remove(itemID)
|
col.Remove(itemID)
|
||||||
}
|
}
|
||||||
|
|
||||||
delete(itemCollection[driveID], itemID)
|
delete(currPrevPaths, itemID)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Don't need to add to exclude list if the delta is invalid since the
|
// Don't need to add to exclude list if the delta is invalid since the
|
||||||
@ -572,7 +581,7 @@ func (c *Collections) handleDelete(
|
|||||||
|
|
||||||
var prevPath path.Path
|
var prevPath path.Path
|
||||||
|
|
||||||
prevPathStr, ok := oldPaths[itemID]
|
prevPathStr, ok := oldPrevPaths[itemID]
|
||||||
if ok {
|
if ok {
|
||||||
var err error
|
var err error
|
||||||
|
|
||||||
@ -589,7 +598,7 @@ func (c *Collections) handleDelete(
|
|||||||
// Nested folders also return deleted delta results so we don't have to
|
// Nested folders also return deleted delta results so we don't have to
|
||||||
// worry about doing a prefix search in the map to remove the subtree of
|
// worry about doing a prefix search in the map to remove the subtree of
|
||||||
// the deleted folder/package.
|
// the deleted folder/package.
|
||||||
delete(newPaths, itemID)
|
delete(newPrevPaths, itemID)
|
||||||
|
|
||||||
if prevPath == nil || invalidPrevDelta {
|
if prevPath == nil || invalidPrevDelta {
|
||||||
// It is possible that an item was created and deleted between two delta
|
// It is possible that an item was created and deleted between two delta
|
||||||
@ -680,21 +689,29 @@ func (c *Collections) getCollectionPath(
|
|||||||
|
|
||||||
// UpdateCollections initializes and adds the provided drive items to Collections
|
// UpdateCollections initializes and adds the provided drive items to Collections
|
||||||
// A new collection is created for every drive folder (or package).
|
// A new collection is created for every drive folder (or package).
|
||||||
// oldPaths is the unchanged data that was loaded from the metadata file.
|
// oldPrevPaths is the unchanged data that was loaded from the metadata file.
|
||||||
// newPaths starts as a copy of oldPaths and is updated as changes are found in
|
// This map is not modified during the call.
|
||||||
// the returned results.
|
// currPrevPaths starts as a copy of oldPaths and is updated as changes are found in
|
||||||
|
// the returned results. Items are added to this collection throughout the call.
|
||||||
|
// newPrevPaths, ie: the items added during this call, get returned as a map.
|
||||||
func (c *Collections) UpdateCollections(
|
func (c *Collections) UpdateCollections(
|
||||||
ctx context.Context,
|
ctx context.Context,
|
||||||
driveID, driveName string,
|
driveID, driveName string,
|
||||||
items []models.DriveItemable,
|
items []models.DriveItemable,
|
||||||
oldPaths map[string]string,
|
oldPrevPaths map[string]string,
|
||||||
newPaths map[string]string,
|
currPrevPaths map[string]string,
|
||||||
excluded map[string]struct{},
|
excluded map[string]struct{},
|
||||||
itemCollection map[string]map[string]string,
|
|
||||||
invalidPrevDelta bool,
|
invalidPrevDelta bool,
|
||||||
errs *fault.Bus,
|
errs *fault.Bus,
|
||||||
) error {
|
) (map[string]string, error) {
|
||||||
el := errs.Local()
|
var (
|
||||||
|
el = errs.Local()
|
||||||
|
newPrevPaths = map[string]string{}
|
||||||
|
)
|
||||||
|
|
||||||
|
if !invalidPrevDelta {
|
||||||
|
maps.Copy(newPrevPaths, oldPrevPaths)
|
||||||
|
}
|
||||||
|
|
||||||
for _, item := range items {
|
for _, item := range items {
|
||||||
if el.Failure() != nil {
|
if el.Failure() != nil {
|
||||||
@ -704,8 +721,12 @@ func (c *Collections) UpdateCollections(
|
|||||||
var (
|
var (
|
||||||
itemID = ptr.Val(item.GetId())
|
itemID = ptr.Val(item.GetId())
|
||||||
itemName = ptr.Val(item.GetName())
|
itemName = ptr.Val(item.GetName())
|
||||||
ictx = clues.Add(ctx, "item_id", itemID, "item_name", clues.Hide(itemName))
|
|
||||||
isFolder = item.GetFolder() != nil || item.GetPackageEscaped() != nil
|
isFolder = item.GetFolder() != nil || item.GetPackageEscaped() != nil
|
||||||
|
ictx = clues.Add(
|
||||||
|
ctx,
|
||||||
|
"item_id", itemID,
|
||||||
|
"item_name", clues.Hide(itemName),
|
||||||
|
"item_is_folder", isFolder)
|
||||||
)
|
)
|
||||||
|
|
||||||
 if item.GetMalware() != nil {
@ -727,13 +748,13 @@ func (c *Collections) UpdateCollections(
       if err := c.handleDelete(
         itemID,
         driveID,
-        oldPaths,
-        newPaths,
+        oldPrevPaths,
+        currPrevPaths,
+        newPrevPaths,
         isFolder,
         excluded,
-        itemCollection,
         invalidPrevDelta); err != nil {
-        return clues.Stack(err).WithClues(ictx)
+        return nil, clues.Stack(err).WithClues(ictx)
       }

       continue
@ -759,13 +780,13 @@ func (c *Collections) UpdateCollections(
       // Deletions are handled above so this is just moves/renames.
       var prevPath path.Path

-      prevPathStr, ok := oldPaths[itemID]
+      prevPathStr, ok := oldPrevPaths[itemID]
       if ok {
         prevPath, err = path.FromDataLayerPath(prevPathStr, false)
         if err != nil {
           el.AddRecoverable(ctx, clues.Wrap(err, "invalid previous path").
             WithClues(ictx).
-            With("path_string", prevPathStr))
+            With("prev_path_string", path.LoggableDir(prevPathStr)))
         }
       } else if item.GetRoot() != nil {
         // Root doesn't move or get renamed.
@ -775,11 +796,11 @@ func (c *Collections) UpdateCollections(
       // Moved folders don't cause delta results for any subfolders nested in
       // them. We need to go through and update paths to handle that. We only
       // update newPaths so we don't accidentally clobber previous deletes.
-      updatePath(newPaths, itemID, collectionPath.String())
+      updatePath(newPrevPaths, itemID, collectionPath.String())

       found, err := updateCollectionPaths(driveID, itemID, c.CollectionMap, collectionPath)
       if err != nil {
-        return clues.Stack(err).WithClues(ictx)
+        return nil, clues.Stack(err).WithClues(ictx)
       }

       if found {
@ -803,7 +824,7 @@ func (c *Collections) UpdateCollections(
         invalidPrevDelta,
         nil)
       if err != nil {
-        return clues.Stack(err).WithClues(ictx)
+        return nil, clues.Stack(err).WithClues(ictx)
       }

       col.driveName = driveName
@ -825,35 +846,38 @@ func (c *Collections) UpdateCollections(
     case item.GetFile() != nil:
       // Deletions are handled above so this is just moves/renames.
       if len(ptr.Val(item.GetParentReference().GetId())) == 0 {
-        return clues.New("file without parent ID").WithClues(ictx)
+        return nil, clues.New("file without parent ID").WithClues(ictx)
       }

       // Get the collection for this item.
       parentID := ptr.Val(item.GetParentReference().GetId())
       ictx = clues.Add(ictx, "parent_id", parentID)

-      collection, found := c.CollectionMap[driveID][parentID]
-      if !found {
-        return clues.New("item seen before parent folder").WithClues(ictx)
+      collection, ok := c.CollectionMap[driveID][parentID]
+      if !ok {
+        return nil, clues.New("item seen before parent folder").WithClues(ictx)
       }

-      // Delete the file from previous collection. This will
-      // only kick in if the file was moved multiple times
-      // within a single delta query
-      icID, found := itemCollection[driveID][itemID]
-      if found {
-        pcollection, found := c.CollectionMap[driveID][icID]
+      // This will only kick in if the file was moved multiple times
+      // within a single delta query. We delete the file from the previous
+      // collection so that it doesn't appear in two places.
+      prevParentContainerID, ok := currPrevPaths[itemID]
+      if ok {
+        prevColl, found := c.CollectionMap[driveID][prevParentContainerID]
         if !found {
-          return clues.New("previous collection not found").WithClues(ictx)
+          return nil, clues.New("previous collection not found").
+            With("prev_parent_container_id", prevParentContainerID).
+            WithClues(ictx)
         }

-        removed := pcollection.Remove(itemID)
-        if !removed {
-          return clues.New("removing from prev collection").WithClues(ictx)
+        if ok := prevColl.Remove(itemID); !ok {
+          return nil, clues.New("removing item from prev collection").
+            With("prev_parent_container_id", prevParentContainerID).
+            WithClues(ictx)
         }
       }

-      itemCollection[driveID][itemID] = parentID
+      currPrevPaths[itemID] = parentID

       if collection.Add(item) {
         c.NumItems++
@ -874,11 +898,13 @@ func (c *Collections) UpdateCollections(
       }

     default:
-      return clues.New("item type not supported").WithClues(ictx)
+      el.AddRecoverable(ictx, clues.New("item is neither folder nor file").
+        WithClues(ictx).
+        Label(fault.LabelForceNoBackupCreation))
     }
   }

-  return el.Failure()
+  return newPrevPaths, el.Failure()
 }

 type dirScopeChecker interface {
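The hunks above replace the drive-scoped `itemCollection` map with `currPrevPaths`, and `UpdateCollections` now returns a fresh `newPrevPaths` map rather than mutating a caller-owned one. A minimal, self-contained sketch of the in-delta move handling follows; the types are simplified stand-ins, not the repo's actual signatures:

```go
package main

import "fmt"

// trackFileParent mirrors the move handling above: currPrevPaths remembers
// the container each file was last seen in during this delta enumeration, so
// a file moved multiple times within one delta query is removed from its
// earlier collection before being added to the new one.
func trackFileParent(
	currPrevPaths map[string]string, // item ID -> last-seen parent container ID
	collections map[string]map[string]struct{}, // container ID -> item IDs
	itemID, parentID string,
) {
	if prevParent, ok := currPrevPaths[itemID]; ok {
		delete(collections[prevParent], itemID)
	}

	currPrevPaths[itemID] = parentID

	if collections[parentID] == nil {
		collections[parentID] = map[string]struct{}{}
	}

	collections[parentID][itemID] = struct{}{}
}

func main() {
	currPrevPaths := map[string]string{}
	collections := map[string]map[string]struct{}{}

	// The same file shows up under two parents within a single delta query.
	trackFileParent(currPrevPaths, collections, "file", "folderA")
	trackFileParent(currPrevPaths, collections, "file", "folderB")

	fmt.Println(len(collections["folderA"]), len(collections["folderB"])) // 0 1
}
```

Returning `newPrevPaths` also keeps the persisted path state an explicit output of the call, which the updated tests below assert on directly.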
@ -8,7 +8,6 @@ import (
   "github.com/alcionai/clues"
   "github.com/google/uuid"
   "github.com/microsoftgraph/msgraph-sdk-go/models"
-  "github.com/microsoftgraph/msgraph-sdk-go/models/odataerrors"
   "github.com/stretchr/testify/assert"
   "github.com/stretchr/testify/require"
   "github.com/stretchr/testify/suite"
@ -138,7 +137,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
   expectedStatePath := getExpectedStatePathGenerator(suite.T(), bh, tenant, testBaseDrivePath)

   tests := []struct {
-    testCase       string
+    name           string
     items          []models.DriveItemable
     inputFolderMap map[string]string
     scope          selectors.OneDriveScope
@ -148,11 +147,11 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
     expectedContainerCount int
     expectedFileCount      int
     expectedSkippedCount   int
-    expectedMetadataPaths  map[string]string
+    expectedPrevPaths      map[string]string
     expectedExcludes       map[string]struct{}
   }{
     {
-      testCase: "Invalid item",
+      name: "Invalid item",
       items: []models.DriveItemable{
         driveRootItem("root"),
         driveItem("item", "item", testBaseDrivePath, "root", false, false, false),
@ -164,13 +163,13 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
         "root": expectedStatePath(data.NotMovedState, ""),
       },
       expectedContainerCount: 1,
-      expectedMetadataPaths: map[string]string{
+      expectedPrevPaths: map[string]string{
         "root": expectedPath(""),
       },
       expectedExcludes: map[string]struct{}{},
     },
     {
-      testCase: "Single File",
+      name: "Single File",
       items: []models.DriveItemable{
         driveRootItem("root"),
         driveItem("file", "file", testBaseDrivePath, "root", true, false, false),
@ -185,13 +184,13 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
       expectedFileCount: 1,
       expectedContainerCount: 1,
       // Root folder is skipped since it's always present.
-      expectedMetadataPaths: map[string]string{
+      expectedPrevPaths: map[string]string{
         "root": expectedPath(""),
       },
       expectedExcludes: getDelList("file"),
     },
     {
-      testCase: "Single Folder",
+      name: "Single Folder",
       items: []models.DriveItemable{
         driveRootItem("root"),
         driveItem("folder", "folder", testBaseDrivePath, "root", false, true, false),
@ -203,7 +202,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
         "root": expectedStatePath(data.NotMovedState, ""),
         "folder": expectedStatePath(data.NewState, folder),
       },
-      expectedMetadataPaths: map[string]string{
+      expectedPrevPaths: map[string]string{
         "root": expectedPath(""),
         "folder": expectedPath("/folder"),
       },
@ -212,7 +211,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
       expectedExcludes: map[string]struct{}{},
     },
     {
-      testCase: "Single Package",
+      name: "Single Package",
       items: []models.DriveItemable{
         driveRootItem("root"),
         driveItem("package", "package", testBaseDrivePath, "root", false, false, true),
@ -224,7 +223,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
         "root": expectedStatePath(data.NotMovedState, ""),
         "package": expectedStatePath(data.NewState, pkg),
       },
-      expectedMetadataPaths: map[string]string{
+      expectedPrevPaths: map[string]string{
         "root": expectedPath(""),
         "package": expectedPath("/package"),
       },
@ -233,7 +232,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
       expectedExcludes: map[string]struct{}{},
     },
     {
-      testCase: "1 root file, 1 folder, 1 package, 2 files, 3 collections",
+      name: "1 root file, 1 folder, 1 package, 2 files, 3 collections",
       items: []models.DriveItemable{
         driveRootItem("root"),
         driveItem("fileInRoot", "fileInRoot", testBaseDrivePath, "root", true, false, false),
@ -253,7 +252,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
       expectedItemCount: 5,
       expectedFileCount: 3,
       expectedContainerCount: 3,
-      expectedMetadataPaths: map[string]string{
+      expectedPrevPaths: map[string]string{
         "root": expectedPath(""),
         "folder": expectedPath("/folder"),
         "package": expectedPath("/package"),
@ -261,7 +260,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
       expectedExcludes: getDelList("fileInRoot", "fileInFolder", "fileInPackage"),
     },
     {
-      testCase: "contains folder selector",
+      name: "contains folder selector",
       items: []models.DriveItemable{
         driveRootItem("root"),
         driveItem("fileInRoot", "fileInRoot", testBaseDrivePath, "root", true, false, false),
@ -286,7 +285,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
       expectedContainerCount: 3,
       // just "folder" isn't added here because the include check is done on the
       // parent path since we only check later if something is a folder or not.
-      expectedMetadataPaths: map[string]string{
+      expectedPrevPaths: map[string]string{
         "folder": expectedPath(folder),
         "subfolder": expectedPath(folderSub),
         "folder2": expectedPath(folderSub + folder),
@ -294,7 +293,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
       expectedExcludes: getDelList("fileInFolder", "fileInFolder2"),
     },
     {
-      testCase: "prefix subfolder selector",
+      name: "prefix subfolder selector",
       items: []models.DriveItemable{
         driveRootItem("root"),
         driveItem("fileInRoot", "fileInRoot", testBaseDrivePath, "root", true, false, false),
@ -317,14 +316,14 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
       expectedItemCount: 3,
       expectedFileCount: 1,
       expectedContainerCount: 2,
-      expectedMetadataPaths: map[string]string{
+      expectedPrevPaths: map[string]string{
         "subfolder": expectedPath(folderSub),
         "folder2": expectedPath(folderSub + folder),
       },
       expectedExcludes: getDelList("fileInFolder2"),
     },
     {
-      testCase: "match subfolder selector",
+      name: "match subfolder selector",
       items: []models.DriveItemable{
         driveRootItem("root"),
         driveItem("fileInRoot", "fileInRoot", testBaseDrivePath, "root", true, false, false),
@ -345,13 +344,13 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
       expectedFileCount: 1,
       expectedContainerCount: 1,
       // No child folders for subfolder so nothing here.
-      expectedMetadataPaths: map[string]string{
+      expectedPrevPaths: map[string]string{
         "subfolder": expectedPath(folderSub),
       },
       expectedExcludes: getDelList("fileInSubfolder"),
     },
     {
-      testCase: "not moved folder tree",
+      name: "not moved folder tree",
       items: []models.DriveItemable{
         driveRootItem("root"),
         driveItem("folder", "folder", testBaseDrivePath, "root", false, true, false),
@ -369,7 +368,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
       expectedItemCount: 1,
       expectedFileCount: 0,
       expectedContainerCount: 2,
-      expectedMetadataPaths: map[string]string{
+      expectedPrevPaths: map[string]string{
         "root": expectedPath(""),
         "folder": expectedPath(folder),
         "subfolder": expectedPath(folderSub),
@ -377,7 +376,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
       expectedExcludes: map[string]struct{}{},
     },
     {
-      testCase: "moved folder tree",
+      name: "moved folder tree",
       items: []models.DriveItemable{
         driveRootItem("root"),
         driveItem("folder", "folder", testBaseDrivePath, "root", false, true, false),
@ -395,7 +394,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
       expectedItemCount: 1,
       expectedFileCount: 0,
       expectedContainerCount: 2,
-      expectedMetadataPaths: map[string]string{
+      expectedPrevPaths: map[string]string{
         "root": expectedPath(""),
         "folder": expectedPath(folder),
         "subfolder": expectedPath(folderSub),
@ -403,7 +402,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
       expectedExcludes: map[string]struct{}{},
     },
     {
-      testCase: "moved folder tree with file no previous",
+      name: "moved folder tree with file no previous",
       items: []models.DriveItemable{
         driveRootItem("root"),
         driveItem("folder", "folder", testBaseDrivePath, "root", false, true, false),
@ -420,14 +419,14 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
       expectedItemCount: 2,
       expectedFileCount: 1,
       expectedContainerCount: 2,
-      expectedMetadataPaths: map[string]string{
+      expectedPrevPaths: map[string]string{
         "root": expectedPath(""),
         "folder": expectedPath("/folder2"),
       },
       expectedExcludes: getDelList("file"),
     },
     {
-      testCase: "moved folder tree with file no previous 1",
+      name: "moved folder tree with file no previous 1",
       items: []models.DriveItemable{
         driveRootItem("root"),
         driveItem("folder", "folder", testBaseDrivePath, "root", false, true, false),
@ -443,14 +442,14 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
       expectedItemCount: 2,
       expectedFileCount: 1,
       expectedContainerCount: 2,
-      expectedMetadataPaths: map[string]string{
+      expectedPrevPaths: map[string]string{
         "root": expectedPath(""),
         "folder": expectedPath(folder),
       },
       expectedExcludes: getDelList("file"),
     },
     {
-      testCase: "moved folder tree and subfolder 1",
+      name: "moved folder tree and subfolder 1",
       items: []models.DriveItemable{
         driveRootItem("root"),
         driveItem("folder", "folder", testBaseDrivePath, "root", false, true, false),
@ -470,7 +469,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
       expectedItemCount: 2,
       expectedFileCount: 0,
       expectedContainerCount: 3,
-      expectedMetadataPaths: map[string]string{
+      expectedPrevPaths: map[string]string{
         "root": expectedPath(""),
         "folder": expectedPath(folder),
         "subfolder": expectedPath("/subfolder"),
@ -478,7 +477,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
       expectedExcludes: map[string]struct{}{},
     },
     {
-      testCase: "moved folder tree and subfolder 2",
+      name: "moved folder tree and subfolder 2",
       items: []models.DriveItemable{
         driveRootItem("root"),
         driveItem("subfolder", "subfolder", testBaseDrivePath, "root", false, true, false),
@ -498,7 +497,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
       expectedItemCount: 2,
       expectedFileCount: 0,
       expectedContainerCount: 3,
-      expectedMetadataPaths: map[string]string{
+      expectedPrevPaths: map[string]string{
         "root": expectedPath(""),
         "folder": expectedPath(folder),
         "subfolder": expectedPath("/subfolder"),
@ -506,7 +505,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
       expectedExcludes: map[string]struct{}{},
     },
     {
-      testCase: "move subfolder when moving parent",
+      name: "move subfolder when moving parent",
       items: []models.DriveItemable{
         driveRootItem("root"),
         driveItem("folder2", "folder2", testBaseDrivePath, "root", false, true, false),
@ -540,7 +539,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
       expectedItemCount: 5,
       expectedFileCount: 2,
       expectedContainerCount: 4,
-      expectedMetadataPaths: map[string]string{
+      expectedPrevPaths: map[string]string{
         "root": expectedPath(""),
         "folder": expectedPath("/folder"),
         "folder2": expectedPath("/folder2"),
@ -549,7 +548,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
       expectedExcludes: getDelList("itemInSubfolder", "itemInFolder2"),
     },
     {
-      testCase: "moved folder tree multiple times",
+      name: "moved folder tree multiple times",
       items: []models.DriveItemable{
         driveRootItem("root"),
         driveItem("folder", "folder", testBaseDrivePath, "root", false, true, false),
@ -569,7 +568,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
       expectedItemCount: 2,
       expectedFileCount: 1,
       expectedContainerCount: 2,
-      expectedMetadataPaths: map[string]string{
+      expectedPrevPaths: map[string]string{
         "root": expectedPath(""),
         "folder": expectedPath("/folder2"),
         "subfolder": expectedPath("/folder2/subfolder"),
@ -577,7 +576,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
       expectedExcludes: getDelList("file"),
     },
     {
-      testCase: "deleted folder and package",
+      name: "deleted folder and package",
       items: []models.DriveItemable{
         driveRootItem("root"), // root is always present, but not necessary here
         delItem("folder", testBaseDrivePath, "root", false, true, false),
@ -598,13 +597,13 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
       expectedItemCount: 0,
       expectedFileCount: 0,
       expectedContainerCount: 1,
-      expectedMetadataPaths: map[string]string{
+      expectedPrevPaths: map[string]string{
         "root": expectedPath(""),
       },
       expectedExcludes: map[string]struct{}{},
     },
     {
-      testCase: "delete folder without previous",
+      name: "delete folder without previous",
       items: []models.DriveItemable{
         driveRootItem("root"),
         delItem("folder", testBaseDrivePath, "root", false, true, false),
@ -620,13 +619,13 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
       expectedItemCount: 0,
       expectedFileCount: 0,
       expectedContainerCount: 1,
-      expectedMetadataPaths: map[string]string{
+      expectedPrevPaths: map[string]string{
         "root": expectedPath(""),
       },
       expectedExcludes: map[string]struct{}{},
     },
     {
-      testCase: "delete folder tree move subfolder",
+      name: "delete folder tree move subfolder",
       items: []models.DriveItemable{
         driveRootItem("root"),
         delItem("folder", testBaseDrivePath, "root", false, true, false),
@ -647,14 +646,14 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
       expectedItemCount: 1,
       expectedFileCount: 0,
       expectedContainerCount: 2,
-      expectedMetadataPaths: map[string]string{
+      expectedPrevPaths: map[string]string{
         "root": expectedPath(""),
         "subfolder": expectedPath("/subfolder"),
       },
       expectedExcludes: map[string]struct{}{},
     },
     {
-      testCase: "delete file",
+      name: "delete file",
       items: []models.DriveItemable{
         driveRootItem("root"),
         delItem("item", testBaseDrivePath, "root", true, false, false),
@ -670,13 +669,13 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
       expectedItemCount: 1,
       expectedFileCount: 1,
       expectedContainerCount: 1,
-      expectedMetadataPaths: map[string]string{
+      expectedPrevPaths: map[string]string{
         "root": expectedPath(""),
       },
       expectedExcludes: getDelList("item"),
     },
     {
-      testCase: "item before parent errors",
+      name: "item before parent errors",
       items: []models.DriveItemable{
         driveRootItem("root"),
         driveItem("file", "file", testBaseDrivePath+"/folder", "folder", true, false, false),
@ -691,13 +690,11 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
       expectedItemCount: 0,
       expectedFileCount: 0,
       expectedContainerCount: 1,
-      expectedMetadataPaths: map[string]string{
-        "root": expectedPath(""),
-      },
-      expectedExcludes: map[string]struct{}{},
+      expectedPrevPaths: nil,
+      expectedExcludes: map[string]struct{}{},
     },
     {
-      testCase: "1 root file, 1 folder, 1 package, 1 good file, 1 malware",
+      name: "1 root file, 1 folder, 1 package, 1 good file, 1 malware",
       items: []models.DriveItemable{
         driveRootItem("root"),
         driveItem("fileInRoot", "fileInRoot", testBaseDrivePath, "root", true, false, false),
@ -718,7 +715,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
       expectedFileCount: 2,
       expectedContainerCount: 3,
       expectedSkippedCount: 1,
-      expectedMetadataPaths: map[string]string{
+      expectedPrevPaths: map[string]string{
         "root": expectedPath(""),
         "folder": expectedPath("/folder"),
         "package": expectedPath("/package"),
@ -727,26 +724,23 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
     },
   }

-  for _, tt := range tests {
-    suite.Run(tt.testCase, func() {
+  for _, test := range tests {
+    suite.Run(test.name, func() {
       t := suite.T()

       ctx, flush := tester.NewContext(t)
       defer flush()

       var (
         excludes = map[string]struct{}{}
-        outputFolderMap = map[string]string{}
-        itemCollection = map[string]map[string]string{
-          driveID: {},
-        }
-        errs = fault.New(true)
+        currPrevPaths = map[string]string{}
+        errs = fault.New(true)
       )

-      maps.Copy(outputFolderMap, tt.inputFolderMap)
+      maps.Copy(currPrevPaths, test.inputFolderMap)

       c := NewCollections(
-        &itemBackupHandler{api.Drives{}, user, tt.scope},
+        &itemBackupHandler{api.Drives{}, user, test.scope},
         tenant,
         idname.NewProvider(user, user),
         nil,
@ -754,25 +748,24 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {

       c.CollectionMap[driveID] = map[string]*Collection{}

-      err := c.UpdateCollections(
+      newPrevPaths, err := c.UpdateCollections(
         ctx,
         driveID,
         "General",
-        tt.items,
-        tt.inputFolderMap,
-        outputFolderMap,
+        test.items,
+        test.inputFolderMap,
+        currPrevPaths,
         excludes,
-        itemCollection,
         false,
         errs)
-      tt.expect(t, err, clues.ToCore(err))
-      assert.Equal(t, len(tt.expectedCollectionIDs), len(c.CollectionMap[driveID]), "total collections")
-      assert.Equal(t, tt.expectedItemCount, c.NumItems, "item count")
-      assert.Equal(t, tt.expectedFileCount, c.NumFiles, "file count")
-      assert.Equal(t, tt.expectedContainerCount, c.NumContainers, "container count")
-      assert.Equal(t, tt.expectedSkippedCount, len(errs.Skipped()), "skipped items")
+      test.expect(t, err, clues.ToCore(err))
+      assert.Equal(t, len(test.expectedCollectionIDs), len(c.CollectionMap[driveID]), "total collections")
+      assert.Equal(t, test.expectedItemCount, c.NumItems, "item count")
+      assert.Equal(t, test.expectedFileCount, c.NumFiles, "file count")
+      assert.Equal(t, test.expectedContainerCount, c.NumContainers, "container count")
+      assert.Equal(t, test.expectedSkippedCount, len(errs.Skipped()), "skipped items")

-      for id, sp := range tt.expectedCollectionIDs {
+      for id, sp := range test.expectedCollectionIDs {
         if !assert.Containsf(t, c.CollectionMap[driveID], id, "missing collection with id %s", id) {
           // Skip collections we don't find so we don't get an NPE.
           continue
@ -783,8 +776,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
         assert.Equalf(t, sp.prevPath, c.CollectionMap[driveID][id].PreviousPath(), "prev path for collection %s", id)
       }

-      assert.Equal(t, tt.expectedMetadataPaths, outputFolderMap, "metadata paths")
-      assert.Equal(t, tt.expectedExcludes, excludes, "exclude list")
+      assert.Equal(t, test.expectedPrevPaths, newPrevPaths, "metadata paths")
+      assert.Equal(t, test.expectedExcludes, excludes, "exclude list")
     })
   }
 }
@ -1306,7 +1299,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
             driveItem("folder", "folder", driveBasePath1, "root", false, true, false),
             driveItem("file", "file", driveBasePath1+"/folder", "folder", true, false, false),
           },
           DeltaLink: &delta,
+          ResetDelta: true,
         },
       },
     },
@ -1344,7 +1338,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
             driveItem("file", "file", driveBasePath1+"/folder", "folder", true, false, false),
             driveItem("file", "file2", driveBasePath1+"/folder", "folder", true, false, false),
           },
           DeltaLink: &delta,
+          ResetDelta: true,
         },
       },
     },
@ -1421,7 +1416,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
             driveItem("folder", "folder", driveBasePath1, "root", false, true, false),
             driveItem("file", "file", driveBasePath1+"/folder", "folder", true, false, false),
           },
           DeltaLink: &empty, // probably will never happen with graph
+          ResetDelta: true,
         },
       },
     },
@ -1458,7 +1454,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
             driveItem("folder", "folder", driveBasePath1, "root", false, true, false),
             driveItem("file", "file", driveBasePath1+"/folder", "folder", true, false, false),
           },
           NextLink: &next,
+          ResetDelta: true,
         },
         {
           Values: []models.DriveItemable{
@ -1466,7 +1463,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
             driveItem("folder", "folder", driveBasePath1, "root", false, true, false),
             driveItem("file2", "file2", driveBasePath1+"/folder", "folder", true, false, false),
           },
           DeltaLink: &delta,
+          ResetDelta: true,
         },
       },
     },
@ -1508,7 +1506,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
             driveItem("folder", "folder", driveBasePath1, "root", false, true, false),
             driveItem("file", "file", driveBasePath1+"/folder", "folder", true, false, false),
           },
           DeltaLink: &delta,
+          ResetDelta: true,
         },
       },
       driveID2: {
@ -1518,7 +1517,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
             driveItem("folder2", "folder", driveBasePath2, "root2", false, true, false),
             driveItem("file2", "file", driveBasePath2+"/folder", "folder2", true, false, false),
           },
           DeltaLink: &delta2,
+          ResetDelta: true,
         },
       },
     },
@ -1570,7 +1570,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
             driveItem("folder", "folder", driveBasePath1, "root", false, true, false),
             driveItem("file", "file", driveBasePath1+"/folder", "folder", true, false, false),
           },
           DeltaLink: &delta,
+          ResetDelta: true,
         },
       },
       driveID2: {
@ -1580,7 +1581,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
             driveItem("folder", "folder", driveBasePath2, "root", false, true, false),
             driveItem("file2", "file", driveBasePath2+"/folder", "folder", true, false, false),
           },
           DeltaLink: &delta2,
+          ResetDelta: true,
         },
       },
     },
@ -1638,87 +1640,6 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
       expectedFolderPaths: nil,
       expectedDelList: nil,
     },
-    {
-      name: "OneDrive_OneItemPage_DeltaError",
-      drives: []models.Driveable{drive1},
-      items: map[string][]apiMock.PagerResult[models.DriveItemable]{
-        driveID1: {
-          {
-            Err: getDeltaError(),
-          },
-          {
-            Values: []models.DriveItemable{
-              driveRootItem("root"),
-              driveItem("file", "file", driveBasePath1, "root", true, false, false),
-            },
-            DeltaLink: &delta,
-          },
-        },
-      },
-      canUsePreviousBackup: true,
-      errCheck: assert.NoError,
-      expectedCollections: map[string]map[data.CollectionState][]string{
-        rootFolderPath1: {data.NotMovedState: {"file"}},
-      },
-      expectedDeltaURLs: map[string]string{
-        driveID1: delta,
-      },
-      expectedFolderPaths: map[string]map[string]string{
-        driveID1: {
-          "root": rootFolderPath1,
-        },
-      },
-      expectedDelList: pmMock.NewPrefixMap(map[string]map[string]struct{}{}),
-      doNotMergeItems: map[string]bool{
-        rootFolderPath1: true,
-      },
-    },
-    {
-      name: "OneDrive_TwoItemPage_DeltaError",
-      drives: []models.Driveable{drive1},
-      items: map[string][]apiMock.PagerResult[models.DriveItemable]{
-        driveID1: {
-          {
-            Err: getDeltaError(),
-          },
-          {
-            Values: []models.DriveItemable{
-              driveRootItem("root"),
-              driveItem("file", "file", driveBasePath1, "root", true, false, false),
-            },
-            NextLink: &next,
-          },
-          {
-            Values: []models.DriveItemable{
-              driveRootItem("root"),
-              driveItem("folder", "folder", driveBasePath1, "root", false, true, false),
-              driveItem("file2", "file", driveBasePath1+"/folder", "folder", true, false, false),
-            },
-            DeltaLink: &delta,
-          },
-        },
-      },
-      canUsePreviousBackup: true,
-      errCheck: assert.NoError,
-      expectedCollections: map[string]map[data.CollectionState][]string{
-        rootFolderPath1: {data.NotMovedState: {"file"}},
-        expectedPath1("/folder"): {data.NewState: {"folder", "file2"}},
-      },
-      expectedDeltaURLs: map[string]string{
-        driveID1: delta,
-      },
-      expectedFolderPaths: map[string]map[string]string{
-        driveID1: {
-          "root": rootFolderPath1,
-          "folder": folderPath1,
-        },
-      },
-      expectedDelList: pmMock.NewPrefixMap(map[string]map[string]struct{}{}),
-      doNotMergeItems: map[string]bool{
-        rootFolderPath1: true,
-        folderPath1: true,
-      },
-    },
     {
       name: "OneDrive_TwoItemPage_NoDeltaError",
       drives: []models.Driveable{drive1},
@ -1771,16 +1692,14 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
       drives: []models.Driveable{drive1},
       items: map[string][]apiMock.PagerResult[models.DriveItemable]{
         driveID1: {
-          {
-            Err: getDeltaError(),
-          },
           {
             Values: []models.DriveItemable{
               driveRootItem("root"),
               driveItem("folder2", "folder2", driveBasePath1, "root", false, true, false),
               driveItem("file", "file", driveBasePath1+"/folder2", "folder2", true, false, false),
             },
             DeltaLink: &delta,
+            ResetDelta: true,
           },
         },
       },
@ -1818,16 +1737,14 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
       drives: []models.Driveable{drive1},
       items: map[string][]apiMock.PagerResult[models.DriveItemable]{
         driveID1: {
-          {
-            Err: getDeltaError(),
-          },
           {
             Values: []models.DriveItemable{
               driveRootItem("root"),
               driveItem("folder2", "folder", driveBasePath1, "root", false, true, false),
               driveItem("file", "file", driveBasePath1+"/folder", "folder2", true, false, false),
             },
             DeltaLink: &delta,
+            ResetDelta: true,
           },
         },
       },
@ -1884,7 +1801,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
             driveItem("file2", "file2", driveBasePath1+"/folder", "folder", true, false, false),
             malwareItem("malware2", "malware2", driveBasePath1+"/folder", "folder", true, false, false),
           },
           DeltaLink: &delta,
+          ResetDelta: true,
         },
       },
     },
@ -1914,13 +1832,10 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
       expectedSkippedCount: 2,
     },
     {
-      name: "One Drive Delta Error Deleted Folder In New Results",
+      name: "One Drive Deleted Folder In New Results",
       drives: []models.Driveable{drive1},
       items: map[string][]apiMock.PagerResult[models.DriveItemable]{
         driveID1: {
-          {
-            Err: getDeltaError(),
-          },
           {
             Values: []models.DriveItemable{
               driveRootItem("root"),
@ -1937,7 +1852,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
             delItem("folder2", driveBasePath1, "root", false, true, false),
             delItem("file2", driveBasePath1, "root", true, false, false),
           },
           DeltaLink: &delta2,
+          ResetDelta: true,
         },
       },
     },
@ -1972,19 +1888,17 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
       },
     },
     {
-      name: "One Drive Delta Error Random Folder Delete",
+      name: "One Drive Random Folder Delete",
       drives: []models.Driveable{drive1},
       items: map[string][]apiMock.PagerResult[models.DriveItemable]{
         driveID1: {
-          {
-            Err: getDeltaError(),
-          },
           {
             Values: []models.DriveItemable{
               driveRootItem("root"),
               delItem("folder", driveBasePath1, "root", false, true, false),
             },
             DeltaLink: &delta,
+            ResetDelta: true,
           },
         },
       },
@ -2015,19 +1929,17 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
       },
     },
     {
-      name: "One Drive Delta Error Random Item Delete",
+      name: "One Drive Random Item Delete",
      drives: []models.Driveable{drive1},
       items: map[string][]apiMock.PagerResult[models.DriveItemable]{
         driveID1: {
-          {
-            Err: getDeltaError(),
-          },
           {
             Values: []models.DriveItemable{
               driveRootItem("root"),
               delItem("file", driveBasePath1, "root", true, false, false),
             },
             DeltaLink: &delta,
+            ResetDelta: true,
           },
         },
       },
@ -2073,7 +1985,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
             delItem("folder", driveBasePath1, "root", false, true, false),
             delItem("file", driveBasePath1, "root", true, false, false),
           },
           DeltaLink: &delta2,
+          ResetDelta: true,
         },
       },
     },
@ -2116,7 +2029,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
             driveRootItem("root"),
             delItem("file", driveBasePath1, "root", true, false, false),
           },
           DeltaLink: &delta,
+          ResetDelta: true,
         },
       },
     },
@ -2154,7 +2068,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
             driveRootItem("root"),
             delItem("folder", driveBasePath1, "root", false, true, false),
           },
           DeltaLink: &delta,
+          ResetDelta: true,
         },
       },
     },
@ -2189,7 +2104,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
             driveRootItem("root"),
             delItem("file", driveBasePath1, "root", true, false, false),
           },
           DeltaLink: &delta,
+          ResetDelta: true,
         },
       },
     },
@ -2271,6 +2187,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
       mbh := mock.DefaultOneDriveBH("a-user")
       mbh.DrivePagerV = mockDrivePager
       mbh.ItemPagerV = itemPagers
+      mbh.DriveItemEnumeration = mock.PagerResultToEDID(test.items)

       c := NewCollections(
         mbh,
@ -2501,121 +2418,6 @@ func delItem(
   return item
 }

-func getDeltaError() error {
-  syncStateNotFound := "SyncStateNotFound"
-  me := odataerrors.NewMainError()
-  me.SetCode(&syncStateNotFound)
-
-  deltaError := odataerrors.NewODataError()
-  deltaError.SetErrorEscaped(me)
-
-  return deltaError
-}
-
-func (suite *OneDriveCollectionsUnitSuite) TestCollectItems() {
-  next := "next"
-  delta := "delta"
-  prevDelta := "prev-delta"
-
-  table := []struct {
-    name string
-    items []apiMock.PagerResult[models.DriveItemable]
-    deltaURL string
-    prevDeltaSuccess bool
-    prevDelta string
-    err error
-  }{
-    {
-      name: "delta on first run",
-      deltaURL: delta,
-      items: []apiMock.PagerResult[models.DriveItemable]{
-        {DeltaLink: &delta},
-      },
-      prevDeltaSuccess: true,
-      prevDelta: prevDelta,
-    },
-    {
-      name: "empty prev delta",
-      deltaURL: delta,
-      items: []apiMock.PagerResult[models.DriveItemable]{
-        {DeltaLink: &delta},
-      },
-      prevDeltaSuccess: false,
-      prevDelta: "",
-    },
-    {
-      name: "next then delta",
-      deltaURL: delta,
-      items: []apiMock.PagerResult[models.DriveItemable]{
-        {NextLink: &next},
-        {DeltaLink: &delta},
-      },
-      prevDeltaSuccess: true,
-      prevDelta: prevDelta,
-    },
-    {
-      name: "invalid prev delta",
-      deltaURL: delta,
-      items: []apiMock.PagerResult[models.DriveItemable]{
-        {Err: getDeltaError()},
-        {DeltaLink: &delta}, // works on retry
-      },
-      prevDelta: prevDelta,
-      prevDeltaSuccess: false,
-    },
-    {
-      name: "fail a normal delta query",
-      items: []apiMock.PagerResult[models.DriveItemable]{
-        {NextLink: &next},
-        {Err: assert.AnError},
-      },
-      prevDelta: prevDelta,
-      prevDeltaSuccess: true,
-      err: assert.AnError,
-    },
-  }
-  for _, test := range table {
-    suite.Run(test.name, func() {
-      t := suite.T()
-
-      ctx, flush := tester.NewContext(t)
-      defer flush()
-
-      itemPager := &apiMock.DeltaPager[models.DriveItemable]{
-        ToReturn: test.items,
-      }
-
-      collectorFunc := func(
-        ctx context.Context,
-        driveID, driveName string,
-        driveItems []models.DriveItemable,
-        oldPaths map[string]string,
-        newPaths map[string]string,
-        excluded map[string]struct{},
-        itemCollection map[string]map[string]string,
-        doNotMergeItems bool,
-        errs *fault.Bus,
-      ) error {
-        return nil
-      }
-
-      delta, _, _, err := collectItems(
-        ctx,
-        itemPager,
-        "",
-        "General",
-        collectorFunc,
-        map[string]string{},
-        test.prevDelta,
-        fault.New(true))
-
-      require.ErrorIs(t, err, test.err, "delta fetch err", clues.ToCore(err))
-      require.Equal(t, test.deltaURL, delta.URL, "delta url")
-      require.Equal(t, !test.prevDeltaSuccess, delta.Reset, "delta reset")
-    })
-  }
-}

 func (suite *OneDriveCollectionsUnitSuite) TestAddURLCacheToDriveCollections() {
   driveID := "test-drive"
   collCount := 3
@ -38,6 +38,7 @@ type BackupHandler interface {
   GetItemPermissioner
   GetItemer
   NewDrivePagerer
+  EnumerateDriveItemsDeltaer

   // PathPrefix constructs the service and category specific path prefix for
   // the given values.
@ -52,7 +53,7 @@ type BackupHandler interface {

   // ServiceCat returns the service and category used by this implementation.
   ServiceCat() (path.ServiceType, path.CategoryType)
-  NewItemPager(driveID, link string, fields []string) api.DeltaPager[models.DriveItemable]
   // FormatDisplayPath creates a human-readable string to represent the
   // provided path.
   FormatDisplayPath(driveName string, parentPath *path.Builder) string
@ -81,6 +82,17 @@ type GetItemer interface {
   ) (models.DriveItemable, error)
 }

+type EnumerateDriveItemsDeltaer interface {
+  EnumerateDriveItemsDelta(
+    ctx context.Context,
+    driveID, prevDeltaLink string,
+  ) (
+    []models.DriveItemable,
+    api.DeltaUpdate,
+    error,
+  )
+}
+
 // ---------------------------------------------------------------------------
 // restore
 // ---------------------------------------------------------------------------
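The new `EnumerateDriveItemsDeltaer` requirement folds the old per-page pager plumbing (`NewItemPager`, removed above) into a single enumeration call. A sketch of a test double that could satisfy it; the mock's shape is an assumption (only the interface itself comes from the diff), and `api.DeltaUpdate` is assumed to carry the URL/reset state of the local `DeltaUpdate` struct deleted further below:

```go
package drive

import (
	"context"

	"github.com/microsoftgraph/msgraph-sdk-go/models"

	"github.com/alcionai/corso/src/pkg/services/m365/api"
)

// mockEnumerator is a hypothetical test double for EnumerateDriveItemsDeltaer.
// It returns canned items, a canned delta update, and a canned error,
// regardless of which drive or previous delta link is requested.
type mockEnumerator struct {
	items []models.DriveItemable
	du    api.DeltaUpdate
	err   error
}

func (m mockEnumerator) EnumerateDriveItemsDelta(
	ctx context.Context,
	driveID, prevDeltaLink string,
) ([]models.DriveItemable, api.DeltaUpdate, error) {
	return m.items, m.du, m.err
}
```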
@ -10,17 +10,24 @@ import (
   "github.com/microsoftgraph/msgraph-sdk-go/models"
   "golang.org/x/exp/maps"

+  "github.com/alcionai/corso/src/internal/common"
+  jwt "github.com/alcionai/corso/src/internal/common/jwt"
   "github.com/alcionai/corso/src/internal/common/ptr"
   "github.com/alcionai/corso/src/internal/common/readers"
   "github.com/alcionai/corso/src/internal/common/str"
   "github.com/alcionai/corso/src/internal/m365/collection/drive/metadata"
   "github.com/alcionai/corso/src/internal/m365/graph"
+  "github.com/alcionai/corso/src/pkg/logger"
   "github.com/alcionai/corso/src/pkg/services/m365/api"
 )

 const (
   acceptHeaderKey = "Accept"
   acceptHeaderValue = "*/*"
+
+  // JWTQueryParam is a query param embedded in graph download URLs which
+  // holds the JWT token.
+  JWTQueryParam = "tempauth"
 )

 // downloadUrlKeys is used to find the download URL in a DriveItem response.
@ -121,6 +128,19 @@ func downloadFile(
     return nil, clues.New("empty file url").WithClues(ctx)
   }

+  // Precheck for url expiry before we make a call to graph to download the
+  // file. If the url is expired, we can return early and save a call to graph.
+  //
+  // Ignore all errors encountered during the check. We can rely on graph to
+  // return errors on malformed urls. Ignoring errors also future-proofs against
+  // any sudden graph changes, e.g. if graph decides to embed the token in a
+  // new query param.
+  expired, err := isURLExpired(ctx, url)
+  if err == nil && expired {
+    logger.Ctx(ctx).Debug("expired item download url")
+    return nil, graph.ErrTokenExpired
+  }
+
   rc, err := readers.NewResetRetryHandler(
     ctx,
     &downloadWithRetries{
@ -193,3 +213,27 @@ func setName(orig models.ItemReferenceable, driveName string) models.ItemReferen
|
|||||||
|
|
||||||
return orig
|
return orig
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// isURLExpired inspects the jwt token embed in the item download url
|
||||||
|
// and returns true if it is expired.
|
||||||
|
func isURLExpired(
|
||||||
|
ctx context.Context,
|
||||||
|
url string,
|
||||||
|
) (bool, error) {
|
||||||
|
// Extract the raw JWT string from the download url.
|
||||||
|
rawJWT, err := common.GetQueryParamFromURL(url, JWTQueryParam)
|
||||||
|
if err != nil {
|
||||||
|
logger.CtxErr(ctx, err).Info("query param not found")
|
||||||
|
|
||||||
|
return false, clues.Stack(err).WithClues(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
expired, err := jwt.IsJWTExpired(rawJWT)
|
||||||
|
if err != nil {
|
||||||
|
logger.CtxErr(ctx, err).Info("checking jwt expiry")
|
||||||
|
|
||||||
|
return false, clues.Stack(err).WithClues(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
return expired, nil
|
||||||
|
}
|
||||||
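The precheck above works because graph item download URLs carry a JWT in the tempauth query parameter, and a JWT's payload segment is plain base64url-encoded JSON whose exp claim can be compared against the clock without any network call. Below is a minimal standalone sketch of that technique using only the standard library; common.GetQueryParamFromURL and jwt.IsJWTExpired are Corso internals whose implementations are not shown here, so this illustrates the idea rather than reproducing their source.

package main

import (
	"encoding/base64"
	"encoding/json"
	"errors"
	"fmt"
	"net/url"
	"strings"
	"time"
)

// jwtExpired decodes the (unverified) payload segment of a JWT and reports
// whether its "exp" claim lies in the past. Signature checks are irrelevant
// here: the goal is only a cheap local hint before spending a network call.
func jwtExpired(rawJWT string, now time.Time) (bool, error) {
	parts := strings.Split(rawJWT, ".")
	if len(parts) != 3 {
		return false, errors.New("malformed jwt")
	}

	payload, err := base64.RawURLEncoding.DecodeString(parts[1])
	if err != nil {
		return false, err
	}

	var claims struct {
		Exp int64 `json:"exp"`
	}

	if err := json.Unmarshal(payload, &claims); err != nil {
		return false, err
	}

	return now.Unix() >= claims.Exp, nil
}

// urlExpired pulls the token out of the download url's query string and
// checks it; "tempauth" matches the JWTQueryParam constant in the diff.
func urlExpired(downloadURL, param string, now time.Time) (bool, error) {
	u, err := url.Parse(downloadURL)
	if err != nil {
		return false, err
	}

	raw := u.Query().Get(param)
	if raw == "" {
		return false, errors.New("query param not found")
	}

	return jwtExpired(raw, now)
}

func main() {
	// Build a hypothetical token whose exp claim is long past (Sept 2001).
	payload := base64.RawURLEncoding.EncodeToString([]byte(`{"exp":1000000000}`))
	token := "header." + payload + ".signature"

	expired, err := urlExpired("https://example.com/dl?tempauth="+token, "tempauth", time.Now())
	fmt.Println(expired, err) // true <nil>
}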
@@ -1,142 +0,0 @@
-package drive
-
-import (
-    "context"
-
-    "github.com/microsoftgraph/msgraph-sdk-go/models"
-    "golang.org/x/exp/maps"
-
-    "github.com/alcionai/corso/src/internal/m365/graph"
-    "github.com/alcionai/corso/src/pkg/fault"
-    "github.com/alcionai/corso/src/pkg/logger"
-    "github.com/alcionai/corso/src/pkg/services/m365/api"
-)
-
-// DeltaUpdate holds the results of a current delta token. It normally
-// gets produced when aggregating the addition and removal of items in
-// a delta-queryable folder.
-// FIXME: This is same as exchange.api.DeltaUpdate
-type DeltaUpdate struct {
-    // the deltaLink itself
-    URL string
-    // true if the old delta was marked as invalid
-    Reset bool
-}
-
-// itemCollector functions collect the items found in a drive
-type itemCollector func(
-    ctx context.Context,
-    driveID, driveName string,
-    driveItems []models.DriveItemable,
-    oldPaths map[string]string,
-    newPaths map[string]string,
-    excluded map[string]struct{},
-    itemCollections map[string]map[string]string,
-    validPrevDelta bool,
-    errs *fault.Bus,
-) error
-
-// collectItems will enumerate all items in the specified drive and hand them to the
-// provided `collector` method
-func collectItems(
-    ctx context.Context,
-    pager api.DeltaPager[models.DriveItemable],
-    driveID, driveName string,
-    collector itemCollector,
-    oldPaths map[string]string,
-    prevDelta string,
-    errs *fault.Bus,
-) (
-    DeltaUpdate,
-    map[string]string, // newPaths
-    map[string]struct{}, // excluded
-    error,
-) {
-    var (
-        newDeltaURL      = ""
-        newPaths         = map[string]string{}
-        excluded         = map[string]struct{}{}
-        invalidPrevDelta = len(prevDelta) == 0
-
-        // itemCollection is used to identify which collection a
-        // file belongs to. This is useful to delete a file from the
-        // collection it was previously in, in case it was moved to a
-        // different collection within the same delta query
-        // drive ID -> item ID -> item ID
-        itemCollection = map[string]map[string]string{
-            driveID: {},
-        }
-    )
-
-    if !invalidPrevDelta {
-        maps.Copy(newPaths, oldPaths)
-        pager.SetNextLink(prevDelta)
-    }
-
-    for {
-        // assume delta urls here, which allows single-token consumption
-        page, err := pager.GetPage(graph.ConsumeNTokens(ctx, graph.SingleGetOrDeltaLC))
-
-        if graph.IsErrInvalidDelta(err) {
-            logger.Ctx(ctx).Infow("Invalid previous delta link", "link", prevDelta)
-
-            invalidPrevDelta = true
-            newPaths = map[string]string{}
-
-            pager.Reset(ctx)
-
-            continue
-        }
-
-        if err != nil {
-            return DeltaUpdate{}, nil, nil, graph.Wrap(ctx, err, "getting page")
-        }
-
-        vals := page.GetValue()
-
-        err = collector(
-            ctx,
-            driveID,
-            driveName,
-            vals,
-            oldPaths,
-            newPaths,
-            excluded,
-            itemCollection,
-            invalidPrevDelta,
-            errs)
-        if err != nil {
-            return DeltaUpdate{}, nil, nil, err
-        }
-
-        nextLink, deltaLink := api.NextAndDeltaLink(page)
-
-        if len(deltaLink) > 0 {
-            newDeltaURL = deltaLink
-        }
-
-        // Check if there are more items
-        if len(nextLink) == 0 {
-            break
-        }
-
-        logger.Ctx(ctx).Debugw("Found nextLink", "link", nextLink)
-        pager.SetNextLink(nextLink)
-    }
-
-    return DeltaUpdate{URL: newDeltaURL, Reset: invalidPrevDelta}, newPaths, excluded, nil
-}
-
-// newItem initializes a `models.DriveItemable` that can be used as input to `createItem`
-func newItem(name string, folder bool) *models.DriveItem {
-    itemToCreate := models.NewDriveItem()
-    itemToCreate.SetName(&name)
-
-    if folder {
-        itemToCreate.SetFolder(models.NewFolder())
-    } else {
-        itemToCreate.SetFile(models.NewFile())
-    }
-
-    return itemToCreate
-}
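The deleted collectItems loop is the standard Graph delta-paging pattern: follow the next link while pages remain, persist the delta link from the final page, and restart from scratch when the stored delta token is rejected. A compressed sketch of that control flow follows; the getPage callback and page struct are hypothetical stand-ins for the SDK pager, shown only to make the loop's invariants explicit.

package main

import (
	"context"
	"errors"
	"fmt"
)

type page struct {
	items     []string
	nextLink  string // set while more pages remain in this enumeration
	deltaLink string // set on the final page; persist it for the next sync
}

var errInvalidDelta = errors.New("invalid delta token")

// getPage is a stand-in for a Graph delta pager call.
type getPage func(ctx context.Context, link string) (page, error)

// enumerate walks one full delta enumeration, resetting when the previous
// delta token is rejected, and reports whether such a reset happened.
func enumerate(ctx context.Context, get getPage, prevDelta string) ([]string, string, bool, error) {
	var (
		all   []string
		link  = prevDelta
		reset = prevDelta == ""
	)

	for {
		pg, err := get(ctx, link)
		if errors.Is(err, errInvalidDelta) {
			// Token rejected: drop accumulated state and restart from scratch.
			all, link, reset = nil, "", true
			continue
		}
		if err != nil {
			return nil, "", false, err
		}

		all = append(all, pg.items...)

		if pg.nextLink == "" {
			return all, pg.deltaLink, reset, nil
		}

		link = pg.nextLink
	}
}

func main() {
	pages := []page{
		{items: []string{"a", "b"}, nextLink: "next-1"},
		{items: []string{"c"}, deltaLink: "delta-final"},
	}
	i := 0
	get := func(ctx context.Context, link string) (page, error) {
		p := pages[i]
		i++
		return p, nil
	}

	items, delta, reset, _ := enumerate(context.Background(), get, "")
	fmt.Println(items, delta, reset) // [a b c] delta-final true
}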
@@ -88,13 +88,6 @@ func (h itemBackupHandler) NewDrivePager(
     return h.ac.NewUserDrivePager(resourceOwner, fields)
 }
 
-func (h itemBackupHandler) NewItemPager(
-    driveID, link string,
-    fields []string,
-) api.DeltaPager[models.DriveItemable] {
-    return h.ac.NewDriveItemDeltaPager(driveID, link, fields)
-}
-
 func (h itemBackupHandler) AugmentItemInfo(
     dii details.ItemInfo,
     resource idname.Provider,
@@ -141,6 +134,13 @@ func (h itemBackupHandler) IncludesDir(dir string) bool {
     return h.scope.Matches(selectors.OneDriveFolder, dir)
 }
 
+func (h itemBackupHandler) EnumerateDriveItemsDelta(
+    ctx context.Context,
+    driveID, prevDeltaLink string,
+) ([]models.DriveItemable, api.DeltaUpdate, error) {
+    return h.ac.EnumerateDriveItemsDelta(ctx, driveID, prevDeltaLink)
+}
+
 // ---------------------------------------------------------------------------
 // Restore
 // ---------------------------------------------------------------------------
@@ -16,12 +16,11 @@ import (
 
     "github.com/alcionai/corso/src/internal/common/dttm"
     "github.com/alcionai/corso/src/internal/common/ptr"
+    "github.com/alcionai/corso/src/internal/m365/graph"
    "github.com/alcionai/corso/src/internal/tester"
     "github.com/alcionai/corso/src/internal/tester/tconfig"
     "github.com/alcionai/corso/src/pkg/control"
     "github.com/alcionai/corso/src/pkg/control/testdata"
-    "github.com/alcionai/corso/src/pkg/fault"
-    "github.com/alcionai/corso/src/pkg/selectors"
     "github.com/alcionai/corso/src/pkg/services/m365/api"
 )
 
@@ -49,6 +48,8 @@ func (suite *ItemIntegrationSuite) SetupSuite() {
     suite.service = loadTestService(t)
     suite.user = tconfig.SecondaryM365UserID(t)
 
+    graph.InitializeConcurrencyLimiter(ctx, true, 4)
+
     pager := suite.service.ac.Drives().NewUserDrivePager(suite.user, nil)
 
     odDrives, err := api.GetAllDrives(ctx, pager)
@@ -60,83 +61,6 @@ func (suite *ItemIntegrationSuite) SetupSuite() {
     suite.userDriveID = ptr.Val(odDrives[0].GetId())
 }
 
-// TestItemReader is an integration test that makes a few assumptions
-// about the test environment
-// 1) It assumes the test user has a drive
-// 2) It assumes the drive has a file it can use to test `driveItemReader`
-// The test checks these in below
-func (suite *ItemIntegrationSuite) TestItemReader_oneDrive() {
-    t := suite.T()
-
-    ctx, flush := tester.NewContext(t)
-    defer flush()
-
-    var driveItem models.DriveItemable
-    // This item collector tries to find "a" drive item that is a non-empty
-    // file to test the reader function
-    itemCollector := func(
-        _ context.Context,
-        _, _ string,
-        items []models.DriveItemable,
-        _ map[string]string,
-        _ map[string]string,
-        _ map[string]struct{},
-        _ map[string]map[string]string,
-        _ bool,
-        _ *fault.Bus,
-    ) error {
-        if driveItem != nil {
-            return nil
-        }
-
-        for _, item := range items {
-            if item.GetFile() != nil && ptr.Val(item.GetSize()) > 0 {
-                driveItem = item
-                break
-            }
-        }
-
-        return nil
-    }
-
-    ip := suite.service.ac.
-        Drives().
-        NewDriveItemDeltaPager(suite.userDriveID, "", api.DriveItemSelectDefault())
-
-    _, _, _, err := collectItems(
-        ctx,
-        ip,
-        suite.userDriveID,
-        "General",
-        itemCollector,
-        map[string]string{},
-        "",
-        fault.New(true))
-    require.NoError(t, err, clues.ToCore(err))
-
-    // Test Requirement 2: Need a file
-    require.NotEmpty(
-        t,
-        driveItem,
-        "no file item found for user %s drive %s",
-        suite.user,
-        suite.userDriveID)
-
-    bh := itemBackupHandler{
-        suite.service.ac.Drives(),
-        suite.user,
-        (&selectors.OneDriveBackup{}).Folders(selectors.Any())[0],
-    }
-
-    // Read data for the file
-    itemData, err := downloadItem(ctx, bh, driveItem)
-    require.NoError(t, err, clues.ToCore(err))
-
-    size, err := io.Copy(io.Discard, itemData)
-    require.NoError(t, err, clues.ToCore(err))
-    require.NotZero(t, size)
-}
-
 // TestItemWriter is an integration test for uploading data to OneDrive
 // It creates a new folder with a new item and writes data to it
 func (suite *ItemIntegrationSuite) TestItemWriter() {
@@ -171,7 +95,7 @@ func (suite *ItemIntegrationSuite) TestItemWriter() {
         ctx,
         test.driveID,
         ptr.Val(root.GetId()),
-        newItem(newFolderName, true),
+        api.NewDriveItem(newFolderName, true),
         control.Copy)
     require.NoError(t, err, clues.ToCore(err))
     require.NotNil(t, newFolder.GetId())
@@ -183,7 +107,7 @@ func (suite *ItemIntegrationSuite) TestItemWriter() {
         ctx,
         test.driveID,
         ptr.Val(newFolder.GetId()),
-        newItem(newItemName, false),
+        api.NewDriveItem(newItemName, false),
         control.Copy)
     require.NoError(t, err, clues.ToCore(err))
     require.NotNil(t, newItem.GetId())
@@ -317,7 +241,7 @@ func (suite *ItemUnitTestSuite) TestDownloadItem() {
         {
             name: "success",
             itemFunc: func() models.DriveItemable {
-                di := newItem("test", false)
+                di := api.NewDriveItem("test", false)
                 di.SetAdditionalData(map[string]any{
                     "@microsoft.graph.downloadUrl": url,
                 })
@@ -336,7 +260,7 @@ func (suite *ItemUnitTestSuite) TestDownloadItem() {
         {
             name: "success, content url set instead of download url",
             itemFunc: func() models.DriveItemable {
-                di := newItem("test", false)
+                di := api.NewDriveItem("test", false)
                 di.SetAdditionalData(map[string]any{
                     "@content.downloadUrl": url,
                 })
@@ -355,7 +279,7 @@ func (suite *ItemUnitTestSuite) TestDownloadItem() {
         {
             name: "api getter returns error",
             itemFunc: func() models.DriveItemable {
-                di := newItem("test", false)
+                di := api.NewDriveItem("test", false)
                 di.SetAdditionalData(map[string]any{
                     "@microsoft.graph.downloadUrl": url,
                 })
@@ -371,7 +295,7 @@ func (suite *ItemUnitTestSuite) TestDownloadItem() {
         {
             name: "download url is empty",
             itemFunc: func() models.DriveItemable {
-                di := newItem("test", false)
+                di := api.NewDriveItem("test", false)
                 return di
             },
             GetFunc: func(ctx context.Context, url string) (*http.Response, error) {
@@ -386,7 +310,7 @@ func (suite *ItemUnitTestSuite) TestDownloadItem() {
         {
             name: "malware",
             itemFunc: func() models.DriveItemable {
-                di := newItem("test", false)
+                di := api.NewDriveItem("test", false)
                 di.SetAdditionalData(map[string]any{
                     "@microsoft.graph.downloadUrl": url,
                 })
@@ -408,7 +332,7 @@ func (suite *ItemUnitTestSuite) TestDownloadItem() {
         {
             name: "non-2xx http response",
             itemFunc: func() models.DriveItemable {
-                di := newItem("test", false)
+                di := api.NewDriveItem("test", false)
                 di.SetAdditionalData(map[string]any{
                     "@microsoft.graph.downloadUrl": url,
                 })
@@ -457,7 +381,7 @@ func (suite *ItemUnitTestSuite) TestDownloadItem_ConnectionResetErrorOnFirstRead
         url = "https://example.com"
 
         itemFunc = func() models.DriveItemable {
-            di := newItem("test", false)
+            di := api.NewDriveItem("test", false)
             di.SetAdditionalData(map[string]any{
                 "@microsoft.graph.downloadUrl": url,
             })
@@ -91,13 +91,6 @@ func (h libraryBackupHandler) NewDrivePager(
     return h.ac.NewSiteDrivePager(resourceOwner, fields)
 }
 
-func (h libraryBackupHandler) NewItemPager(
-    driveID, link string,
-    fields []string,
-) api.DeltaPager[models.DriveItemable] {
-    return h.ac.NewDriveItemDeltaPager(driveID, link, fields)
-}
-
 func (h libraryBackupHandler) AugmentItemInfo(
     dii details.ItemInfo,
     resource idname.Provider,
@@ -144,6 +137,13 @@ func (h libraryBackupHandler) IncludesDir(dir string) bool {
     return h.scope.Matches(selectors.SharePointLibraryFolder, dir)
 }
 
+func (h libraryBackupHandler) EnumerateDriveItemsDelta(
+    ctx context.Context,
+    driveID, prevDeltaLink string,
+) ([]models.DriveItemable, api.DeltaUpdate, error) {
+    return h.ac.EnumerateDriveItemsDelta(ctx, driveID, prevDeltaLink)
+}
+
 // ---------------------------------------------------------------------------
 // Restore
 // ---------------------------------------------------------------------------
@@ -671,7 +671,7 @@ func createFolder(
         ctx,
         driveID,
         parentFolderID,
-        newItem(folderName, true),
+        api.NewDriveItem(folderName, true),
         control.Replace)
 
     // ErrItemAlreadyExistsConflict can only occur for folders if the
@@ -692,7 +692,7 @@ func createFolder(
         ctx,
         driveID,
         parentFolderID,
-        newItem(folderName, true),
+        api.NewDriveItem(folderName, true),
         control.Copy)
     if err != nil {
         return nil, clues.Wrap(err, "creating folder")
@@ -733,7 +733,7 @@ func restoreFile(
     }
 
     var (
-        item         = newItem(name, false)
+        item         = api.NewDriveItem(name, false)
         collisionKey = api.DriveItemCollisionKey(item)
         collision    api.DriveItemIDType
         shouldDeleteOriginal bool
@@ -12,7 +12,6 @@ import (
     "github.com/alcionai/corso/src/internal/common/str"
     "github.com/alcionai/corso/src/pkg/fault"
     "github.com/alcionai/corso/src/pkg/logger"
-    "github.com/alcionai/corso/src/pkg/services/m365/api"
 )
 
 const (
@@ -47,7 +46,7 @@ type urlCache struct {
     refreshMu       sync.Mutex
     deltaQueryCount int
 
-    itemPager api.DeltaPager[models.DriveItemable]
+    edid EnumerateDriveItemsDeltaer
 
     errs *fault.Bus
 }
@@ -56,13 +55,10 @@ type urlCache struct {
 func newURLCache(
     driveID, prevDelta string,
     refreshInterval time.Duration,
-    itemPager api.DeltaPager[models.DriveItemable],
+    edid EnumerateDriveItemsDeltaer,
     errs *fault.Bus,
 ) (*urlCache, error) {
-    err := validateCacheParams(
-        driveID,
-        refreshInterval,
-        itemPager)
+    err := validateCacheParams(driveID, refreshInterval, edid)
     if err != nil {
         return nil, clues.Wrap(err, "cache params")
     }
@@ -71,9 +67,9 @@ func newURLCache(
             idToProps:       make(map[string]itemProps),
             lastRefreshTime: time.Time{},
             driveID:         driveID,
+            edid:            edid,
             prevDelta:       prevDelta,
             refreshInterval: refreshInterval,
-            itemPager:       itemPager,
             errs:            errs,
         },
         nil
@@ -83,7 +79,7 @@ func newURLCache(
 func validateCacheParams(
     driveID string,
     refreshInterval time.Duration,
-    itemPager api.DeltaPager[models.DriveItemable],
+    edid EnumerateDriveItemsDeltaer,
 ) error {
     if len(driveID) == 0 {
         return clues.New("drive id is empty")
@@ -93,8 +89,8 @@ func validateCacheParams(
         return clues.New("invalid refresh interval")
     }
 
-    if itemPager == nil {
-        return clues.New("nil item pager")
+    if edid == nil {
+        return clues.New("nil item enumerator")
     }
 
     return nil
@@ -160,44 +156,23 @@ func (uc *urlCache) refreshCache(
     // Issue a delta query to graph
     logger.Ctx(ctx).Info("refreshing url cache")
 
-    err := uc.deltaQuery(ctx)
+    items, du, err := uc.edid.EnumerateDriveItemsDelta(ctx, uc.driveID, uc.prevDelta)
     if err != nil {
-        // clear cache
         uc.idToProps = make(map[string]itemProps)
+        return clues.Stack(err)
+    }
 
-        return err
+    uc.deltaQueryCount++
+
+    if err := uc.updateCache(ctx, items, uc.errs); err != nil {
+        return clues.Stack(err)
     }
 
     logger.Ctx(ctx).Info("url cache refreshed")
 
     // Update last refresh time
     uc.lastRefreshTime = time.Now()
+    uc.prevDelta = du.URL
 
-    return nil
-}
-
-// deltaQuery performs a delta query on the drive and update the cache
-func (uc *urlCache) deltaQuery(
-    ctx context.Context,
-) error {
-    logger.Ctx(ctx).Debug("starting delta query")
-    // Reset item pager to remove any previous state
-    uc.itemPager.Reset(ctx)
-
-    _, _, _, err := collectItems(
-        ctx,
-        uc.itemPager,
-        uc.driveID,
-        "",
-        uc.updateCache,
-        map[string]string{},
-        uc.prevDelta,
-        uc.errs)
-    if err != nil {
-        return clues.Wrap(err, "delta query")
-    }
-
-    uc.deltaQueryCount++
-
     return nil
 }
@@ -224,13 +199,7 @@ func (uc *urlCache) readCache(
 // It assumes that cacheMu is held by caller in write mode
 func (uc *urlCache) updateCache(
     ctx context.Context,
-    _, _ string,
     items []models.DriveItemable,
-    _ map[string]string,
-    _ map[string]string,
-    _ map[string]struct{},
-    _ map[string]map[string]string,
-    _ bool,
     errs *fault.Bus,
 ) error {
     el := errs.Local()
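After this refactor the cache keeps the same refresh discipline: reads check the cache's age under a lock, and anything older than the refresh interval triggers a single delta-backed rebuild. A minimal sketch of that pattern, assuming a caller-supplied fetch function in place of the real delta query (every name below is hypothetical):

package main

import (
	"fmt"
	"sync"
	"time"
)

// staleCache shows the refresh discipline the url cache relies on: reads
// check age under a lock and trigger at most one refresh when the entries
// are older than the refresh interval.
type staleCache struct {
	mu              sync.Mutex
	lastRefreshTime time.Time
	refreshInterval time.Duration
	entries         map[string]string
	refreshCount    int
}

func (c *staleCache) needsRefresh() bool {
	return time.Since(c.lastRefreshTime) > c.refreshInterval
}

func (c *staleCache) get(key string, fetch func() map[string]string) string {
	c.mu.Lock()
	defer c.mu.Unlock()

	if c.needsRefresh() {
		// In the real cache this is a delta query against the drive; here a
		// caller-supplied fetch stands in for it.
		c.entries = fetch()
		c.lastRefreshTime = time.Now()
		c.refreshCount++
	}

	return c.entries[key]
}

func main() {
	c := &staleCache{refreshInterval: time.Hour}
	fetch := func() map[string]string { return map[string]string{"item1": "https://example.com/1"} }

	fmt.Println(c.get("item1", fetch)) // refreshes once
	fmt.Println(c.get("item1", fetch)) // served from cache
	fmt.Println(c.refreshCount)        // 1
}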
@@ -1,7 +1,6 @@
 package drive
 
 import (
-    "context"
     "errors"
     "io"
     "math/rand"
@@ -18,15 +17,19 @@ import (
     "github.com/alcionai/corso/src/internal/common/dttm"
     "github.com/alcionai/corso/src/internal/common/ptr"
     "github.com/alcionai/corso/src/internal/m365/graph"
+    "github.com/alcionai/corso/src/internal/m365/service/onedrive/mock"
     "github.com/alcionai/corso/src/internal/tester"
     "github.com/alcionai/corso/src/internal/tester/tconfig"
     "github.com/alcionai/corso/src/pkg/control"
     "github.com/alcionai/corso/src/pkg/control/testdata"
     "github.com/alcionai/corso/src/pkg/fault"
     "github.com/alcionai/corso/src/pkg/services/m365/api"
-    apiMock "github.com/alcionai/corso/src/pkg/services/m365/api/mock"
 )
 
+// ---------------------------------------------------------------------------
+// integration
+// ---------------------------------------------------------------------------
+
 type URLCacheIntegrationSuite struct {
     tester.Suite
     ac api.Client
@@ -68,11 +71,10 @@ func (suite *URLCacheIntegrationSuite) SetupSuite() {
 // url cache
 func (suite *URLCacheIntegrationSuite) TestURLCacheBasic() {
     var (
         t             = suite.T()
         ac            = suite.ac.Drives()
         driveID       = suite.driveID
         newFolderName = testdata.DefaultRestoreConfig("folder").Location
-        driveItemPager = suite.ac.Drives().NewDriveItemDeltaPager(driveID, "", api.DriveItemSelectDefault())
     )
 
     ctx, flush := tester.NewContext(t)
@@ -82,11 +84,11 @@ func (suite *URLCacheIntegrationSuite) TestURLCacheBasic() {
     root, err := ac.GetRootFolder(ctx, driveID)
     require.NoError(t, err, clues.ToCore(err))
 
-    newFolder, err := ac.Drives().PostItemInContainer(
+    newFolder, err := ac.PostItemInContainer(
         ctx,
         driveID,
         ptr.Val(root.GetId()),
-        newItem(newFolderName, true),
+        api.NewDriveItem(newFolderName, true),
         control.Copy)
     require.NoError(t, err, clues.ToCore(err))
 
@@ -94,33 +96,10 @@ func (suite *URLCacheIntegrationSuite) TestURLCacheBasic() {
 
     nfid := ptr.Val(newFolder.GetId())
 
-    collectorFunc := func(
-        context.Context,
-        string,
-        string,
-        []models.DriveItemable,
-        map[string]string,
-        map[string]string,
-        map[string]struct{},
-        map[string]map[string]string,
-        bool,
-        *fault.Bus,
-    ) error {
-        return nil
-    }
-
     // Get the previous delta to feed into url cache
-    prevDelta, _, _, err := collectItems(
-        ctx,
-        suite.ac.Drives().NewDriveItemDeltaPager(driveID, "", api.DriveItemSelectURLCache()),
-        suite.driveID,
-        "drive-name",
-        collectorFunc,
-        map[string]string{},
-        "",
-        fault.New(true))
+    _, du, err := ac.EnumerateDriveItemsDelta(ctx, suite.driveID, "")
     require.NoError(t, err, clues.ToCore(err))
-    require.NotNil(t, prevDelta.URL)
+    require.NotEmpty(t, du.URL)
 
     // Create a bunch of files in the new folder
     var items []models.DriveItemable
@@ -128,11 +107,11 @@ func (suite *URLCacheIntegrationSuite) TestURLCacheBasic() {
     for i := 0; i < 5; i++ {
         newItemName := "test_url_cache_basic_" + dttm.FormatNow(dttm.SafeForTesting)
 
-        item, err := ac.Drives().PostItemInContainer(
+        item, err := ac.PostItemInContainer(
             ctx,
             driveID,
             nfid,
-            newItem(newItemName, false),
+            api.NewDriveItem(newItemName, false),
             control.Copy)
         require.NoError(t, err, clues.ToCore(err))
 
@@ -142,9 +121,9 @@ func (suite *URLCacheIntegrationSuite) TestURLCacheBasic() {
     // Create a new URL cache with a long TTL
     uc, err := newURLCache(
         suite.driveID,
-        prevDelta.URL,
+        du.URL,
         1*time.Hour,
-        driveItemPager,
+        suite.ac.Drives(),
         fault.New(true))
     require.NoError(t, err, clues.ToCore(err))
 
@@ -195,6 +174,10 @@ func (suite *URLCacheIntegrationSuite) TestURLCacheBasic() {
     require.Equal(t, 1, uc.deltaQueryCount)
 }
 
+// ---------------------------------------------------------------------------
+// unit
+// ---------------------------------------------------------------------------
+
 type URLCacheUnitSuite struct {
     tester.Suite
 }
@@ -205,27 +188,20 @@ func TestURLCacheUnitSuite(t *testing.T) {
 
 func (suite *URLCacheUnitSuite) TestGetItemProperties() {
     deltaString := "delta"
-    next := "next"
     driveID := "drive1"
 
     table := []struct {
         name              string
-        pagerResult       map[string][]apiMock.PagerResult[models.DriveItemable]
+        pagerItems        map[string][]models.DriveItemable
+        pagerErr          map[string]error
         expectedItemProps map[string]itemProps
         expectedErr       require.ErrorAssertionFunc
         cacheAssert       func(*urlCache, time.Time)
     }{
         {
             name: "single item in cache",
-            pagerResult: map[string][]apiMock.PagerResult[models.DriveItemable]{
-                driveID: {
-                    {
-                        Values: []models.DriveItemable{
-                            fileItem("1", "file1", "root", "root", "https://dummy1.com", false),
-                        },
-                        DeltaLink: &deltaString,
-                    },
-                },
-            },
+            pagerItems: map[string][]models.DriveItemable{
+                driveID: {fileItem("1", "file1", "root", "root", "https://dummy1.com", false)},
+            },
             expectedItemProps: map[string]itemProps{
                 "1": {
@@ -242,18 +218,13 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() {
         },
         {
             name: "multiple items in cache",
-            pagerResult: map[string][]apiMock.PagerResult[models.DriveItemable]{
+            pagerItems: map[string][]models.DriveItemable{
                 driveID: {
-                    {
-                        Values: []models.DriveItemable{
-                            fileItem("1", "file1", "root", "root", "https://dummy1.com", false),
-                            fileItem("2", "file2", "root", "root", "https://dummy2.com", false),
-                            fileItem("3", "file3", "root", "root", "https://dummy3.com", false),
-                            fileItem("4", "file4", "root", "root", "https://dummy4.com", false),
-                            fileItem("5", "file5", "root", "root", "https://dummy5.com", false),
-                        },
-                        DeltaLink: &deltaString,
-                    },
+                    fileItem("1", "file1", "root", "root", "https://dummy1.com", false),
+                    fileItem("2", "file2", "root", "root", "https://dummy2.com", false),
+                    fileItem("3", "file3", "root", "root", "https://dummy3.com", false),
+                    fileItem("4", "file4", "root", "root", "https://dummy4.com", false),
+                    fileItem("5", "file5", "root", "root", "https://dummy5.com", false),
                 },
             },
             expectedItemProps: map[string]itemProps{
@@ -287,18 +258,13 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() {
         },
         {
             name: "duplicate items with potentially new urls",
-            pagerResult: map[string][]apiMock.PagerResult[models.DriveItemable]{
+            pagerItems: map[string][]models.DriveItemable{
                 driveID: {
-                    {
-                        Values: []models.DriveItemable{
-                            fileItem("1", "file1", "root", "root", "https://dummy1.com", false),
-                            fileItem("2", "file2", "root", "root", "https://dummy2.com", false),
-                            fileItem("3", "file3", "root", "root", "https://dummy3.com", false),
-                            fileItem("1", "file1", "root", "root", "https://test1.com", false),
-                            fileItem("2", "file2", "root", "root", "https://test2.com", false),
-                        },
-                        DeltaLink: &deltaString,
-                    },
+                    fileItem("1", "file1", "root", "root", "https://dummy1.com", false),
+                    fileItem("2", "file2", "root", "root", "https://dummy2.com", false),
+                    fileItem("3", "file3", "root", "root", "https://dummy3.com", false),
+                    fileItem("1", "file1", "root", "root", "https://test1.com", false),
+                    fileItem("2", "file2", "root", "root", "https://test2.com", false),
                 },
             },
             expectedItemProps: map[string]itemProps{
@@ -324,16 +290,11 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() {
         },
         {
             name: "deleted items",
-            pagerResult: map[string][]apiMock.PagerResult[models.DriveItemable]{
+            pagerItems: map[string][]models.DriveItemable{
                 driveID: {
-                    {
-                        Values: []models.DriveItemable{
-                            fileItem("1", "file1", "root", "root", "https://dummy1.com", false),
-                            fileItem("2", "file2", "root", "root", "https://dummy2.com", false),
-                            fileItem("1", "file1", "root", "root", "https://dummy1.com", true),
-                        },
-                        DeltaLink: &deltaString,
-                    },
+                    fileItem("1", "file1", "root", "root", "https://dummy1.com", false),
+                    fileItem("2", "file2", "root", "root", "https://dummy2.com", false),
+                    fileItem("1", "file1", "root", "root", "https://dummy1.com", true),
                 },
             },
             expectedItemProps: map[string]itemProps{
@@ -355,15 +316,8 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() {
         },
         {
             name: "item not found in cache",
-            pagerResult: map[string][]apiMock.PagerResult[models.DriveItemable]{
-                driveID: {
-                    {
-                        Values: []models.DriveItemable{
-                            fileItem("1", "file1", "root", "root", "https://dummy1.com", false),
-                        },
-                        DeltaLink: &deltaString,
-                    },
-                },
-            },
+            pagerItems: map[string][]models.DriveItemable{
+                driveID: {fileItem("1", "file1", "root", "root", "https://dummy1.com", false)},
+            },
             expectedItemProps: map[string]itemProps{
                 "2": {},
@@ -376,23 +330,10 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() {
         },
         {
-            name: "multi-page delta query error",
-            pagerResult: map[string][]apiMock.PagerResult[models.DriveItemable]{
-                driveID: {
-                    {
-                        Values: []models.DriveItemable{
-                            fileItem("1", "file1", "root", "root", "https://dummy1.com", false),
-                        },
-                        NextLink: &next,
-                    },
-                    {
-                        Values: []models.DriveItemable{
-                            fileItem("2", "file2", "root", "root", "https://dummy2.com", false),
-                        },
-                        DeltaLink: &deltaString,
-                        Err:       errors.New("delta query error"),
-                    },
-                },
-            },
+            name:       "delta query error",
+            pagerItems: map[string][]models.DriveItemable{},
+            pagerErr: map[string]error{
+                driveID: errors.New("delta query error"),
+            },
             expectedItemProps: map[string]itemProps{
                 "1": {},
@@ -408,15 +349,10 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() {
 
         {
             name: "folder item",
-            pagerResult: map[string][]apiMock.PagerResult[models.DriveItemable]{
+            pagerItems: map[string][]models.DriveItemable{
                 driveID: {
-                    {
-                        Values: []models.DriveItemable{
-                            fileItem("1", "file1", "root", "root", "https://dummy1.com", false),
-                            driveItem("2", "folder2", "root", "root", false, true, false),
-                        },
-                        DeltaLink: &deltaString,
-                    },
+                    fileItem("1", "file1", "root", "root", "https://dummy1.com", false),
+                    driveItem("2", "folder2", "root", "root", false, true, false),
                 },
             },
             expectedItemProps: map[string]itemProps{
@@ -437,15 +373,17 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() {
             ctx, flush := tester.NewContext(t)
             defer flush()
 
-            itemPager := &apiMock.DeltaPager[models.DriveItemable]{
-                ToReturn: test.pagerResult[driveID],
+            medi := mock.EnumeratesDriveItemsDelta{
+                Items:       test.pagerItems,
+                Err:         test.pagerErr,
+                DeltaUpdate: map[string]api.DeltaUpdate{driveID: {URL: deltaString}},
             }
 
             cache, err := newURLCache(
                 driveID,
                 "",
                 1*time.Hour,
-                itemPager,
+                &medi,
                 fault.New(true))
 
             require.NoError(suite.T(), err, clues.ToCore(err))
@@ -480,15 +418,17 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() {
 
 // Test needsRefresh
 func (suite *URLCacheUnitSuite) TestNeedsRefresh() {
-    driveID := "drive1"
-    t := suite.T()
-    refreshInterval := 1 * time.Second
+    var (
+        t               = suite.T()
+        driveID         = "drive1"
+        refreshInterval = 1 * time.Second
+    )
 
     cache, err := newURLCache(
         driveID,
         "",
         refreshInterval,
-        &apiMock.DeltaPager[models.DriveItemable]{},
+        &mock.EnumeratesDriveItemsDelta{},
         fault.New(true))
 
     require.NoError(t, err, clues.ToCore(err))
@@ -510,14 +450,12 @@ func (suite *URLCacheUnitSuite) TestNeedsRefresh() {
     require.False(t, cache.needsRefresh())
 }
 
-// Test newURLCache
 func (suite *URLCacheUnitSuite) TestNewURLCache() {
-    // table driven tests
     table := []struct {
         name        string
        driveID     string
         refreshInt  time.Duration
-        itemPager   api.DeltaPager[models.DriveItemable]
+        itemPager   EnumerateDriveItemsDeltaer
         errors      *fault.Bus
         expectedErr require.ErrorAssertionFunc
     }{
@@ -525,7 +463,7 @@ func (suite *URLCacheUnitSuite) TestNewURLCache() {
             name:        "invalid driveID",
             driveID:     "",
             refreshInt:  1 * time.Hour,
-            itemPager:   &apiMock.DeltaPager[models.DriveItemable]{},
+            itemPager:   &mock.EnumeratesDriveItemsDelta{},
             errors:      fault.New(true),
             expectedErr: require.Error,
         },
@@ -533,12 +471,12 @@ func (suite *URLCacheUnitSuite) TestNewURLCache() {
             name:        "invalid refresh interval",
             driveID:     "drive1",
             refreshInt:  100 * time.Millisecond,
-            itemPager:   &apiMock.DeltaPager[models.DriveItemable]{},
+            itemPager:   &mock.EnumeratesDriveItemsDelta{},
             errors:      fault.New(true),
             expectedErr: require.Error,
         },
         {
-            name:       "invalid itemPager",
+            name:       "invalid item enumerator",
             driveID:    "drive1",
             refreshInt: 1 * time.Hour,
             itemPager:  nil,
@@ -549,7 +487,7 @@ func (suite *URLCacheUnitSuite) TestNewURLCache() {
             name:        "valid",
             driveID:     "drive1",
             refreshInt:  1 * time.Hour,
-            itemPager:   &apiMock.DeltaPager[models.DriveItemable]{},
+            itemPager:   &mock.EnumeratesDriveItemsDelta{},
             errors:      fault.New(true),
             expectedErr: require.NoError,
         },
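These unit tests feed the cache through mock.EnumeratesDriveItemsDelta, whose canned items, errors, and delta updates are keyed by drive ID so a single fake can serve multi-drive tables. A sketch of what such a test double might look like: the field names mirror the diff above, but the struct body and the stand-in types are assumptions, not the mock's actual source.

package main

import (
	"context"
	"fmt"
)

// Item and DeltaUpdate stand in for models.DriveItemable and api.DeltaUpdate.
type Item struct{ ID string }

type DeltaUpdate struct{ URL string }

// fakeEnumerator mirrors the mock used in the tests: canned items, errors,
// and delta updates, each keyed by drive ID.
type fakeEnumerator struct {
	Items       map[string][]Item
	Err         map[string]error
	DeltaUpdate map[string]DeltaUpdate
}

func (f *fakeEnumerator) EnumerateDriveItemsDelta(
	ctx context.Context,
	driveID, prevDeltaLink string,
) ([]Item, DeltaUpdate, error) {
	return f.Items[driveID], f.DeltaUpdate[driveID], f.Err[driveID]
}

func main() {
	f := &fakeEnumerator{
		Items:       map[string][]Item{"drive1": {{ID: "1"}}},
		DeltaUpdate: map[string]DeltaUpdate{"drive1": {URL: "delta"}},
	}

	items, du, err := f.EnumerateDriveItemsDelta(context.Background(), "drive1", "")
	fmt.Println(len(items), du.URL, err) // 1 delta <nil>
}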
@@ -160,7 +160,7 @@ func populateCollections(
 
         ictx = clues.Add(ictx, "previous_path", prevPath)
 
-        added, _, removed, newDelta, err := bh.itemEnumerator().
+        added, validModTimes, removed, newDelta, err := bh.itemEnumerator().
             GetAddedAndRemovedItemIDs(
                 ictx,
                 qp.ProtectedResource.ID(),
@@ -199,9 +199,7 @@ func populateCollections(
             bh.itemHandler(),
             added,
             removed,
-            // TODO(ashmrtn): Set to value returned by pager when we have deletion
-            // markers in files.
-            false,
+            validModTimes,
             statusUpdater)
 
         collections[cID] = edc
@@ -278,7 +278,7 @@ func (col *prefetchCollection) streamItems(
             return
         }
 
-        item, err := data.NewPrefetchedItem(
+        item, err := data.NewPrefetchedItemWithInfo(
             io.NopCloser(bytes.NewReader(itemData)),
             id,
             details.ItemInfo{Exchange: info})
@@ -403,7 +403,7 @@ func (col *lazyFetchCollection) streamItems(
             "service", path.ExchangeService.String(),
             "category", col.Category().String())
 
-        stream <- data.NewLazyItem(
+        stream <- data.NewLazyItemWithInfo(
             ictx,
             &lazyItemGetter{
                 userID: user,
@@ -56,7 +56,7 @@ func (suite *CollectionUnitSuite) TestPrefetchedItem_Reader() {
         suite.Run(test.name, func() {
             t := suite.T()
 
-            ed, err := data.NewPrefetchedItem(
+            ed, err := data.NewPrefetchedItemWithInfo(
                 io.NopCloser(bytes.NewReader(test.readData)),
                 "itemID",
                 details.ItemInfo{})
@@ -494,7 +494,7 @@ func (suite *CollectionUnitSuite) TestLazyItem_NoRead_GetInfo_Errors() {
     ctx, flush := tester.NewContext(t)
     defer flush()
 
-    li := data.NewLazyItem(
+    li := data.NewLazyItemWithInfo(
         ctx,
         nil,
         "itemID",
@@ -552,7 +552,7 @@ func (suite *CollectionUnitSuite) TestLazyItem_GetDataErrors() {
                 SerializeErr: test.serializeErr,
             }
 
-            li := data.NewLazyItem(
+            li := data.NewLazyItemWithInfo(
                 ctx,
                 &lazyItemGetter{
                     userID: "userID",
@@ -592,7 +592,7 @@ func (suite *CollectionUnitSuite) TestLazyItem_ReturnsEmptyReaderOnDeletedInFlight() {
 
     getter := &mock.ItemGetSerialize{GetErr: graph.ErrDeletedInFlight}
 
-    li := data.NewLazyItem(
+    li := data.NewLazyItemWithInfo(
         ctx,
         &lazyItemGetter{
             userID: "userID",
@@ -645,7 +645,7 @@ func (suite *CollectionUnitSuite) TestLazyItem() {
 
     getter := &mock.ItemGetSerialize{GetData: testData}
 
-    li := data.NewLazyItem(
+    li := data.NewLazyItemWithInfo(
         ctx,
         &lazyItemGetter{
             userID: "userID",
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
@ -527,8 +526,6 @@ func (suite *BackupIntgSuite) TestCreateCollections() {
|
|||||||
|
|
||||||
require.NotEmpty(t, c.FullPath().Folder(false))
|
require.NotEmpty(t, c.FullPath().Folder(false))
|
||||||
|
|
||||||
fmt.Printf("\n-----\nfolder %+v\n-----\n", c.FullPath().Folder(false))
|
|
||||||
|
|
||||||
// TODO(ashmrtn): Remove when LocationPath is made part of BackupCollection
|
// TODO(ashmrtn): Remove when LocationPath is made part of BackupCollection
|
||||||
// interface.
|
// interface.
|
||||||
if !assert.Implements(t, (*data.LocationPather)(nil), c) {
|
if !assert.Implements(t, (*data.LocationPather)(nil), c) {
|
||||||
@ -537,8 +534,6 @@ func (suite *BackupIntgSuite) TestCreateCollections() {
|
|||||||
|
|
||||||
loc := c.(data.LocationPather).LocationPath().String()
|
loc := c.(data.LocationPather).LocationPath().String()
|
||||||
|
|
||||||
fmt.Printf("\n-----\nloc %+v\n-----\n", c.(data.LocationPather).LocationPath().String())
|
|
||||||
|
|
||||||
require.NotEmpty(t, loc)
|
require.NotEmpty(t, loc)
|
||||||
|
|
||||||
delete(test.channelNames, loc)
|
delete(test.channelNames, loc)
|
||||||
|
|||||||
@@ -176,7 +176,7 @@ func (col *Collection) streamItems(ctx context.Context, errs *fault.Bus) {
 
         info.ParentPath = col.LocationPath().String()
 
-        storeItem, err := data.NewPrefetchedItem(
+        storeItem, err := data.NewPrefetchedItemWithInfo(
             io.NopCloser(bytes.NewReader(itemData)),
             id,
             details.ItemInfo{Groups: info})
@@ -49,7 +49,7 @@ func (suite *CollectionUnitSuite) TestPrefetchedItem_Reader() {
         suite.Run(test.name, func() {
             t := suite.T()
 
-            ed, err := data.NewPrefetchedItem(
+            ed, err := data.NewPrefetchedItemWithInfo(
                 io.NopCloser(bytes.NewReader(test.readData)),
                 "itemID",
                 details.ItemInfo{})
@@ -212,7 +212,7 @@ func (sc *Collection) retrieveLists(
 
         metrics.Successes++
 
-        item, err := data.NewPrefetchedItem(
+        item, err := data.NewPrefetchedItemWithInfo(
             io.NopCloser(bytes.NewReader(byteArray)),
             ptr.Val(lst.GetId()),
             details.ItemInfo{SharePoint: ListToSPInfo(lst, size)})
@@ -279,7 +279,7 @@ func (sc *Collection) retrievePages(
         metrics.Bytes += size
         metrics.Successes++
 
-        item, err := data.NewPrefetchedItem(
+        item, err := data.NewPrefetchedItemWithInfo(
             io.NopCloser(bytes.NewReader(byteArray)),
             ptr.Val(pg.GetId()),
             details.ItemInfo{SharePoint: pageToSPInfo(pg, root, size)})
@@ -103,7 +103,7 @@ func (suite *SharePointCollectionSuite) TestCollection_Items() {
             byteArray, err := ow.GetSerializedContent()
             require.NoError(t, err, clues.ToCore(err))
 
-            data, err := data.NewPrefetchedItem(
+            data, err := data.NewPrefetchedItemWithInfo(
                 io.NopCloser(bytes.NewReader(byteArray)),
                 name,
                 details.ItemInfo{SharePoint: ListToSPInfo(listing, int64(len(byteArray)))})
@@ -133,7 +133,7 @@ func (suite *SharePointCollectionSuite) TestCollection_Items() {
             page, err := betaAPI.CreatePageFromBytes(byteArray)
             require.NoError(t, err, clues.ToCore(err))
 
-            data, err := data.NewPrefetchedItem(
+            data, err := data.NewPrefetchedItemWithInfo(
                 io.NopCloser(bytes.NewReader(byteArray)),
                 itemName,
                 details.ItemInfo{SharePoint: betaAPI.PageInfo(page, int64(len(byteArray)))})
@@ -196,7 +196,7 @@ func (suite *SharePointCollectionSuite) TestListCollection_Restore() {
     byteArray, err := service.Serialize(listing)
     require.NoError(t, err, clues.ToCore(err))
 
-    listData, err := data.NewPrefetchedItem(
+    listData, err := data.NewPrefetchedItemWithInfo(
         io.NopCloser(bytes.NewReader(byteArray)),
         testName,
         details.ItemInfo{SharePoint: ListToSPInfo(listing, int64(len(byteArray)))})
@ -124,6 +124,8 @@ var (
|
|||||||
ErrTimeout = clues.New("communication timeout")
|
ErrTimeout = clues.New("communication timeout")
|
||||||
|
|
||||||
ErrResourceOwnerNotFound = clues.New("resource owner not found in tenant")
|
ErrResourceOwnerNotFound = clues.New("resource owner not found in tenant")
|
||||||
|
|
||||||
|
ErrTokenExpired = clues.New("jwt token expired")
|
||||||
)
|
)
|
||||||
|
|
||||||
func IsErrApplicationThrottled(err error) bool {
|
func IsErrApplicationThrottled(err error) bool {
|
||||||
@ -224,7 +226,8 @@ func IsErrUnauthorized(err error) bool {
|
|||||||
// TODO: refine this investigation. We don't currently know if
|
// TODO: refine this investigation. We don't currently know if
|
||||||
// a specific item download url expired, or if the full connection
|
// a specific item download url expired, or if the full connection
|
||||||
// auth expired.
|
// auth expired.
|
||||||
return clues.HasLabel(err, LabelStatus(http.StatusUnauthorized))
|
return clues.HasLabel(err, LabelStatus(http.StatusUnauthorized)) ||
|
||||||
|
errors.Is(err, ErrTokenExpired)
|
||||||
}
|
}
|
||||||
|
|
||||||
func IsErrItemAlreadyExistsConflict(err error) bool {
|
func IsErrItemAlreadyExistsConflict(err error) bool {
|
||||||
@@ -478,11 +478,16 @@ func (suite *GraphErrorsUnitSuite) TestIsErrUnauthorized() {
 			expect: assert.False,
 		},
 		{
-			name: "as",
+			name: "graph 401",
 			err: clues.Stack(assert.AnError).
 				Label(LabelStatus(http.StatusUnauthorized)),
 			expect: assert.True,
 		},
+		{
+			name:   "token expired",
+			err:    clues.Stack(assert.AnError, ErrTokenExpired),
+			expect: assert.True,
+		},
 	}
 	for _, test := range table {
 		suite.Run(test.name, func() {
@@ -57,7 +57,7 @@ func (mce MetadataCollectionEntry) toMetadataItem() (metadataItem, error) {
 		return metadataItem{}, clues.Wrap(err, "serializing metadata")
 	}

-	item, err := data.NewUnindexedPrefetchedItem(
+	item, err := data.NewPrefetchedItem(
 		io.NopCloser(buf),
 		mce.fileName,
 		time.Now())
@@ -70,7 +70,7 @@ func (suite *MetadataCollectionUnitSuite) TestItems() {
 	items := []metadataItem{}

 	for i := 0; i < len(itemNames); i++ {
-		item, err := data.NewUnindexedPrefetchedItem(
+		item, err := data.NewPrefetchedItem(
 			io.NopCloser(bytes.NewReader(itemData[i])),
 			itemNames[i],
 			time.Time{})
@@ -9,11 +9,13 @@ import (
 	"github.com/microsoftgraph/msgraph-sdk-go/models"

 	"github.com/alcionai/corso/src/internal/common/idname"
+	"github.com/alcionai/corso/src/internal/common/ptr"
 	odConsts "github.com/alcionai/corso/src/internal/m365/service/onedrive/consts"
 	"github.com/alcionai/corso/src/pkg/backup/details"
 	"github.com/alcionai/corso/src/pkg/control"
 	"github.com/alcionai/corso/src/pkg/path"
 	"github.com/alcionai/corso/src/pkg/services/m365/api"
+	apiMock "github.com/alcionai/corso/src/pkg/services/m365/api/mock"
 )

 // ---------------------------------------------------------------------------
@@ -23,6 +25,8 @@ import (
 type BackupHandler struct {
 	ItemInfo details.ItemInfo

+	DriveItemEnumeration EnumeratesDriveItemsDelta
+
 	GI  GetsItem
 	GIP GetsItemPermission

@@ -56,6 +60,7 @@ func DefaultOneDriveBH(resourceOwner string) *BackupHandler {
 			OneDrive:  &details.OneDriveInfo{},
 			Extension: &details.ExtensionData{},
 		},
+		DriveItemEnumeration: EnumeratesDriveItemsDelta{},
 		GI:           GetsItem{Err: clues.New("not defined")},
 		GIP:          GetsItemPermission{Err: clues.New("not defined")},
 		PathPrefixFn: defaultOneDrivePathPrefixer,
@@ -125,10 +130,6 @@ func (h BackupHandler) NewDrivePager(string, []string) api.Pager[models.Driveabl
 	return h.DrivePagerV
 }

-func (h BackupHandler) NewItemPager(driveID string, _ string, _ []string) api.DeltaPager[models.DriveItemable] {
-	return h.ItemPagerV[driveID]
-}
-
 func (h BackupHandler) FormatDisplayPath(_ string, pb *path.Builder) string {
 	return "/" + pb.String()
 }
@@ -159,6 +160,13 @@ func (h *BackupHandler) Get(context.Context, string, map[string]string) (*http.R
 	return h.GetResps[c], h.GetErrs[c]
 }

+func (h BackupHandler) EnumerateDriveItemsDelta(
+	ctx context.Context,
+	driveID, prevDeltaLink string,
+) ([]models.DriveItemable, api.DeltaUpdate, error) {
+	return h.DriveItemEnumeration.EnumerateDriveItemsDelta(ctx, driveID, prevDeltaLink)
+}
+
 func (h BackupHandler) GetItem(ctx context.Context, _, _ string) (models.DriveItemable, error) {
 	return h.GI.GetItem(ctx, "", "")
 }
@@ -261,6 +269,65 @@ func (m GetsItem) GetItem(
 	return m.Item, m.Err
 }

+// ---------------------------------------------------------------------------
+// Enumerates Drive Items
+// ---------------------------------------------------------------------------
+
+type EnumeratesDriveItemsDelta struct {
+	Items       map[string][]models.DriveItemable
+	DeltaUpdate map[string]api.DeltaUpdate
+	Err         map[string]error
+}
+
+func (edi EnumeratesDriveItemsDelta) EnumerateDriveItemsDelta(
+	_ context.Context,
+	driveID, _ string,
+) (
+	[]models.DriveItemable,
+	api.DeltaUpdate,
+	error,
+) {
+	return edi.Items[driveID], edi.DeltaUpdate[driveID], edi.Err[driveID]
+}
+
+func PagerResultToEDID(
+	m map[string][]apiMock.PagerResult[models.DriveItemable],
+) EnumeratesDriveItemsDelta {
+	edi := EnumeratesDriveItemsDelta{
+		Items:       map[string][]models.DriveItemable{},
+		DeltaUpdate: map[string]api.DeltaUpdate{},
+		Err:         map[string]error{},
+	}
+
+	for driveID, results := range m {
+		var (
+			err         error
+			items       = []models.DriveItemable{}
+			deltaUpdate api.DeltaUpdate
+		)
+
+		for _, pr := range results {
+			items = append(items, pr.Values...)
+
+			if pr.DeltaLink != nil {
+				deltaUpdate = api.DeltaUpdate{URL: ptr.Val(pr.DeltaLink)}
+			}
+
+			if pr.Err != nil {
+				err = pr.Err
+			}
+
+			deltaUpdate.Reset = deltaUpdate.Reset || pr.ResetDelta
+		}
+
+		edi.Items[driveID] = items
+		edi.Err[driveID] = err
+		edi.DeltaUpdate[driveID] = deltaUpdate
+	}
+
+	return edi
+}
+
 // ---------------------------------------------------------------------------
 // Get Item Permissioner
 // ---------------------------------------------------------------------------
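The new mock wires canned delta-enumeration results per drive ID. A usage sketch under stated assumptions: the `mock` package alias, the drive ID, and `someDriveItems` are placeholders; the PagerResult field names (Values, DeltaLink, Err, ResetDelta) are taken from the conversion helper above.

// deltaLink mirrors apiMock.PagerResult's *string delta-link field.
deltaLink := "delta-2"

mbh := mock.DefaultOneDriveBH("resource-owner")
mbh.DriveItemEnumeration = mock.PagerResultToEDID(
	map[string][]apiMock.PagerResult[models.DriveItemable]{
		"drive-1": {
			{Values: someDriveItems}, // someDriveItems: placeholder slice
			{DeltaLink: &deltaLink, ResetDelta: true},
		},
	})

// Code under test calling mbh.EnumerateDriveItemsDelta(ctx, "drive-1", "")
// then receives someDriveItems plus api.DeltaUpdate{URL: "delta-2", Reset: true}.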
@@ -109,7 +109,7 @@ func (suite *SharePointPageSuite) TestRestoreSinglePage() {
 	//nolint:lll
 	byteArray := spMock.Page("Byte Test")

-	pageData, err := data.NewUnindexedPrefetchedItem(
+	pageData, err := data.NewPrefetchedItem(
 		io.NopCloser(bytes.NewReader(byteArray)),
 		testName,
 		time.Now())
@@ -91,12 +91,9 @@ func (suite *LibrariesBackupUnitSuite) TestUpdateCollections() {

 			var (
 				paths     = map[string]string{}
-				newPaths  = map[string]string{}
+				currPaths = map[string]string{}
 				excluded  = map[string]struct{}{}
-				itemColls = map[string]map[string]string{
-					driveID: {},
-				}
-				collMap = map[string]map[string]*drive.Collection{
+				collMap   = map[string]map[string]*drive.Collection{
 					driveID: {},
 				}
 			)
@@ -110,15 +107,14 @@ func (suite *LibrariesBackupUnitSuite) TestUpdateCollections() {

 			c.CollectionMap = collMap

-			err := c.UpdateCollections(
+			_, err := c.UpdateCollections(
 				ctx,
 				driveID,
 				"General",
 				test.items,
 				paths,
-				newPaths,
+				currPaths,
 				excluded,
-				itemColls,
 				true,
 				fault.New(true))

@@ -48,9 +48,9 @@ func LogFaultErrors(ctx context.Context, fe *fault.Errors, prefix string) {
 	}

 	var (
 		log    = logger.Ctx(ctx)
 		pfxMsg = prefix + ":"
-		li, ls, lr = len(fe.Items), len(fe.Skipped), len(fe.Recovered)
+		li, ls, lr, la = len(fe.Items), len(fe.Skipped), len(fe.Recovered), len(fe.Alerts)
 	)

 	if fe.Failure == nil && li+ls+lr == 0 {
@@ -73,4 +73,8 @@ func LogFaultErrors(ctx context.Context, fe *fault.Errors, prefix string) {
 	for i, err := range fe.Recovered {
 		log.With("recovered_error", err).Errorf("%s recoverable error %d of %d: %s", pfxMsg, i+1, lr, err.Msg)
 	}
+
+	for i, alert := range fe.Alerts {
+		log.With("alert", alert).Infof("%s alert %d of %d: %s", pfxMsg, i+1, la, alert.Message)
+	}
 }
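With the added loop, alerts ride the same fault.Errors snapshot as failed items and skips. A hedged call-site sketch (the package hosting LogFaultErrors isn't named in this excerpt; the message and prefix are placeholders):

errs := fault.New(false)
errs.AddAlert(ctx, fault.NewAlert(
	"previous delta token was invalid", "ns", "item-id", "item-name", nil))

// Errors() clones the bus's alerts into fe.Alerts, and the new loop
// logs "backup: alert 1 of 1: previous delta token was invalid".
LogFaultErrors(ctx, errs.Errors(), "backup")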
@@ -76,28 +76,28 @@ func (suite *ExchangeBackupIntgSuite) TestBackup_Run_exchange() {
 		category      path.CategoryType
 		metadataFiles [][]string
 	}{
-		// {
-		// 	name: "Mail",
-		// 	selector: func() *selectors.ExchangeBackup {
-		// 		sel := selectors.NewExchangeBackup([]string{suite.its.user.ID})
-		// 		sel.Include(sel.MailFolders([]string{api.MailInbox}, selectors.PrefixMatch()))
-		// 		sel.DiscreteOwner = suite.its.user.ID
+		{
+			name: "Mail",
+			selector: func() *selectors.ExchangeBackup {
+				sel := selectors.NewExchangeBackup([]string{suite.its.user.ID})
+				sel.Include(sel.MailFolders([]string{api.MailInbox}, selectors.PrefixMatch()))
+				sel.DiscreteOwner = suite.its.user.ID

-		// 		return sel
-		// 	},
-		// 	category: path.EmailCategory,
-		// 	metadataFiles: exchange.MetadataFileNames(path.EmailCategory),
-		// },
-		// {
-		// 	name: "Contacts",
-		// 	selector: func() *selectors.ExchangeBackup {
-		// 		sel := selectors.NewExchangeBackup([]string{suite.its.user.ID})
-		// 		sel.Include(sel.ContactFolders([]string{api.DefaultContacts}, selectors.PrefixMatch()))
-		// 		return sel
-		// 	},
-		// 	category: path.ContactsCategory,
-		// 	metadataFiles: exchange.MetadataFileNames(path.ContactsCategory),
-		// },
+				return sel
+			},
+			category:      path.EmailCategory,
+			metadataFiles: MetadataFileNames(path.EmailCategory),
+		},
+		{
+			name: "Contacts",
+			selector: func() *selectors.ExchangeBackup {
+				sel := selectors.NewExchangeBackup([]string{suite.its.user.ID})
+				sel.Include(sel.ContactFolders([]string{api.DefaultContacts}, selectors.PrefixMatch()))
+				return sel
+			},
+			category:      path.ContactsCategory,
+			metadataFiles: MetadataFileNames(path.ContactsCategory),
+		},
 		{
 			name: "Calendar Events",
 			selector: func() *selectors.ExchangeBackup {
@@ -226,18 +226,18 @@ func (suite *GroupsRestoreNightlyIntgSuite) TestRestore_Run_groupsWithAdvancedOp
 		suite.its.group.RootSite.DriveRootFolderID)
 }

-func (suite *GroupsRestoreNightlyIntgSuite) TestRestore_Run_groupsAlternateProtectedResource() {
-	sel := selectors.NewGroupsBackup([]string{suite.its.group.ID})
-	sel.Include(selTD.GroupsBackupLibraryFolderScope(sel))
-	sel.Filter(sel.Library("documents"))
-	sel.DiscreteOwner = suite.its.group.ID
+// func (suite *GroupsRestoreNightlyIntgSuite) TestRestore_Run_groupsAlternateProtectedResource() {
+// 	sel := selectors.NewGroupsBackup([]string{suite.its.group.ID})
+// 	sel.Include(selTD.GroupsBackupLibraryFolderScope(sel))
+// 	sel.Filter(sel.Library("documents"))
+// 	sel.DiscreteOwner = suite.its.group.ID

-	runDriveRestoreToAlternateProtectedResource(
-		suite.T(),
-		suite,
-		suite.its.ac,
-		sel.Selector,
-		suite.its.group.RootSite,
-		suite.its.secondaryGroup.RootSite,
-		suite.its.secondaryGroup.ID)
-}
+// 	runDriveRestoreToAlternateProtectedResource(
+// 		suite.T(),
+// 		suite,
+// 		suite.its.ac,
+// 		sel.Selector,
+// 		suite.its.group.RootSite,
+// 		suite.its.secondaryGroup.RootSite,
+// 		suite.its.secondaryGroup.ID)
+// }
@@ -182,7 +182,7 @@ func collect(
 		return nil, clues.Wrap(err, "marshalling body").WithClues(ctx)
 	}

-	item, err := data.NewUnindexedPrefetchedItem(
+	item, err := data.NewPrefetchedItem(
 		io.NopCloser(bytes.NewReader(bs)),
 		col.itemName,
 		time.Now())
src/pkg/fault/alert.go (new file, 70 lines)
@@ -0,0 +1,70 @@
+package fault
+
+import (
+	"github.com/alcionai/corso/src/cli/print"
+)
+
+var _ print.Printable = &Alert{}
+
+// Alerts are informational-only notifications. The purpose of alerts is to
+// provide a means of end-user communication about important events without
+// needing to generate runtime failures or recoverable errors. When generating
+// an alert, no other fault feature (failure, recoverable, skip, etc) should
+// be in use. IE: Errors do not also get alerts, since the error itself is a
+// form of end-user communication already.
+type Alert struct {
+	Item    Item   `json:"item"`
+	Message string `json:"message"`
+}
+
+// String complies with the stringer interface.
+func (a *Alert) String() string {
+	msg := "<nil>"
+
+	if a != nil {
+		msg = a.Message
+	}
+
+	if len(msg) == 0 {
+		msg = "<missing>"
+	}
+
+	return "Alert: " + msg
+}
+
+func (a Alert) MinimumPrintable() any {
+	return a
+}
+
+// Headers returns the human-readable names of properties of a skipped Item
+// for printing out to a terminal.
+func (a Alert) Headers() []string {
+	return []string{"Action", "Message", "Container", "Name", "ID"}
+}
+
+// Values populates the printable values matching the Headers list.
+func (a Alert) Values() []string {
+	var cn string
+
+	acn, ok := a.Item.Additional[AddtlContainerName]
+	if ok {
+		str, ok := acn.(string)
+		if ok {
+			cn = str
+		}
+	}
+
+	return []string{"Alert", a.Message, cn, a.Item.Name, a.Item.ID}
+}
+
+func NewAlert(message, namespace, itemID, name string, addtl map[string]any) *Alert {
+	return &Alert{
+		Message: message,
+		Item: Item{
+			Namespace:  namespace,
+			ID:         itemID,
+			Name:       name,
+			Additional: addtl,
+		},
+	}
+}
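A sketch of constructing and rendering an Alert from outside the fault package. The container-name key mirrors AddtlContainerName from this commit; the message, namespace, and item fields are placeholder data.

a := fault.NewAlert(
	"previous delta token was invalid",
	"onedrive-backup", // namespace, used for deduplication
	"item-id",
	"item-name",
	map[string]any{fault.AddtlContainerName: "Documents"})

fmt.Println(a.String()) // Alert: previous delta token was invalid
fmt.Println(a.Values()) // [Alert previous delta token was invalid Documents item-name item-id]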
src/pkg/fault/alert_test.go (new file, 88 lines)
@@ -0,0 +1,88 @@
+package fault_test
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/suite"
+
+	"github.com/alcionai/corso/src/internal/tester"
+	"github.com/alcionai/corso/src/pkg/fault"
+)
+
+type AlertUnitSuite struct {
+	tester.Suite
+}
+
+func TestAlertUnitSuite(t *testing.T) {
+	suite.Run(t, &AlertUnitSuite{Suite: tester.NewUnitSuite(t)})
+}
+
+func (suite *AlertUnitSuite) TestAlert_String() {
+	var (
+		t = suite.T()
+		a fault.Alert
+	)
+
+	assert.Contains(t, a.String(), "Alert: <missing>")
+
+	a = fault.Alert{
+		Item:    fault.Item{},
+		Message: "",
+	}
+	assert.Contains(t, a.String(), "Alert: <missing>")
+
+	a = fault.Alert{
+		Item: fault.Item{
+			ID: "item_id",
+		},
+		Message: "msg",
+	}
+	assert.NotContains(t, a.String(), "item_id")
+	assert.Contains(t, a.String(), "Alert: msg")
+}
+
+func (suite *AlertUnitSuite) TestNewAlert() {
+	t := suite.T()
+	addtl := map[string]any{"foo": "bar"}
+	a := fault.NewAlert("message-to-show", "ns", "item_id", "item_name", addtl)
+
+	expect := fault.Alert{
+		Item: fault.Item{
+			Namespace:  "ns",
+			ID:         "item_id",
+			Name:       "item_name",
+			Additional: addtl,
+		},
+		Message: "message-to-show",
+	}
+
+	assert.Equal(t, expect, *a)
+}
+
+func (suite *AlertUnitSuite) TestAlert_HeadersValues() {
+	addtl := map[string]any{
+		fault.AddtlContainerID:   "cid",
+		fault.AddtlContainerName: "cname",
+	}
+
+	table := []struct {
+		name   string
+		alert  *fault.Alert
+		expect []string
+	}{
+		{
+			name:   "new alert",
+			alert:  fault.NewAlert("message-to-show", "ns", "id", "name", addtl),
+			expect: []string{"Alert", "message-to-show", "cname", "name", "id"},
+		},
+	}
+	for _, test := range table {
+		suite.Run(test.name, func() {
+			t := suite.T()
+
+			assert.Equal(t, []string{"Action", "Message", "Container", "Name", "ID"}, test.alert.Headers())
+			assert.Equal(t, test.expect, test.alert.Values())
+		})
+	}
+}
@@ -441,3 +441,28 @@ func ExampleBus_AddSkip() {
 	// Output: skipped processing file: malware_detected
 }
+
+// ExampleBus_AddAlert showcases when to use AddAlert.
+func ExampleBus_AddAlert() {
+	errs := fault.New(false)
+
+	// Some events should be communicated to the end user without recording an
+	// error to the operation. Logs aren't sufficient because we don't promote
+	// log messages to the terminal. But errors and skips are too heavy and hacky
+	// to use. In these cases, we can create informational Alerts.
+	//
+	// Only the message gets shown to the user. But since we're persisting this
+	// data along with the backup details and other fault info, we have the option
+	// of packing any other contextual data that we want.
+	errs.AddAlert(ctx, fault.NewAlert(
+		"something important happened!",
+		"deduplication-namespace",
+		"file-id",
+		"file-name",
+		map[string]any{"foo": "bar"}))
+
+	// later on, after processing, end users can scrutinize the alerts.
+	fmt.Println(errs.Alerts()[0].String())
+
+	// Alert: something important happened!
+}
@@ -15,11 +15,24 @@ import (
 	"github.com/alcionai/corso/src/pkg/logger"
 )

+// temporary hack identifier
+// see: https://github.com/alcionai/corso/pull/2510#discussion_r1113532530
+// TODO: https://github.com/alcionai/corso/issues/4003
+const LabelForceNoBackupCreation = "label_forces_no_backup_creations"
+
 type Bus struct {
 	mu *sync.Mutex

+	// When creating a local bus, the parent property retains a pointer
+	// to the root Bus. Even in the case of multiple chained creations of
+	// local busses, the parent reference remains the original root bus,
+	// and does not create a linked list of lineage. Any errors and failures
+	// created by a local instance will get fielded to the parent. But only
+	// local errors will returned by property getter funcs.
+	parent *Bus
+
 	// Failure probably identifies errors that were added to the bus
-	// or localBus via AddRecoverable, but which were promoted
+	// or a local Bus via AddRecoverable, but which were promoted
 	// to the failure position due to failFast=true configuration.
 	// Alternatively, the process controller might have set failure
 	// by calling Fail(err).
@@ -36,6 +49,12 @@ type Bus struct {
 	// inability to process an item, due to a well-known cause.
 	skipped []Skipped

+	// alerts contain purely informational messages and data. They
+	// represent situations where the end user should be aware of some
+	// occurrence that is not an error, exception, skipped data, or
+	// other runtime/persistence impacting issue.
+	alerts []Alert
+
 	// if failFast is true, the first errs addition will
 	// get promoted to the err value. This signifies a
 	// non-recoverable processing state, causing any running
@@ -52,62 +71,61 @@ func New(failFast bool) *Bus {
 	}
 }

+// Local constructs a new bus with a local reference to handle error aggregation
+// in a constrained scope. This allows the caller to review recoverable errors and
+// failures within only the current codespace, as opposed to the global set of errors.
+// The function that spawned the local bus should always return `bus.Failure()` to
+// ensure that hard failures are propagated back upstream.
+func (e *Bus) Local() *Bus {
+	parent := e.parent
+
+	// only use e if it is already the root instance
+	if parent == nil {
+		parent = e
+	}
+
+	return &Bus{
+		mu:       &sync.Mutex{},
+		parent:   parent,
+		failFast: parent.failFast,
+	}
+}
+
 // FailFast returns the failFast flag in the bus.
 func (e *Bus) FailFast() bool {
 	return e.failFast
 }

-// Failure returns the primary error. If not nil, this
-// indicates the operation exited prior to completion.
-func (e *Bus) Failure() error {
-	return e.failure
-}
-
-// Recovered returns the slice of errors that occurred in
-// recoverable points of processing. This is often during
-// iteration where a single failure (ex: retrieving an item),
-// doesn't require the entire process to end.
-func (e *Bus) Recovered() []error {
-	return slices.Clone(e.recoverable)
-}
-
-// Skipped returns the slice of items that were permanently
-// skipped during processing.
-func (e *Bus) Skipped() []Skipped {
-	return slices.Clone(e.skipped)
-}
-
 // Fail sets the non-recoverable error (ie: bus.failure)
 // in the bus. If a failure error is already present,
 // the error gets added to the recoverable slice for
 // purposes of tracking.
-//
-// TODO: Return Data, not Bus. The consumers of a failure
-// should care about the state of data, not the communication
-// pattern.
 func (e *Bus) Fail(err error) *Bus {
 	if err == nil {
 		return e
 	}

-	e.mu.Lock()
-	defer e.mu.Unlock()
-
 	return e.setFailure(err)
 }

 // setErr handles setting bus.failure. Sync locking gets
 // handled upstream of this call.
 func (e *Bus) setFailure(err error) *Bus {
+	e.mu.Lock()
+	defer e.mu.Unlock()
+
 	if e.failure == nil {
 		e.failure = err
-		return e
+	} else {
+		// technically not a recoverable error: we're using the
+		// recoverable slice as an overflow container here to
+		// ensure everything is tracked.
+		e.recoverable = append(e.recoverable, err)
 	}

-	// technically not a recoverable error: we're using the
-	// recoverable slice as an overflow container here to
-	// ensure everything is tracked.
-	e.recoverable = append(e.recoverable, err)
+	if e.parent != nil {
+		e.parent.setFailure(err)
+	}

 	return e
 }
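The intended Local() pattern, per the doc comment above, sketched with placeholder names (processItems, items, handle are the caller's own code):

func processItems(ctx context.Context, errs *fault.Bus, items []string) error {
	el := errs.Local()

	for _, item := range items {
		if err := handle(item); err != nil {
			// recorded locally and promoted to the root bus.
			el.AddRecoverable(ctx, err)
		}

		// non-nil once failFast promotes a recoverable error,
		// or if this scope called el.Fail directly.
		if el.Failure() != nil {
			break
		}
	}

	// only the local failure, never the root's, is returned here.
	return el.Failure()
}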
@@ -116,17 +134,11 @@ func (e *Bus) setFailure(err error) *Bus {
 // errors (ie: bus.recoverable). If failFast is true, the first
 // added error will get copied to bus.failure, causing the bus
 // to identify as non-recoverably failed.
-//
-// TODO: nil return, not Bus, since we don't want people to return
-// from errors.AddRecoverable().
 func (e *Bus) AddRecoverable(ctx context.Context, err error) {
 	if err == nil {
 		return
 	}

-	e.mu.Lock()
-	defer e.mu.Unlock()
-
 	e.logAndAddRecoverable(ctx, err, 1)
 }

@@ -147,19 +159,77 @@ func (e *Bus) logAndAddRecoverable(ctx context.Context, err error, skip int) {
 // gets handled upstream of this call. Returns true if the
 // error is a failure, false otherwise.
 func (e *Bus) addRecoverableErr(err error) bool {
+	e.mu.Lock()
+	defer e.mu.Unlock()
+
 	var isFail bool

 	if e.failure == nil && e.failFast {
-		e.setFailure(err)
+		if e.failure == nil {
+			e.failure = err
+		} else {
+			// technically not a recoverable error: we're using the
+			// recoverable slice as an overflow container here to
+			// ensure everything is tracked.
+			e.recoverable = append(e.recoverable, err)
+		}
+
+		if e.parent != nil {
+			e.parent.setFailure(err)
+		}
+
 		isFail = true
 	}

 	e.recoverable = append(e.recoverable, err)

+	// local bus instances must promote errors to the root bus.
+	if e.parent != nil {
+		e.parent.addRecoverableErr(err)
+	}
+
 	return isFail
 }

+// ---------------------------------------------------------------------------
+// Non-error adders
+// ---------------------------------------------------------------------------
+
+// AddAlert appends a record of an Alert message to the fault bus.
+// Importantly, alerts are not errors, exceptions, or skipped items.
+// An alert should only be generated if no other fault functionality
+// is in use, but that we still want the end user to clearly and
+// plainly receive a notification about a runtime event.
+func (e *Bus) AddAlert(ctx context.Context, a *Alert) {
+	if a == nil {
+		return
+	}
+
+	e.logAndAddAlert(ctx, a, 1)
+}
+
+// logs the error and adds an alert.
+func (e *Bus) logAndAddAlert(ctx context.Context, a *Alert, trace int) {
+	logger.CtxStack(ctx, trace+1).
+		With("alert", a).
+		Info("alert: " + a.Message)
+	e.addAlert(a)
+}
+
+func (e *Bus) addAlert(a *Alert) *Bus {
+	e.mu.Lock()
+	defer e.mu.Unlock()
+
+	e.alerts = append(e.alerts, *a)
+
+	// local bus instances must promote alerts to the root bus.
+	if e.parent != nil {
+		e.parent.addAlert(a)
+	}
+
+	return e
+}
+
 // AddSkip appends a record of a Skipped item to the fault bus.
 // Importantly, skipped items are not the same as recoverable
 // errors. An item should only be skipped under the following
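A small sketch of the promotion behavior that addAlert introduces: alerts added on a local bus also land on the root. Variable names and the alert contents are illustrative.

ctx := context.Background()
root := fault.New(false)
local := root.Local()

local.AddAlert(ctx, fault.NewAlert("delta reset", "ns", "id", "name", nil))

// the alert is recorded locally and walked up the parent pointer,
// so both snapshots contain it.
fmt.Println(len(local.Alerts()), len(root.Alerts())) // 1 1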
@@ -175,25 +245,35 @@ func (e *Bus) AddSkip(ctx context.Context, s *Skipped) {
 		return
 	}

-	e.mu.Lock()
-	defer e.mu.Unlock()
-
 	e.logAndAddSkip(ctx, s, 1)
 }

 // logs the error and adds a skipped item.
-func (e *Bus) logAndAddSkip(ctx context.Context, s *Skipped, skip int) {
-	logger.CtxStack(ctx, skip+1).
+func (e *Bus) logAndAddSkip(ctx context.Context, s *Skipped, trace int) {
+	logger.CtxStack(ctx, trace+1).
 		With("skipped", s).
-		Info("recoverable error")
+		Info("skipped an item")
 	e.addSkip(s)
 }

 func (e *Bus) addSkip(s *Skipped) *Bus {
+	e.mu.Lock()
+	defer e.mu.Unlock()
+
 	e.skipped = append(e.skipped, *s)

+	// local bus instances must promote skipped items to the root bus.
+	if e.parent != nil {
+		e.parent.addSkip(s)
+	}
+
 	return e
 }

+// ---------------------------------------------------------------------------
+// Results
+// ---------------------------------------------------------------------------
+
 // Errors returns the plain record of errors that were aggregated
 // within a fult Bus.
 func (e *Bus) Errors() *Errors {
@@ -204,10 +284,44 @@ func (e *Bus) Errors() *Errors {
 		Recovered: nonItems,
 		Items:     items,
 		Skipped:   slices.Clone(e.skipped),
+		Alerts:    slices.Clone(e.alerts),
 		FailFast:  e.failFast,
 	}
 }

+// Failure returns the primary error. If not nil, this
+// indicates the operation exited prior to completion.
+// If the bus is a local instance, this only returns the
+// local failure, and will not return parent data.
+func (e *Bus) Failure() error {
+	return e.failure
+}
+
+// Recovered returns the slice of errors that occurred in
+// recoverable points of processing. This is often during
+// iteration where a single failure (ex: retrieving an item),
+// doesn't require the entire process to end.
+// If the bus is a local instance, this only returns the
+// local recovered errors, and will not return parent data.
+func (e *Bus) Recovered() []error {
+	return slices.Clone(e.recoverable)
+}
+
+// Skipped returns the slice of items that were permanently
+// skipped during processing.
+// If the bus is a local instance, this only returns the
+// local skipped items, and will not return parent data.
+func (e *Bus) Skipped() []Skipped {
+	return slices.Clone(e.skipped)
+}
+
+// Alerts returns the slice of alerts generated during runtime.
+// If the bus is a local alerts, this only returns the
+// local failure, and will not return parent data.
+func (e *Bus) Alerts() []Alert {
+	return slices.Clone(e.alerts)
+}
+
 // ItemsAndRecovered returns the items that failed along with other
 // recoverable errors
 func (e *Bus) ItemsAndRecovered() ([]Item, []error) {
@@ -234,10 +348,6 @@ func (e *Bus) ItemsAndRecovered() ([]Item, []error) {
 	return maps.Values(is), non
 }

-// ---------------------------------------------------------------------------
-// Errors Data
-// ---------------------------------------------------------------------------
-
 // Errors provides the errors data alone, without sync controls
 // or adders/setters. Expected to get called at the end of processing,
 // as a way to aggregate results.
@@ -265,6 +375,12 @@ type Errors struct {
 	// inability to process an item, due to a well-known cause.
 	Skipped []Skipped `json:"skipped"`

+	// Alerts contain purely informational messages and data. They
+	// represent situations where the end user should be aware of some
+	// occurrence that is not an error, exception, skipped data, or
+	// other runtime/persistence impacting issue.
+	Alerts []Alert
+
 	// If FailFast is true, then the first Recoverable error will
 	// promote to the Failure spot, causing processing to exit.
 	FailFast bool `json:"failFast"`
@@ -313,16 +429,29 @@ func UnmarshalErrorsTo(e *Errors) func(io.ReadCloser) error {
 	}
 }

+// ---------------------------------------------------------------------------
+// Print compatibility
+// ---------------------------------------------------------------------------
+
 // Print writes the DetailModel Entries to StdOut, in the format
 // requested by the caller.
-func (e *Errors) PrintItems(ctx context.Context, ignoreErrors, ignoreSkips, ignoreRecovered bool) {
-	if len(e.Items)+len(e.Skipped)+len(e.Recovered) == 0 ||
-		ignoreErrors && ignoreSkips && ignoreRecovered {
+func (e *Errors) PrintItems(
+	ctx context.Context,
+	ignoreAlerts, ignoreErrors, ignoreSkips, ignoreRecovered bool,
+) {
+	if len(e.Alerts)+len(e.Items)+len(e.Skipped)+len(e.Recovered) == 0 ||
+		(ignoreAlerts && ignoreErrors && ignoreSkips && ignoreRecovered) {
 		return
 	}

 	sl := make([]print.Printable, 0)

+	if !ignoreAlerts {
+		for _, a := range e.Alerts {
+			sl = append(sl, print.Printable(a))
+		}
+	}
+
 	if !ignoreSkips {
 		for _, s := range e.Skipped {
 			sl = append(sl, print.Printable(s))
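Callers gain a new leading ignoreAlerts flag. A sketch of an adjusted call site (the flag values shown are placeholders):

fe := errs.Errors()
fe.PrintItems(
	ctx,
	false, // ignoreAlerts: alerts now print alongside items
	false, // ignoreErrors
	true,  // ignoreSkips
	false) // ignoreRecovered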
@@ -374,73 +503,3 @@ func (pec printableErrCore) Values() []string {

 	return []string{pec.Msg}
 }
-
-// ---------------------------------------------------------------------------
-// Local aggregator
-// ---------------------------------------------------------------------------
-
-// Local constructs a new local bus to handle error aggregation in a
-// constrained scope. Local busses shouldn't be passed down to other
-// funcs, and the function that spawned the local bus should always
-// return `local.Failure()` to ensure that hard failures are propagated
-// back upstream.
-func (e *Bus) Local() *localBus {
-	return &localBus{
-		mu:  &sync.Mutex{},
-		bus: e,
-	}
-}
-
-type localBus struct {
-	mu      *sync.Mutex
-	bus     *Bus
-	current error
-}
-
-func (e *localBus) AddRecoverable(ctx context.Context, err error) {
-	if err == nil {
-		return
-	}
-
-	e.mu.Lock()
-	defer e.mu.Unlock()
-
-	if e.current == nil && e.bus.failFast {
-		e.current = err
-	}
-
-	e.bus.logAndAddRecoverable(ctx, err, 1)
-}
-
-// AddSkip appends a record of a Skipped item to the local bus.
-// Importantly, skipped items are not the same as recoverable
-// errors. An item should only be skipped under the following
-// conditions. All other cases should be handled as errors.
-// 1. The conditions for skipping the item are well-known and
-// well-documented. End users need to be able to understand
-// both the conditions and identifications of skips.
-// 2. Skipping avoids a permanent and consistent failure. If
-// the underlying reason is transient or otherwise recoverable,
-// the item should not be skipped.
-func (e *localBus) AddSkip(ctx context.Context, s *Skipped) {
-	if s == nil {
-		return
-	}
-
-	e.mu.Lock()
-	defer e.mu.Unlock()
-
-	e.bus.logAndAddSkip(ctx, s, 1)
-}
-
-// Failure returns the failure that happened within the local bus.
-// It does not return the underlying bus.Failure(), only the failure
-// that was recorded within the local bus instance. This error should
-// get returned by any func which created a local bus.
-func (e *localBus) Failure() error {
-	return e.current
-}
-
-// temporary hack identifier
-// see: https://github.com/alcionai/corso/pull/2510#discussion_r1113532530
-const LabelForceNoBackupCreation = "label_forces_no_backup_creations"
@@ -189,25 +189,6 @@ func (suite *FaultErrorsUnitSuite) TestAdd() {
 	assert.Len(t, n.Recovered(), 2)
 }

-func (suite *FaultErrorsUnitSuite) TestAddSkip() {
-	t := suite.T()
-
-	ctx, flush := tester.NewContext(t)
-	defer flush()
-
-	n := fault.New(true)
-	require.NotNil(t, n)
-
-	n.Fail(assert.AnError)
-	assert.Len(t, n.Skipped(), 0)
-
-	n.AddRecoverable(ctx, assert.AnError)
-	assert.Len(t, n.Skipped(), 0)
-
-	n.AddSkip(ctx, fault.OwnerSkip(fault.SkipMalware, "ns", "id", "name", nil))
-	assert.Len(t, n.Skipped(), 1)
-}
-
 func (suite *FaultErrorsUnitSuite) TestErrors() {
 	t := suite.T()

@@ -11,15 +11,15 @@ const (
 	AddtlMalwareDesc = "malware_description"
 )

-type itemType string
+type ItemType string

 const (
-	FileType          itemType = "file"
-	ContainerType     itemType = "container"
-	ResourceOwnerType itemType = "resource_owner"
+	FileType          ItemType = "file"
+	ContainerType     ItemType = "container"
+	ResourceOwnerType ItemType = "resource_owner"
 )

-func (it itemType) Printable() string {
+func (it ItemType) Printable() string {
 	switch it {
 	case FileType:
 		return "File"
@@ -62,7 +62,7 @@ type Item struct {
 	Name string `json:"name"`

 	// tracks the type of item represented by this entry.
-	Type itemType `json:"type"`
+	Type ItemType `json:"type"`

 	// Error() of the causal error, or a sentinel if this is the
 	// source of the error. In case of ID collisions, the first
@@ -138,7 +138,7 @@ func OwnerErr(cause error, namespace, id, name string, addtl map[string]any) *It
 }

 // itemErr produces a Item of the provided type for tracking erroneous items.
-func itemErr(t itemType, cause error, namespace, id, name string, addtl map[string]any) *Item {
+func itemErr(t ItemType, cause error, namespace, id, name string, addtl map[string]any) *Item {
 	return &Item{
 		Namespace: namespace,
 		ID:        id,
@@ -148,119 +148,3 @@ func itemErr(t itemType, cause error, namespace, id, name string, addtl map[stri
 		Additional: addtl,
 	}
 }
-
-// ---------------------------------------------------------------------------
-// Skipped Items
-// ---------------------------------------------------------------------------
-
-// skipCause identifies the well-known conditions to Skip an item. It is
-// important that skip cause enumerations do not overlap with general error
-// handling. Skips must be well known, well documented, and consistent.
-// Transient failures, undocumented or unknown conditions, and arbitrary
-// handling should never produce a skipped item. Those cases should get
-// handled as normal errors.
-type skipCause string
-
-const (
-	// SkipMalware identifies a malware detection case. Files that graph
-	// api identifies as malware cannot be downloaded or uploaded, and will
-	// permanently fail any attempts to backup or restore.
-	SkipMalware skipCause = "malware_detected"
-
-	// SkipBigOneNote identifies that a file was skipped because it
-	// was big OneNote file and we can only download OneNote files which
-	// are less that 2GB in size.
-	//nolint:lll
-	// https://support.microsoft.com/en-us/office/restrictions-and-limitations-in-onedrive-and-sharepoint-64883a5d-228e-48f5-b3d2-eb39e07630fa#onenotenotebooks
-	SkipBigOneNote skipCause = "big_one_note_file"
-)
-
-var _ print.Printable = &Skipped{}
-
-// Skipped items are permanently unprocessable due to well-known conditions.
-// In order to skip an item, the following conditions should be met:
-// 1. The conditions for skipping the item are well-known and
-// well-documented. End users need to be able to understand
-// both the conditions and identifications of skips.
-// 2. Skipping avoids a permanent and consistent failure. If
-// the underlying reason is transient or otherwise recoverable,
-// the item should not be skipped.
-//
-// Skipped wraps Item primarily to minimize confusion when sharing the
-// fault interface. Skipped items are not errors, and Item{} errors are
-// not the basis for a Skip.
-type Skipped struct {
-	Item Item `json:"item"`
-}
-
-// String complies with the stringer interface.
-func (s *Skipped) String() string {
-	if s == nil {
-		return "<nil>"
-	}
-
-	return "skipped " + s.Item.Error() + ": " + s.Item.Cause
-}
-
-// HasCause compares the underlying cause against the parameter.
-func (s *Skipped) HasCause(c skipCause) bool {
-	if s == nil {
-		return false
-	}
-
-	return s.Item.Cause == string(c)
-}
-
-func (s Skipped) MinimumPrintable() any {
-	return s
-}
-
-// Headers returns the human-readable names of properties of a skipped Item
-// for printing out to a terminal.
-func (s Skipped) Headers() []string {
-	return []string{"Action", "Type", "Name", "Container", "Cause"}
-}
-
-// Values populates the printable values matching the Headers list.
-func (s Skipped) Values() []string {
-	var cn string
-
-	acn, ok := s.Item.Additional[AddtlContainerName]
-	if ok {
-		str, ok := acn.(string)
-		if ok {
-			cn = str
-		}
-	}
-
-	return []string{"Skip", s.Item.Type.Printable(), s.Item.Name, cn, s.Item.Cause}
-}
-
-// ContainerSkip produces a Container-kind Item for tracking skipped items.
-func ContainerSkip(cause skipCause, namespace, id, name string, addtl map[string]any) *Skipped {
-	return itemSkip(ContainerType, cause, namespace, id, name, addtl)
-}
-
-// FileSkip produces a File-kind Item for tracking skipped items.
-func FileSkip(cause skipCause, namespace, id, name string, addtl map[string]any) *Skipped {
-	return itemSkip(FileType, cause, namespace, id, name, addtl)
-}
-
-// OnwerSkip produces a ResourceOwner-kind Item for tracking skipped items.
-func OwnerSkip(cause skipCause, namespace, id, name string, addtl map[string]any) *Skipped {
-	return itemSkip(ResourceOwnerType, cause, namespace, id, name, addtl)
-}
-
-// itemSkip produces a Item of the provided type for tracking skipped items.
-func itemSkip(t itemType, cause skipCause, namespace, id, name string, addtl map[string]any) *Skipped {
-	return &Skipped{
-		Item: Item{
-			Namespace:  namespace,
-			ID:         id,
-			Name:       name,
-			Type:       t,
-			Cause:      string(cause),
-			Additional: addtl,
-		},
-	}
-}
|
|||||||
package fault
|
package fault_test
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
@ -8,6 +8,7 @@ import (
|
|||||||
"github.com/stretchr/testify/suite"
|
"github.com/stretchr/testify/suite"
|
||||||
|
|
||||||
"github.com/alcionai/corso/src/internal/tester"
|
"github.com/alcionai/corso/src/internal/tester"
|
||||||
|
"github.com/alcionai/corso/src/pkg/fault"
|
||||||
)
|
)
|
||||||
|
|
||||||
type ItemUnitSuite struct {
|
type ItemUnitSuite struct {
|
||||||
@ -21,28 +22,28 @@ func TestItemUnitSuite(t *testing.T) {
|
|||||||
func (suite *ItemUnitSuite) TestItem_Error() {
|
func (suite *ItemUnitSuite) TestItem_Error() {
|
||||||
var (
|
var (
|
||||||
t = suite.T()
|
t = suite.T()
|
||||||
i *Item
|
i *fault.Item
|
||||||
)
|
)
|
||||||
|
|
||||||
assert.Contains(t, i.Error(), "nil")
|
assert.Contains(t, i.Error(), "nil")
|
||||||
|
|
||||||
i = &Item{}
|
i = &fault.Item{}
|
||||||
assert.Contains(t, i.Error(), "unknown type")
|
assert.Contains(t, i.Error(), "unknown type")
|
||||||
|
|
||||||
i = &Item{Type: FileType}
|
i = &fault.Item{Type: fault.FileType}
|
||||||
assert.Contains(t, i.Error(), FileType)
|
assert.Contains(t, i.Error(), fault.FileType)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (suite *ItemUnitSuite) TestContainerErr() {
|
func (suite *ItemUnitSuite) TestContainerErr() {
|
||||||
t := suite.T()
|
t := suite.T()
|
||||||
addtl := map[string]any{"foo": "bar"}
|
addtl := map[string]any{"foo": "bar"}
|
||||||
i := ContainerErr(clues.New("foo"), "ns", "id", "name", addtl)
|
i := fault.ContainerErr(clues.New("foo"), "ns", "id", "name", addtl)
|
||||||
|
|
||||||
expect := Item{
|
expect := fault.Item{
|
||||||
Namespace: "ns",
|
Namespace: "ns",
|
||||||
ID: "id",
|
ID: "id",
|
||||||
Name: "name",
|
Name: "name",
|
||||||
Type: ContainerType,
|
Type: fault.ContainerType,
|
||||||
Cause: "foo",
|
Cause: "foo",
|
||||||
Additional: addtl,
|
Additional: addtl,
|
||||||
}
|
}
|
||||||
@ -53,13 +54,13 @@ func (suite *ItemUnitSuite) TestContainerErr() {
|
|||||||
func (suite *ItemUnitSuite) TestFileErr() {
|
func (suite *ItemUnitSuite) TestFileErr() {
|
||||||
t := suite.T()
|
t := suite.T()
|
||||||
addtl := map[string]any{"foo": "bar"}
|
addtl := map[string]any{"foo": "bar"}
|
||||||
i := FileErr(clues.New("foo"), "ns", "id", "name", addtl)
|
i := fault.FileErr(clues.New("foo"), "ns", "id", "name", addtl)
|
||||||
|
|
||||||
expect := Item{
|
expect := fault.Item{
|
||||||
Namespace: "ns",
|
Namespace: "ns",
|
||||||
ID: "id",
|
ID: "id",
|
||||||
Name: "name",
|
Name: "name",
|
||||||
Type: FileType,
|
Type: fault.FileType,
|
||||||
Cause: "foo",
|
Cause: "foo",
|
||||||
Additional: addtl,
|
Additional: addtl,
|
||||||
}
|
}
|
||||||
@ -70,13 +71,13 @@ func (suite *ItemUnitSuite) TestFileErr() {
|
|||||||
func (suite *ItemUnitSuite) TestOwnerErr() {
|
func (suite *ItemUnitSuite) TestOwnerErr() {
|
||||||
t := suite.T()
|
t := suite.T()
|
||||||
addtl := map[string]any{"foo": "bar"}
|
addtl := map[string]any{"foo": "bar"}
|
||||||
i := OwnerErr(clues.New("foo"), "ns", "id", "name", addtl)
|
i := fault.OwnerErr(clues.New("foo"), "ns", "id", "name", addtl)
|
||||||
|
|
||||||
expect := Item{
|
expect := fault.Item{
|
||||||
Namespace: "ns",
|
Namespace: "ns",
|
||||||
ID: "id",
|
ID: "id",
|
||||||
Name: "name",
|
Name: "name",
|
||||||
Type: ResourceOwnerType,
|
Type: fault.ResourceOwnerType,
|
||||||
Cause: "foo",
|
Cause: "foo",
|
||||||
Additional: addtl,
|
Additional: addtl,
|
||||||
}
|
}
|
||||||
@ -86,23 +87,23 @@ func (suite *ItemUnitSuite) TestOwnerErr() {
|
|||||||
|
|
||||||
func (suite *ItemUnitSuite) TestItemType_Printable() {
|
func (suite *ItemUnitSuite) TestItemType_Printable() {
|
||||||
table := []struct {
|
table := []struct {
|
||||||
t itemType
|
t fault.ItemType
|
||||||
expect string
|
expect string
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
t: FileType,
|
t: fault.FileType,
|
||||||
expect: "File",
|
expect: "File",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
t: ContainerType,
|
t: fault.ContainerType,
|
||||||
expect: "Container",
|
expect: "Container",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
t: ResourceOwnerType,
|
t: fault.ResourceOwnerType,
|
||||||
expect: "Resource Owner",
|
expect: "Resource Owner",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
t: itemType("foo"),
|
t: fault.ItemType("foo"),
|
||||||
expect: "Unknown",
|
expect: "Unknown",
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
@ -118,30 +119,30 @@ func (suite *ItemUnitSuite) TestItem_HeadersValues() {
|
|||||||
err = assert.AnError
|
err = assert.AnError
|
||||||
cause = err.Error()
|
cause = err.Error()
|
||||||
addtl = map[string]any{
|
addtl = map[string]any{
|
||||||
AddtlContainerID: "cid",
|
fault.AddtlContainerID: "cid",
|
||||||
AddtlContainerName: "cname",
|
fault.AddtlContainerName: "cname",
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
table := []struct {
|
table := []struct {
|
||||||
name string
|
name string
|
||||||
item *Item
|
item *fault.Item
|
||||||
expect []string
|
expect []string
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
name: "file",
|
name: "file",
|
||||||
item: FileErr(assert.AnError, "ns", "id", "name", addtl),
|
item: fault.FileErr(assert.AnError, "ns", "id", "name", addtl),
|
||||||
expect: []string{"Error", FileType.Printable(), "name", "cname", cause},
|
expect: []string{"Error", fault.FileType.Printable(), "name", "cname", cause},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "container",
|
name: "container",
|
||||||
item: ContainerErr(assert.AnError, "ns", "id", "name", addtl),
|
item: fault.ContainerErr(assert.AnError, "ns", "id", "name", addtl),
|
||||||
expect: []string{"Error", ContainerType.Printable(), "name", "cname", cause},
|
expect: []string{"Error", fault.ContainerType.Printable(), "name", "cname", cause},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "owner",
|
name: "owner",
|
||||||
item: OwnerErr(assert.AnError, "ns", "id", "name", nil),
|
item: fault.OwnerErr(assert.AnError, "ns", "id", "name", nil),
|
||||||
expect: []string{"Error", ResourceOwnerType.Printable(), "name", "", cause},
|
expect: []string{"Error", fault.ResourceOwnerType.Printable(), "name", "", cause},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
for _, test := range table {
|
for _, test := range table {
|
||||||
@ -153,106 +154,3 @@ func (suite *ItemUnitSuite) TestItem_HeadersValues() {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (suite *ItemUnitSuite) TestSkipped_String() {
|
|
||||||
var (
|
|
||||||
t = suite.T()
|
|
||||||
i *Skipped
|
|
||||||
)
|
|
||||||
|
|
||||||
assert.Contains(t, i.String(), "nil")
|
|
||||||
|
|
||||||
i = &Skipped{Item{}}
|
|
||||||
assert.Contains(t, i.String(), "unknown type")
|
|
||||||
|
|
||||||
i = &Skipped{Item{Type: FileType}}
|
|
||||||
assert.Contains(t, i.Item.Error(), FileType)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (suite *ItemUnitSuite) TestContainerSkip() {
|
|
||||||
t := suite.T()
|
|
||||||
addtl := map[string]any{"foo": "bar"}
|
|
||||||
i := ContainerSkip(SkipMalware, "ns", "id", "name", addtl)
|
|
||||||
|
|
||||||
expect := Item{
|
|
||||||
Namespace: "ns",
|
|
||||||
ID: "id",
|
|
||||||
Name: "name",
|
|
||||||
Type: ContainerType,
|
|
||||||
Cause: string(SkipMalware),
|
|
||||||
Additional: addtl,
|
|
||||||
}
|
|
||||||
|
|
||||||
assert.Equal(t, Skipped{expect}, *i)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (suite *ItemUnitSuite) TestFileSkip() {
|
|
||||||
t := suite.T()
|
|
||||||
addtl := map[string]any{"foo": "bar"}
|
|
||||||
i := FileSkip(SkipMalware, "ns", "id", "name", addtl)
|
|
||||||
|
|
||||||
expect := Item{
|
|
||||||
Namespace: "ns",
|
|
||||||
ID: "id",
|
|
||||||
Name: "name",
|
|
||||||
Type: FileType,
|
|
||||||
Cause: string(SkipMalware),
|
|
||||||
Additional: addtl,
|
|
||||||
}
|
|
||||||
|
|
||||||
assert.Equal(t, Skipped{expect}, *i)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (suite *ItemUnitSuite) TestOwnerSkip() {
|
|
||||||
t := suite.T()
|
|
||||||
addtl := map[string]any{"foo": "bar"}
|
|
||||||
i := OwnerSkip(SkipMalware, "ns", "id", "name", addtl)
|
|
||||||
|
|
||||||
expect := Item{
|
|
||||||
Namespace: "ns",
|
|
||||||
ID: "id",
|
|
||||||
Name: "name",
|
|
||||||
Type: ResourceOwnerType,
|
|
||||||
Cause: string(SkipMalware),
|
|
||||||
Additional: addtl,
|
|
||||||
}
|
|
||||||
|
|
||||||
assert.Equal(t, Skipped{expect}, *i)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (suite *ItemUnitSuite) TestSkipped_HeadersValues() {
|
|
||||||
addtl := map[string]any{
|
|
||||||
AddtlContainerID: "cid",
|
|
||||||
AddtlContainerName: "cname",
|
|
||||||
}
|
|
||||||
|
|
||||||
table := []struct {
|
|
||||||
name string
|
|
||||||
skip *Skipped
|
|
||||||
expect []string
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
name: "file",
|
|
||||||
skip: FileSkip(SkipMalware, "ns", "id", "name", addtl),
|
|
||||||
expect: []string{"Skip", FileType.Printable(), "name", "cname", string(SkipMalware)},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "container",
|
|
||||||
skip: ContainerSkip(SkipMalware, "ns", "id", "name", addtl),
|
|
||||||
expect: []string{"Skip", ContainerType.Printable(), "name", "cname", string(SkipMalware)},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "owner",
|
|
||||||
skip: OwnerSkip(SkipMalware, "ns", "id", "name", nil),
|
|
||||||
expect: []string{"Skip", ResourceOwnerType.Printable(), "name", "", string(SkipMalware)},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
for _, test := range table {
|
|
||||||
suite.Run(test.name, func() {
|
|
||||||
t := suite.T()
|
|
||||||
|
|
||||||
assert.Equal(t, []string{"Action", "Type", "Name", "Container", "Cause"}, test.skip.Headers())
|
|
||||||
assert.Equal(t, test.expect, test.skip.Values())
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|||||||
117
src/pkg/fault/skipped.go
Normal file
@ -0,0 +1,117 @@
+package fault
+
+import (
+	"github.com/alcionai/corso/src/cli/print"
+)
+
+// skipCause identifies the well-known conditions to Skip an item. It is
+// important that skip cause enumerations do not overlap with general error
+// handling. Skips must be well known, well documented, and consistent.
+// Transient failures, undocumented or unknown conditions, and arbitrary
+// handling should never produce a skipped item. Those cases should get
+// handled as normal errors.
+type skipCause string
+
+const (
+	// SkipMalware identifies a malware detection case. Files that graph
+	// api identifies as malware cannot be downloaded or uploaded, and will
+	// permanently fail any attempts to backup or restore.
+	SkipMalware skipCause = "malware_detected"
+
+	// SkipBigOneNote identifies that a file was skipped because it
+	// was a big OneNote file; we can only download OneNote files which
+	// are less than 2GB in size.
+	//nolint:lll
+	// https://support.microsoft.com/en-us/office/restrictions-and-limitations-in-onedrive-and-sharepoint-64883a5d-228e-48f5-b3d2-eb39e07630fa#onenotenotebooks
+	SkipBigOneNote skipCause = "big_one_note_file"
+)
+
+var _ print.Printable = &Skipped{}
+
+// Skipped items are permanently unprocessable due to well-known conditions.
+// In order to skip an item, the following conditions should be met:
+// 1. The conditions for skipping the item are well-known and
+// well-documented. End users need to be able to understand
+// both the conditions and identifications of skips.
+// 2. Skipping avoids a permanent and consistent failure. If
+// the underlying reason is transient or otherwise recoverable,
+// the item should not be skipped.
+//
+// Skipped wraps Item primarily to minimize confusion when sharing the
+// fault interface. Skipped items are not errors, and Item{} errors are
+// not the basis for a Skip.
+type Skipped struct {
+	Item Item `json:"item"`
+}
+
+// String complies with the stringer interface.
+func (s *Skipped) String() string {
+	if s == nil {
+		return "<nil>"
+	}
+
+	return "skipped " + s.Item.Error() + ": " + s.Item.Cause
+}
+
+// HasCause compares the underlying cause against the parameter.
+func (s *Skipped) HasCause(c skipCause) bool {
+	if s == nil {
+		return false
+	}
+
+	return s.Item.Cause == string(c)
+}
+
+func (s Skipped) MinimumPrintable() any {
+	return s
+}
+
+// Headers returns the human-readable names of properties of a skipped Item
+// for printing out to a terminal.
+func (s Skipped) Headers() []string {
+	return []string{"Action", "Type", "Name", "Container", "Cause"}
+}
+
+// Values populates the printable values matching the Headers list.
+func (s Skipped) Values() []string {
+	var cn string
+
+	acn, ok := s.Item.Additional[AddtlContainerName]
+	if ok {
+		str, ok := acn.(string)
+		if ok {
+			cn = str
+		}
+	}
+
+	return []string{"Skip", s.Item.Type.Printable(), s.Item.Name, cn, s.Item.Cause}
+}
+
+// ContainerSkip produces a Container-kind Item for tracking skipped items.
+func ContainerSkip(cause skipCause, namespace, id, name string, addtl map[string]any) *Skipped {
+	return itemSkip(ContainerType, cause, namespace, id, name, addtl)
+}
+
+// FileSkip produces a File-kind Item for tracking skipped items.
+func FileSkip(cause skipCause, namespace, id, name string, addtl map[string]any) *Skipped {
+	return itemSkip(FileType, cause, namespace, id, name, addtl)
+}
+
+// OwnerSkip produces a ResourceOwner-kind Item for tracking skipped items.
+func OwnerSkip(cause skipCause, namespace, id, name string, addtl map[string]any) *Skipped {
+	return itemSkip(ResourceOwnerType, cause, namespace, id, name, addtl)
+}
+
+// itemSkip produces an Item of the provided type for tracking skipped items.
+func itemSkip(t ItemType, cause skipCause, namespace, id, name string, addtl map[string]any) *Skipped {
+	return &Skipped{
+		Item: Item{
+			Namespace:  namespace,
+			ID:         id,
+			Name:       name,
+			Type:       t,
+			Cause:      string(cause),
+			Additional: addtl,
+		},
+	}
+}
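Taken together, the constants and constructors above imply a small call pattern. A minimal usage sketch, assuming only the `fault` package as added in this file; the `main` wrapper and item values are illustrative:

```go
package main

import (
	"fmt"

	"github.com/alcionai/corso/src/pkg/fault"
)

func main() {
	// flag a file that graph identified as malware; per the docs above this
	// is a permanent, well-known condition, so it becomes a Skip, not an error.
	s := fault.FileSkip(fault.SkipMalware, "ns", "item-id", "report.docx", nil)

	if s.HasCause(fault.SkipMalware) {
		fmt.Println(s) // "skipped ..." with the malware_detected cause
	}

	// Headers and Values pair up for tabular CLI output.
	fmt.Println(s.Headers())
	fmt.Println(s.Values())
}
```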
146
src/pkg/fault/skipped_test.go
Normal file
@ -0,0 +1,146 @@
+package fault_test
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"github.com/stretchr/testify/suite"
+
+	"github.com/alcionai/corso/src/internal/tester"
+	"github.com/alcionai/corso/src/pkg/fault"
+)
+
+type SkippedUnitSuite struct {
+	tester.Suite
+}
+
+func TestSkippedUnitSuite(t *testing.T) {
+	suite.Run(t, &SkippedUnitSuite{Suite: tester.NewUnitSuite(t)})
+}
+
+func (suite *SkippedUnitSuite) TestSkipped_String() {
+	var (
+		t = suite.T()
+		i *fault.Skipped
+	)
+
+	assert.Contains(t, i.String(), "nil")
+
+	i = &fault.Skipped{fault.Item{}}
+	assert.Contains(t, i.String(), "unknown type")
+
+	i = &fault.Skipped{
+		fault.Item{
+			Type: fault.FileType,
+		},
+	}
+	assert.Contains(t, i.Item.Error(), fault.FileType)
+}
+
+func (suite *SkippedUnitSuite) TestContainerSkip() {
+	t := suite.T()
+	addtl := map[string]any{"foo": "bar"}
+	i := fault.ContainerSkip(fault.SkipMalware, "ns", "id", "name", addtl)
+
+	expect := fault.Item{
+		Namespace:  "ns",
+		ID:         "id",
+		Name:       "name",
+		Type:       fault.ContainerType,
+		Cause:      string(fault.SkipMalware),
+		Additional: addtl,
+	}
+
+	assert.Equal(t, fault.Skipped{expect}, *i)
+}
+
+func (suite *SkippedUnitSuite) TestFileSkip() {
+	t := suite.T()
+	addtl := map[string]any{"foo": "bar"}
+	i := fault.FileSkip(fault.SkipMalware, "ns", "id", "name", addtl)
+
+	expect := fault.Item{
+		Namespace:  "ns",
+		ID:         "id",
+		Name:       "name",
+		Type:       fault.FileType,
+		Cause:      string(fault.SkipMalware),
+		Additional: addtl,
+	}
+
+	assert.Equal(t, fault.Skipped{expect}, *i)
+}
+
+func (suite *SkippedUnitSuite) TestOwnerSkip() {
+	t := suite.T()
+	addtl := map[string]any{"foo": "bar"}
+	i := fault.OwnerSkip(fault.SkipMalware, "ns", "id", "name", addtl)
+
+	expect := fault.Item{
+		Namespace:  "ns",
+		ID:         "id",
+		Name:       "name",
+		Type:       fault.ResourceOwnerType,
+		Cause:      string(fault.SkipMalware),
+		Additional: addtl,
+	}
+
+	assert.Equal(t, fault.Skipped{expect}, *i)
+}
+
+func (suite *SkippedUnitSuite) TestSkipped_HeadersValues() {
+	addtl := map[string]any{
+		fault.AddtlContainerID:   "cid",
+		fault.AddtlContainerName: "cname",
+	}
+
+	table := []struct {
+		name   string
+		skip   *fault.Skipped
+		expect []string
+	}{
+		{
+			name:   "file",
+			skip:   fault.FileSkip(fault.SkipMalware, "ns", "id", "name", addtl),
+			expect: []string{"Skip", fault.FileType.Printable(), "name", "cname", string(fault.SkipMalware)},
+		},
+		{
+			name:   "container",
+			skip:   fault.ContainerSkip(fault.SkipMalware, "ns", "id", "name", addtl),
+			expect: []string{"Skip", fault.ContainerType.Printable(), "name", "cname", string(fault.SkipMalware)},
+		},
+		{
+			name:   "owner",
+			skip:   fault.OwnerSkip(fault.SkipMalware, "ns", "id", "name", nil),
+			expect: []string{"Skip", fault.ResourceOwnerType.Printable(), "name", "", string(fault.SkipMalware)},
+		},
+	}
+	for _, test := range table {
+		suite.Run(test.name, func() {
+			t := suite.T()
+
+			assert.Equal(t, []string{"Action", "Type", "Name", "Container", "Cause"}, test.skip.Headers())
+			assert.Equal(t, test.expect, test.skip.Values())
+		})
+	}
+}
+
+func (suite *SkippedUnitSuite) TestBus_AddSkip() {
+	t := suite.T()
+
+	ctx, flush := tester.NewContext(t)
+	defer flush()
+
+	n := fault.New(true)
+	require.NotNil(t, n)
+
+	n.Fail(assert.AnError)
+	assert.Len(t, n.Skipped(), 0)
+
+	n.AddRecoverable(ctx, assert.AnError)
+	assert.Len(t, n.Skipped(), 0)
+
+	n.AddSkip(ctx, fault.OwnerSkip(fault.SkipMalware, "ns", "id", "name", nil))
+	assert.Len(t, n.Skipped(), 1)
+}
@ -697,7 +697,7 @@ func (s ExchangeScope) IncludesCategory(cat exchangeCategory) bool {
 // returns true if the category is included in the scope's data type,
 // and the value is set to Any().
 func (s ExchangeScope) IsAny(cat exchangeCategory) bool {
-	return isAnyTarget(s, cat)
+	return IsAnyTarget(s, cat)
 }
 
 // Get returns the data category in the scope. If the scope
@ -699,7 +699,7 @@ func (s GroupsScope) IncludesCategory(cat groupsCategory) bool {
 // returns true if the category is included in the scope's data type,
 // and the value is set to Any().
 func (s GroupsScope) IsAny(cat groupsCategory) bool {
-	return isAnyTarget(s, cat)
+	return IsAnyTarget(s, cat)
 }
 
 // Get returns the data category in the scope. If the scope
@ -484,7 +484,7 @@ func (s OneDriveScope) Matches(cat oneDriveCategory, target string) bool {
 // returns true if the category is included in the scope's data type,
 // and the value is set to Any().
 func (s OneDriveScope) IsAny(cat oneDriveCategory) bool {
-	return isAnyTarget(s, cat)
+	return IsAnyTarget(s, cat)
 }
 
 // Get returns the data category in the scope. If the scope
@ -694,7 +694,7 @@ func matchesPathValues[T scopeT, C categoryT](
 			return false
 		}
 
-		if isAnyTarget(sc, cc) {
+		if IsAnyTarget(sc, cc) {
 			// continue, not return: all path keys must match the entry to succeed
 			continue
 		}
@ -795,7 +795,7 @@ func isNoneTarget[T scopeT, C categoryT](s T, cat C) bool {
 
 // returns true if the category is included in the scope's category type,
 // and the value is set to Any().
-func isAnyTarget[T scopeT, C categoryT](s T, cat C) bool {
+func IsAnyTarget[T scopeT, C categoryT](s T, cat C) bool {
 	if !typeAndCategoryMatches(cat, s.categorizer()) {
 		return false
 	}
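Exporting `isAnyTarget` as `IsAnyTarget` lets code outside the selectors package make the same check the per-service `IsAny` methods above delegate to. A hedged sketch of the kind of call the export enables; the wrapper function is invented, and `ExchangeMailFolder` is assumed to be the exported category constant:

```go
import "github.com/alcionai/corso/src/pkg/selectors"

// selectorIncludesAllMailFolders reports whether an Exchange scope targets
// every mail folder; inside the package this check was previously spelled
// with the unexported isAnyTarget.
func selectorIncludesAllMailFolders(sc selectors.ExchangeScope) bool {
	return selectors.IsAnyTarget(sc, selectors.ExchangeMailFolder)
}
```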
@ -125,14 +125,14 @@ func (suite *SelectorScopesSuite) TestGetCatValue() {
 func (suite *SelectorScopesSuite) TestIsAnyTarget() {
 	t := suite.T()
 	stub := stubScope("")
-	assert.True(t, isAnyTarget(stub, rootCatStub))
-	assert.True(t, isAnyTarget(stub, leafCatStub))
-	assert.False(t, isAnyTarget(stub, mockCategorizer("smarf")))
+	assert.True(t, IsAnyTarget(stub, rootCatStub))
+	assert.True(t, IsAnyTarget(stub, leafCatStub))
+	assert.False(t, IsAnyTarget(stub, mockCategorizer("smarf")))
 
 	stub = stubScope("none")
-	assert.False(t, isAnyTarget(stub, rootCatStub))
-	assert.False(t, isAnyTarget(stub, leafCatStub))
-	assert.False(t, isAnyTarget(stub, mockCategorizer("smarf")))
+	assert.False(t, IsAnyTarget(stub, rootCatStub))
+	assert.False(t, IsAnyTarget(stub, leafCatStub))
+	assert.False(t, IsAnyTarget(stub, mockCategorizer("smarf")))
 }
 
 var reduceTestTable = []struct {
@ -625,7 +625,7 @@ func (s SharePointScope) IncludesCategory(cat sharePointCategory) bool {
 // returns true if the category is included in the scope's data type,
 // and the value is set to Any().
 func (s SharePointScope) IsAny(cat sharePointCategory) bool {
-	return isAnyTarget(s, cat)
+	return IsAnyTarget(s, cat)
 }
 
 // Get returns the data category in the scope. If the scope
@ -101,7 +101,7 @@ func idAnd(ss ...string) []string {
 // exported
 // ---------------------------------------------------------------------------
 
-func DriveItemSelectDefault() []string {
+func DefaultDriveItemProps() []string {
 	return idAnd(
 		"content.downloadUrl",
 		"createdBy",
@ -1,11 +0,0 @@
-package api
-
-// DeltaUpdate holds the results of a current delta token. It normally
-// gets produced when aggregating the addition and removal of items in
-// a delta-queryable folder.
-type DeltaUpdate struct {
-	// the deltaLink itself
-	URL string
-	// true if the old delta was marked as invalid
-	Reset bool
-}
@ -351,6 +351,10 @@ func (c Drives) PostItemLinkShareUpdate(
 	return itm, nil
 }
 
+// ---------------------------------------------------------------------------
+// helper funcs
+// ---------------------------------------------------------------------------
+
 // DriveItemCollisionKey constructs a key from the item name.
 // collision keys are used to identify duplicate item conflicts for handling advanced restoration config.
 func DriveItemCollisionKey(item models.DriveItemable) string {
@ -360,3 +364,17 @@ func DriveItemCollisionKey(item models.DriveItemable) string {
 
 	return ptr.Val(item.GetName())
 }
+
+// NewDriveItem initializes a `models.DriveItemable` with either a folder or file entry.
+func NewDriveItem(name string, folder bool) *models.DriveItem {
+	itemToCreate := models.NewDriveItem()
+	itemToCreate.SetName(&name)
+
+	if folder {
+		itemToCreate.SetFolder(models.NewFolder())
+	} else {
+		itemToCreate.SetFile(models.NewFile())
+	}
+
+	return itemToCreate
+}
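A small runnable sketch of the newly exported helper; only `api.NewDriveItem` comes from the diff, the `main` wrapper and printouts are illustrative:

```go
package main

import (
	"fmt"

	"github.com/alcionai/corso/src/pkg/services/m365/api"
)

func main() {
	// a folder entry gets the Folder facet, a file entry gets the File facet
	folder := api.NewDriveItem("reports", true)
	file := api.NewDriveItem("notes.txt", false)

	fmt.Println(*folder.GetName(), folder.GetFolder() != nil) // reports true
	fmt.Println(*file.GetName(), file.GetFile() != nil)       // notes.txt true
}
```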
@ -15,6 +15,11 @@ import (
 	"github.com/alcionai/corso/src/pkg/logger"
 )
 
+type DriveItemIDType struct {
+	ItemID   string
+	IsFolder bool
+}
+
 // ---------------------------------------------------------------------------
 // non-delta item pager
 // ---------------------------------------------------------------------------
@ -65,11 +70,6 @@ func (p *driveItemPageCtrl) ValidModTimes() bool {
 	return true
 }
 
-type DriveItemIDType struct {
-	ItemID   string
-	IsFolder bool
-}
-
 func (c Drives) GetItemsInContainerByCollisionKey(
 	ctx context.Context,
 	driveID, containerID string,
@ -131,9 +131,9 @@ type DriveItemDeltaPageCtrl struct {
 	options *drives.ItemItemsItemDeltaRequestBuilderGetRequestConfiguration
 }
 
-func (c Drives) NewDriveItemDeltaPager(
-	driveID, link string,
-	selectFields []string,
+func (c Drives) newDriveItemDeltaPager(
+	driveID, prevDeltaLink string,
+	selectProps ...string,
 ) *DriveItemDeltaPageCtrl {
 	preferHeaderItems := []string{
 		"deltashowremovedasdeleted",
@ -142,28 +142,32 @@ func (c Drives) NewDriveItemDeltaPager(
 		"hierarchicalsharing",
 	}
 
-	requestConfig := &drives.ItemItemsItemDeltaRequestBuilderGetRequestConfiguration{
+	options := &drives.ItemItemsItemDeltaRequestBuilderGetRequestConfiguration{
 		Headers: newPreferHeaders(preferHeaderItems...),
-		QueryParameters: &drives.ItemItemsItemDeltaRequestBuilderGetQueryParameters{
-			Select: selectFields,
-		},
+		QueryParameters: &drives.ItemItemsItemDeltaRequestBuilderGetQueryParameters{},
+	}
+
+	if len(selectProps) > 0 {
+		options.QueryParameters.Select = selectProps
 	}
 
+	builder := c.Stable.
+		Client().
+		Drives().
+		ByDriveId(driveID).
+		Items().
+		ByDriveItemId(onedrive.RootID).
+		Delta()
+
+	if len(prevDeltaLink) > 0 {
+		builder = drives.NewItemItemsItemDeltaRequestBuilder(prevDeltaLink, c.Stable.Adapter())
+	}
+
 	res := &DriveItemDeltaPageCtrl{
 		gs:      c.Stable,
 		driveID: driveID,
-		options: requestConfig,
-		builder: c.Stable.
-			Client().
-			Drives().
-			ByDriveId(driveID).
-			Items().
-			ByDriveItemId(onedrive.RootID).
-			Delta(),
+		options: options,
+		builder: builder,
 	}
 
-	if len(link) > 0 {
-		res.builder = drives.NewItemItemsItemDeltaRequestBuilder(link, c.Stable.Adapter())
-	}
-
 	return res
@ -193,6 +197,27 @@ func (p *DriveItemDeltaPageCtrl) ValidModTimes() bool {
 	return true
 }
 
+// EnumerateDriveItemsDelta enumerates all items in the specified drive,
+// returning them along with the delta update results.
+func (c Drives) EnumerateDriveItemsDelta(
+	ctx context.Context,
+	driveID string,
+	prevDeltaLink string,
+) (
+	[]models.DriveItemable,
+	DeltaUpdate,
+	error,
+) {
+	pager := c.newDriveItemDeltaPager(driveID, prevDeltaLink, DefaultDriveItemProps()...)
+
+	items, du, err := deltaEnumerateItems[models.DriveItemable](ctx, pager, prevDeltaLink)
+	if err != nil {
+		return nil, du, clues.Stack(err)
+	}
+
+	return items, du, nil
+}
+
 // ---------------------------------------------------------------------------
 // user's drives pager
 // ---------------------------------------------------------------------------
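A hedged sketch of how a caller might thread the delta token through the new entry point; `incrementalDriveBackup`, `resetLocalState`, and `saveDeltaLink` are hypothetical names, not part of the diff:

```go
package drivebackup // hypothetical consumer package

import (
	"context"

	"github.com/alcionai/clues"
	"github.com/microsoftgraph/msgraph-sdk-go/models"

	"github.com/alcionai/corso/src/pkg/services/m365/api"
)

// incrementalDriveBackup is illustrative only: it runs one delta enumeration,
// honoring DeltaUpdate.Reset before persisting the new link.
func incrementalDriveBackup(
	ctx context.Context,
	drives api.Drives,
	driveID, prevDeltaLink string,
	resetLocalState func(),
	saveDeltaLink func(string),
) ([]models.DriveItemable, error) {
	items, du, err := drives.EnumerateDriveItemsDelta(ctx, driveID, prevDeltaLink)
	if err != nil {
		return nil, clues.Stack(err)
	}

	// graph invalidated the previous token: the enumeration restarted from
	// scratch, so state derived from the old token should be discarded.
	if du.Reset {
		resetLocalState()
	}

	saveDeltaLink(du.URL)

	return items, nil
}
```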
@ -178,3 +178,18 @@ func (suite *DrivePagerIntgSuite) TestDrives_GetItemIDsInContainer() {
 		})
 	}
 }
+
+func (suite *DrivePagerIntgSuite) TestEnumerateDriveItems() {
+	t := suite.T()
+
+	ctx, flush := tester.NewContext(t)
+	defer flush()
+
+	items, du, err := suite.its.
+		ac.
+		Drives().
+		EnumerateDriveItemsDelta(ctx, suite.its.user.driveID, "")
+	require.NoError(t, err, clues.ToCore(err))
+	require.NotEmpty(t, items, "no items found in user's drive")
+	assert.NotEmpty(t, du.URL, "should have a delta link")
+}
@ -17,6 +17,7 @@ import (
 	"github.com/alcionai/corso/src/internal/tester/tconfig"
 	"github.com/alcionai/corso/src/pkg/control"
 	"github.com/alcionai/corso/src/pkg/control/testdata"
+	"github.com/alcionai/corso/src/pkg/services/m365/api"
 )
 
 type DriveAPIIntgSuite struct {
@ -50,20 +51,6 @@ func (suite *DriveAPIIntgSuite) TestDrives_CreatePagerAndGetPage() {
 	assert.NotNil(t, a)
 }
 
-// newItem initializes a `models.DriveItemable` that can be used as input to `createItem`
-func newItem(name string, folder bool) *models.DriveItem {
-	itemToCreate := models.NewDriveItem()
-	itemToCreate.SetName(&name)
-
-	if folder {
-		itemToCreate.SetFolder(models.NewFolder())
-	} else {
-		itemToCreate.SetFile(models.NewFile())
-	}
-
-	return itemToCreate
-}
-
 func (suite *DriveAPIIntgSuite) TestDrives_PostItemInContainer() {
 	t := suite.T()
 
@ -78,12 +65,12 @@ func (suite *DriveAPIIntgSuite) TestDrives_PostItemInContainer() {
 		ctx,
 		suite.its.user.driveID,
 		suite.its.user.driveRootFolderID,
-		newItem(rc.Location, true),
+		api.NewDriveItem(rc.Location, true),
 		control.Replace)
 	require.NoError(t, err, clues.ToCore(err))
 
 	// generate a folder to use for collision testing
-	folder := newItem("collision", true)
+	folder := api.NewDriveItem("collision", true)
 	origFolder, err := acd.PostItemInContainer(
 		ctx,
 		suite.its.user.driveID,
@ -93,7 +80,7 @@ func (suite *DriveAPIIntgSuite) TestDrives_PostItemInContainer() {
 	require.NoError(t, err, clues.ToCore(err))
 
 	// generate an item to use for collision testing
-	file := newItem("collision.txt", false)
+	file := api.NewDriveItem("collision.txt", false)
 	origFile, err := acd.PostItemInContainer(
 		ctx,
 		suite.its.user.driveID,
@ -241,7 +228,7 @@ func (suite *DriveAPIIntgSuite) TestDrives_PostItemInContainer_replaceFolderRegr
 		ctx,
 		suite.its.user.driveID,
 		suite.its.user.driveRootFolderID,
-		newItem(rc.Location, true),
+		api.NewDriveItem(rc.Location, true),
 		// skip instead of replace here to get
 		// an ErrItemAlreadyExistsConflict, just in case.
 		control.Skip)
@ -249,7 +236,7 @@ func (suite *DriveAPIIntgSuite) TestDrives_PostItemInContainer_replaceFolderRegr
 
 	// generate items within that folder
 	for i := 0; i < 5; i++ {
-		file := newItem(fmt.Sprintf("collision_%d.txt", i), false)
+		file := api.NewDriveItem(fmt.Sprintf("collision_%d.txt", i), false)
 		f, err := acd.PostItemInContainer(
 			ctx,
 			suite.its.user.driveID,
@ -265,7 +252,7 @@ func (suite *DriveAPIIntgSuite) TestDrives_PostItemInContainer_replaceFolderRegr
 		ctx,
 		suite.its.user.driveID,
 		ptr.Val(folder.GetParentReference().GetId()),
-		newItem(rc.Location, true),
+		api.NewDriveItem(rc.Location, true),
 		control.Replace)
 	require.NoError(t, err, clues.ToCore(err))
 	require.NotEmpty(t, ptr.Val(resultFolder.GetId()))
@ -13,6 +13,20 @@ import (
 	"github.com/alcionai/corso/src/pkg/logger"
 )
 
+// ---------------------------------------------------------------------------
+// common structs
+// ---------------------------------------------------------------------------
+
+// DeltaUpdate holds the results of a current delta token. It normally
+// gets produced when aggregating the addition and removal of items in
+// a delta-queryable folder.
+type DeltaUpdate struct {
+	// the deltaLink itself
+	URL string
+
+	// true if the old delta was marked as invalid
+	Reset bool
+}
+
 // ---------------------------------------------------------------------------
 // common interfaces
 // ---------------------------------------------------------------------------
@ -32,10 +32,11 @@ func (dnl *DeltaNextLinkValues[T]) GetOdataDeltaLink() *string {
 }
 
 type PagerResult[T any] struct {
 	Values     []T
 	NextLink   *string
 	DeltaLink  *string
+	ResetDelta bool
 	Err        error
 }
 
 // ---------------------------------------------------------------------------
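The new field pairs with the `DeltaUpdate.Reset` flag above. A sketch, within the same package as `PagerResult` and `DeltaUpdate`, of how a page result might feed the update handed back to callers; `toDeltaUpdate` and its arguments are invented for illustration:

```go
// toDeltaUpdate is illustrative only: it folds a page's delta state into
// the DeltaUpdate that enumeration helpers return to their callers.
func toDeltaUpdate[T any](page PagerResult[T], prevInvalidated bool) DeltaUpdate {
	du := DeltaUpdate{Reset: prevInvalidated || page.ResetDelta}

	// a nil DeltaLink means the enumeration hasn't reached its final page yet
	if page.DeltaLink != nil {
		du.URL = *page.DeltaLink
	}

	return du
}
```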
@ -33,3 +33,5 @@ Below is a list of known Corso issues and limitations:
 * Teams messages don't support Restore due to limited Graph API support for message creation.
 
 * Groups and Teams support is available in an early-access status, and may be subject to breaking changes.
+
+* Restoring the data into a different Group from the one it was backed up from isn't currently supported
14
website/package-lock.json
generated
@ -20,7 +20,7 @@
 		"feather-icons": "^4.29.1",
 		"jarallax": "^2.1.4",
 		"mdx-mermaid": "^1.3.2",
-		"mermaid": "^10.4.0",
+		"mermaid": "^10.5.0",
 		"prism-react-renderer": "^1.3.5",
 		"react": "^17.0.2",
 		"react-dom": "^17.0.2",
@ -9363,9 +9363,9 @@
 			}
 		},
 		"node_modules/mermaid": {
-			"version": "10.4.0",
-			"resolved": "https://registry.npmjs.org/mermaid/-/mermaid-10.4.0.tgz",
-			"integrity": "sha512-4QCQLp79lvz7UZxow5HUX7uWTPJOaQBVExduo91tliXC7v78i6kssZOPHxLL+Xs30KU72cpPn3g3imw/xm/gaw==",
+			"version": "10.5.0",
+			"resolved": "https://registry.npmjs.org/mermaid/-/mermaid-10.5.0.tgz",
+			"integrity": "sha512-9l0o1uUod78D3/FVYPGSsgV+Z0tSnzLBDiC9rVzvelPxuO80HbN1oDr9ofpPETQy9XpypPQa26fr09VzEPfvWA==",
 			"dependencies": {
 				"@braintree/sanitize-url": "^6.0.1",
 				"@types/d3-scale": "^4.0.3",
@ -21895,9 +21895,9 @@
 		"integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg=="
 		},
 		"mermaid": {
-			"version": "10.4.0",
-			"resolved": "https://registry.npmjs.org/mermaid/-/mermaid-10.4.0.tgz",
-			"integrity": "sha512-4QCQLp79lvz7UZxow5HUX7uWTPJOaQBVExduo91tliXC7v78i6kssZOPHxLL+Xs30KU72cpPn3g3imw/xm/gaw==",
+			"version": "10.5.0",
+			"resolved": "https://registry.npmjs.org/mermaid/-/mermaid-10.5.0.tgz",
+			"integrity": "sha512-9l0o1uUod78D3/FVYPGSsgV+Z0tSnzLBDiC9rVzvelPxuO80HbN1oDr9ofpPETQy9XpypPQa26fr09VzEPfvWA==",
 			"requires": {
 				"@braintree/sanitize-url": "^6.0.1",
 				"@types/d3-scale": "^4.0.3",

website/package.json

@ -26,7 +26,7 @@
 		"feather-icons": "^4.29.1",
 		"jarallax": "^2.1.4",
 		"mdx-mermaid": "^1.3.2",
-		"mermaid": "^10.4.0",
+		"mermaid": "^10.5.0",
 		"prism-react-renderer": "^1.3.5",
 		"react": "^17.0.2",
 		"react-dom": "^17.0.2",
|||||||
Loading…
x
Reference in New Issue
Block a user