merge commit

neha-Gupta1 2023-10-11 12:25:10 +05:30
commit 50d8e1af6b
130 changed files with 2711 additions and 1976 deletions

View File

@ -7,6 +7,15 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [Unreleased] (beta)
### Added
- Skips graph calls for expired item download URLs.
- Export operations now show stats at the end of the run.
### Fixed
- Catch and report cases where a protected resource is locked out of access. SDK consumers have a new errs sentinel that allows them to check for this case.
## [v0.14.0] (beta) - 2023-10-09
### Added
- Enables local or network-attached storage for Corso repositories.
- Reduce backup runtime for OneDrive and SharePoint incremental backups that have no file changes.
@ -14,17 +23,23 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Added `--backups` flag to delete multiple backups in `corso backup delete` command.
- Backup now includes all sites that belong to a team, not just the root site.
## Fixed
### Fixed
- Teams Channels that cannot support delta tokens (those without messages) fall back to non-delta enumeration and no longer fail a backup.
### Known issues
- Restoring the data into a different Group from the one it was backed up from is not currently supported
### Other
- Groups and Teams service support is still in feature preview
## [v0.13.0] (beta) - 2023-09-18
### Added
- Groups and Teams service support available as a feature preview! Channel messages and Files are now available for backup and restore in the CLI: `corso backup create groups --group '*'`
* The cli commands for "groups" and "teams" can be used interchangably, and will operate on the same backup data.
* New permissions are required to backup Channel messages. See the [Corso Documentation](https://corsobackup.io/docs/setup/m365-access/#configure-required-permissions) for complete details.
- The cli commands for "groups" and "teams" can be used interchangeably, and will operate on the same backup data.
- New permissions are required to backup Channel messages. See the [Corso Documentation](https://corsobackup.io/docs/setup/m365-access/#configure-required-permissions) for complete details.
Even though Channel message restoration is not available, message write permissions are included to cover future integration.
* This is a feature preview, and may be subject to breaking changes based on feedback and testing.
- This is a feature preview, and may be subject to breaking changes based on feedback and testing.
### Changed
- Switched to Go 1.21
@ -379,7 +394,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Miscellaneous
- Optional usage statistics reporting ([RM-35](https://github.com/alcionai/corso-roadmap/issues/35))
[Unreleased]: https://github.com/alcionai/corso/compare/v0.11.1...HEAD
[Unreleased]: https://github.com/alcionai/corso/compare/v0.14.0...HEAD
[v0.14.0]: https://github.com/alcionai/corso/compare/v0.13.0...v0.14.0
[v0.13.0]: https://github.com/alcionai/corso/compare/v0.12.0...v0.13.0
[v0.12.0]: https://github.com/alcionai/corso/compare/v0.11.1...v0.12.0
[v0.11.1]: https://github.com/alcionai/corso/compare/v0.11.0...v0.11.1
[v0.11.0]: https://github.com/alcionai/corso/compare/v0.10.0...v0.11.0
[v0.10.0]: https://github.com/alcionai/corso/compare/v0.9.0...v0.10.0

View File

@ -317,6 +317,7 @@ func genericListCommand(
b.Print(ctx)
fe.PrintItems(
ctx,
!ifShow(flags.ListAlertsFV),
!ifShow(flags.ListFailedItemsFV),
!ifShow(flags.ListSkippedItemsFV),
!ifShow(flags.ListRecoveredErrorsFV))

View File

@ -17,7 +17,6 @@ import (
"github.com/alcionai/corso/src/cli/print"
"github.com/alcionai/corso/src/cli/repo"
"github.com/alcionai/corso/src/cli/restore"
"github.com/alcionai/corso/src/cli/utils"
"github.com/alcionai/corso/src/internal/observe"
"github.com/alcionai/corso/src/internal/version"
"github.com/alcionai/corso/src/pkg/logger"
@ -61,43 +60,6 @@ func preRun(cc *cobra.Command, args []string) error {
print.Infof(ctx, "Logging to file: %s", logger.ResolvedLogFile)
}
avoidTheseDescription := []string{
"Initialize a repository.",
"Initialize a S3 repository",
"Connect to a S3 repository",
"Initialize a repository on local or network storage.",
"Connect to a repository on local or network storage.",
"Help about any command",
"Free, Secure, Open-Source Backup for M365.",
"env var guide",
}
if !slices.Contains(avoidTheseDescription, cc.Short) {
provider, overrides, err := utils.GetStorageProviderAndOverrides(ctx, cc)
if err != nil {
return err
}
cfg, err := config.GetConfigRepoDetails(
ctx,
provider,
true,
false,
overrides)
if err != nil {
log.Error("Error while getting config info to run command: ", cc.Use)
return err
}
utils.SendStartCorsoEvent(
ctx,
cfg.Storage,
cfg.Account.ID(),
map[string]any{"command": cc.CommandPath()},
cfg.RepoID,
utils.Control())
}
// handle deprecated user flag in Backup exchange command
if cc.CommandPath() == "corso backup create exchange" {
handleMailBoxFlag(ctx, cc, flagSl)

View File

@ -5,6 +5,7 @@ import (
"errors"
"github.com/alcionai/clues"
"github.com/dustin/go-humanize"
"github.com/spf13/cobra"
"github.com/alcionai/corso/src/cli/flags"
@ -110,5 +111,14 @@ func runExport(
return Only(ctx, err)
}
stats := eo.GetStats()
if len(stats) > 0 {
Infof(ctx, "\nExport details")
}
for k, s := range stats {
Infof(ctx, "%s: %d items (%s)", k.HumanString(), s.ResourceCount, humanize.Bytes(uint64(s.BytesRead)))
}
return nil
}

View File

@ -8,6 +8,7 @@ func AddAllBackupListFlags(cmd *cobra.Command) {
AddFailedItemsFN(cmd)
AddSkippedItemsFN(cmd)
AddRecoveredErrorsFN(cmd)
AddAlertsFN(cmd)
}
func AddFailedItemsFN(cmd *cobra.Command) {
@ -27,3 +28,9 @@ func AddRecoveredErrorsFN(cmd *cobra.Command) {
&ListRecoveredErrorsFV, RecoveredErrorsFN, Show,
"Toggles showing or hiding the list of errors which Corso recovered from.")
}
func AddAlertsFN(cmd *cobra.Command) {
cmd.Flags().StringVar(
&ListAlertsFV, AlertsFN, Show,
"Toggles showing or hiding the list of alerts produced during the operation.")
}

View File

@ -5,6 +5,7 @@ import (
)
const (
AlertsFN = "alerts"
DeltaPageSizeFN = "delta-page-size"
DisableConcurrencyLimiterFN = "disable-concurrency-limiter"
DisableDeltaFN = "disable-delta"
@ -31,6 +32,7 @@ var (
EnableImmutableIDFV bool
FailFastFV bool
FetchParallelismFV int
ListAlertsFV string
ListFailedItemsFV string
ListSkippedItemsFV string
ListRecoveredErrorsFV string

View File

@ -19,7 +19,7 @@ var (
)
// AddRestoreConfigFlags adds the restore config flag set.
func AddRestoreConfigFlags(cmd *cobra.Command) {
func AddRestoreConfigFlags(cmd *cobra.Command, canRestoreToAlternate bool) {
fs := cmd.Flags()
fs.StringVar(
&CollisionsFV, CollisionsFN, string(control.Skip),
@ -28,7 +28,10 @@ func AddRestoreConfigFlags(cmd *cobra.Command) {
fs.StringVar(
&DestinationFV, DestinationFN, "",
"Overrides the folder where items get restored; '/' places items into their original location")
fs.StringVar(
&ToResourceFV, ToResourceFN, "",
"Overrides the protected resource (mailbox, site, user, etc) where data gets restored")
if canRestoreToAlternate {
fs.StringVar(
&ToResourceFV, ToResourceFN, "",
"Overrides the protected resource (mailbox, site, user, etc) where data gets restored")
}
}

View File

@ -11,6 +11,7 @@ import (
func PreparedBackupListFlags() []string {
return []string{
"--" + flags.AlertsFN, flags.Show,
"--" + flags.FailedItemsFN, flags.Show,
"--" + flags.SkippedItemsFN, flags.Show,
"--" + flags.RecoveredErrorsFN, flags.Show,
@ -18,6 +19,7 @@ func PreparedBackupListFlags() []string {
}
func AssertBackupListFlags(t *testing.T, cmd *cobra.Command) {
assert.Equal(t, flags.Show, flags.ListAlertsFV)
assert.Equal(t, flags.Show, flags.ListFailedItemsFV)
assert.Equal(t, flags.Show, flags.ListSkippedItemsFV)
assert.Equal(t, flags.Show, flags.ListRecoveredErrorsFV)

View File

@ -87,15 +87,6 @@ func initFilesystemCmd(cmd *cobra.Command, args []string) error {
// Retention is not supported for filesystem repos.
retentionOpts := ctrlRepo.Retention{}
// SendStartCorsoEvent uses distict ID as tenant ID because repoID is still not generated
utils.SendStartCorsoEvent(
ctx,
cfg.Storage,
cfg.Account.ID(),
map[string]any{"command": "init repo"},
cfg.Account.ID(),
opt)
storageCfg, err := cfg.Storage.ToFilesystemConfig()
if err != nil {
return Only(ctx, clues.Wrap(err, "Retrieving filesystem configuration"))

View File

@ -102,15 +102,6 @@ func initS3Cmd(cmd *cobra.Command, args []string) error {
return Only(ctx, err)
}
// SendStartCorsoEvent uses distict ID as tenant ID because repoID is still not generated
utils.SendStartCorsoEvent(
ctx,
cfg.Storage,
cfg.Account.ID(),
map[string]any{"command": "init repo"},
cfg.Account.ID(),
opt)
s3Cfg, err := cfg.Storage.ToS3Config()
if err != nil {
return Only(ctx, clues.Wrap(err, "Retrieving s3 configuration"))

View File

@ -28,7 +28,7 @@ func addExchangeCommands(cmd *cobra.Command) *cobra.Command {
flags.AddBackupIDFlag(c, true)
flags.AddExchangeDetailsAndRestoreFlags(c)
flags.AddRestoreConfigFlags(c)
flags.AddRestoreConfigFlags(c, true)
flags.AddFailFastFlag(c)
}

View File

@ -30,7 +30,7 @@ func addGroupsCommands(cmd *cobra.Command) *cobra.Command {
flags.AddNoPermissionsFlag(c)
flags.AddSharePointDetailsAndRestoreFlags(c) // for sp restores
flags.AddSiteIDFlag(c)
flags.AddRestoreConfigFlags(c)
flags.AddRestoreConfigFlags(c, false)
flags.AddFailFastFlag(c)
}

View File

@ -65,7 +65,7 @@ func (suite *GroupsUnitSuite) TestAddGroupsCommands() {
"--" + flags.PageFolderFN, flagsTD.FlgInputs(flagsTD.PageFolderInput),
"--" + flags.CollisionsFN, flagsTD.Collisions,
"--" + flags.DestinationFN, flagsTD.Destination,
"--" + flags.ToResourceFN, flagsTD.ToResource,
// "--" + flags.ToResourceFN, flagsTD.ToResource,
"--" + flags.NoPermissionsFN,
},
flagsTD.PreparedProviderFlags(),
@ -91,7 +91,7 @@ func (suite *GroupsUnitSuite) TestAddGroupsCommands() {
assert.Equal(t, flagsTD.FileModifiedBeforeInput, opts.FileModifiedBefore)
assert.Equal(t, flagsTD.Collisions, opts.RestoreCfg.Collisions)
assert.Equal(t, flagsTD.Destination, opts.RestoreCfg.Destination)
assert.Equal(t, flagsTD.ToResource, opts.RestoreCfg.ProtectedResource)
// assert.Equal(t, flagsTD.ToResource, opts.RestoreCfg.ProtectedResource)
assert.True(t, flags.NoPermissionsFV)
flagsTD.AssertProviderFlags(t, cmd)
flagsTD.AssertStorageFlags(t, cmd)

View File

@ -29,7 +29,7 @@ func addOneDriveCommands(cmd *cobra.Command) *cobra.Command {
flags.AddBackupIDFlag(c, true)
flags.AddOneDriveDetailsAndRestoreFlags(c)
flags.AddNoPermissionsFlag(c)
flags.AddRestoreConfigFlags(c)
flags.AddRestoreConfigFlags(c, true)
flags.AddFailFastFlag(c)
}

View File

@ -29,7 +29,7 @@ func addSharePointCommands(cmd *cobra.Command) *cobra.Command {
flags.AddBackupIDFlag(c, true)
flags.AddSharePointDetailsAndRestoreFlags(c)
flags.AddNoPermissionsFlag(c)
flags.AddRestoreConfigFlags(c)
flags.AddRestoreConfigFlags(c, true)
flags.AddFailFastFlag(c)
}

View File

@ -239,24 +239,6 @@ func splitFoldersIntoContainsAndPrefix(folders []string) ([]string, []string) {
return containsFolders, prefixFolders
}
// SendStartCorsoEvent utility sends corso start event at start of each action
func SendStartCorsoEvent(
ctx context.Context,
s storage.Storage,
tenID string,
data map[string]any,
repoID string,
opts control.Options,
) {
bus, err := events.NewBus(ctx, s, tenID, opts)
if err != nil {
logger.CtxErr(ctx, err).Info("sending start event")
}
bus.SetRepoID(repoID)
bus.Event(ctx, events.CorsoStart, data)
}
// GetStorageProviderAndOverrides returns the storage provider type and
// any flags specified on the command line which are storage provider specific.
func GetStorageProviderAndOverrides(

View File

@ -120,7 +120,7 @@ func generateAndRestoreItems(
func getControllerAndVerifyResourceOwner(
ctx context.Context,
resourceOwner string,
protectedResource string,
pst path.ServiceType,
) (
*m365.Controller,
@ -150,12 +150,12 @@ func getControllerAndVerifyResourceOwner(
return nil, account.Account{}, nil, clues.Wrap(err, "connecting to graph api")
}
id, _, err := ctrl.PopulateProtectedResourceIDAndName(ctx, resourceOwner, nil)
pr, err := ctrl.PopulateProtectedResourceIDAndName(ctx, protectedResource, nil)
if err != nil {
return nil, account.Account{}, nil, clues.Wrap(err, "verifying user")
}
return ctrl, acct, ctrl.IDNameLookup.ProviderForID(id), nil
return ctrl, acct, pr, nil
}
type item struct {

View File

@ -229,7 +229,7 @@ elseif (![string]::IsNullOrEmpty($Site)) {
}
}
else {
Write-Host "User (for OneDrvie) or Site (for Sharpeoint) is required"
Write-Host "User (for OneDrive) or Site (for Sharepoint) is required"
Exit
}

View File

@ -10,15 +10,16 @@ require (
github.com/armon/go-metrics v0.4.1
github.com/aws/aws-xray-sdk-go v1.8.2
github.com/cenkalti/backoff/v4 v4.2.1
github.com/golang-jwt/jwt/v5 v5.0.0
github.com/google/uuid v1.3.1
github.com/h2non/gock v1.2.0
github.com/kopia/kopia v0.13.0
github.com/microsoft/kiota-abstractions-go v1.2.1
github.com/microsoft/kiota-abstractions-go v1.2.3
github.com/microsoft/kiota-authentication-azure-go v1.0.0
github.com/microsoft/kiota-http-go v1.1.0
github.com/microsoft/kiota-serialization-form-go v1.0.0
github.com/microsoft/kiota-serialization-json-go v1.0.4
github.com/microsoftgraph/msgraph-sdk-go v1.19.0
github.com/microsoftgraph/msgraph-sdk-go v1.20.0
github.com/microsoftgraph/msgraph-sdk-go-core v1.0.0
github.com/pkg/errors v0.9.1
github.com/puzpuzpuz/xsync/v2 v2.5.1
@ -27,7 +28,7 @@ require (
github.com/spf13/cast v1.5.1
github.com/spf13/cobra v1.7.0
github.com/spf13/pflag v1.0.5
github.com/spf13/viper v1.16.0
github.com/spf13/viper v1.17.0
github.com/stretchr/testify v1.8.4
github.com/tidwall/pretty v1.2.1
github.com/tomlazar/table v0.1.2
@ -35,7 +36,7 @@ require (
go.uber.org/zap v1.26.0
golang.org/x/exp v0.0.0-20230905200255-921286631fa9
golang.org/x/time v0.3.0
golang.org/x/tools v0.13.0
golang.org/x/tools v0.14.0
gotest.tools/v3 v3.5.1
)
@ -46,7 +47,6 @@ require (
github.com/aws/aws-sdk-go v1.45.0 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/gofrs/flock v0.8.1 // indirect
github.com/golang-jwt/jwt/v5 v5.0.0 // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 // indirect
github.com/hashicorp/cronexpr v1.1.2 // indirect
@ -55,14 +55,17 @@ require (
github.com/magiconair/properties v1.8.7 // indirect
github.com/microsoft/kiota-serialization-multipart-go v1.0.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/pelletier/go-toml/v2 v2.0.9 // indirect
github.com/spf13/afero v1.9.5 // indirect
github.com/spf13/jwalterweatherman v1.1.0 // indirect
github.com/subosito/gotenv v1.4.2 // indirect
github.com/pelletier/go-toml/v2 v2.1.0 // indirect
github.com/sagikazarmark/locafero v0.3.0 // indirect
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
github.com/spf13/afero v1.10.0 // indirect
github.com/std-uritemplate/std-uritemplate/go v0.0.42 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect
github.com/valyala/fasthttp v1.48.0 // indirect
go.opentelemetry.io/otel/metric v1.18.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20230807174057-1744710a1577 // indirect
go.opentelemetry.io/otel/metric v1.19.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 // indirect
)
require (
@ -74,7 +77,7 @@ require (
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/chmduquesne/rollinghash v4.0.0+incompatible // indirect
github.com/cjlapao/common-go v0.0.39 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/dustin/go-humanize v1.0.1
github.com/edsrzf/mmap-go v1.1.0 // indirect
github.com/go-logr/logr v1.2.4 // indirect
@ -84,7 +87,7 @@ require (
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.16.7 // indirect
github.com/klauspost/compress v1.17.0 // indirect
github.com/klauspost/cpuid/v2 v2.2.5 // indirect
github.com/klauspost/pgzip v1.2.6 // indirect
github.com/klauspost/reedsolomon v1.11.8 // indirect
@ -103,7 +106,7 @@ require (
github.com/natefinch/atomic v1.0.1 // indirect
github.com/pierrec/lz4 v2.6.1+incompatible // indirect
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/client_golang v1.16.0 // indirect
github.com/prometheus/client_model v0.4.0 // indirect
github.com/prometheus/common v0.44.0 // indirect
@ -115,18 +118,17 @@ require (
github.com/tidwall/gjson v1.15.0 // indirect
github.com/tidwall/match v1.1.1 // indirect
github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c // indirect
github.com/yosida95/uritemplate/v3 v3.0.2 // indirect
github.com/zeebo/blake3 v0.2.3 // indirect
go.opentelemetry.io/otel v1.18.0 // indirect
go.opentelemetry.io/otel/trace v1.18.0 // indirect
go.opentelemetry.io/otel v1.19.0 // indirect
go.opentelemetry.io/otel/trace v1.19.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/crypto v0.13.0 // indirect
golang.org/x/mod v0.12.0 // indirect
golang.org/x/net v0.15.0
golang.org/x/sync v0.3.0 // indirect
golang.org/x/sys v0.12.0 // indirect
golang.org/x/crypto v0.14.0 // indirect
golang.org/x/mod v0.13.0 // indirect
golang.org/x/net v0.16.0
golang.org/x/sync v0.4.0 // indirect
golang.org/x/sys v0.13.0 // indirect
golang.org/x/text v0.13.0 // indirect
google.golang.org/grpc v1.57.0 // indirect
google.golang.org/grpc v1.58.2 // indirect
google.golang.org/protobuf v1.31.0 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect

View File

@ -102,8 +102,9 @@ github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46t
github.com/danieljoos/wincred v1.2.0 h1:ozqKHaLK0W/ii4KVbbvluM91W2H3Sh0BncbUNPS7jLE=
github.com/danieljoos/wincred v1.2.0/go.mod h1:FzQLLMKBFdvu+osBrnFODiv32YGwCfx0SkRa/eYHgec=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
@ -246,8 +247,8 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I=
github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/klauspost/compress v1.17.0 h1:Rnbp4K9EjcDuVuHtd0dgA4qNuv9yKDYKK1ulpJwgrqM=
github.com/klauspost/compress v1.17.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c=
github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
@ -287,8 +288,8 @@ github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zk
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d h1:5PJl274Y63IEHC+7izoQE9x6ikvDFZS2mDVS3drnohI=
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
github.com/microsoft/kiota-abstractions-go v1.2.1 h1:TnLF7rjy1GfhuGK2ra/a3Vuz6piFXTR1OfdNoqesagA=
github.com/microsoft/kiota-abstractions-go v1.2.1/go.mod h1:rEeeaytcnal/If3f1tz6/spFz4V+Hiqvz3rxF+oWQFA=
github.com/microsoft/kiota-abstractions-go v1.2.3 h1:ir+p5o/0ytcLunikHSylhYyCm2Ojvoq3pXWSYomOACc=
github.com/microsoft/kiota-abstractions-go v1.2.3/go.mod h1:yPSuzNSOIVQSFFe1iT+3Lu5zmis22E8Wg+bkyjhd+pY=
github.com/microsoft/kiota-authentication-azure-go v1.0.0 h1:29FNZZ/4nnCOwFcGWlB/sxPvWz487HA2bXH8jR5k2Rk=
github.com/microsoft/kiota-authentication-azure-go v1.0.0/go.mod h1:rnx3PRlkGdXDcA/0lZQTbBwyYGmc+3POt7HpE/e4jGw=
github.com/microsoft/kiota-http-go v1.1.0 h1:L5I93EiNtlP/X6YzeTlhjWt7Q1DxzC9CmWSVtX3b0tE=
@ -301,8 +302,8 @@ github.com/microsoft/kiota-serialization-multipart-go v1.0.0 h1:3O5sb5Zj+moLBiJy
github.com/microsoft/kiota-serialization-multipart-go v1.0.0/go.mod h1:yauLeBTpANk4L03XD985akNysG24SnRJGaveZf+p4so=
github.com/microsoft/kiota-serialization-text-go v1.0.0 h1:XOaRhAXy+g8ZVpcq7x7a0jlETWnWrEum0RhmbYrTFnA=
github.com/microsoft/kiota-serialization-text-go v1.0.0/go.mod h1:sM1/C6ecnQ7IquQOGUrUldaO5wj+9+v7G2W3sQ3fy6M=
github.com/microsoftgraph/msgraph-sdk-go v1.19.0 h1:hx+SvDTm5ENYZFqmMIskF7tOn48zzT2Xv3OVFrxl2dc=
github.com/microsoftgraph/msgraph-sdk-go v1.19.0/go.mod h1:3DArbqPS7riix0VsJhdtYsgPaAFAH9Jer64psW55riI=
github.com/microsoftgraph/msgraph-sdk-go v1.20.0 h1:Hi8URs+Ll07+GojbY9lyuYUMj8rxI4mcYW+GISO7BTA=
github.com/microsoftgraph/msgraph-sdk-go v1.20.0/go.mod h1:UTUjxLPExc1K+YLmFeyEyep6vYd1GOj2bLMSd7/lPWE=
github.com/microsoftgraph/msgraph-sdk-go-core v1.0.0 h1:7NWTfyXvOjoizW7PmxNp3+8wCKPgpODs/D1cUZ3fkAY=
github.com/microsoftgraph/msgraph-sdk-go-core v1.0.0/go.mod h1:tQb4q3YMIj2dWhhXhQSJ4ELpol931ANKzHSYK5kX1qE=
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
@ -327,8 +328,8 @@ github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32 h1:W6apQkHrMkS0Muv8G/TipAy
github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32/go.mod h1:9wM+0iRr9ahx58uYLpLIr5fm8diHn0JbqRycJi6w0Ms=
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pelletier/go-toml/v2 v2.0.9 h1:uH2qQXheeefCCkuBBSLi7jCiSmj3VRh2+Goq2N7Xxu0=
github.com/pelletier/go-toml/v2 v2.0.9/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4=
github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM=
github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU=
@ -338,8 +339,9 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
@ -373,26 +375,32 @@ github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/rudderlabs/analytics-go v3.3.3+incompatible h1:OG0XlKoXfr539e2t1dXtTB+Gr89uFW+OUNQBVhHIIBY=
github.com/rudderlabs/analytics-go v3.3.3+incompatible/go.mod h1:LF8/ty9kUX4PTY3l5c97K3nZZaX5Hwsvt+NBaRL/f30=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sagikazarmark/locafero v0.3.0 h1:zT7VEGWC2DTflmccN/5T1etyKvxSxpHsjb9cJvm4SvQ=
github.com/sagikazarmark/locafero v0.3.0/go.mod h1:w+v7UsPNFwzF1cHuOajOOzoq4U7v/ig1mpRjqV+Bu1U=
github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
github.com/segmentio/backo-go v1.0.1 h1:68RQccglxZeyURy93ASB/2kc9QudzgIDexJ927N++y4=
github.com/segmentio/backo-go v1.0.1/go.mod h1:9/Rh6yILuLysoQnZ2oNooD2g7aBnvM7r/fNVxRNWfBc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
github.com/spatialcurrent/go-lazy v0.0.0-20211115014721-47315cc003d1 h1:lQ3JvmcVO1/AMFbabvUSJ4YtJRpEAX9Qza73p5j03sw=
github.com/spatialcurrent/go-lazy v0.0.0-20211115014721-47315cc003d1/go.mod h1:4aKqcbhASNqjbrG0h9BmkzcWvPJGxbef4B+j0XfFrZo=
github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM=
github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ=
github.com/spf13/afero v1.10.0 h1:EaGW2JJh15aKOejeuJ+wpFSHnbd7GE6Wvp3TsNhb6LY=
github.com/spf13/afero v1.10.0/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ=
github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA=
github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48=
github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I=
github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0=
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.16.0 h1:rGGH0XDZhdUOryiDWjmIvUSWpbNqisK8Wk0Vyefw8hc=
github.com/spf13/viper v1.16.0/go.mod h1:yg78JgCJcbrQOvV9YLXgkLaZqUidkY9K+Dd1FofRzQg=
github.com/spf13/viper v1.17.0 h1:I5txKw7MJasPL/BrfkbA0Jyo/oELqVmux4pR/UxOMfI=
github.com/spf13/viper v1.17.0/go.mod h1:BmMMMLQXSbcHK6KAOiFLz0l5JHrU89OdIRHvsk0+yVI=
github.com/std-uritemplate/std-uritemplate/go v0.0.42 h1:rG+XlE4drkVWs2NLfGS15N+vg+CUcjXElQKvJ0fctlI=
github.com/std-uritemplate/std-uritemplate/go v0.0.42/go.mod h1:Qov4Ay4U83j37XjgxMYevGJFLbnZ2o9cEOhGufBKgKY=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
@ -406,8 +414,8 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8=
github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0=
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
github.com/tg123/go-htpasswd v1.2.1 h1:i4wfsX1KvvkyoMiHZzjS0VzbAPWfxzI8INcZAKtutoU=
github.com/tg123/go-htpasswd v1.2.1/go.mod h1:erHp1B86KXdwQf1X5ZrLb7erXZnWueEQezb2dql4q58=
github.com/tidwall/gjson v1.15.0 h1:5n/pM+v3r5ujuNl4YLZLsQ+UE5jlkLVm7jMzT5Mpolw=
@ -428,8 +436,6 @@ github.com/vbauerster/mpb/v8 v8.1.6 h1:EswHDkAsy4OQ7QBAmU1MUPz4vHzl6KlINjlh7vJox
github.com/vbauerster/mpb/v8 v8.1.6/go.mod h1:O9/Wl8X9dUbR63tZ41MLIAxrtNfwlpwUhGkeYugUPW8=
github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c h1:3lbZUMbMiGUW/LMkfsEABsc5zNT9+b1CvsJx47JzJ8g=
github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c/go.mod h1:UrdRz5enIKZ63MEE3IF9l2/ebyx59GyGgPi+tICQdmM=
github.com/yosida95/uritemplate/v3 v3.0.2 h1:Ed3Oyj9yrmi9087+NczuL5BwkIc4wvTb5zIM+UJPGz4=
github.com/yosida95/uritemplate/v3 v3.0.2/go.mod h1:ILOh0sOhIJR3+L/8afwt/kE++YT040gmv5BQTMR2HP4=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@ -449,12 +455,12 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
go.opentelemetry.io/otel v1.18.0 h1:TgVozPGZ01nHyDZxK5WGPFB9QexeTMXEH7+tIClWfzs=
go.opentelemetry.io/otel v1.18.0/go.mod h1:9lWqYO0Db579XzVuCKFNPDl4s73Voa+zEck3wHaAYQI=
go.opentelemetry.io/otel/metric v1.18.0 h1:JwVzw94UYmbx3ej++CwLUQZxEODDj/pOuTCvzhtRrSQ=
go.opentelemetry.io/otel/metric v1.18.0/go.mod h1:nNSpsVDjWGfb7chbRLUNW+PBNdcSTHD4Uu5pfFMOI0k=
go.opentelemetry.io/otel/trace v1.18.0 h1:NY+czwbHbmndxojTEKiSMHkG2ClNH2PwmcHrdo0JY10=
go.opentelemetry.io/otel/trace v1.18.0/go.mod h1:T2+SGJGuYZY3bjj5rgh/hN7KIrlpWC5nS8Mjvzckz+0=
go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs=
go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY=
go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE=
go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8=
go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg=
go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo=
go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk=
go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
@ -470,8 +476,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.13.0 h1:mvySKfSWJ+UKUii46M40LOvyWfN0s2U+46/jDd0e6Ck=
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc=
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@ -508,8 +514,8 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc=
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY=
golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@ -546,8 +552,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.15.0 h1:ugBLEUaxABaB5AJqW9enI0ACdci2RUd4eP51NTBvuJ8=
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
golang.org/x/net v0.16.0 h1:7eBu7KsSvFDtSXUIDbh3aqlK4DPsZ1rByC8PFfBThos=
golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@ -568,8 +574,8 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ=
golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -619,8 +625,8 @@ golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@ -688,8 +694,8 @@ golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4f
golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ=
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc=
golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@ -756,8 +762,8 @@ google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6D
google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230807174057-1744710a1577 h1:wukfNtZmZUurLN/atp2hiIeTKn7QJWIQdHzqmsOnAOk=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230807174057-1744710a1577/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 h1:N3bU/SQDCDyD6R528GJ/PwW9KjYcJA3dgyH+MovAkIM=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:KSqppvjFjtoCI+KGd4PELB0qLNxdJHRGqRI09mB6pQA=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@ -774,8 +780,8 @@ google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.57.0 h1:kfzNeI/klCGD2YPMUlaGNT3pxvYfga7smW3Vth8Zsiw=
google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo=
google.golang.org/grpc v1.58.2 h1:SXUpjxeVF3FKrTYQI4f4KvbGD5u2xccdYdurwowix5I=
google.golang.org/grpc v1.58.2/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=

View File

@ -1,8 +1,11 @@
package idname
import (
"context"
"fmt"
"strings"
"github.com/alcionai/clues"
"golang.org/x/exp/maps"
)
@ -21,7 +24,18 @@ type Provider interface {
Name() string
}
var _ Provider = &is{}
type GetResourceIDAndNamer interface {
GetResourceIDAndNameFrom(
ctx context.Context,
owner string,
cacher Cacher,
) (Provider, error)
}
var (
_ Provider = &is{}
_ clues.Concealer = &is{}
)
type is struct {
id string
@ -35,6 +49,24 @@ func NewProvider(id, name string) *is {
func (is is) ID() string { return is.id }
func (is is) Name() string { return is.name }
const isStringTmpl = "{id:%s, name:%s}"
func (is is) PlainString() string {
return fmt.Sprintf(isStringTmpl, clues.Hide(is.id), clues.Hide(is.name))
}
func (is is) Conceal() string {
return fmt.Sprintf(isStringTmpl, clues.Hide(is.id), clues.Hide(is.name))
}
func (is is) String() string {
return is.Conceal()
}
func (is is) Format(fs fmt.State, _ rune) {
fmt.Fprint(fs, is.Conceal())
}
type Cacher interface {
IDOf(name string) (string, bool)
NameOf(id string) (string, bool)

View File

@ -0,0 +1,39 @@
package jwt
import (
"time"
"github.com/alcionai/clues"
jwt "github.com/golang-jwt/jwt/v5"
)
// IsJWTExpired checks whether a JWT is past expiry by inspecting the "exp"
// claim embedded in the token. The token is considered expired if the "exp"
// claim is earlier than the current time; a missing "exp" claim is treated as
// non-expired. An error is returned if the supplied token is malformed.
func IsJWTExpired(
rawToken string,
) (bool, error) {
p := jwt.NewParser()
// Note: the call to ParseUnverified is intentional since token verification is
// not our objective; we only care about the claims embedded in the token.
// We assume the token signature is valid and has been verified by the caller.
token, _, err := p.ParseUnverified(rawToken, &jwt.RegisteredClaims{})
if err != nil {
return false, clues.Wrap(err, "invalid jwt")
}
t, err := token.Claims.GetExpirationTime()
if err != nil {
return false, clues.Wrap(err, "getting token expiry time")
}
if t == nil {
return false, nil
}
expired := t.Before(time.Now())
return expired, nil
}
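
For context, a minimal sketch of how a caller might use IsJWTExpired; shouldRefreshToken is a hypothetical helper (not part of this commit) that treats malformed tokens the same as expired ones, so the caller simply fetches a fresh download URL:

// Hypothetical helper, same package: decide whether a pre-authenticated
// download URL's embedded token should be refreshed before use.
func shouldRefreshToken(rawToken string) bool {
	expired, err := IsJWTExpired(rawToken)
	if err != nil {
		// Malformed token: refresh rather than fail the download.
		return true
	}
	return expired
}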

View File

@ -0,0 +1,115 @@
package jwt
import (
"testing"
"time"
jwt "github.com/golang-jwt/jwt/v5"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"github.com/alcionai/corso/src/internal/tester"
)
type JWTUnitSuite struct {
tester.Suite
}
func TestJWTUnitSuite(t *testing.T) {
suite.Run(t, &JWTUnitSuite{Suite: tester.NewUnitSuite(t)})
}
// createJWTToken creates a JWT token with the specified expiration time.
func createJWTToken(
claims jwt.RegisteredClaims,
) (string, error) {
// build and sign a token carrying the supplied claims
token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
return token.SignedString([]byte(""))
}
const (
// Raw test token valid for 100 years.
rawToken = "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9." +
"eyJuYmYiOiIxNjkxODE5NTc5IiwiZXhwIjoiMzk0NTUyOTE3OSIsImVuZHBvaW50dXJsTGVuZ3RoIjoiMTYw" +
"IiwiaXNsb29wYmFjayI6IlRydWUiLCJ2ZXIiOiJoYXNoZWRwcm9vZnRva2VuIiwicm9sZXMiOiJhbGxmaWxl" +
"cy53cml0ZSBhbGxzaXRlcy5mdWxsY29udHJvbCBhbGxwcm9maWxlcy5yZWFkIiwidHQiOiIxIiwiYWxnIjoi" +
"SFMyNTYifQ" +
".signature"
)
func (suite *JWTUnitSuite) TestIsJWTExpired() {
table := []struct {
name string
expect bool
getToken func() (string, error)
expectErr assert.ErrorAssertionFunc
}{
{
name: "alive token",
getToken: func() (string, error) {
return createJWTToken(
jwt.RegisteredClaims{
ExpiresAt: jwt.NewNumericDate(time.Now().Add(time.Hour)),
})
},
expect: false,
expectErr: assert.NoError,
},
{
name: "expired token",
getToken: func() (string, error) {
return createJWTToken(
jwt.RegisteredClaims{
ExpiresAt: jwt.NewNumericDate(time.Now().Add(-time.Hour)),
})
},
expect: true,
expectErr: assert.NoError,
},
// Test with a raw token which is not generated with go-jwt lib.
{
name: "alive raw token",
getToken: func() (string, error) {
return rawToken, nil
},
expect: false,
expectErr: assert.NoError,
},
{
name: "alive token, missing exp claim",
getToken: func() (string, error) {
return createJWTToken(jwt.RegisteredClaims{})
},
expect: false,
expectErr: assert.NoError,
},
{
name: "malformed token",
getToken: func() (string, error) {
return "header.claims.signature", nil
},
expect: false,
expectErr: assert.Error,
},
}
for _, test := range table {
suite.Run(test.name, func() {
t := suite.T()
_, flush := tester.NewContext(t)
defer flush()
token, err := test.getToken()
require.NoError(t, err)
expired, err := IsJWTExpired(token)
test.expectErr(t, err)
assert.Equal(t, test.expect, expired)
})
}
}

View File

@ -27,7 +27,7 @@ func NewPrefixMap(m map[string]map[string]struct{}) *PrefixMap {
func (pm PrefixMap) AssertEqual(t *testing.T, r prefixmatcher.StringSetReader) {
if pm.Empty() {
require.True(t, r.Empty(), "both prefix maps are empty")
require.True(t, r.Empty(), "result prefixMap should be empty but contains keys: %+v", r.Keys())
return
}

View File

@ -0,0 +1,27 @@
package common
import (
"net/url"
"github.com/alcionai/clues"
)
// GetQueryParamFromURL parses a URL and returns the value of the specified
// query parameter. If the parameter occurs multiple times, the first value
// is returned.
func GetQueryParamFromURL(
rawURL, queryParam string,
) (string, error) {
u, err := url.Parse(rawURL)
if err != nil {
return "", clues.Wrap(err, "parsing url")
}
qp := u.Query()
val := qp.Get(queryParam)
if len(val) == 0 {
return "", clues.New("query param not found").With("query_param", queryParam)
}
return val, nil
}
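
A minimal usage sketch follows; the import path matches the test file below, and the URL and the "tempauth" parameter name mirror the unit tests and are illustrative only:

package main

import (
	"fmt"

	"github.com/alcionai/corso/src/internal/common"
)

func main() {
	// Stand-in for a pre-authenticated item download URL.
	downloadURL := "https://example.com/download?tempauth=header.claims.sig&other=val"

	token, err := common.GetQueryParamFromURL(downloadURL, "tempauth")
	if err != nil {
		fmt.Println("no auth token on the URL:", err)
		return
	}

	// The extracted token can then be passed to jwt.IsJWTExpired (added in this
	// commit) to decide whether the URL needs refreshing.
	fmt.Println("token:", token)
}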

View File

@ -0,0 +1,72 @@
package common_test
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/suite"
"github.com/alcionai/corso/src/internal/common"
"github.com/alcionai/corso/src/internal/tester"
)
type URLUnitSuite struct {
tester.Suite
}
func TestURLUnitSuite(t *testing.T) {
suite.Run(t, &URLUnitSuite{Suite: tester.NewUnitSuite(t)})
}
func (suite *URLUnitSuite) TestGetQueryParamFromURL() {
qp := "tempauth"
table := []struct {
name string
rawURL string
queryParam string
expectedResult string
expect assert.ErrorAssertionFunc
}{
{
name: "valid",
rawURL: "http://localhost:8080?" + qp + "=h.c.s&other=val",
queryParam: qp,
expectedResult: "h.c.s",
expect: assert.NoError,
},
{
name: "query param not found",
rawURL: "http://localhost:8080?other=val",
queryParam: qp,
expect: assert.Error,
},
{
name: "empty query param",
rawURL: "http://localhost:8080?" + qp + "=h.c.s&other=val",
queryParam: "",
expect: assert.Error,
},
// In case of multiple occurrences, the first occurrence of param is returned.
{
name: "multiple occurrences",
rawURL: "http://localhost:8080?" + qp + "=h.c.s&other=val&" + qp + "=h1.c1.s1",
queryParam: qp,
expectedResult: "h.c.s",
expect: assert.NoError,
},
}
for _, test := range table {
suite.Run(test.name, func() {
t := suite.T()
_, flush := tester.NewContext(t)
defer flush()
token, err := common.GetQueryParamFromURL(test.rawURL, test.queryParam)
test.expect(t, err)
assert.Equal(t, test.expectedResult, token)
})
}
}

View File

@ -16,23 +16,23 @@ import (
)
var (
_ Item = &unindexedPrefetchedItem{}
_ ItemModTime = &unindexedPrefetchedItem{}
_ Item = &prefetchedItem{}
_ ItemInfo = &prefetchedItem{}
_ ItemModTime = &prefetchedItem{}
_ Item = &unindexedLazyItem{}
_ ItemModTime = &unindexedLazyItem{}
_ Item = &prefetchedItemWithInfo{}
_ ItemInfo = &prefetchedItemWithInfo{}
_ ItemModTime = &prefetchedItemWithInfo{}
_ Item = &lazyItem{}
_ ItemInfo = &lazyItem{}
_ ItemModTime = &lazyItem{}
_ Item = &lazyItemWithInfo{}
_ ItemInfo = &lazyItemWithInfo{}
_ ItemModTime = &lazyItemWithInfo{}
)
func NewDeletedItem(itemID string) Item {
return &unindexedPrefetchedItem{
return &prefetchedItem{
id: itemID,
deleted: true,
// TODO(ashmrtn): This really doesn't need to be set since deleted items are
@ -42,11 +42,11 @@ func NewDeletedItem(itemID string) Item {
}
}
func NewUnindexedPrefetchedItem(
func NewPrefetchedItem(
reader io.ReadCloser,
itemID string,
modTime time.Time,
) (*unindexedPrefetchedItem, error) {
) (*prefetchedItem, error) {
r, err := readers.NewVersionedBackupReader(
readers.SerializationFormat{Version: readers.DefaultSerializationVersion},
reader)
@ -54,19 +54,18 @@ func NewUnindexedPrefetchedItem(
return nil, clues.Stack(err)
}
return &unindexedPrefetchedItem{
return &prefetchedItem{
id: itemID,
reader: r,
modTime: modTime,
}, nil
}
// unindexedPrefetchedItem represents a single item retrieved from the remote
// service.
// prefetchedItem represents a single item retrieved from the remote service.
//
// This item doesn't implement ItemInfo so it's safe to use for items like
// metadata that shouldn't appear in backup details.
type unindexedPrefetchedItem struct {
type prefetchedItem struct {
id string
reader io.ReadCloser
// modTime is the modified time of the item. It should match the modTime in
@ -79,48 +78,49 @@ type unindexedPrefetchedItem struct {
deleted bool
}
func (i unindexedPrefetchedItem) ID() string {
func (i prefetchedItem) ID() string {
return i.id
}
func (i *unindexedPrefetchedItem) ToReader() io.ReadCloser {
func (i *prefetchedItem) ToReader() io.ReadCloser {
return i.reader
}
func (i unindexedPrefetchedItem) Deleted() bool {
func (i prefetchedItem) Deleted() bool {
return i.deleted
}
func (i unindexedPrefetchedItem) ModTime() time.Time {
func (i prefetchedItem) ModTime() time.Time {
return i.modTime
}
func NewPrefetchedItem(
func NewPrefetchedItemWithInfo(
reader io.ReadCloser,
itemID string,
info details.ItemInfo,
) (*prefetchedItem, error) {
inner, err := NewUnindexedPrefetchedItem(reader, itemID, info.Modified())
) (*prefetchedItemWithInfo, error) {
inner, err := NewPrefetchedItem(reader, itemID, info.Modified())
if err != nil {
return nil, clues.Stack(err)
}
return &prefetchedItem{
unindexedPrefetchedItem: inner,
info: info,
return &prefetchedItemWithInfo{
prefetchedItem: inner,
info: info,
}, nil
}
// prefetchedItem represents a single item retrieved from the remote service.
// prefetchedItemWithInfo represents a single item retrieved from the remote
// service.
//
// This item implements ItemInfo so it should be used for things that need to
// appear in backup details.
type prefetchedItem struct {
*unindexedPrefetchedItem
type prefetchedItemWithInfo struct {
*prefetchedItem
info details.ItemInfo
}
func (i prefetchedItem) Info() (details.ItemInfo, error) {
func (i prefetchedItemWithInfo) Info() (details.ItemInfo, error) {
return i.info, nil
}
@ -131,14 +131,14 @@ type ItemDataGetter interface {
) (io.ReadCloser, *details.ItemInfo, bool, error)
}
func NewUnindexedLazyItem(
func NewLazyItem(
ctx context.Context,
itemGetter ItemDataGetter,
itemID string,
modTime time.Time,
errs *fault.Bus,
) *unindexedLazyItem {
return &unindexedLazyItem{
) *lazyItem {
return &lazyItem{
ctx: ctx,
id: itemID,
itemGetter: itemGetter,
@ -147,13 +147,13 @@ func NewUnindexedLazyItem(
}
}
// unindexedLazyItem represents a single item retrieved from the remote service.
// It lazily fetches the item's data when the first call to ToReader().Read() is
// lazyItem represents a single item retrieved from the remote service. It
// lazily fetches the item's data when the first call to ToReader().Read() is
// made.
//
// This item doesn't implement ItemInfo so it's safe to use for items like
// metadata that shouldn't appear in backup details.
type unindexedLazyItem struct {
type lazyItem struct {
ctx context.Context
mu sync.Mutex
id string
@ -165,19 +165,19 @@ type unindexedLazyItem struct {
// struct so we can tell if it's been set already or not.
//
// This also helps with garbage collection because now the golang garbage
// collector can collect the lazyItem struct once the storage engine is done
// with it. The ItemInfo struct needs to stick around until the end of the
// backup though as backup details is written last.
// collector can collect the lazyItemWithInfo struct once the storage engine
// is done with it. The ItemInfo struct needs to stick around until the end of
// the backup though as backup details is written last.
info *details.ItemInfo
delInFlight bool
}
func (i *unindexedLazyItem) ID() string {
func (i *lazyItem) ID() string {
return i.id
}
func (i *unindexedLazyItem) ToReader() io.ReadCloser {
func (i *lazyItem) ToReader() io.ReadCloser {
return lazy.NewLazyReadCloser(func() (io.ReadCloser, error) {
// Don't allow getting Item info while trying to initialize said info.
// GetData could be a long running call, but in theory nothing should happen
@ -219,23 +219,23 @@ func (i *unindexedLazyItem) ToReader() io.ReadCloser {
})
}
func (i *unindexedLazyItem) Deleted() bool {
func (i *lazyItem) Deleted() bool {
return false
}
func (i *unindexedLazyItem) ModTime() time.Time {
func (i *lazyItem) ModTime() time.Time {
return i.modTime
}
func NewLazyItem(
func NewLazyItemWithInfo(
ctx context.Context,
itemGetter ItemDataGetter,
itemID string,
modTime time.Time,
errs *fault.Bus,
) *lazyItem {
return &lazyItem{
unindexedLazyItem: NewUnindexedLazyItem(
) *lazyItemWithInfo {
return &lazyItemWithInfo{
lazyItem: NewLazyItem(
ctx,
itemGetter,
itemID,
@ -244,17 +244,17 @@ func NewLazyItem(
}
}
// lazyItem represents a single item retrieved from the remote service. It
// lazily fetches the item's data when the first call to ToReader().Read() is
// lazyItemWithInfo represents a single item retrieved from the remote service.
// It lazily fetches the item's data when the first call to ToReader().Read() is
// made.
//
// This item implements ItemInfo so it should be used for things that need to
// appear in backup details.
type lazyItem struct {
*unindexedLazyItem
type lazyItemWithInfo struct {
*lazyItem
}
func (i *lazyItem) Info() (details.ItemInfo, error) {
func (i *lazyItemWithInfo) Info() (details.ItemInfo, error) {
i.mu.Lock()
defer i.mu.Unlock()

View File

@ -51,7 +51,7 @@ func TestItemUnitSuite(t *testing.T) {
}
func (suite *ItemUnitSuite) TestUnindexedPrefetchedItem() {
prefetch, err := data.NewUnindexedPrefetchedItem(
prefetch, err := data.NewPrefetchedItem(
io.NopCloser(bytes.NewReader([]byte{})),
"foo",
time.Time{})
@ -69,7 +69,7 @@ func (suite *ItemUnitSuite) TestUnindexedLazyItem() {
ctx, flush := tester.NewContext(t)
defer flush()
lazy := data.NewUnindexedLazyItem(
lazy := data.NewLazyItem(
ctx,
nil,
"foo",
@ -148,7 +148,7 @@ func (suite *ItemUnitSuite) TestPrefetchedItem() {
suite.Run(test.name, func() {
t := suite.T()
item, err := data.NewPrefetchedItem(test.reader, id, test.info)
item, err := data.NewPrefetchedItemWithInfo(test.reader, id, test.info)
require.NoError(t, err, clues.ToCore(err))
assert.Equal(t, id, item.ID(), "ID")
@ -291,7 +291,7 @@ func (suite *ItemUnitSuite) TestLazyItem() {
defer test.mid.check(t, true)
item := data.NewLazyItem(
item := data.NewLazyItemWithInfo(
ctx,
test.mid,
id,
@ -354,7 +354,7 @@ func (suite *ItemUnitSuite) TestLazyItem_DeletedInFlight() {
mid := &mockItemDataGetter{delInFlight: true}
defer mid.check(t, true)
item := data.NewLazyItem(ctx, mid, id, now, errs)
item := data.NewLazyItemWithInfo(ctx, mid, id, now, errs)
assert.Equal(t, id, item.ID(), "ID")
assert.False(t, item.Deleted(), "deleted")
@ -400,7 +400,7 @@ func (suite *ItemUnitSuite) TestLazyItem_InfoBeforeReadErrors() {
mid := &mockItemDataGetter{}
defer mid.check(t, false)
item := data.NewLazyItem(ctx, mid, id, now, errs)
item := data.NewLazyItemWithInfo(ctx, mid, id, now, errs)
assert.Equal(t, id, item.ID(), "ID")
assert.False(t, item.Deleted(), "deleted")

View File

@ -1,5 +1,12 @@
package data
import (
"io"
"sync/atomic"
"github.com/alcionai/corso/src/pkg/path"
)
type CollectionStats struct {
Folders,
Objects,
@ -15,3 +22,68 @@ func (cs CollectionStats) IsZero() bool {
func (cs CollectionStats) String() string {
return cs.Details
}
type KindStats struct {
BytesRead int64
ResourceCount int64
}
type ExportStats struct {
// data is kept private so that we can enforce atomic int updates
data map[path.CategoryType]KindStats
}
func (es *ExportStats) UpdateBytes(kind path.CategoryType, bytesRead int64) {
if es.data == nil {
es.data = map[path.CategoryType]KindStats{}
}
ks := es.data[kind]
atomic.AddInt64(&ks.BytesRead, bytesRead)
es.data[kind] = ks
}
func (es *ExportStats) UpdateResourceCount(kind path.CategoryType) {
if es.data == nil {
es.data = map[path.CategoryType]KindStats{}
}
ks := es.data[kind]
atomic.AddInt64(&ks.ResourceCount, 1)
es.data[kind] = ks
}
func (es *ExportStats) GetStats() map[path.CategoryType]KindStats {
return es.data
}
type statsReader struct {
io.ReadCloser
kind path.CategoryType
stats *ExportStats
}
func (sr *statsReader) Read(p []byte) (int, error) {
n, err := sr.ReadCloser.Read(p)
sr.stats.UpdateBytes(sr.kind, int64(n))
return n, err
}
// ReaderWithStats wraps the given reader in a reader that updates the export
// stats for the given category as data is read.
func ReaderWithStats(
reader io.ReadCloser,
kind path.CategoryType,
stats *ExportStats,
) io.ReadCloser {
if reader == nil {
return nil
}
return &statsReader{
ReadCloser: reader,
kind: kind,
stats: stats,
}
}
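
To show how these pieces fit together, here is a minimal sketch of wrapping a reader, counting a resource, and reading the totals back; the import paths appear elsewhere in this diff, and the payload is a placeholder:

package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/alcionai/corso/src/internal/data"
	"github.com/alcionai/corso/src/pkg/path"
)

func main() {
	stats := &data.ExportStats{}

	// Wrap an export item's reader so every Read adds to the byte count
	// for its category.
	body := io.NopCloser(strings.NewReader("placeholder item payload"))
	wrapped := data.ReaderWithStats(body, path.EmailCategory, stats)

	if _, err := io.Copy(io.Discard, wrapped); err != nil {
		fmt.Println("read failed:", err)
		return
	}

	// Count the item itself once its data has been drained.
	stats.UpdateResourceCount(path.EmailCategory)

	for category, ks := range stats.GetStats() {
		fmt.Printf("%v: %d items, %d bytes\n", category, ks.ResourceCount, ks.BytesRead)
	}
}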

View File

@ -28,18 +28,13 @@ const (
tenantIDDeprecated = "m365_tenant_hash_deprecated"
// Event Keys
CorsoStart = "Corso Start"
RepoInit = "Repo Init"
RepoConnect = "Repo Connect"
BackupStart = "Backup Start"
BackupEnd = "Backup End"
CorsoError = "Corso Error"
RestoreStart = "Restore Start"
RestoreEnd = "Restore End"
ExportStart = "Export Start"
ExportEnd = "Export End"
MaintenanceStart = "Maintenance Start"
MaintenanceEnd = "Maintenance End"
RepoInit = "Repo Init"
RepoConnect = "Repo Connect"
BackupEnd = "Backup End"
RestoreEnd = "Restore End"
CorsoError = "Corso Error"
ExportEnd = "Export End"
MaintenanceEnd = "Maintenance End"
// Event Data Keys
BackupCreateTime = "backup_creation_time"

View File

@ -0,0 +1,165 @@
package kopia
import (
"context"
"fmt"
"testing"
"github.com/alcionai/clues"
"github.com/kopia/kopia/repo/manifest"
"github.com/kopia/kopia/snapshot"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/alcionai/corso/src/internal/data"
exchMock "github.com/alcionai/corso/src/internal/m365/service/exchange/mock"
"github.com/alcionai/corso/src/internal/tester"
"github.com/alcionai/corso/src/pkg/backup/identity"
"github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/path"
)
func BenchmarkHierarchyMerge(b *testing.B) {
ctx, flush := tester.NewContext(b)
defer flush()
c, err := openKopiaRepo(b, ctx)
require.NoError(b, err, clues.ToCore(err))
w := &Wrapper{c}
defer func() {
err := w.Close(ctx)
assert.NoError(b, err, clues.ToCore(err))
}()
var (
cols []data.BackupCollection
collectionLimit = 1000
collectionItemsLimit = 3
itemData = []byte("abcdefghijklmnopqrstuvwxyz")
)
baseStorePath, err := path.Build(
"a-tenant",
"a-user",
path.ExchangeService,
path.EmailCategory,
false,
"Inbox")
require.NoError(b, err, clues.ToCore(err))
for i := 0; i < collectionLimit; i++ {
folderName := fmt.Sprintf("folder%d", i)
storePath, err := baseStorePath.Append(false, folderName)
require.NoError(b, err, clues.ToCore(err))
col := exchMock.NewCollection(
storePath,
storePath,
collectionItemsLimit)
for j := 0; j < collectionItemsLimit; j++ {
itemName := fmt.Sprintf("item%d", j)
col.Names[j] = itemName
col.Data[j] = itemData
}
cols = append(cols, col)
}
reasons := []identity.Reasoner{
NewReason(
testTenant,
baseStorePath.ProtectedResource(),
baseStorePath.Service(),
baseStorePath.Category()),
}
type testCase struct {
name string
baseBackups func(base ManifestEntry) BackupBases
collections []data.BackupCollection
}
// Initial backup. All files should be considered new by kopia.
baseBackupCase := testCase{
name: "Setup",
baseBackups: func(ManifestEntry) BackupBases {
return NewMockBackupBases()
},
collections: cols,
}
runAndTestBackup := func(
t tester.TestT,
ctx context.Context,
test testCase,
base ManifestEntry,
) ManifestEntry {
bbs := test.baseBackups(base)
stats, _, _, err := w.ConsumeBackupCollections(
ctx,
reasons,
bbs,
test.collections,
nil,
nil,
true,
fault.New(true))
require.NoError(t, err, clues.ToCore(err))
assert.Equal(t, 0, stats.IgnoredErrorCount)
assert.Equal(t, 0, stats.ErrorCount)
assert.False(t, stats.Incomplete)
snap, err := snapshot.LoadSnapshot(
ctx,
w.c,
manifest.ID(stats.SnapshotID))
require.NoError(t, err, clues.ToCore(err))
return ManifestEntry{
Manifest: snap,
Reasons: reasons,
}
}
b.Logf("setting up base backup\n")
base := runAndTestBackup(b, ctx, baseBackupCase, ManifestEntry{})
table := []testCase{
{
name: "Merge All",
baseBackups: func(base ManifestEntry) BackupBases {
return NewMockBackupBases().WithMergeBases(base)
},
collections: func() []data.BackupCollection {
p, err := baseStorePath.Dir()
require.NoError(b, err, clues.ToCore(err))
col := exchMock.NewCollection(p, p, 0)
col.ColState = data.NotMovedState
col.PrevPath = p
return []data.BackupCollection{col}
}(),
},
}
b.ResetTimer()
for _, test := range table {
b.Run(fmt.Sprintf("num_dirs_%d", collectionLimit), func(b *testing.B) {
ctx, flush := tester.NewContext(b)
defer flush()
for i := 0; i < b.N; i++ {
runAndTestBackup(b, ctx, test, base)
}
})
}
}

View File

@ -380,18 +380,18 @@ func (suite *SPCollectionIntgSuite) TestCreateSharePointCollection_Libraries() {
siteIDs = []string{siteID}
)
id, name, err := ctrl.PopulateProtectedResourceIDAndName(ctx, siteID, nil)
site, err := ctrl.PopulateProtectedResourceIDAndName(ctx, siteID, nil)
require.NoError(t, err, clues.ToCore(err))
sel := selectors.NewSharePointBackup(siteIDs)
sel.Include(sel.LibraryFolders([]string{"foo"}, selectors.PrefixMatch()))
sel.SetDiscreteOwnerIDName(id, name)
sel.SetDiscreteOwnerIDName(site.ID(), site.Name())
bpc := inject.BackupProducerConfig{
LastBackupVersion: version.NoBackup,
Options: control.DefaultOptions(),
ProtectedResource: inMock.NewProvider(id, name),
ProtectedResource: site,
Selector: sel.Selector,
}
@ -430,18 +430,18 @@ func (suite *SPCollectionIntgSuite) TestCreateSharePointCollection_Lists() {
siteIDs = []string{siteID}
)
id, name, err := ctrl.PopulateProtectedResourceIDAndName(ctx, siteID, nil)
site, err := ctrl.PopulateProtectedResourceIDAndName(ctx, siteID, nil)
require.NoError(t, err, clues.ToCore(err))
sel := selectors.NewSharePointBackup(siteIDs)
sel.Include(sel.Lists(selectors.Any()))
sel.SetDiscreteOwnerIDName(id, name)
sel.SetDiscreteOwnerIDName(site.ID(), site.Name())
bpc := inject.BackupProducerConfig{
LastBackupVersion: version.NoBackup,
Options: control.DefaultOptions(),
ProtectedResource: inMock.NewProvider(id, name),
ProtectedResource: site,
Selector: sel.Selector,
}
@ -516,18 +516,18 @@ func (suite *GroupsCollectionIntgSuite) TestCreateGroupsCollection_SharePoint()
groupIDs = []string{groupID}
)
id, name, err := ctrl.PopulateProtectedResourceIDAndName(ctx, groupID, nil)
group, err := ctrl.PopulateProtectedResourceIDAndName(ctx, groupID, nil)
require.NoError(t, err, clues.ToCore(err))
sel := selectors.NewGroupsBackup(groupIDs)
sel.Include(sel.LibraryFolders([]string{"test"}, selectors.PrefixMatch()))
sel.SetDiscreteOwnerIDName(id, name)
sel.SetDiscreteOwnerIDName(group.ID(), group.Name())
bpc := inject.BackupProducerConfig{
LastBackupVersion: version.NoBackup,
Options: control.DefaultOptions(),
ProtectedResource: inMock.NewProvider(id, name),
ProtectedResource: group,
Selector: sel.Selector,
}
@ -590,13 +590,13 @@ func (suite *GroupsCollectionIntgSuite) TestCreateGroupsCollection_SharePoint_In
groupIDs = []string{groupID}
)
id, name, err := ctrl.PopulateProtectedResourceIDAndName(ctx, groupID, nil)
group, err := ctrl.PopulateProtectedResourceIDAndName(ctx, groupID, nil)
require.NoError(t, err, clues.ToCore(err))
sel := selectors.NewGroupsBackup(groupIDs)
sel.Include(sel.LibraryFolders([]string{"test"}, selectors.PrefixMatch()))
sel.SetDiscreteOwnerIDName(id, name)
sel.SetDiscreteOwnerIDName(group.ID(), group.Name())
site, err := suite.connector.AC.Groups().GetRootSite(ctx, groupID)
require.NoError(t, err, clues.ToCore(err))
@ -626,7 +626,7 @@ func (suite *GroupsCollectionIntgSuite) TestCreateGroupsCollection_SharePoint_In
bpc := inject.BackupProducerConfig{
LastBackupVersion: version.NoBackup,
Options: control.DefaultOptions(),
ProtectedResource: inMock.NewProvider(id, name),
ProtectedResource: group,
Selector: sel.Selector,
MetadataCollections: mmc,
}

View File

@ -13,6 +13,7 @@ import (
"github.com/microsoftgraph/msgraph-sdk-go/models"
"github.com/spatialcurrent/go-lazy/pkg/lazy"
"github.com/alcionai/corso/src/internal/common/idname"
"github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/internal/data"
"github.com/alcionai/corso/src/internal/m365/collection/drive/metadata"
@ -39,6 +40,9 @@ var _ data.BackupCollection = &Collection{}
type Collection struct {
handler BackupHandler
// the protected resource represented in this collection.
protectedResource idname.Provider
// data is used to share data streams with the collection consumer
data chan data.Item
// folderPath indicates what level in the hierarchy this collection
@ -98,6 +102,7 @@ func pathToLocation(p path.Path) (*path.Builder, error) {
// NewCollection creates a Collection
func NewCollection(
handler BackupHandler,
resource idname.Provider,
currPath path.Path,
prevPath path.Path,
driveID string,
@ -123,6 +128,7 @@ func NewCollection(
c := newColl(
handler,
resource,
currPath,
prevPath,
driveID,
@ -140,6 +146,7 @@ func NewCollection(
func newColl(
handler BackupHandler,
resource idname.Provider,
currPath path.Path,
prevPath path.Path,
driveID string,
@ -150,18 +157,19 @@ func newColl(
urlCache getItemPropertyer,
) *Collection {
c := &Collection{
handler: handler,
folderPath: currPath,
prevPath: prevPath,
driveItems: map[string]models.DriveItemable{},
driveID: driveID,
data: make(chan data.Item, graph.Parallelism(path.OneDriveMetadataService).CollectionBufferSize()),
statusUpdater: statusUpdater,
ctrl: ctrlOpts,
state: data.StateOf(prevPath, currPath),
scope: colScope,
doNotMergeItems: doNotMergeItems,
urlCache: urlCache,
handler: handler,
protectedResource: resource,
folderPath: currPath,
prevPath: prevPath,
driveItems: map[string]models.DriveItemable{},
driveID: driveID,
data: make(chan data.Item, graph.Parallelism(path.OneDriveMetadataService).CollectionBufferSize()),
statusUpdater: statusUpdater,
ctrl: ctrlOpts,
state: data.StateOf(prevPath, currPath),
scope: colScope,
doNotMergeItems: doNotMergeItems,
urlCache: urlCache,
}
return c
@ -265,9 +273,9 @@ func (oc *Collection) getDriveItemContent(
// Skip big OneNote files as they can't be downloaded
if clues.HasLabel(err, graph.LabelStatus(http.StatusServiceUnavailable)) &&
// oc.scope == CollectionScopePackage && *item.GetSize() >= MaxOneNoteFileSize {
// TODO: We've removed the file size check because it looks like we've seen persistent
// 503's with smaller OneNote files also.
// oc.scope == CollectionScopePackage && *item.GetSize() >= MaxOneNoteFileSize {
oc.scope == CollectionScopePackage {
// FIXME: It is possible that in case of a OneNote file we
// will end up just backing up the `onetoc2` file without
@ -275,10 +283,18 @@ func (oc *Collection) getDriveItemContent(
// "item". This will have to be handled during the
// restore, or we have to handle it separately by somehow
// deleting the entire collection.
logger.CtxErr(ctx, err).With("skipped_reason", fault.SkipBigOneNote).Info("max OneNote file size exceeded")
errs.AddSkip(ctx, fault.FileSkip(fault.SkipBigOneNote, driveID, itemID, itemName, graph.ItemInfo(item)))
logger.
CtxErr(ctx, err).
With("skipped_reason", fault.SkipOneNote).
Info("inaccessible one note file")
errs.AddSkip(ctx, fault.FileSkip(
fault.SkipOneNote,
driveID,
itemID,
itemName,
graph.ItemInfo(item)))
return nil, clues.Wrap(err, "max oneNote item").Label(graph.LabelsSkippable)
return nil, clues.Wrap(err, "inaccesible oneNote item").Label(graph.LabelsSkippable)
}
errs.AddRecoverable(
@ -551,9 +567,22 @@ func (oc *Collection) streamDriveItem(
return
}
itemInfo = oc.handler.AugmentItemInfo(itemInfo, item, itemSize, parentPath)
itemInfo = oc.handler.AugmentItemInfo(
itemInfo,
oc.protectedResource,
item,
itemSize,
parentPath)
ctx = clues.Add(ctx, "item_info", itemInfo)
// Drive content download requests are also rate limited by the Graph API.
// Ensure that this request goes through the drive limiter and not the
// default limiter.
ctx = graph.BindRateLimiterConfig(
ctx,
graph.LimiterCfg{
Service: path.OneDriveService,
})
if isFile {
dataSuffix := metadata.DataFileSuffix
@ -562,7 +591,7 @@ func (oc *Collection) streamDriveItem(
// This ensures that downloads won't be attempted unless that consumer
// attempts to read bytes. Assumption is that kopia will check things
// like file modtimes before attempting to read.
oc.data <- data.NewLazyItem(
oc.data <- data.NewLazyItemWithInfo(
ctx,
&lazyItemGetter{
info: &itemInfo,
@ -587,7 +616,7 @@ func (oc *Collection) streamDriveItem(
return progReader, nil
})
storeItem, err := data.NewUnindexedPrefetchedItem(
storeItem, err := data.NewPrefetchedItem(
metaReader,
metaFileName+metaSuffix,
// Metadata file should always use the latest time as

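The lazy-item comment above captures the core idea: the expensive download callback only runs once a consumer actually reads bytes. An illustrative, hand-rolled sketch of that pattern — not the go-lazy package or data.NewLazyItemWithInfo, just the shape of the behavior:

package main

import (
	"fmt"
	"io"
	"strings"
)

// lazyReadCloser defers its fetch callback until the first Read call.
type lazyReadCloser struct {
	fetch func() (io.ReadCloser, error) // deferred download
	rc    io.ReadCloser
}

func (l *lazyReadCloser) Read(p []byte) (int, error) {
	if l.rc == nil {
		rc, err := l.fetch()
		if err != nil {
			return 0, err
		}
		l.rc = rc
	}
	return l.rc.Read(p)
}

func (l *lazyReadCloser) Close() error {
	if l.rc == nil {
		return nil // never read, so nothing was downloaded
	}
	return l.rc.Close()
}

func main() {
	lrc := &lazyReadCloser{
		fetch: func() (io.ReadCloser, error) {
			fmt.Println("downloading item content")
			return io.NopCloser(strings.NewReader("file body")), nil
		},
	}
	defer lrc.Close()

	// No download happens until something reads the bytes.
	b, _ := io.ReadAll(lrc)
	fmt.Println(string(b))
}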
View File

@ -207,6 +207,7 @@ func (suite *CollectionUnitSuite) TestCollection() {
coll, err := NewCollection(
mbh,
mbh.ProtectedResource,
folderPath,
nil,
"drive-id",
@ -328,6 +329,7 @@ func (suite *CollectionUnitSuite) TestCollectionReadError() {
coll, err := NewCollection(
mbh,
mbh.ProtectedResource,
folderPath,
nil,
"fakeDriveID",
@ -405,6 +407,7 @@ func (suite *CollectionUnitSuite) TestCollectionReadUnauthorizedErrorRetry() {
coll, err := NewCollection(
mbh,
mbh.ProtectedResource,
folderPath,
nil,
"fakeDriveID",
@ -460,6 +463,7 @@ func (suite *CollectionUnitSuite) TestCollectionPermissionBackupLatestModTime()
coll, err := NewCollection(
mbh,
mbh.ProtectedResource,
folderPath,
nil,
"drive-id",
@ -971,6 +975,7 @@ func (suite *CollectionUnitSuite) TestItemExtensions() {
coll, err := NewCollection(
mbh,
mbh.ProtectedResource,
folderPath,
nil,
driveID,

View File

@ -11,6 +11,7 @@ import (
"github.com/microsoftgraph/msgraph-sdk-go/models"
"golang.org/x/exp/maps"
"github.com/alcionai/corso/src/internal/common/idname"
"github.com/alcionai/corso/src/internal/common/prefixmatcher"
"github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/internal/data"
@ -48,8 +49,8 @@ const restrictedDirectory = "Site Pages"
type Collections struct {
handler BackupHandler
tenantID string
resourceOwner string
tenantID string
protectedResource idname.Provider
statusUpdater support.StatusUpdater
@ -69,17 +70,17 @@ type Collections struct {
func NewCollections(
bh BackupHandler,
tenantID string,
resourceOwner string,
protectedResource idname.Provider,
statusUpdater support.StatusUpdater,
ctrlOpts control.Options,
) *Collections {
return &Collections{
handler: bh,
tenantID: tenantID,
resourceOwner: resourceOwner,
CollectionMap: map[string]map[string]*Collection{},
statusUpdater: statusUpdater,
ctrl: ctrlOpts,
handler: bh,
tenantID: tenantID,
protectedResource: protectedResource,
CollectionMap: map[string]map[string]*Collection{},
statusUpdater: statusUpdater,
ctrl: ctrlOpts,
}
}
@ -227,16 +228,16 @@ func (c *Collections) Get(
ssmb *prefixmatcher.StringSetMatchBuilder,
errs *fault.Bus,
) ([]data.BackupCollection, bool, error) {
prevDeltas, oldPathsByDriveID, canUsePreviousBackup, err := deserializeMetadata(ctx, prevMetadata)
prevDriveIDToDelta, oldPrevPathsByDriveID, canUsePrevBackup, err := deserializeMetadata(ctx, prevMetadata)
if err != nil {
return nil, false, err
}
ctx = clues.Add(ctx, "can_use_previous_backup", canUsePreviousBackup)
ctx = clues.Add(ctx, "can_use_previous_backup", canUsePrevBackup)
driveTombstones := map[string]struct{}{}
for driveID := range oldPathsByDriveID {
for driveID := range oldPrevPathsByDriveID {
driveTombstones[driveID] = struct{}{}
}
@ -246,7 +247,7 @@ func (c *Collections) Get(
defer close(progressBar)
// Enumerate drives for the specified resourceOwner
pager := c.handler.NewDrivePager(c.resourceOwner, nil)
pager := c.handler.NewDrivePager(c.protectedResource.ID(), nil)
drives, err := api.GetAllDrives(ctx, pager)
if err != nil {
@ -254,76 +255,89 @@ func (c *Collections) Get(
}
var (
// Drive ID -> delta URL for drive
deltaURLs = map[string]string{}
// Drive ID -> folder ID -> folder path
folderPaths = map[string]map[string]string{}
numPrevItems = 0
driveIDToDeltaLink = map[string]string{}
driveIDToPrevPaths = map[string]map[string]string{}
numPrevItems = 0
)
for _, d := range drives {
var (
driveID = ptr.Val(d.GetId())
driveName = ptr.Val(d.GetName())
prevDelta = prevDeltas[driveID]
oldPaths = oldPathsByDriveID[driveID]
numOldDelta = 0
ictx = clues.Add(ctx, "drive_id", driveID, "drive_name", driveName)
driveID = ptr.Val(d.GetId())
driveName = ptr.Val(d.GetName())
ictx = clues.Add(
ctx,
"drive_id", driveID,
"drive_name", clues.Hide(driveName))
excludedItemIDs = map[string]struct{}{}
oldPrevPaths = oldPrevPathsByDriveID[driveID]
prevDeltaLink = prevDriveIDToDelta[driveID]
// itemCollection is used to identify which collection a
// file belongs to. This is useful to delete a file from the
// collection it was previously in, in case it was moved to a
// different collection within the same delta query
// item ID -> parent collection ID
itemCollection = map[string]string{}
)
delete(driveTombstones, driveID)
if _, ok := driveIDToPrevPaths[driveID]; !ok {
driveIDToPrevPaths[driveID] = map[string]string{}
}
if _, ok := c.CollectionMap[driveID]; !ok {
c.CollectionMap[driveID] = map[string]*Collection{}
}
if len(prevDelta) > 0 {
numOldDelta++
}
logger.Ctx(ictx).Infow(
"previous metadata for drive",
"num_paths_entries", len(oldPaths),
"num_deltas_entries", numOldDelta)
"num_paths_entries", len(oldPrevPaths))
delta, paths, excluded, err := collectItems(
items, du, err := c.handler.EnumerateDriveItemsDelta(
ictx,
c.handler.NewItemPager(driveID, "", api.DriveItemSelectDefault()),
driveID,
driveName,
c.UpdateCollections,
oldPaths,
prevDelta,
errs)
prevDeltaLink,
api.DefaultDriveItemProps())
if err != nil {
return nil, false, err
}
// Used for logging below.
numDeltas := 0
// It's alright to have an empty folders map (i.e. no folders found) but not
// an empty delta token. This is because when deserializing the metadata we
// remove entries for which there is no corresponding delta token/folder. If
// we leave empty delta tokens then we may end up setting the State field
// for collections when not actually getting delta results.
if len(delta.URL) > 0 {
deltaURLs[driveID] = delta.URL
numDeltas++
if len(du.URL) > 0 {
driveIDToDeltaLink[driveID] = du.URL
}
newPrevPaths, err := c.UpdateCollections(
ctx,
driveID,
driveName,
items,
oldPrevPaths,
itemCollection,
excludedItemIDs,
du.Reset,
errs)
if err != nil {
return nil, false, clues.Stack(err)
}
// Avoid the edge case where there are no paths but we do have a valid delta
// token. We can accomplish this by adding an empty paths map for this
// drive. If we don't have this then the next backup won't use the delta
// token because it thinks the folder paths weren't persisted.
folderPaths[driveID] = map[string]string{}
maps.Copy(folderPaths[driveID], paths)
driveIDToPrevPaths[driveID] = map[string]string{}
maps.Copy(driveIDToPrevPaths[driveID], newPrevPaths)
logger.Ctx(ictx).Infow(
"persisted metadata for drive",
"num_paths_entries", len(paths),
"num_deltas_entries", numDeltas,
"delta_reset", delta.Reset)
"num_new_paths_entries", len(newPrevPaths),
"delta_reset", du.Reset)
numDriveItems := c.NumItems - numPrevItems
numPrevItems = c.NumItems
@ -335,7 +349,7 @@ func (c *Collections) Get(
err = c.addURLCacheToDriveCollections(
ictx,
driveID,
prevDelta,
prevDeltaLink,
errs)
if err != nil {
return nil, false, err
@ -344,8 +358,8 @@ func (c *Collections) Get(
// For both cases we don't need to do set difference on folder map if the
// delta token was valid because we should see all the changes.
if !delta.Reset {
if len(excluded) == 0 {
if !du.Reset {
if len(excludedItemIDs) == 0 {
continue
}
@ -354,7 +368,7 @@ func (c *Collections) Get(
return nil, false, clues.Wrap(err, "making exclude prefix").WithClues(ictx)
}
ssmb.Add(p.String(), excluded)
ssmb.Add(p.String(), excludedItemIDs)
continue
}
@ -369,13 +383,11 @@ func (c *Collections) Get(
foundFolders[id] = struct{}{}
}
for fldID, p := range oldPaths {
for fldID, p := range oldPrevPaths {
if _, ok := foundFolders[fldID]; ok {
continue
}
delete(paths, fldID)
prevPath, err := path.FromDataLayerPath(p, false)
if err != nil {
err = clues.Wrap(err, "invalid previous path").WithClues(ictx).With("deleted_path", p)
@ -384,6 +396,7 @@ func (c *Collections) Get(
col, err := NewCollection(
c.handler,
c.protectedResource,
nil, // delete the folder
prevPath,
driveID,
@ -420,6 +433,7 @@ func (c *Collections) Get(
coll, err := NewCollection(
c.handler,
c.protectedResource,
nil, // delete the drive
prevDrivePath,
driveID,
@ -443,14 +457,14 @@ func (c *Collections) Get(
// empty/missing and default to a full backup.
logger.CtxErr(ctx, err).Info("making metadata collection path prefixes")
return collections, canUsePreviousBackup, nil
return collections, canUsePrevBackup, nil
}
md, err := graph.MakeMetadataCollection(
pathPrefix,
[]graph.MetadataCollectionEntry{
graph.NewMetadataEntry(bupMD.PreviousPathFileName, folderPaths),
graph.NewMetadataEntry(bupMD.DeltaURLsFileName, deltaURLs),
graph.NewMetadataEntry(bupMD.PreviousPathFileName, driveIDToPrevPaths),
graph.NewMetadataEntry(bupMD.DeltaURLsFileName, driveIDToDeltaLink),
},
c.statusUpdater)
@ -463,7 +477,7 @@ func (c *Collections) Get(
collections = append(collections, md)
}
return collections, canUsePreviousBackup, nil
return collections, canUsePrevBackup, nil
}
// addURLCacheToDriveCollections adds a URL cache to all collections belonging to
@ -477,7 +491,7 @@ func (c *Collections) addURLCacheToDriveCollections(
driveID,
prevDelta,
urlCacheRefreshInterval,
c.handler.NewItemPager(driveID, "", api.DriveItemSelectURLCache()),
c.handler,
errs)
if err != nil {
return err
@ -533,22 +547,21 @@ func updateCollectionPaths(
func (c *Collections) handleDelete(
itemID, driveID string,
oldPaths, newPaths map[string]string,
oldPrevPaths, currPrevPaths, newPrevPaths map[string]string,
isFolder bool,
excluded map[string]struct{},
itemCollection map[string]map[string]string,
invalidPrevDelta bool,
) error {
if !isFolder {
// Try to remove the item from the Collection if an entry exists for this
// item. This handles cases where an item was created and deleted during the
// same delta query.
if parentID, ok := itemCollection[driveID][itemID]; ok {
if parentID, ok := currPrevPaths[itemID]; ok {
if col := c.CollectionMap[driveID][parentID]; col != nil {
col.Remove(itemID)
}
delete(itemCollection[driveID], itemID)
delete(currPrevPaths, itemID)
}
// Don't need to add to exclude list if the delta is invalid since the
@ -569,7 +582,7 @@ func (c *Collections) handleDelete(
var prevPath path.Path
prevPathStr, ok := oldPaths[itemID]
prevPathStr, ok := oldPrevPaths[itemID]
if ok {
var err error
@ -586,7 +599,7 @@ func (c *Collections) handleDelete(
// Nested folders also return deleted delta results so we don't have to
// worry about doing a prefix search in the map to remove the subtree of
// the deleted folder/package.
delete(newPaths, itemID)
delete(newPrevPaths, itemID)
if prevPath == nil || invalidPrevDelta {
// It is possible that an item was created and deleted between two delta
@ -605,6 +618,7 @@ func (c *Collections) handleDelete(
col, err := NewCollection(
c.handler,
c.protectedResource,
nil, // deletes the collection
prevPath,
driveID,
@ -676,21 +690,29 @@ func (c *Collections) getCollectionPath(
// UpdateCollections initializes and adds the provided drive items to Collections
// A new collection is created for every drive folder (or package).
// oldPaths is the unchanged data that was loaded from the metadata file.
// newPaths starts as a copy of oldPaths and is updated as changes are found in
// the returned results.
// oldPrevPaths is the unchanged data that was loaded from the metadata file.
// This map is not modified during the call.
// currPrevPaths starts as a copy of oldPrevPaths and is updated as changes are
// found in the returned results. Items are added to this map throughout the call.
// newPrevPaths, ie. the full set of previous paths produced by this call, is returned as a map.
func (c *Collections) UpdateCollections(
ctx context.Context,
driveID, driveName string,
items []models.DriveItemable,
oldPaths map[string]string,
newPaths map[string]string,
oldPrevPaths map[string]string,
currPrevPaths map[string]string,
excluded map[string]struct{},
itemCollection map[string]map[string]string,
invalidPrevDelta bool,
errs *fault.Bus,
) error {
el := errs.Local()
) (map[string]string, error) {
var (
el = errs.Local()
newPrevPaths = map[string]string{}
)
if !invalidPrevDelta {
maps.Copy(newPrevPaths, oldPrevPaths)
}
for _, item := range items {
if el.Failure() != nil {
@ -700,8 +722,12 @@ func (c *Collections) UpdateCollections(
var (
itemID = ptr.Val(item.GetId())
itemName = ptr.Val(item.GetName())
ictx = clues.Add(ctx, "item_id", itemID, "item_name", clues.Hide(itemName))
isFolder = item.GetFolder() != nil || item.GetPackageEscaped() != nil
ictx = clues.Add(
ctx,
"item_id", itemID,
"item_name", clues.Hide(itemName),
"item_is_folder", isFolder)
)
if item.GetMalware() != nil {
@ -723,13 +749,13 @@ func (c *Collections) UpdateCollections(
if err := c.handleDelete(
itemID,
driveID,
oldPaths,
newPaths,
oldPrevPaths,
currPrevPaths,
newPrevPaths,
isFolder,
excluded,
itemCollection,
invalidPrevDelta); err != nil {
return clues.Stack(err).WithClues(ictx)
return nil, clues.Stack(err).WithClues(ictx)
}
continue
@ -755,13 +781,13 @@ func (c *Collections) UpdateCollections(
// Deletions are handled above so this is just moves/renames.
var prevPath path.Path
prevPathStr, ok := oldPaths[itemID]
prevPathStr, ok := oldPrevPaths[itemID]
if ok {
prevPath, err = path.FromDataLayerPath(prevPathStr, false)
if err != nil {
el.AddRecoverable(ctx, clues.Wrap(err, "invalid previous path").
WithClues(ictx).
With("path_string", prevPathStr))
With("prev_path_string", path.LoggableDir(prevPathStr)))
}
} else if item.GetRoot() != nil {
// Root doesn't move or get renamed.
@ -771,11 +797,11 @@ func (c *Collections) UpdateCollections(
// Moved folders don't cause delta results for any subfolders nested in
// them. We need to go through and update paths to handle that. We only
// update newPaths so we don't accidentally clobber previous deletes.
updatePath(newPaths, itemID, collectionPath.String())
updatePath(newPrevPaths, itemID, collectionPath.String())
found, err := updateCollectionPaths(driveID, itemID, c.CollectionMap, collectionPath)
if err != nil {
return clues.Stack(err).WithClues(ictx)
return nil, clues.Stack(err).WithClues(ictx)
}
if found {
@ -787,8 +813,11 @@ func (c *Collections) UpdateCollections(
colScope = CollectionScopePackage
}
ictx = clues.Add(ictx, "collection_scope", colScope)
col, err := NewCollection(
c.handler,
c.protectedResource,
collectionPath,
prevPath,
driveID,
@ -798,7 +827,7 @@ func (c *Collections) UpdateCollections(
invalidPrevDelta,
nil)
if err != nil {
return clues.Stack(err).WithClues(ictx)
return nil, clues.Stack(err).WithClues(ictx)
}
col.driveName = driveName
@ -820,35 +849,38 @@ func (c *Collections) UpdateCollections(
case item.GetFile() != nil:
// Deletions are handled above so this is just moves/renames.
if len(ptr.Val(item.GetParentReference().GetId())) == 0 {
return clues.New("file without parent ID").WithClues(ictx)
return nil, clues.New("file without parent ID").WithClues(ictx)
}
// Get the collection for this item.
parentID := ptr.Val(item.GetParentReference().GetId())
ictx = clues.Add(ictx, "parent_id", parentID)
collection, found := c.CollectionMap[driveID][parentID]
if !found {
return clues.New("item seen before parent folder").WithClues(ictx)
collection, ok := c.CollectionMap[driveID][parentID]
if !ok {
return nil, clues.New("item seen before parent folder").WithClues(ictx)
}
// Delete the file from previous collection. This will
// only kick in if the file was moved multiple times
// within a single delta query
icID, found := itemCollection[driveID][itemID]
if found {
pcollection, found := c.CollectionMap[driveID][icID]
// This will only kick in if the file was moved multiple times
// within a single delta query. We delete the file from the previous
// collection so that it doesn't appear in two places.
prevParentContainerID, ok := currPrevPaths[itemID]
if ok {
prevColl, found := c.CollectionMap[driveID][prevParentContainerID]
if !found {
return clues.New("previous collection not found").WithClues(ictx)
return nil, clues.New("previous collection not found").
With("prev_parent_container_id", prevParentContainerID).
WithClues(ictx)
}
removed := pcollection.Remove(itemID)
if !removed {
return clues.New("removing from prev collection").WithClues(ictx)
if ok := prevColl.Remove(itemID); !ok {
return nil, clues.New("removing item from prev collection").
With("prev_parent_container_id", prevParentContainerID).
WithClues(ictx)
}
}
itemCollection[driveID][itemID] = parentID
currPrevPaths[itemID] = parentID
if collection.Add(item) {
c.NumItems++
@ -869,11 +901,13 @@ func (c *Collections) UpdateCollections(
}
default:
return clues.New("item type not supported").WithClues(ictx)
el.AddRecoverable(ictx, clues.New("item is neither folder nor file").
WithClues(ictx).
Label(fault.LabelForceNoBackupCreation))
}
}
return el.Failure()
return newPrevPaths, el.Failure()
}
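
UpdateCollections now returns the new previous-paths map instead of mutating one passed in by the caller. A toy model of that bookkeeping, using only hypothetical helper names: seed from the old map only while the prior delta token is valid, fold in moves and renames, and drop deletions before the result is persisted as backup metadata:

package main

import (
	"fmt"
	"maps"
)

// nextPrevPaths is a hypothetical stand-in for the prev-paths handling
// inside UpdateCollections, not the production code.
func nextPrevPaths(
	oldPrevPaths map[string]string, // folder ID -> path from the last backup
	moved map[string]string, // folder ID -> new path seen in this delta
	deleted []string, // folder IDs deleted in this delta
	invalidPrevDelta bool,
) map[string]string {
	newPrevPaths := map[string]string{}

	// Only seed from the old map when the previous delta token is usable.
	if !invalidPrevDelta {
		maps.Copy(newPrevPaths, oldPrevPaths)
	}

	for id, p := range moved {
		newPrevPaths[id] = p
	}

	for _, id := range deleted {
		delete(newPrevPaths, id)
	}

	return newPrevPaths
}

func main() {
	old := map[string]string{"root": "/", "folder": "/folder"}
	out := nextPrevPaths(
		old,
		map[string]string{"folder": "/folder2"}, // folder was renamed
		nil,
		false)
	fmt.Println(out) // map[folder:/folder2 root:/]
}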
type dirScopeChecker interface {

View File

@ -8,12 +8,12 @@ import (
"github.com/alcionai/clues"
"github.com/google/uuid"
"github.com/microsoftgraph/msgraph-sdk-go/models"
"github.com/microsoftgraph/msgraph-sdk-go/models/odataerrors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"golang.org/x/exp/maps"
"github.com/alcionai/corso/src/internal/common/idname"
"github.com/alcionai/corso/src/internal/common/prefixmatcher"
pmMock "github.com/alcionai/corso/src/internal/common/prefixmatcher/mock"
"github.com/alcionai/corso/src/internal/data"
@ -137,7 +137,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
expectedStatePath := getExpectedStatePathGenerator(suite.T(), bh, tenant, testBaseDrivePath)
tests := []struct {
testCase string
name string
items []models.DriveItemable
inputFolderMap map[string]string
scope selectors.OneDriveScope
@ -147,11 +147,11 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
expectedContainerCount int
expectedFileCount int
expectedSkippedCount int
expectedMetadataPaths map[string]string
expectedPrevPaths map[string]string
expectedExcludes map[string]struct{}
}{
{
testCase: "Invalid item",
name: "Invalid item",
items: []models.DriveItemable{
driveRootItem("root"),
driveItem("item", "item", testBaseDrivePath, "root", false, false, false),
@ -163,13 +163,13 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
"root": expectedStatePath(data.NotMovedState, ""),
},
expectedContainerCount: 1,
expectedMetadataPaths: map[string]string{
expectedPrevPaths: map[string]string{
"root": expectedPath(""),
},
expectedExcludes: map[string]struct{}{},
},
{
testCase: "Single File",
name: "Single File",
items: []models.DriveItemable{
driveRootItem("root"),
driveItem("file", "file", testBaseDrivePath, "root", true, false, false),
@ -184,13 +184,13 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
expectedFileCount: 1,
expectedContainerCount: 1,
// Root folder is skipped since it's always present.
expectedMetadataPaths: map[string]string{
expectedPrevPaths: map[string]string{
"root": expectedPath(""),
},
expectedExcludes: getDelList("file"),
},
{
testCase: "Single Folder",
name: "Single Folder",
items: []models.DriveItemable{
driveRootItem("root"),
driveItem("folder", "folder", testBaseDrivePath, "root", false, true, false),
@ -202,7 +202,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
"root": expectedStatePath(data.NotMovedState, ""),
"folder": expectedStatePath(data.NewState, folder),
},
expectedMetadataPaths: map[string]string{
expectedPrevPaths: map[string]string{
"root": expectedPath(""),
"folder": expectedPath("/folder"),
},
@ -211,7 +211,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
expectedExcludes: map[string]struct{}{},
},
{
testCase: "Single Package",
name: "Single Package",
items: []models.DriveItemable{
driveRootItem("root"),
driveItem("package", "package", testBaseDrivePath, "root", false, false, true),
@ -223,7 +223,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
"root": expectedStatePath(data.NotMovedState, ""),
"package": expectedStatePath(data.NewState, pkg),
},
expectedMetadataPaths: map[string]string{
expectedPrevPaths: map[string]string{
"root": expectedPath(""),
"package": expectedPath("/package"),
},
@ -232,7 +232,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
expectedExcludes: map[string]struct{}{},
},
{
testCase: "1 root file, 1 folder, 1 package, 2 files, 3 collections",
name: "1 root file, 1 folder, 1 package, 2 files, 3 collections",
items: []models.DriveItemable{
driveRootItem("root"),
driveItem("fileInRoot", "fileInRoot", testBaseDrivePath, "root", true, false, false),
@ -252,7 +252,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
expectedItemCount: 5,
expectedFileCount: 3,
expectedContainerCount: 3,
expectedMetadataPaths: map[string]string{
expectedPrevPaths: map[string]string{
"root": expectedPath(""),
"folder": expectedPath("/folder"),
"package": expectedPath("/package"),
@ -260,7 +260,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
expectedExcludes: getDelList("fileInRoot", "fileInFolder", "fileInPackage"),
},
{
testCase: "contains folder selector",
name: "contains folder selector",
items: []models.DriveItemable{
driveRootItem("root"),
driveItem("fileInRoot", "fileInRoot", testBaseDrivePath, "root", true, false, false),
@ -285,7 +285,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
expectedContainerCount: 3,
// just "folder" isn't added here because the include check is done on the
// parent path since we only check later if something is a folder or not.
expectedMetadataPaths: map[string]string{
expectedPrevPaths: map[string]string{
"folder": expectedPath(folder),
"subfolder": expectedPath(folderSub),
"folder2": expectedPath(folderSub + folder),
@ -293,7 +293,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
expectedExcludes: getDelList("fileInFolder", "fileInFolder2"),
},
{
testCase: "prefix subfolder selector",
name: "prefix subfolder selector",
items: []models.DriveItemable{
driveRootItem("root"),
driveItem("fileInRoot", "fileInRoot", testBaseDrivePath, "root", true, false, false),
@ -316,14 +316,14 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
expectedItemCount: 3,
expectedFileCount: 1,
expectedContainerCount: 2,
expectedMetadataPaths: map[string]string{
expectedPrevPaths: map[string]string{
"subfolder": expectedPath(folderSub),
"folder2": expectedPath(folderSub + folder),
},
expectedExcludes: getDelList("fileInFolder2"),
},
{
testCase: "match subfolder selector",
name: "match subfolder selector",
items: []models.DriveItemable{
driveRootItem("root"),
driveItem("fileInRoot", "fileInRoot", testBaseDrivePath, "root", true, false, false),
@ -344,13 +344,13 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
expectedFileCount: 1,
expectedContainerCount: 1,
// No child folders for subfolder so nothing here.
expectedMetadataPaths: map[string]string{
expectedPrevPaths: map[string]string{
"subfolder": expectedPath(folderSub),
},
expectedExcludes: getDelList("fileInSubfolder"),
},
{
testCase: "not moved folder tree",
name: "not moved folder tree",
items: []models.DriveItemable{
driveRootItem("root"),
driveItem("folder", "folder", testBaseDrivePath, "root", false, true, false),
@ -368,7 +368,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
expectedItemCount: 1,
expectedFileCount: 0,
expectedContainerCount: 2,
expectedMetadataPaths: map[string]string{
expectedPrevPaths: map[string]string{
"root": expectedPath(""),
"folder": expectedPath(folder),
"subfolder": expectedPath(folderSub),
@ -376,7 +376,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
expectedExcludes: map[string]struct{}{},
},
{
testCase: "moved folder tree",
name: "moved folder tree",
items: []models.DriveItemable{
driveRootItem("root"),
driveItem("folder", "folder", testBaseDrivePath, "root", false, true, false),
@ -394,7 +394,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
expectedItemCount: 1,
expectedFileCount: 0,
expectedContainerCount: 2,
expectedMetadataPaths: map[string]string{
expectedPrevPaths: map[string]string{
"root": expectedPath(""),
"folder": expectedPath(folder),
"subfolder": expectedPath(folderSub),
@ -402,7 +402,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
expectedExcludes: map[string]struct{}{},
},
{
testCase: "moved folder tree with file no previous",
name: "moved folder tree with file no previous",
items: []models.DriveItemable{
driveRootItem("root"),
driveItem("folder", "folder", testBaseDrivePath, "root", false, true, false),
@ -419,14 +419,14 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
expectedItemCount: 2,
expectedFileCount: 1,
expectedContainerCount: 2,
expectedMetadataPaths: map[string]string{
expectedPrevPaths: map[string]string{
"root": expectedPath(""),
"folder": expectedPath("/folder2"),
},
expectedExcludes: getDelList("file"),
},
{
testCase: "moved folder tree with file no previous 1",
name: "moved folder tree with file no previous 1",
items: []models.DriveItemable{
driveRootItem("root"),
driveItem("folder", "folder", testBaseDrivePath, "root", false, true, false),
@ -442,14 +442,14 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
expectedItemCount: 2,
expectedFileCount: 1,
expectedContainerCount: 2,
expectedMetadataPaths: map[string]string{
expectedPrevPaths: map[string]string{
"root": expectedPath(""),
"folder": expectedPath(folder),
},
expectedExcludes: getDelList("file"),
},
{
testCase: "moved folder tree and subfolder 1",
name: "moved folder tree and subfolder 1",
items: []models.DriveItemable{
driveRootItem("root"),
driveItem("folder", "folder", testBaseDrivePath, "root", false, true, false),
@ -469,7 +469,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
expectedItemCount: 2,
expectedFileCount: 0,
expectedContainerCount: 3,
expectedMetadataPaths: map[string]string{
expectedPrevPaths: map[string]string{
"root": expectedPath(""),
"folder": expectedPath(folder),
"subfolder": expectedPath("/subfolder"),
@ -477,7 +477,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
expectedExcludes: map[string]struct{}{},
},
{
testCase: "moved folder tree and subfolder 2",
name: "moved folder tree and subfolder 2",
items: []models.DriveItemable{
driveRootItem("root"),
driveItem("subfolder", "subfolder", testBaseDrivePath, "root", false, true, false),
@ -497,7 +497,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
expectedItemCount: 2,
expectedFileCount: 0,
expectedContainerCount: 3,
expectedMetadataPaths: map[string]string{
expectedPrevPaths: map[string]string{
"root": expectedPath(""),
"folder": expectedPath(folder),
"subfolder": expectedPath("/subfolder"),
@ -505,7 +505,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
expectedExcludes: map[string]struct{}{},
},
{
testCase: "move subfolder when moving parent",
name: "move subfolder when moving parent",
items: []models.DriveItemable{
driveRootItem("root"),
driveItem("folder2", "folder2", testBaseDrivePath, "root", false, true, false),
@ -539,7 +539,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
expectedItemCount: 5,
expectedFileCount: 2,
expectedContainerCount: 4,
expectedMetadataPaths: map[string]string{
expectedPrevPaths: map[string]string{
"root": expectedPath(""),
"folder": expectedPath("/folder"),
"folder2": expectedPath("/folder2"),
@ -548,7 +548,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
expectedExcludes: getDelList("itemInSubfolder", "itemInFolder2"),
},
{
testCase: "moved folder tree multiple times",
name: "moved folder tree multiple times",
items: []models.DriveItemable{
driveRootItem("root"),
driveItem("folder", "folder", testBaseDrivePath, "root", false, true, false),
@ -568,7 +568,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
expectedItemCount: 2,
expectedFileCount: 1,
expectedContainerCount: 2,
expectedMetadataPaths: map[string]string{
expectedPrevPaths: map[string]string{
"root": expectedPath(""),
"folder": expectedPath("/folder2"),
"subfolder": expectedPath("/folder2/subfolder"),
@ -576,7 +576,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
expectedExcludes: getDelList("file"),
},
{
testCase: "deleted folder and package",
name: "deleted folder and package",
items: []models.DriveItemable{
driveRootItem("root"), // root is always present, but not necessary here
delItem("folder", testBaseDrivePath, "root", false, true, false),
@ -597,13 +597,13 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
expectedItemCount: 0,
expectedFileCount: 0,
expectedContainerCount: 1,
expectedMetadataPaths: map[string]string{
expectedPrevPaths: map[string]string{
"root": expectedPath(""),
},
expectedExcludes: map[string]struct{}{},
},
{
testCase: "delete folder without previous",
name: "delete folder without previous",
items: []models.DriveItemable{
driveRootItem("root"),
delItem("folder", testBaseDrivePath, "root", false, true, false),
@ -619,13 +619,13 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
expectedItemCount: 0,
expectedFileCount: 0,
expectedContainerCount: 1,
expectedMetadataPaths: map[string]string{
expectedPrevPaths: map[string]string{
"root": expectedPath(""),
},
expectedExcludes: map[string]struct{}{},
},
{
testCase: "delete folder tree move subfolder",
name: "delete folder tree move subfolder",
items: []models.DriveItemable{
driveRootItem("root"),
delItem("folder", testBaseDrivePath, "root", false, true, false),
@ -646,14 +646,14 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
expectedItemCount: 1,
expectedFileCount: 0,
expectedContainerCount: 2,
expectedMetadataPaths: map[string]string{
expectedPrevPaths: map[string]string{
"root": expectedPath(""),
"subfolder": expectedPath("/subfolder"),
},
expectedExcludes: map[string]struct{}{},
},
{
testCase: "delete file",
name: "delete file",
items: []models.DriveItemable{
driveRootItem("root"),
delItem("item", testBaseDrivePath, "root", true, false, false),
@ -669,13 +669,13 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
expectedItemCount: 1,
expectedFileCount: 1,
expectedContainerCount: 1,
expectedMetadataPaths: map[string]string{
expectedPrevPaths: map[string]string{
"root": expectedPath(""),
},
expectedExcludes: getDelList("item"),
},
{
testCase: "item before parent errors",
name: "item before parent errors",
items: []models.DriveItemable{
driveRootItem("root"),
driveItem("file", "file", testBaseDrivePath+"/folder", "folder", true, false, false),
@ -690,13 +690,11 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
expectedItemCount: 0,
expectedFileCount: 0,
expectedContainerCount: 1,
expectedMetadataPaths: map[string]string{
"root": expectedPath(""),
},
expectedExcludes: map[string]struct{}{},
expectedPrevPaths: nil,
expectedExcludes: map[string]struct{}{},
},
{
testCase: "1 root file, 1 folder, 1 package, 1 good file, 1 malware",
name: "1 root file, 1 folder, 1 package, 1 good file, 1 malware",
items: []models.DriveItemable{
driveRootItem("root"),
driveItem("fileInRoot", "fileInRoot", testBaseDrivePath, "root", true, false, false),
@ -717,7 +715,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
expectedFileCount: 2,
expectedContainerCount: 3,
expectedSkippedCount: 1,
expectedMetadataPaths: map[string]string{
expectedPrevPaths: map[string]string{
"root": expectedPath(""),
"folder": expectedPath("/folder"),
"package": expectedPath("/package"),
@ -726,52 +724,48 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
},
}
for _, tt := range tests {
suite.Run(tt.testCase, func() {
for _, test := range tests {
suite.Run(test.name, func() {
t := suite.T()
ctx, flush := tester.NewContext(t)
defer flush()
var (
excludes = map[string]struct{}{}
outputFolderMap = map[string]string{}
itemCollection = map[string]map[string]string{
driveID: {},
}
errs = fault.New(true)
excludes = map[string]struct{}{}
currPrevPaths = map[string]string{}
errs = fault.New(true)
)
maps.Copy(outputFolderMap, tt.inputFolderMap)
maps.Copy(currPrevPaths, test.inputFolderMap)
c := NewCollections(
&itemBackupHandler{api.Drives{}, user, tt.scope},
&itemBackupHandler{api.Drives{}, user, test.scope},
tenant,
user,
idname.NewProvider(user, user),
nil,
control.Options{ToggleFeatures: control.Toggles{}})
c.CollectionMap[driveID] = map[string]*Collection{}
err := c.UpdateCollections(
newPrevPaths, err := c.UpdateCollections(
ctx,
driveID,
"General",
tt.items,
tt.inputFolderMap,
outputFolderMap,
test.items,
test.inputFolderMap,
currPrevPaths,
excludes,
itemCollection,
false,
errs)
tt.expect(t, err, clues.ToCore(err))
assert.Equal(t, len(tt.expectedCollectionIDs), len(c.CollectionMap[driveID]), "total collections")
assert.Equal(t, tt.expectedItemCount, c.NumItems, "item count")
assert.Equal(t, tt.expectedFileCount, c.NumFiles, "file count")
assert.Equal(t, tt.expectedContainerCount, c.NumContainers, "container count")
assert.Equal(t, tt.expectedSkippedCount, len(errs.Skipped()), "skipped items")
test.expect(t, err, clues.ToCore(err))
assert.Equal(t, len(test.expectedCollectionIDs), len(c.CollectionMap[driveID]), "total collections")
assert.Equal(t, test.expectedItemCount, c.NumItems, "item count")
assert.Equal(t, test.expectedFileCount, c.NumFiles, "file count")
assert.Equal(t, test.expectedContainerCount, c.NumContainers, "container count")
assert.Equal(t, test.expectedSkippedCount, len(errs.Skipped()), "skipped items")
for id, sp := range tt.expectedCollectionIDs {
for id, sp := range test.expectedCollectionIDs {
if !assert.Containsf(t, c.CollectionMap[driveID], id, "missing collection with id %s", id) {
// Skip collections we don't find so we don't get an NPE.
continue
@ -782,8 +776,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
assert.Equalf(t, sp.prevPath, c.CollectionMap[driveID][id].PreviousPath(), "prev path for collection %s", id)
}
assert.Equal(t, tt.expectedMetadataPaths, outputFolderMap, "metadata paths")
assert.Equal(t, tt.expectedExcludes, excludes, "exclude list")
assert.Equal(t, test.expectedPrevPaths, newPrevPaths, "metadata paths")
assert.Equal(t, test.expectedExcludes, excludes, "exclude list")
})
}
}
@ -1305,7 +1299,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
driveItem("folder", "folder", driveBasePath1, "root", false, true, false),
driveItem("file", "file", driveBasePath1+"/folder", "folder", true, false, false),
},
DeltaLink: &delta,
DeltaLink: &delta,
ResetDelta: true,
},
},
},
@ -1343,7 +1338,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
driveItem("file", "file", driveBasePath1+"/folder", "folder", true, false, false),
driveItem("file", "file2", driveBasePath1+"/folder", "folder", true, false, false),
},
DeltaLink: &delta,
DeltaLink: &delta,
ResetDelta: true,
},
},
},
@ -1420,7 +1416,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
driveItem("folder", "folder", driveBasePath1, "root", false, true, false),
driveItem("file", "file", driveBasePath1+"/folder", "folder", true, false, false),
},
DeltaLink: &empty, // probably will never happen with graph
DeltaLink: &empty, // probably will never happen with graph
ResetDelta: true,
},
},
},
@ -1457,7 +1454,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
driveItem("folder", "folder", driveBasePath1, "root", false, true, false),
driveItem("file", "file", driveBasePath1+"/folder", "folder", true, false, false),
},
NextLink: &next,
NextLink: &next,
ResetDelta: true,
},
{
Values: []models.DriveItemable{
@ -1465,7 +1463,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
driveItem("folder", "folder", driveBasePath1, "root", false, true, false),
driveItem("file2", "file2", driveBasePath1+"/folder", "folder", true, false, false),
},
DeltaLink: &delta,
DeltaLink: &delta,
ResetDelta: true,
},
},
},
@ -1507,7 +1506,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
driveItem("folder", "folder", driveBasePath1, "root", false, true, false),
driveItem("file", "file", driveBasePath1+"/folder", "folder", true, false, false),
},
DeltaLink: &delta,
DeltaLink: &delta,
ResetDelta: true,
},
},
driveID2: {
@ -1517,7 +1517,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
driveItem("folder2", "folder", driveBasePath2, "root2", false, true, false),
driveItem("file2", "file", driveBasePath2+"/folder", "folder2", true, false, false),
},
DeltaLink: &delta2,
DeltaLink: &delta2,
ResetDelta: true,
},
},
},
@ -1569,7 +1570,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
driveItem("folder", "folder", driveBasePath1, "root", false, true, false),
driveItem("file", "file", driveBasePath1+"/folder", "folder", true, false, false),
},
DeltaLink: &delta,
DeltaLink: &delta,
ResetDelta: true,
},
},
driveID2: {
@ -1579,7 +1581,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
driveItem("folder", "folder", driveBasePath2, "root", false, true, false),
driveItem("file2", "file", driveBasePath2+"/folder", "folder", true, false, false),
},
DeltaLink: &delta2,
DeltaLink: &delta2,
ResetDelta: true,
},
},
},
@ -1637,87 +1640,6 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
expectedFolderPaths: nil,
expectedDelList: nil,
},
{
name: "OneDrive_OneItemPage_DeltaError",
drives: []models.Driveable{drive1},
items: map[string][]apiMock.PagerResult[models.DriveItemable]{
driveID1: {
{
Err: getDeltaError(),
},
{
Values: []models.DriveItemable{
driveRootItem("root"),
driveItem("file", "file", driveBasePath1, "root", true, false, false),
},
DeltaLink: &delta,
},
},
},
canUsePreviousBackup: true,
errCheck: assert.NoError,
expectedCollections: map[string]map[data.CollectionState][]string{
rootFolderPath1: {data.NotMovedState: {"file"}},
},
expectedDeltaURLs: map[string]string{
driveID1: delta,
},
expectedFolderPaths: map[string]map[string]string{
driveID1: {
"root": rootFolderPath1,
},
},
expectedDelList: pmMock.NewPrefixMap(map[string]map[string]struct{}{}),
doNotMergeItems: map[string]bool{
rootFolderPath1: true,
},
},
{
name: "OneDrive_TwoItemPage_DeltaError",
drives: []models.Driveable{drive1},
items: map[string][]apiMock.PagerResult[models.DriveItemable]{
driveID1: {
{
Err: getDeltaError(),
},
{
Values: []models.DriveItemable{
driveRootItem("root"),
driveItem("file", "file", driveBasePath1, "root", true, false, false),
},
NextLink: &next,
},
{
Values: []models.DriveItemable{
driveRootItem("root"),
driveItem("folder", "folder", driveBasePath1, "root", false, true, false),
driveItem("file2", "file", driveBasePath1+"/folder", "folder", true, false, false),
},
DeltaLink: &delta,
},
},
},
canUsePreviousBackup: true,
errCheck: assert.NoError,
expectedCollections: map[string]map[data.CollectionState][]string{
rootFolderPath1: {data.NotMovedState: {"file"}},
expectedPath1("/folder"): {data.NewState: {"folder", "file2"}},
},
expectedDeltaURLs: map[string]string{
driveID1: delta,
},
expectedFolderPaths: map[string]map[string]string{
driveID1: {
"root": rootFolderPath1,
"folder": folderPath1,
},
},
expectedDelList: pmMock.NewPrefixMap(map[string]map[string]struct{}{}),
doNotMergeItems: map[string]bool{
rootFolderPath1: true,
folderPath1: true,
},
},
{
name: "OneDrive_TwoItemPage_NoDeltaError",
drives: []models.Driveable{drive1},
@ -1770,16 +1692,14 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
drives: []models.Driveable{drive1},
items: map[string][]apiMock.PagerResult[models.DriveItemable]{
driveID1: {
{
Err: getDeltaError(),
},
{
Values: []models.DriveItemable{
driveRootItem("root"),
driveItem("folder2", "folder2", driveBasePath1, "root", false, true, false),
driveItem("file", "file", driveBasePath1+"/folder2", "folder2", true, false, false),
},
DeltaLink: &delta,
DeltaLink: &delta,
ResetDelta: true,
},
},
},
@ -1817,16 +1737,14 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
drives: []models.Driveable{drive1},
items: map[string][]apiMock.PagerResult[models.DriveItemable]{
driveID1: {
{
Err: getDeltaError(),
},
{
Values: []models.DriveItemable{
driveRootItem("root"),
driveItem("folder2", "folder", driveBasePath1, "root", false, true, false),
driveItem("file", "file", driveBasePath1+"/folder", "folder2", true, false, false),
},
DeltaLink: &delta,
DeltaLink: &delta,
ResetDelta: true,
},
},
},
@ -1883,7 +1801,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
driveItem("file2", "file2", driveBasePath1+"/folder", "folder", true, false, false),
malwareItem("malware2", "malware2", driveBasePath1+"/folder", "folder", true, false, false),
},
DeltaLink: &delta,
DeltaLink: &delta,
ResetDelta: true,
},
},
},
@ -1913,13 +1832,10 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
expectedSkippedCount: 2,
},
{
name: "One Drive Delta Error Deleted Folder In New Results",
name: "One Drive Deleted Folder In New Results",
drives: []models.Driveable{drive1},
items: map[string][]apiMock.PagerResult[models.DriveItemable]{
driveID1: {
{
Err: getDeltaError(),
},
{
Values: []models.DriveItemable{
driveRootItem("root"),
@ -1936,7 +1852,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
delItem("folder2", driveBasePath1, "root", false, true, false),
delItem("file2", driveBasePath1, "root", true, false, false),
},
DeltaLink: &delta2,
DeltaLink: &delta2,
ResetDelta: true,
},
},
},
@ -1971,19 +1888,17 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
},
},
{
name: "One Drive Delta Error Random Folder Delete",
name: "One Drive Random Folder Delete",
drives: []models.Driveable{drive1},
items: map[string][]apiMock.PagerResult[models.DriveItemable]{
driveID1: {
{
Err: getDeltaError(),
},
{
Values: []models.DriveItemable{
driveRootItem("root"),
delItem("folder", driveBasePath1, "root", false, true, false),
},
DeltaLink: &delta,
DeltaLink: &delta,
ResetDelta: true,
},
},
},
@ -2014,19 +1929,17 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
},
},
{
name: "One Drive Delta Error Random Item Delete",
name: "One Drive Random Item Delete",
drives: []models.Driveable{drive1},
items: map[string][]apiMock.PagerResult[models.DriveItemable]{
driveID1: {
{
Err: getDeltaError(),
},
{
Values: []models.DriveItemable{
driveRootItem("root"),
delItem("file", driveBasePath1, "root", true, false, false),
},
DeltaLink: &delta,
DeltaLink: &delta,
ResetDelta: true,
},
},
},
@ -2072,7 +1985,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
delItem("folder", driveBasePath1, "root", false, true, false),
delItem("file", driveBasePath1, "root", true, false, false),
},
DeltaLink: &delta2,
DeltaLink: &delta2,
ResetDelta: true,
},
},
},
@ -2115,7 +2029,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
driveRootItem("root"),
delItem("file", driveBasePath1, "root", true, false, false),
},
DeltaLink: &delta,
DeltaLink: &delta,
ResetDelta: true,
},
},
},
@ -2153,7 +2068,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
driveRootItem("root"),
delItem("folder", driveBasePath1, "root", false, true, false),
},
DeltaLink: &delta,
DeltaLink: &delta,
ResetDelta: true,
},
},
},
@ -2188,7 +2104,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
driveRootItem("root"),
delItem("file", driveBasePath1, "root", true, false, false),
},
DeltaLink: &delta,
DeltaLink: &delta,
ResetDelta: true,
},
},
},
@ -2270,11 +2187,12 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
mbh := mock.DefaultOneDriveBH("a-user")
mbh.DrivePagerV = mockDrivePager
mbh.ItemPagerV = itemPagers
mbh.DriveItemEnumeration = mock.PagerResultToEDID(test.items)
c := NewCollections(
mbh,
tenant,
user,
idname.NewProvider(user, user),
func(*support.ControllerOperationStatus) {},
control.Options{ToggleFeatures: control.Toggles{}})
@ -2500,121 +2418,6 @@ func delItem(
return item
}
func getDeltaError() error {
syncStateNotFound := "SyncStateNotFound"
me := odataerrors.NewMainError()
me.SetCode(&syncStateNotFound)
deltaError := odataerrors.NewODataError()
deltaError.SetErrorEscaped(me)
return deltaError
}
func (suite *OneDriveCollectionsUnitSuite) TestCollectItems() {
next := "next"
delta := "delta"
prevDelta := "prev-delta"
table := []struct {
name string
items []apiMock.PagerResult[models.DriveItemable]
deltaURL string
prevDeltaSuccess bool
prevDelta string
err error
}{
{
name: "delta on first run",
deltaURL: delta,
items: []apiMock.PagerResult[models.DriveItemable]{
{DeltaLink: &delta},
},
prevDeltaSuccess: true,
prevDelta: prevDelta,
},
{
name: "empty prev delta",
deltaURL: delta,
items: []apiMock.PagerResult[models.DriveItemable]{
{DeltaLink: &delta},
},
prevDeltaSuccess: false,
prevDelta: "",
},
{
name: "next then delta",
deltaURL: delta,
items: []apiMock.PagerResult[models.DriveItemable]{
{NextLink: &next},
{DeltaLink: &delta},
},
prevDeltaSuccess: true,
prevDelta: prevDelta,
},
{
name: "invalid prev delta",
deltaURL: delta,
items: []apiMock.PagerResult[models.DriveItemable]{
{Err: getDeltaError()},
{DeltaLink: &delta}, // works on retry
},
prevDelta: prevDelta,
prevDeltaSuccess: false,
},
{
name: "fail a normal delta query",
items: []apiMock.PagerResult[models.DriveItemable]{
{NextLink: &next},
{Err: assert.AnError},
},
prevDelta: prevDelta,
prevDeltaSuccess: true,
err: assert.AnError,
},
}
for _, test := range table {
suite.Run(test.name, func() {
t := suite.T()
ctx, flush := tester.NewContext(t)
defer flush()
itemPager := &apiMock.DeltaPager[models.DriveItemable]{
ToReturn: test.items,
}
collectorFunc := func(
ctx context.Context,
driveID, driveName string,
driveItems []models.DriveItemable,
oldPaths map[string]string,
newPaths map[string]string,
excluded map[string]struct{},
itemCollection map[string]map[string]string,
doNotMergeItems bool,
errs *fault.Bus,
) error {
return nil
}
delta, _, _, err := collectItems(
ctx,
itemPager,
"",
"General",
collectorFunc,
map[string]string{},
test.prevDelta,
fault.New(true))
require.ErrorIs(t, err, test.err, "delta fetch err", clues.ToCore(err))
require.Equal(t, test.deltaURL, delta.URL, "delta url")
require.Equal(t, !test.prevDeltaSuccess, delta.Reset, "delta reset")
})
}
}
func (suite *OneDriveCollectionsUnitSuite) TestAddURLCacheToDriveCollections() {
driveID := "test-drive"
collCount := 3
@ -2648,7 +2451,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestAddURLCacheToDriveCollections() {
c := NewCollections(
mbh,
"test-tenant",
"test-user",
idname.NewProvider("test-user", "test-user"),
nil,
control.Options{ToggleFeatures: control.Toggles{}})
@ -2660,6 +2463,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestAddURLCacheToDriveCollections() {
for i := 0; i < collCount; i++ {
coll, err := NewCollection(
&itemBackupHandler{api.Drives{}, "test-user", anyFolder},
idname.NewProvider("", ""),
nil,
nil,
driveID,

View File

@ -12,18 +12,21 @@ import (
"github.com/alcionai/corso/src/pkg/control"
"github.com/alcionai/corso/src/pkg/export"
"github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/path"
)
func NewExportCollection(
baseDir string,
backingCollection []data.RestoreCollection,
backupVersion int,
stats *data.ExportStats,
) export.Collectioner {
return export.BaseCollection{
BaseDir: baseDir,
BackingCollection: backingCollection,
BackupVersion: backupVersion,
Stream: streamItems,
Stats: stats,
}
}
@ -34,6 +37,7 @@ func streamItems(
backupVersion int,
cec control.ExportConfig,
ch chan<- export.Item,
stats *data.ExportStats,
) {
defer close(ch)
@ -47,11 +51,22 @@ func streamItems(
}
name, err := getItemName(ctx, itemUUID, backupVersion, rc)
if err != nil {
ch <- export.Item{
ID: itemUUID,
Error: err,
}
continue
}
stats.UpdateResourceCount(path.FilesCategory)
body := data.ReaderWithStats(item.ToReader(), path.FilesCategory, stats)
ch <- export.Item{
ID: itemUUID,
Name: name,
Body: item.ToReader(),
Body: body,
Error: err,
}
}
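Note on the change above: the export stream now threads a *data.ExportStats through NewExportCollection and wraps each item body with data.ReaderWithStats, which is how the run can print totals when it finishes. As a rough, standalone illustration of that wrap-the-reader pattern (not the actual data.ReaderWithStats implementation, whose internals are not part of this diff), a minimal byte-counting reader could look like the sketch below; every name in it is invented for the example.

package sketch

import (
	"io"
	"sync/atomic"
)

// byteCounter tallies bytes streamed for one category of exported data.
type byteCounter struct {
	n atomic.Int64
}

func (b *byteCounter) add(delta int64) { b.n.Add(delta) }

func (b *byteCounter) total() int64 { return b.n.Load() }

// countingReader wraps an io.ReadCloser and reports every byte it serves to
// the counter, in the spirit of wrapping the body before sending it on the
// export channel above.
type countingReader struct {
	rc io.ReadCloser
	bc *byteCounter
}

func newCountingReader(rc io.ReadCloser, bc *byteCounter) io.ReadCloser {
	return &countingReader{rc: rc, bc: bc}
}

// Read passes through to the wrapped reader and records how many bytes were
// actually produced, including short or final reads.
func (r *countingReader) Read(p []byte) (int, error) {
	n, err := r.rc.Read(p)
	r.bc.add(int64(n))
	return n, err
}

func (r *countingReader) Close() error {
	return r.rc.Close()
}

An export loop would wrap each item body once before streaming it to disk and read the counter's total after the last item.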

View File

@ -5,6 +5,7 @@ import (
"github.com/microsoftgraph/msgraph-sdk-go/models"
"github.com/alcionai/corso/src/internal/common/idname"
"github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/pkg/backup/details"
"github.com/alcionai/corso/src/pkg/path"
@ -12,12 +13,13 @@ import (
func augmentItemInfo(
dii details.ItemInfo,
resource idname.Provider,
service path.ServiceType,
item models.DriveItemable,
size int64,
parentPath *path.Builder,
) details.ItemInfo {
var driveName, siteID, driveID, weburl, creatorEmail string
var driveName, driveID, creatorEmail string
// TODO: we rely on this info for details/restore lookups,
// so if it's nil we have an issue, and will need an alternative
@ -38,19 +40,6 @@ func augmentItemInfo(
}
}
if service == path.SharePointService ||
service == path.GroupsService {
gsi := item.GetSharepointIds()
if gsi != nil {
siteID = ptr.Val(gsi.GetSiteId())
weburl = ptr.Val(gsi.GetSiteUrl())
if len(weburl) == 0 {
weburl = constructWebURL(item.GetAdditionalData())
}
}
}
if item.GetParentReference() != nil {
driveID = ptr.Val(item.GetParentReference().GetDriveId())
driveName = strings.TrimSpace(ptr.Val(item.GetParentReference().GetName()))
@ -84,9 +73,9 @@ func augmentItemInfo(
Modified: ptr.Val(item.GetLastModifiedDateTime()),
Owner: creatorEmail,
ParentPath: pps,
SiteID: siteID,
SiteID: resource.ID(),
Size: size,
WebURL: weburl,
WebURL: resource.Name(),
}
case path.GroupsService:
@ -99,9 +88,9 @@ func augmentItemInfo(
Modified: ptr.Val(item.GetLastModifiedDateTime()),
Owner: creatorEmail,
ParentPath: pps,
SiteID: siteID,
SiteID: resource.ID(),
Size: size,
WebURL: weburl,
WebURL: resource.Name(),
}
}

View File

@ -6,6 +6,7 @@ import (
"github.com/microsoftgraph/msgraph-sdk-go/drives"
"github.com/microsoftgraph/msgraph-sdk-go/models"
"github.com/alcionai/corso/src/internal/common/idname"
"github.com/alcionai/corso/src/pkg/backup/details"
"github.com/alcionai/corso/src/pkg/control"
"github.com/alcionai/corso/src/pkg/path"
@ -20,6 +21,7 @@ type ItemInfoAugmenter interface {
// and kiota drops any SetSize update.
AugmentItemInfo(
dii details.ItemInfo,
resource idname.Provider,
item models.DriveItemable,
size int64,
parentPath *path.Builder,
@ -36,6 +38,7 @@ type BackupHandler interface {
GetItemPermissioner
GetItemer
NewDrivePagerer
EnumerateDriveItemsDeltaer
// PathPrefix constructs the service and category specific path prefix for
// the given values.
@ -50,7 +53,7 @@ type BackupHandler interface {
// ServiceCat returns the service and category used by this implementation.
ServiceCat() (path.ServiceType, path.CategoryType)
NewItemPager(driveID, link string, fields []string) api.DeltaPager[models.DriveItemable]
// FormatDisplayPath creates a human-readable string to represent the
// provided path.
FormatDisplayPath(driveName string, parentPath *path.Builder) string
@ -79,6 +82,18 @@ type GetItemer interface {
) (models.DriveItemable, error)
}
type EnumerateDriveItemsDeltaer interface {
EnumerateDriveItemsDelta(
ctx context.Context,
driveID, prevDeltaLink string,
selectProps []string,
) (
[]models.DriveItemable,
api.DeltaUpdate,
error,
)
}
// ---------------------------------------------------------------------------
// restore
// ---------------------------------------------------------------------------

View File

@ -10,17 +10,24 @@ import (
"github.com/microsoftgraph/msgraph-sdk-go/models"
"golang.org/x/exp/maps"
"github.com/alcionai/corso/src/internal/common"
jwt "github.com/alcionai/corso/src/internal/common/jwt"
"github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/internal/common/readers"
"github.com/alcionai/corso/src/internal/common/str"
"github.com/alcionai/corso/src/internal/m365/collection/drive/metadata"
"github.com/alcionai/corso/src/internal/m365/graph"
"github.com/alcionai/corso/src/pkg/logger"
"github.com/alcionai/corso/src/pkg/services/m365/api"
)
const (
acceptHeaderKey = "Accept"
acceptHeaderValue = "*/*"
// JWTQueryParam is a query param embedded in graph download URLs which holds
// the JWT token.
JWTQueryParam = "tempauth"
)
// downloadUrlKeys is used to find the download URL in a DriveItem response.
@ -121,6 +128,19 @@ func downloadFile(
return nil, clues.New("empty file url").WithClues(ctx)
}
// Precheck for url expiry before we make a call to graph to download the
// file. If the url is expired, we can return early and save a call to graph.
//
// Ignore all errors encountered during the check. We can rely on graph to
// return errors on malformed urls. Ignoring errors also future-proofs against
// any sudden graph changes, e.g. if graph decides to embed the token in a
// new query param.
expired, err := isURLExpired(ctx, url)
if err == nil && expired {
logger.Ctx(ctx).Debug("expired item download url")
return nil, graph.ErrTokenExpired
}
rc, err := readers.NewResetRetryHandler(
ctx,
&downloadWithRetries{
@ -193,3 +213,27 @@ func setName(orig models.ItemReferenceable, driveName string) models.ItemReferen
return orig
}
// isURLExpired inspects the jwt token embedded in the item download url
// and returns true if it is expired.
func isURLExpired(
ctx context.Context,
url string,
) (bool, error) {
// Extract the raw JWT string from the download url.
rawJWT, err := common.GetQueryParamFromURL(url, JWTQueryParam)
if err != nil {
logger.CtxErr(ctx, err).Info("query param not found")
return false, clues.Stack(err).WithClues(ctx)
}
expired, err := jwt.IsJWTExpired(rawJWT)
if err != nil {
logger.CtxErr(ctx, err).Info("checking jwt expiry")
return false, clues.Stack(err).WithClues(ctx)
}
return expired, nil
}
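For context on the two helpers above: the item download URL carries a short-lived JWT in its tempauth query param, and only the token's exp claim matters for the precheck, so no signature verification is involved. The standalone sketch below shows one way such a check can be done with just the standard library; downloadURLExpired is a hypothetical name and this is an illustration, not the common/jwt helper the code above relies on.

package sketch

import (
	"encoding/base64"
	"encoding/json"
	"errors"
	"net/url"
	"strings"
	"time"
)

// downloadURLExpired reports whether the JWT in the tempauth query param of a
// download URL is past its exp claim. The signature is deliberately ignored;
// the precheck only needs the expiry timestamp.
func downloadURLExpired(downloadURL string, now time.Time) (bool, error) {
	u, err := url.Parse(downloadURL)
	if err != nil {
		return false, err
	}

	token := u.Query().Get("tempauth")
	parts := strings.Split(token, ".")
	if len(parts) != 3 {
		return false, errors.New("tempauth does not hold a JWT")
	}

	// JWT segments are base64url-encoded without padding.
	payload, err := base64.RawURLEncoding.DecodeString(parts[1])
	if err != nil {
		return false, err
	}

	var claims struct {
		Exp float64 `json:"exp"`
	}
	if err := json.Unmarshal(payload, &claims); err != nil {
		return false, err
	}

	return claims.Exp > 0 && now.After(time.Unix(int64(claims.Exp), 0)), nil
}

Returning false on any parse failure mirrors the fail-open behavior described in the comments above, leaving graph as the authority on malformed URLs.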

View File

@ -1,142 +0,0 @@
package drive
import (
"context"
"github.com/microsoftgraph/msgraph-sdk-go/models"
"golang.org/x/exp/maps"
"github.com/alcionai/corso/src/internal/m365/graph"
"github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/logger"
"github.com/alcionai/corso/src/pkg/services/m365/api"
)
// DeltaUpdate holds the results of a current delta token. It normally
// gets produced when aggregating the addition and removal of items in
// a delta-queryable folder.
// FIXME: This is the same as exchange.api.DeltaUpdate
type DeltaUpdate struct {
// the deltaLink itself
URL string
// true if the old delta was marked as invalid
Reset bool
}
// itemCollector functions collect the items found in a drive
type itemCollector func(
ctx context.Context,
driveID, driveName string,
driveItems []models.DriveItemable,
oldPaths map[string]string,
newPaths map[string]string,
excluded map[string]struct{},
itemCollections map[string]map[string]string,
validPrevDelta bool,
errs *fault.Bus,
) error
// collectItems will enumerate all items in the specified drive and hand them to the
// provided `collector` method
func collectItems(
ctx context.Context,
pager api.DeltaPager[models.DriveItemable],
driveID, driveName string,
collector itemCollector,
oldPaths map[string]string,
prevDelta string,
errs *fault.Bus,
) (
DeltaUpdate,
map[string]string, // newPaths
map[string]struct{}, // excluded
error,
) {
var (
newDeltaURL = ""
newPaths = map[string]string{}
excluded = map[string]struct{}{}
invalidPrevDelta = len(prevDelta) == 0
// itemCollection is used to identify which collection a
// file belongs to. This is useful to delete a file from the
// collection it was previously in, in case it was moved to a
// different collection within the same delta query
// drive ID -> item ID -> item ID
itemCollection = map[string]map[string]string{
driveID: {},
}
)
if !invalidPrevDelta {
maps.Copy(newPaths, oldPaths)
pager.SetNextLink(prevDelta)
}
for {
// assume delta urls here, which allows single-token consumption
page, err := pager.GetPage(graph.ConsumeNTokens(ctx, graph.SingleGetOrDeltaLC))
if graph.IsErrInvalidDelta(err) {
logger.Ctx(ctx).Infow("Invalid previous delta link", "link", prevDelta)
invalidPrevDelta = true
newPaths = map[string]string{}
pager.Reset(ctx)
continue
}
if err != nil {
return DeltaUpdate{}, nil, nil, graph.Wrap(ctx, err, "getting page")
}
vals := page.GetValue()
err = collector(
ctx,
driveID,
driveName,
vals,
oldPaths,
newPaths,
excluded,
itemCollection,
invalidPrevDelta,
errs)
if err != nil {
return DeltaUpdate{}, nil, nil, err
}
nextLink, deltaLink := api.NextAndDeltaLink(page)
if len(deltaLink) > 0 {
newDeltaURL = deltaLink
}
// Check if there are more items
if len(nextLink) == 0 {
break
}
logger.Ctx(ctx).Debugw("Found nextLink", "link", nextLink)
pager.SetNextLink(nextLink)
}
return DeltaUpdate{URL: newDeltaURL, Reset: invalidPrevDelta}, newPaths, excluded, nil
}
// newItem initializes a `models.DriveItemable` that can be used as input to `createItem`
func newItem(name string, folder bool) *models.DriveItem {
itemToCreate := models.NewDriveItem()
itemToCreate.SetName(&name)
if folder {
itemToCreate.SetFolder(models.NewFolder())
} else {
itemToCreate.SetFile(models.NewFile())
}
return itemToCreate
}

View File

@ -11,6 +11,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"github.com/alcionai/corso/src/internal/common/idname"
"github.com/alcionai/corso/src/internal/common/prefixmatcher"
"github.com/alcionai/corso/src/internal/m365/graph"
"github.com/alcionai/corso/src/internal/tester"
@ -267,7 +268,7 @@ func (suite *OneDriveIntgSuite) TestOneDriveNewCollections() {
colls := NewCollections(
&itemBackupHandler{suite.ac.Drives(), test.user, scope},
creds.AzureTenantID,
test.user,
idname.NewProvider(test.user, test.user),
service.updateStatus,
control.Options{
ToggleFeatures: control.Toggles{},

View File

@ -8,6 +8,7 @@ import (
"github.com/microsoftgraph/msgraph-sdk-go/drives"
"github.com/microsoftgraph/msgraph-sdk-go/models"
"github.com/alcionai/corso/src/internal/common/idname"
odConsts "github.com/alcionai/corso/src/internal/m365/service/onedrive/consts"
"github.com/alcionai/corso/src/pkg/backup/details"
"github.com/alcionai/corso/src/pkg/control"
@ -87,20 +88,14 @@ func (h itemBackupHandler) NewDrivePager(
return h.ac.NewUserDrivePager(resourceOwner, fields)
}
func (h itemBackupHandler) NewItemPager(
driveID, link string,
fields []string,
) api.DeltaPager[models.DriveItemable] {
return h.ac.NewDriveItemDeltaPager(driveID, link, fields)
}
func (h itemBackupHandler) AugmentItemInfo(
dii details.ItemInfo,
resource idname.Provider,
item models.DriveItemable,
size int64,
parentPath *path.Builder,
) details.ItemInfo {
return augmentItemInfo(dii, path.OneDriveService, item, size, parentPath)
return augmentItemInfo(dii, resource, path.OneDriveService, item, size, parentPath)
}
func (h itemBackupHandler) FormatDisplayPath(
@ -139,6 +134,14 @@ func (h itemBackupHandler) IncludesDir(dir string) bool {
return h.scope.Matches(selectors.OneDriveFolder, dir)
}
func (h itemBackupHandler) EnumerateDriveItemsDelta(
ctx context.Context,
driveID, prevDeltaLink string,
selectProps []string,
) ([]models.DriveItemable, api.DeltaUpdate, error) {
return h.ac.EnumerateDriveItemsDelta(ctx, driveID, prevDeltaLink, selectProps)
}
// ---------------------------------------------------------------------------
// Restore
// ---------------------------------------------------------------------------
@ -173,11 +176,12 @@ func (h itemRestoreHandler) NewDrivePager(
// and kiota drops any SetSize update.
func (h itemRestoreHandler) AugmentItemInfo(
dii details.ItemInfo,
resource idname.Provider,
item models.DriveItemable,
size int64,
parentPath *path.Builder,
) details.ItemInfo {
return augmentItemInfo(dii, path.OneDriveService, item, size, parentPath)
return augmentItemInfo(dii, resource, path.OneDriveService, item, size, parentPath)
}
func (h itemRestoreHandler) DeleteItem(

View File

@ -16,12 +16,11 @@ import (
"github.com/alcionai/corso/src/internal/common/dttm"
"github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/internal/m365/graph"
"github.com/alcionai/corso/src/internal/tester"
"github.com/alcionai/corso/src/internal/tester/tconfig"
"github.com/alcionai/corso/src/pkg/control"
"github.com/alcionai/corso/src/pkg/control/testdata"
"github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/selectors"
"github.com/alcionai/corso/src/pkg/services/m365/api"
)
@ -49,6 +48,8 @@ func (suite *ItemIntegrationSuite) SetupSuite() {
suite.service = loadTestService(t)
suite.user = tconfig.SecondaryM365UserID(t)
graph.InitializeConcurrencyLimiter(ctx, true, 4)
pager := suite.service.ac.Drives().NewUserDrivePager(suite.user, nil)
odDrives, err := api.GetAllDrives(ctx, pager)
@ -60,83 +61,6 @@ func (suite *ItemIntegrationSuite) SetupSuite() {
suite.userDriveID = ptr.Val(odDrives[0].GetId())
}
// TestItemReader is an integration test that makes a few assumptions
// about the test environment
// 1) It assumes the test user has a drive
// 2) It assumes the drive has a file it can use to test `driveItemReader`
// The test checks these assumptions below
func (suite *ItemIntegrationSuite) TestItemReader_oneDrive() {
t := suite.T()
ctx, flush := tester.NewContext(t)
defer flush()
var driveItem models.DriveItemable
// This item collector tries to find "a" drive item that is a non-empty
// file to test the reader function
itemCollector := func(
_ context.Context,
_, _ string,
items []models.DriveItemable,
_ map[string]string,
_ map[string]string,
_ map[string]struct{},
_ map[string]map[string]string,
_ bool,
_ *fault.Bus,
) error {
if driveItem != nil {
return nil
}
for _, item := range items {
if item.GetFile() != nil && ptr.Val(item.GetSize()) > 0 {
driveItem = item
break
}
}
return nil
}
ip := suite.service.ac.
Drives().
NewDriveItemDeltaPager(suite.userDriveID, "", api.DriveItemSelectDefault())
_, _, _, err := collectItems(
ctx,
ip,
suite.userDriveID,
"General",
itemCollector,
map[string]string{},
"",
fault.New(true))
require.NoError(t, err, clues.ToCore(err))
// Test Requirement 2: Need a file
require.NotEmpty(
t,
driveItem,
"no file item found for user %s drive %s",
suite.user,
suite.userDriveID)
bh := itemBackupHandler{
suite.service.ac.Drives(),
suite.user,
(&selectors.OneDriveBackup{}).Folders(selectors.Any())[0],
}
// Read data for the file
itemData, err := downloadItem(ctx, bh, driveItem)
require.NoError(t, err, clues.ToCore(err))
size, err := io.Copy(io.Discard, itemData)
require.NoError(t, err, clues.ToCore(err))
require.NotZero(t, size)
}
// TestItemWriter is an integration test for uploading data to OneDrive
// It creates a new folder with a new item and writes data to it
func (suite *ItemIntegrationSuite) TestItemWriter() {
@ -171,7 +95,7 @@ func (suite *ItemIntegrationSuite) TestItemWriter() {
ctx,
test.driveID,
ptr.Val(root.GetId()),
newItem(newFolderName, true),
api.NewDriveItem(newFolderName, true),
control.Copy)
require.NoError(t, err, clues.ToCore(err))
require.NotNil(t, newFolder.GetId())
@ -183,7 +107,7 @@ func (suite *ItemIntegrationSuite) TestItemWriter() {
ctx,
test.driveID,
ptr.Val(newFolder.GetId()),
newItem(newItemName, false),
api.NewDriveItem(newItemName, false),
control.Copy)
require.NoError(t, err, clues.ToCore(err))
require.NotNil(t, newItem.GetId())
@ -317,7 +241,7 @@ func (suite *ItemUnitTestSuite) TestDownloadItem() {
{
name: "success",
itemFunc: func() models.DriveItemable {
di := newItem("test", false)
di := api.NewDriveItem("test", false)
di.SetAdditionalData(map[string]any{
"@microsoft.graph.downloadUrl": url,
})
@ -336,7 +260,7 @@ func (suite *ItemUnitTestSuite) TestDownloadItem() {
{
name: "success, content url set instead of download url",
itemFunc: func() models.DriveItemable {
di := newItem("test", false)
di := api.NewDriveItem("test", false)
di.SetAdditionalData(map[string]any{
"@content.downloadUrl": url,
})
@ -355,7 +279,7 @@ func (suite *ItemUnitTestSuite) TestDownloadItem() {
{
name: "api getter returns error",
itemFunc: func() models.DriveItemable {
di := newItem("test", false)
di := api.NewDriveItem("test", false)
di.SetAdditionalData(map[string]any{
"@microsoft.graph.downloadUrl": url,
})
@ -371,7 +295,7 @@ func (suite *ItemUnitTestSuite) TestDownloadItem() {
{
name: "download url is empty",
itemFunc: func() models.DriveItemable {
di := newItem("test", false)
di := api.NewDriveItem("test", false)
return di
},
GetFunc: func(ctx context.Context, url string) (*http.Response, error) {
@ -386,7 +310,7 @@ func (suite *ItemUnitTestSuite) TestDownloadItem() {
{
name: "malware",
itemFunc: func() models.DriveItemable {
di := newItem("test", false)
di := api.NewDriveItem("test", false)
di.SetAdditionalData(map[string]any{
"@microsoft.graph.downloadUrl": url,
})
@ -408,7 +332,7 @@ func (suite *ItemUnitTestSuite) TestDownloadItem() {
{
name: "non-2xx http response",
itemFunc: func() models.DriveItemable {
di := newItem("test", false)
di := api.NewDriveItem("test", false)
di.SetAdditionalData(map[string]any{
"@microsoft.graph.downloadUrl": url,
})
@ -457,7 +381,7 @@ func (suite *ItemUnitTestSuite) TestDownloadItem_ConnectionResetErrorOnFirstRead
url = "https://example.com"
itemFunc = func() models.DriveItemable {
di := newItem("test", false)
di := api.NewDriveItem("test", false)
di.SetAdditionalData(map[string]any{
"@microsoft.graph.downloadUrl": url,
})

View File

@ -3,13 +3,12 @@ package drive
import (
"context"
"net/http"
"strings"
"github.com/alcionai/clues"
"github.com/microsoftgraph/msgraph-sdk-go/drives"
"github.com/microsoftgraph/msgraph-sdk-go/models"
"github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/internal/common/idname"
odConsts "github.com/alcionai/corso/src/internal/m365/service/onedrive/consts"
"github.com/alcionai/corso/src/pkg/backup/details"
"github.com/alcionai/corso/src/pkg/control"
@ -92,53 +91,14 @@ func (h libraryBackupHandler) NewDrivePager(
return h.ac.NewSiteDrivePager(resourceOwner, fields)
}
func (h libraryBackupHandler) NewItemPager(
driveID, link string,
fields []string,
) api.DeltaPager[models.DriveItemable] {
return h.ac.NewDriveItemDeltaPager(driveID, link, fields)
}
func (h libraryBackupHandler) AugmentItemInfo(
dii details.ItemInfo,
resource idname.Provider,
item models.DriveItemable,
size int64,
parentPath *path.Builder,
) details.ItemInfo {
return augmentItemInfo(dii, h.service, item, size, parentPath)
}
// constructWebURL is a helper function for recreating the webURL
// for the originating SharePoint site. Uses the additionalData map
// from a models.DriveItemable that possesses a downloadURL within the map.
// Returns "" if the map is nil or key is not present.
func constructWebURL(adtl map[string]any) string {
var (
desiredKey = "@microsoft.graph.downloadUrl"
sep = `/_layouts`
url string
)
if adtl == nil {
return url
}
r := adtl[desiredKey]
point, ok := r.(*string)
if !ok {
return url
}
value := ptr.Val(point)
if len(value) == 0 {
return url
}
temp := strings.Split(value, sep)
url = temp[0]
return url
return augmentItemInfo(dii, resource, h.service, item, size, parentPath)
}
func (h libraryBackupHandler) FormatDisplayPath(
@ -177,6 +137,14 @@ func (h libraryBackupHandler) IncludesDir(dir string) bool {
return h.scope.Matches(selectors.SharePointLibraryFolder, dir)
}
func (h libraryBackupHandler) EnumerateDriveItemsDelta(
ctx context.Context,
driveID, prevDeltaLink string,
selectProps []string,
) ([]models.DriveItemable, api.DeltaUpdate, error) {
return h.ac.EnumerateDriveItemsDelta(ctx, driveID, prevDeltaLink, selectProps)
}
// ---------------------------------------------------------------------------
// Restore
// ---------------------------------------------------------------------------
@ -208,11 +176,12 @@ func (h libraryRestoreHandler) NewDrivePager(
func (h libraryRestoreHandler) AugmentItemInfo(
dii details.ItemInfo,
resource idname.Provider,
item models.DriveItemable,
size int64,
parentPath *path.Builder,
) details.ItemInfo {
return augmentItemInfo(dii, h.service, item, size, parentPath)
return augmentItemInfo(dii, resource, h.service, item, size, parentPath)
}
func (h libraryRestoreHandler) DeleteItem(

View File

@ -271,7 +271,7 @@ func restoreItem(
itemInfo, err := restoreV0File(
ctx,
rh,
rcc.RestoreConfig,
rcc,
drivePath,
fibn,
restoreFolderID,
@ -377,7 +377,7 @@ func restoreItem(
func restoreV0File(
ctx context.Context,
rh RestoreHandler,
restoreCfg control.RestoreConfig,
rcc inject.RestoreConsumerConfig,
drivePath *path.DrivePath,
fibn data.FetchItemByNamer,
restoreFolderID string,
@ -388,7 +388,7 @@ func restoreV0File(
) (details.ItemInfo, error) {
_, itemInfo, err := restoreFile(
ctx,
restoreCfg,
rcc,
rh,
fibn,
itemData.ID(),
@ -423,7 +423,7 @@ func restoreV1File(
itemID, itemInfo, err := restoreFile(
ctx,
rcc.RestoreConfig,
rcc,
rh,
fibn,
trimmedName,
@ -509,7 +509,7 @@ func restoreV6File(
itemID, itemInfo, err := restoreFile(
ctx,
rcc.RestoreConfig,
rcc,
rh,
fibn,
meta.FileName,
@ -671,7 +671,7 @@ func createFolder(
ctx,
driveID,
parentFolderID,
newItem(folderName, true),
api.NewDriveItem(folderName, true),
control.Replace)
// ErrItemAlreadyExistsConflict can only occur for folders if the
@ -692,7 +692,7 @@ func createFolder(
ctx,
driveID,
parentFolderID,
newItem(folderName, true),
api.NewDriveItem(folderName, true),
control.Copy)
if err != nil {
return nil, clues.Wrap(err, "creating folder")
@ -711,7 +711,7 @@ type itemRestorer interface {
// restoreFile will create a new item in the specified `parentFolderID` and upload the data.Item
func restoreFile(
ctx context.Context,
restoreCfg control.RestoreConfig,
rcc inject.RestoreConsumerConfig,
ir itemRestorer,
fibn data.FetchItemByNamer,
name string,
@ -733,7 +733,7 @@ func restoreFile(
}
var (
item = newItem(name, false)
item = api.NewDriveItem(name, false)
collisionKey = api.DriveItemCollisionKey(item)
collision api.DriveItemIDType
shouldDeleteOriginal bool
@ -743,7 +743,7 @@ func restoreFile(
log := logger.Ctx(ctx).With("collision_key", clues.Hide(collisionKey))
log.Debug("item collision")
if restoreCfg.OnCollision == control.Skip {
if rcc.RestoreConfig.OnCollision == control.Skip {
ctr.Inc(count.CollisionSkip)
log.Debug("skipping item with collision")
@ -751,7 +751,7 @@ func restoreFile(
}
collision = dci
shouldDeleteOriginal = restoreCfg.OnCollision == control.Replace && !dci.IsFolder
shouldDeleteOriginal = rcc.RestoreConfig.OnCollision == control.Replace && !dci.IsFolder
}
// drive items do not support PUT requests on the drive item data, so
@ -850,7 +850,12 @@ func restoreFile(
defer closeProgressBar()
dii := ir.AugmentItemInfo(details.ItemInfo{}, newItem, written, nil)
dii := ir.AugmentItemInfo(
details.ItemInfo{},
rcc.ProtectedResource,
newItem,
written,
nil)
if shouldDeleteOriginal {
ctr.Inc(count.CollisionReplace)

View File

@ -47,7 +47,7 @@ type urlCache struct {
refreshMu sync.Mutex
deltaQueryCount int
itemPager api.DeltaPager[models.DriveItemable]
edid EnumerateDriveItemsDeltaer
errs *fault.Bus
}
@ -56,13 +56,10 @@ type urlCache struct {
func newURLCache(
driveID, prevDelta string,
refreshInterval time.Duration,
itemPager api.DeltaPager[models.DriveItemable],
edid EnumerateDriveItemsDeltaer,
errs *fault.Bus,
) (*urlCache, error) {
err := validateCacheParams(
driveID,
refreshInterval,
itemPager)
err := validateCacheParams(driveID, refreshInterval, edid)
if err != nil {
return nil, clues.Wrap(err, "cache params")
}
@ -71,9 +68,9 @@ func newURLCache(
idToProps: make(map[string]itemProps),
lastRefreshTime: time.Time{},
driveID: driveID,
edid: edid,
prevDelta: prevDelta,
refreshInterval: refreshInterval,
itemPager: itemPager,
errs: errs,
},
nil
@ -83,7 +80,7 @@ func newURLCache(
func validateCacheParams(
driveID string,
refreshInterval time.Duration,
itemPager api.DeltaPager[models.DriveItemable],
edid EnumerateDriveItemsDeltaer,
) error {
if len(driveID) == 0 {
return clues.New("drive id is empty")
@ -93,8 +90,8 @@ func validateCacheParams(
return clues.New("invalid refresh interval")
}
if itemPager == nil {
return clues.New("nil item pager")
if edid == nil {
return clues.New("nil item enumerator")
}
return nil
@ -160,44 +157,27 @@ func (uc *urlCache) refreshCache(
// Issue a delta query to graph
logger.Ctx(ctx).Info("refreshing url cache")
err := uc.deltaQuery(ctx)
items, du, err := uc.edid.EnumerateDriveItemsDelta(
ctx,
uc.driveID,
uc.prevDelta,
api.URLCacheDriveItemProps())
if err != nil {
// clear cache
uc.idToProps = make(map[string]itemProps)
return clues.Stack(err)
}
return err
uc.deltaQueryCount++
if err := uc.updateCache(ctx, items, uc.errs); err != nil {
return clues.Stack(err)
}
logger.Ctx(ctx).Info("url cache refreshed")
// Update last refresh time
uc.lastRefreshTime = time.Now()
return nil
}
// deltaQuery performs a delta query on the drive and updates the cache
func (uc *urlCache) deltaQuery(
ctx context.Context,
) error {
logger.Ctx(ctx).Debug("starting delta query")
// Reset item pager to remove any previous state
uc.itemPager.Reset(ctx)
_, _, _, err := collectItems(
ctx,
uc.itemPager,
uc.driveID,
"",
uc.updateCache,
map[string]string{},
uc.prevDelta,
uc.errs)
if err != nil {
return clues.Wrap(err, "delta query")
}
uc.deltaQueryCount++
uc.prevDelta = du.URL
return nil
}
@ -224,13 +204,7 @@ func (uc *urlCache) readCache(
// It assumes that cacheMu is held by caller in write mode
func (uc *urlCache) updateCache(
ctx context.Context,
_, _ string,
items []models.DriveItemable,
_ map[string]string,
_ map[string]string,
_ map[string]struct{},
_ map[string]map[string]string,
_ bool,
errs *fault.Bus,
) error {
el := errs.Local()

View File

@ -1,7 +1,6 @@
package drive
import (
"context"
"errors"
"io"
"math/rand"
@ -18,15 +17,19 @@ import (
"github.com/alcionai/corso/src/internal/common/dttm"
"github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/internal/m365/graph"
"github.com/alcionai/corso/src/internal/m365/service/onedrive/mock"
"github.com/alcionai/corso/src/internal/tester"
"github.com/alcionai/corso/src/internal/tester/tconfig"
"github.com/alcionai/corso/src/pkg/control"
"github.com/alcionai/corso/src/pkg/control/testdata"
"github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/services/m365/api"
apiMock "github.com/alcionai/corso/src/pkg/services/m365/api/mock"
)
// ---------------------------------------------------------------------------
// integration
// ---------------------------------------------------------------------------
type URLCacheIntegrationSuite struct {
tester.Suite
ac api.Client
@ -68,11 +71,10 @@ func (suite *URLCacheIntegrationSuite) SetupSuite() {
// url cache
func (suite *URLCacheIntegrationSuite) TestURLCacheBasic() {
var (
t = suite.T()
ac = suite.ac.Drives()
driveID = suite.driveID
newFolderName = testdata.DefaultRestoreConfig("folder").Location
driveItemPager = suite.ac.Drives().NewDriveItemDeltaPager(driveID, "", api.DriveItemSelectDefault())
t = suite.T()
ac = suite.ac.Drives()
driveID = suite.driveID
newFolderName = testdata.DefaultRestoreConfig("folder").Location
)
ctx, flush := tester.NewContext(t)
@ -82,11 +84,11 @@ func (suite *URLCacheIntegrationSuite) TestURLCacheBasic() {
root, err := ac.GetRootFolder(ctx, driveID)
require.NoError(t, err, clues.ToCore(err))
newFolder, err := ac.Drives().PostItemInContainer(
newFolder, err := ac.PostItemInContainer(
ctx,
driveID,
ptr.Val(root.GetId()),
newItem(newFolderName, true),
api.NewDriveItem(newFolderName, true),
control.Copy)
require.NoError(t, err, clues.ToCore(err))
@ -94,33 +96,14 @@ func (suite *URLCacheIntegrationSuite) TestURLCacheBasic() {
nfid := ptr.Val(newFolder.GetId())
collectorFunc := func(
context.Context,
string,
string,
[]models.DriveItemable,
map[string]string,
map[string]string,
map[string]struct{},
map[string]map[string]string,
bool,
*fault.Bus,
) error {
return nil
}
// Get the previous delta to feed into url cache
prevDelta, _, _, err := collectItems(
_, du, err := ac.EnumerateDriveItemsDelta(
ctx,
suite.ac.Drives().NewDriveItemDeltaPager(driveID, "", api.DriveItemSelectURLCache()),
suite.driveID,
"drive-name",
collectorFunc,
map[string]string{},
"",
fault.New(true))
api.URLCacheDriveItemProps())
require.NoError(t, err, clues.ToCore(err))
require.NotNil(t, prevDelta.URL)
require.NotEmpty(t, du.URL)
// Create a bunch of files in the new folder
var items []models.DriveItemable
@ -128,11 +111,11 @@ func (suite *URLCacheIntegrationSuite) TestURLCacheBasic() {
for i := 0; i < 5; i++ {
newItemName := "test_url_cache_basic_" + dttm.FormatNow(dttm.SafeForTesting)
item, err := ac.Drives().PostItemInContainer(
item, err := ac.PostItemInContainer(
ctx,
driveID,
nfid,
newItem(newItemName, false),
api.NewDriveItem(newItemName, false),
control.Copy)
require.NoError(t, err, clues.ToCore(err))
@ -142,9 +125,9 @@ func (suite *URLCacheIntegrationSuite) TestURLCacheBasic() {
// Create a new URL cache with a long TTL
uc, err := newURLCache(
suite.driveID,
prevDelta.URL,
du.URL,
1*time.Hour,
driveItemPager,
suite.ac.Drives(),
fault.New(true))
require.NoError(t, err, clues.ToCore(err))
@ -195,6 +178,10 @@ func (suite *URLCacheIntegrationSuite) TestURLCacheBasic() {
require.Equal(t, 1, uc.deltaQueryCount)
}
// ---------------------------------------------------------------------------
// unit
// ---------------------------------------------------------------------------
type URLCacheUnitSuite struct {
tester.Suite
}
@ -205,27 +192,20 @@ func TestURLCacheUnitSuite(t *testing.T) {
func (suite *URLCacheUnitSuite) TestGetItemProperties() {
deltaString := "delta"
next := "next"
driveID := "drive1"
table := []struct {
name string
pagerResult map[string][]apiMock.PagerResult[models.DriveItemable]
pagerItems map[string][]models.DriveItemable
pagerErr map[string]error
expectedItemProps map[string]itemProps
expectedErr require.ErrorAssertionFunc
cacheAssert func(*urlCache, time.Time)
}{
{
name: "single item in cache",
pagerResult: map[string][]apiMock.PagerResult[models.DriveItemable]{
driveID: {
{
Values: []models.DriveItemable{
fileItem("1", "file1", "root", "root", "https://dummy1.com", false),
},
DeltaLink: &deltaString,
},
},
pagerItems: map[string][]models.DriveItemable{
driveID: {fileItem("1", "file1", "root", "root", "https://dummy1.com", false)},
},
expectedItemProps: map[string]itemProps{
"1": {
@ -242,18 +222,13 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() {
},
{
name: "multiple items in cache",
pagerResult: map[string][]apiMock.PagerResult[models.DriveItemable]{
pagerItems: map[string][]models.DriveItemable{
driveID: {
{
Values: []models.DriveItemable{
fileItem("1", "file1", "root", "root", "https://dummy1.com", false),
fileItem("2", "file2", "root", "root", "https://dummy2.com", false),
fileItem("3", "file3", "root", "root", "https://dummy3.com", false),
fileItem("4", "file4", "root", "root", "https://dummy4.com", false),
fileItem("5", "file5", "root", "root", "https://dummy5.com", false),
},
DeltaLink: &deltaString,
},
fileItem("1", "file1", "root", "root", "https://dummy1.com", false),
fileItem("2", "file2", "root", "root", "https://dummy2.com", false),
fileItem("3", "file3", "root", "root", "https://dummy3.com", false),
fileItem("4", "file4", "root", "root", "https://dummy4.com", false),
fileItem("5", "file5", "root", "root", "https://dummy5.com", false),
},
},
expectedItemProps: map[string]itemProps{
@ -287,18 +262,13 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() {
},
{
name: "duplicate items with potentially new urls",
pagerResult: map[string][]apiMock.PagerResult[models.DriveItemable]{
pagerItems: map[string][]models.DriveItemable{
driveID: {
{
Values: []models.DriveItemable{
fileItem("1", "file1", "root", "root", "https://dummy1.com", false),
fileItem("2", "file2", "root", "root", "https://dummy2.com", false),
fileItem("3", "file3", "root", "root", "https://dummy3.com", false),
fileItem("1", "file1", "root", "root", "https://test1.com", false),
fileItem("2", "file2", "root", "root", "https://test2.com", false),
},
DeltaLink: &deltaString,
},
fileItem("1", "file1", "root", "root", "https://dummy1.com", false),
fileItem("2", "file2", "root", "root", "https://dummy2.com", false),
fileItem("3", "file3", "root", "root", "https://dummy3.com", false),
fileItem("1", "file1", "root", "root", "https://test1.com", false),
fileItem("2", "file2", "root", "root", "https://test2.com", false),
},
},
expectedItemProps: map[string]itemProps{
@ -324,16 +294,11 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() {
},
{
name: "deleted items",
pagerResult: map[string][]apiMock.PagerResult[models.DriveItemable]{
pagerItems: map[string][]models.DriveItemable{
driveID: {
{
Values: []models.DriveItemable{
fileItem("1", "file1", "root", "root", "https://dummy1.com", false),
fileItem("2", "file2", "root", "root", "https://dummy2.com", false),
fileItem("1", "file1", "root", "root", "https://dummy1.com", true),
},
DeltaLink: &deltaString,
},
fileItem("1", "file1", "root", "root", "https://dummy1.com", false),
fileItem("2", "file2", "root", "root", "https://dummy2.com", false),
fileItem("1", "file1", "root", "root", "https://dummy1.com", true),
},
},
expectedItemProps: map[string]itemProps{
@ -355,15 +320,8 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() {
},
{
name: "item not found in cache",
pagerResult: map[string][]apiMock.PagerResult[models.DriveItemable]{
driveID: {
{
Values: []models.DriveItemable{
fileItem("1", "file1", "root", "root", "https://dummy1.com", false),
},
DeltaLink: &deltaString,
},
},
pagerItems: map[string][]models.DriveItemable{
driveID: {fileItem("1", "file1", "root", "root", "https://dummy1.com", false)},
},
expectedItemProps: map[string]itemProps{
"2": {},
@ -376,23 +334,10 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() {
},
},
{
name: "multi-page delta query error",
pagerResult: map[string][]apiMock.PagerResult[models.DriveItemable]{
driveID: {
{
Values: []models.DriveItemable{
fileItem("1", "file1", "root", "root", "https://dummy1.com", false),
},
NextLink: &next,
},
{
Values: []models.DriveItemable{
fileItem("2", "file2", "root", "root", "https://dummy2.com", false),
},
DeltaLink: &deltaString,
Err: errors.New("delta query error"),
},
},
name: "delta query error",
pagerItems: map[string][]models.DriveItemable{},
pagerErr: map[string]error{
driveID: errors.New("delta query error"),
},
expectedItemProps: map[string]itemProps{
"1": {},
@ -408,15 +353,10 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() {
{
name: "folder item",
pagerResult: map[string][]apiMock.PagerResult[models.DriveItemable]{
pagerItems: map[string][]models.DriveItemable{
driveID: {
{
Values: []models.DriveItemable{
fileItem("1", "file1", "root", "root", "https://dummy1.com", false),
driveItem("2", "folder2", "root", "root", false, true, false),
},
DeltaLink: &deltaString,
},
fileItem("1", "file1", "root", "root", "https://dummy1.com", false),
driveItem("2", "folder2", "root", "root", false, true, false),
},
},
expectedItemProps: map[string]itemProps{
@ -437,15 +377,17 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() {
ctx, flush := tester.NewContext(t)
defer flush()
itemPager := &apiMock.DeltaPager[models.DriveItemable]{
ToReturn: test.pagerResult[driveID],
medi := mock.EnumeratesDriveItemsDelta{
Items: test.pagerItems,
Err: test.pagerErr,
DeltaUpdate: map[string]api.DeltaUpdate{driveID: {URL: deltaString}},
}
cache, err := newURLCache(
driveID,
"",
1*time.Hour,
itemPager,
&medi,
fault.New(true))
require.NoError(suite.T(), err, clues.ToCore(err))
@ -480,15 +422,17 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() {
// Test needsRefresh
func (suite *URLCacheUnitSuite) TestNeedsRefresh() {
driveID := "drive1"
t := suite.T()
refreshInterval := 1 * time.Second
var (
t = suite.T()
driveID = "drive1"
refreshInterval = 1 * time.Second
)
cache, err := newURLCache(
driveID,
"",
refreshInterval,
&apiMock.DeltaPager[models.DriveItemable]{},
&mock.EnumeratesDriveItemsDelta{},
fault.New(true))
require.NoError(t, err, clues.ToCore(err))
@ -510,14 +454,12 @@ func (suite *URLCacheUnitSuite) TestNeedsRefresh() {
require.False(t, cache.needsRefresh())
}
// Test newURLCache
func (suite *URLCacheUnitSuite) TestNewURLCache() {
// table driven tests
table := []struct {
name string
driveID string
refreshInt time.Duration
itemPager api.DeltaPager[models.DriveItemable]
itemPager EnumerateDriveItemsDeltaer
errors *fault.Bus
expectedErr require.ErrorAssertionFunc
}{
@ -525,7 +467,7 @@ func (suite *URLCacheUnitSuite) TestNewURLCache() {
name: "invalid driveID",
driveID: "",
refreshInt: 1 * time.Hour,
itemPager: &apiMock.DeltaPager[models.DriveItemable]{},
itemPager: &mock.EnumeratesDriveItemsDelta{},
errors: fault.New(true),
expectedErr: require.Error,
},
@ -533,12 +475,12 @@ func (suite *URLCacheUnitSuite) TestNewURLCache() {
name: "invalid refresh interval",
driveID: "drive1",
refreshInt: 100 * time.Millisecond,
itemPager: &apiMock.DeltaPager[models.DriveItemable]{},
itemPager: &mock.EnumeratesDriveItemsDelta{},
errors: fault.New(true),
expectedErr: require.Error,
},
{
name: "invalid itemPager",
name: "invalid item enumerator",
driveID: "drive1",
refreshInt: 1 * time.Hour,
itemPager: nil,
@ -549,7 +491,7 @@ func (suite *URLCacheUnitSuite) TestNewURLCache() {
name: "valid",
driveID: "drive1",
refreshInt: 1 * time.Hour,
itemPager: &apiMock.DeltaPager[models.DriveItemable]{},
itemPager: &mock.EnumeratesDriveItemsDelta{},
errors: fault.New(true),
expectedErr: require.NoError,
},

View File

@ -160,7 +160,7 @@ func populateCollections(
ictx = clues.Add(ictx, "previous_path", prevPath)
added, _, removed, newDelta, err := bh.itemEnumerator().
added, validModTimes, removed, newDelta, err := bh.itemEnumerator().
GetAddedAndRemovedItemIDs(
ictx,
qp.ProtectedResource.ID(),
@ -199,9 +199,7 @@ func populateCollections(
bh.itemHandler(),
added,
removed,
// TODO(ashmrtn): Set to value returned by pager when we have deletion
// markers in files.
false,
validModTimes,
statusUpdater)
collections[cID] = edc

View File

@ -278,7 +278,7 @@ func (col *prefetchCollection) streamItems(
return
}
item, err := data.NewPrefetchedItem(
item, err := data.NewPrefetchedItemWithInfo(
io.NopCloser(bytes.NewReader(itemData)),
id,
details.ItemInfo{Exchange: info})
@ -403,7 +403,7 @@ func (col *lazyFetchCollection) streamItems(
"service", path.ExchangeService.String(),
"category", col.Category().String())
stream <- data.NewLazyItem(
stream <- data.NewLazyItemWithInfo(
ictx,
&lazyItemGetter{
userID: user,

View File

@ -56,7 +56,7 @@ func (suite *CollectionUnitSuite) TestPrefetchedItem_Reader() {
suite.Run(test.name, func() {
t := suite.T()
ed, err := data.NewPrefetchedItem(
ed, err := data.NewPrefetchedItemWithInfo(
io.NopCloser(bytes.NewReader(test.readData)),
"itemID",
details.ItemInfo{})
@ -494,7 +494,7 @@ func (suite *CollectionUnitSuite) TestLazyItem_NoRead_GetInfo_Errors() {
ctx, flush := tester.NewContext(t)
defer flush()
li := data.NewLazyItem(
li := data.NewLazyItemWithInfo(
ctx,
nil,
"itemID",
@ -552,7 +552,7 @@ func (suite *CollectionUnitSuite) TestLazyItem_GetDataErrors() {
SerializeErr: test.serializeErr,
}
li := data.NewLazyItem(
li := data.NewLazyItemWithInfo(
ctx,
&lazyItemGetter{
userID: "userID",
@ -592,7 +592,7 @@ func (suite *CollectionUnitSuite) TestLazyItem_ReturnsEmptyReaderOnDeletedInFlig
getter := &mock.ItemGetSerialize{GetErr: graph.ErrDeletedInFlight}
li := data.NewLazyItem(
li := data.NewLazyItemWithInfo(
ctx,
&lazyItemGetter{
userID: "userID",
@ -645,7 +645,7 @@ func (suite *CollectionUnitSuite) TestLazyItem() {
getter := &mock.ItemGetSerialize{GetData: testData}
li := data.NewLazyItem(
li := data.NewLazyItemWithInfo(
ctx,
&lazyItemGetter{
userID: "userID",

View File

@ -2,7 +2,6 @@ package groups
import (
"context"
"fmt"
"testing"
"time"
@ -527,8 +526,6 @@ func (suite *BackupIntgSuite) TestCreateCollections() {
require.NotEmpty(t, c.FullPath().Folder(false))
fmt.Printf("\n-----\nfolder %+v\n-----\n", c.FullPath().Folder(false))
// TODO(ashmrtn): Remove when LocationPath is made part of BackupCollection
// interface.
if !assert.Implements(t, (*data.LocationPather)(nil), c) {
@ -537,8 +534,6 @@ func (suite *BackupIntgSuite) TestCreateCollections() {
loc := c.(data.LocationPather).LocationPath().String()
fmt.Printf("\n-----\nloc %+v\n-----\n", c.(data.LocationPather).LocationPath().String())
require.NotEmpty(t, loc)
delete(test.channelNames, loc)

View File

@ -176,7 +176,7 @@ func (col *Collection) streamItems(ctx context.Context, errs *fault.Bus) {
info.ParentPath = col.LocationPath().String()
storeItem, err := data.NewPrefetchedItem(
storeItem, err := data.NewPrefetchedItemWithInfo(
io.NopCloser(bytes.NewReader(itemData)),
id,
details.ItemInfo{Groups: info})

View File

@ -49,7 +49,7 @@ func (suite *CollectionUnitSuite) TestPrefetchedItem_Reader() {
suite.Run(test.name, func() {
t := suite.T()
ed, err := data.NewPrefetchedItem(
ed, err := data.NewPrefetchedItemWithInfo(
io.NopCloser(bytes.NewReader(test.readData)),
"itemID",
details.ItemInfo{})

View File

@ -15,6 +15,7 @@ import (
"github.com/alcionai/corso/src/pkg/control"
"github.com/alcionai/corso/src/pkg/export"
"github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/path"
"github.com/alcionai/corso/src/pkg/services/m365/api"
)
@ -23,6 +24,7 @@ func NewExportCollection(
backingCollections []data.RestoreCollection,
backupVersion int,
cec control.ExportConfig,
stats *data.ExportStats,
) export.Collectioner {
return export.BaseCollection{
BaseDir: baseDir,
@ -30,6 +32,7 @@ func NewExportCollection(
BackupVersion: backupVersion,
Cfg: cec,
Stream: streamItems,
Stats: stats,
}
}
@ -40,6 +43,7 @@ func streamItems(
backupVersion int,
cec control.ExportConfig,
ch chan<- export.Item,
stats *data.ExportStats,
) {
defer close(ch)
@ -54,6 +58,9 @@ func streamItems(
Error: err,
}
} else {
stats.UpdateResourceCount(path.ChannelMessagesCategory)
body = data.ReaderWithStats(body, path.ChannelMessagesCategory, stats)
ch <- export.Item{
ID: item.ID(),
// channel message items have no name

View File

@ -90,7 +90,8 @@ func (suite *ExportUnitSuite) TestStreamItems() {
[]data.RestoreCollection{test.backingColl},
version.NoBackup,
control.DefaultExportConfig(),
ch)
ch,
&data.ExportStats{})
var (
itm export.Item

View File

@ -38,7 +38,7 @@ func CollectLibraries(
colls = drive.NewCollections(
bh,
tenantID,
bpc.ProtectedResource.ID(),
bpc.ProtectedResource,
su,
bpc.Options)
)

View File

@ -212,7 +212,7 @@ func (sc *Collection) retrieveLists(
metrics.Successes++
item, err := data.NewPrefetchedItem(
item, err := data.NewPrefetchedItemWithInfo(
io.NopCloser(bytes.NewReader(byteArray)),
ptr.Val(lst.GetId()),
details.ItemInfo{SharePoint: ListToSPInfo(lst, size)})
@ -279,7 +279,7 @@ func (sc *Collection) retrievePages(
metrics.Bytes += size
metrics.Successes++
item, err := data.NewPrefetchedItem(
item, err := data.NewPrefetchedItemWithInfo(
io.NopCloser(bytes.NewReader(byteArray)),
ptr.Val(pg.GetId()),
details.ItemInfo{SharePoint: pageToSPInfo(pg, root, size)})

View File

@ -103,7 +103,7 @@ func (suite *SharePointCollectionSuite) TestCollection_Items() {
byteArray, err := ow.GetSerializedContent()
require.NoError(t, err, clues.ToCore(err))
data, err := data.NewPrefetchedItem(
data, err := data.NewPrefetchedItemWithInfo(
io.NopCloser(bytes.NewReader(byteArray)),
name,
details.ItemInfo{SharePoint: ListToSPInfo(listing, int64(len(byteArray)))})
@ -133,7 +133,7 @@ func (suite *SharePointCollectionSuite) TestCollection_Items() {
page, err := betaAPI.CreatePageFromBytes(byteArray)
require.NoError(t, err, clues.ToCore(err))
data, err := data.NewPrefetchedItem(
data, err := data.NewPrefetchedItemWithInfo(
io.NopCloser(bytes.NewReader(byteArray)),
itemName,
details.ItemInfo{SharePoint: betaAPI.PageInfo(page, int64(len(byteArray)))})
@ -196,7 +196,7 @@ func (suite *SharePointCollectionSuite) TestListCollection_Restore() {
byteArray, err := service.Serialize(listing)
require.NoError(t, err, clues.ToCore(err))
listData, err := data.NewPrefetchedItem(
listData, err := data.NewPrefetchedItemWithInfo(
io.NopCloser(bytes.NewReader(byteArray)),
testName,
details.ItemInfo{SharePoint: ListToSPInfo(listing, int64(len(byteArray)))})

View File

@ -36,7 +36,7 @@ type Controller struct {
tenant string
credentials account.M365Config
ownerLookup getOwnerIDAndNamer
ownerLookup idname.GetResourceIDAndNamer
// maps of resource owner ids to names, and names to ids.
// not guaranteed to be populated, only here as a post-population
// reference for processes that choose to populate the values.
@ -229,38 +229,24 @@ type getIDAndNamer interface {
)
}
var _ getOwnerIDAndNamer = &resourceClient{}
var _ idname.GetResourceIDAndNamer = &resourceClient{}
type getOwnerIDAndNamer interface {
getOwnerIDAndNameFrom(
ctx context.Context,
discovery api.Client,
owner string,
ins idname.Cacher,
) (
ownerID string,
ownerName string,
err error,
)
}
// getOwnerIDAndNameFrom looks up the owner's canonical id and display name.
// If the owner is present in the idNameSwapper, then that interface's id and
// GetResourceIDAndNameFrom looks up the resource's canonical id and display name.
// If the resource is present in the idNameSwapper, then that interface's id and
// name values are returned. As a fallback, the resource calls the discovery
// api to fetch the user or site using the owner value. This fallback assumes
// that the owner is a well formed ID or display name of appropriate design
// api to fetch the user or site using the resource value. This fallback assumes
// that the resource is a well formed ID or display name of appropriate design
// (PrincipalName for users, WebURL for sites).
func (r resourceClient) getOwnerIDAndNameFrom(
func (r resourceClient) GetResourceIDAndNameFrom(
ctx context.Context,
discovery api.Client,
owner string,
ins idname.Cacher,
) (string, string, error) {
) (idname.Provider, error) {
if ins != nil {
if n, ok := ins.NameOf(owner); ok {
return owner, n, nil
return idname.NewProvider(owner, n), nil
} else if i, ok := ins.IDOf(owner); ok {
return i, owner, nil
return idname.NewProvider(i, owner), nil
}
}
@ -274,17 +260,21 @@ func (r resourceClient) getOwnerIDAndNameFrom(
id, name, err = r.getter.GetIDAndName(ctx, owner, api.CallConfig{})
if err != nil {
if graph.IsErrUserNotFound(err) {
return "", "", clues.Stack(graph.ErrResourceOwnerNotFound, err)
return nil, clues.Stack(graph.ErrResourceOwnerNotFound, err)
}
return "", "", err
if graph.IsErrResourceLocked(err) {
return nil, clues.Stack(graph.ErrResourceLocked, err)
}
return nil, err
}
if len(id) == 0 || len(name) == 0 {
return "", "", clues.Stack(graph.ErrResourceOwnerNotFound)
return nil, clues.Stack(graph.ErrResourceOwnerNotFound)
}
return id, name, nil
return idname.NewProvider(id, name), nil
}
// PopulateProtectedResourceIDAndName takes the provided owner identifier and produces
@ -297,15 +287,15 @@ func (r resourceClient) getOwnerIDAndNameFrom(
// data gets stored inside the controller instance for later re-use.
func (ctrl *Controller) PopulateProtectedResourceIDAndName(
ctx context.Context,
owner string, // input value, can be either id or name
resourceID string, // input value, can be either id or name
ins idname.Cacher,
) (string, string, error) {
id, name, err := ctrl.ownerLookup.getOwnerIDAndNameFrom(ctx, ctrl.AC, owner, ins)
) (idname.Provider, error) {
pr, err := ctrl.ownerLookup.GetResourceIDAndNameFrom(ctx, resourceID, ins)
if err != nil {
return "", "", clues.Wrap(err, "identifying resource owner")
return nil, clues.Wrap(err, "identifying resource owner")
}
ctrl.IDNameLookup = idname.NewCache(map[string]string{id: name})
ctrl.IDNameLookup = idname.NewCache(map[string]string{pr.ID(): pr.Name()})
return id, name, nil
return pr, nil
}

View File

@ -65,114 +65,126 @@ func (suite *ControllerUnitSuite) TestPopulateOwnerIDAndNamesFrom() {
)
table := []struct {
name string
owner string
ins inMock.Cache
rc *resourceClient
expectID string
expectName string
expectErr require.ErrorAssertionFunc
name string
protectedResource string
ins inMock.Cache
rc *resourceClient
expectID string
expectName string
expectErr require.ErrorAssertionFunc
expectNil require.ValueAssertionFunc
}{
{
name: "nil ins",
owner: id,
rc: lookup,
expectID: id,
expectName: name,
expectErr: require.NoError,
name: "nil ins",
protectedResource: id,
rc: lookup,
expectID: id,
expectName: name,
expectErr: require.NoError,
expectNil: require.NotNil,
},
{
name: "nil ins no lookup",
owner: id,
rc: noLookup,
expectID: "",
expectName: "",
expectErr: require.Error,
name: "nil ins no lookup",
protectedResource: id,
rc: noLookup,
expectID: "",
expectName: "",
expectErr: require.Error,
expectNil: require.Nil,
},
{
name: "only id map with owner id",
owner: id,
ins: inMock.NewCache(itn, nil),
rc: noLookup,
expectID: id,
expectName: name,
expectErr: require.NoError,
name: "only id map with owner id",
protectedResource: id,
ins: inMock.NewCache(itn, nil),
rc: noLookup,
expectID: id,
expectName: name,
expectErr: require.NoError,
expectNil: require.NotNil,
},
{
name: "only name map with owner id",
owner: id,
ins: inMock.NewCache(nil, nti),
rc: noLookup,
expectID: "",
expectName: "",
expectErr: require.Error,
name: "only name map with owner id",
protectedResource: id,
ins: inMock.NewCache(nil, nti),
rc: noLookup,
expectID: "",
expectName: "",
expectErr: require.Error,
expectNil: require.Nil,
},
{
name: "only name map with owner id and lookup",
owner: id,
ins: inMock.NewCache(nil, nti),
rc: lookup,
expectID: id,
expectName: name,
expectErr: require.NoError,
name: "only name map with owner id and lookup",
protectedResource: id,
ins: inMock.NewCache(nil, nti),
rc: lookup,
expectID: id,
expectName: name,
expectErr: require.NoError,
expectNil: require.NotNil,
},
{
name: "only id map with owner name",
owner: name,
ins: inMock.NewCache(itn, nil),
rc: lookup,
expectID: id,
expectName: name,
expectErr: require.NoError,
name: "only id map with owner name",
protectedResource: name,
ins: inMock.NewCache(itn, nil),
rc: lookup,
expectID: id,
expectName: name,
expectErr: require.NoError,
expectNil: require.NotNil,
},
{
name: "only name map with owner name",
owner: name,
ins: inMock.NewCache(nil, nti),
rc: noLookup,
expectID: id,
expectName: name,
expectErr: require.NoError,
name: "only name map with owner name",
protectedResource: name,
ins: inMock.NewCache(nil, nti),
rc: noLookup,
expectID: id,
expectName: name,
expectErr: require.NoError,
expectNil: require.NotNil,
},
{
name: "only id map with owner name",
owner: name,
ins: inMock.NewCache(itn, nil),
rc: noLookup,
expectID: "",
expectName: "",
expectErr: require.Error,
name: "only id map with owner name",
protectedResource: name,
ins: inMock.NewCache(itn, nil),
rc: noLookup,
expectID: "",
expectName: "",
expectErr: require.Error,
expectNil: require.Nil,
},
{
name: "only id map with owner name and lookup",
owner: name,
ins: inMock.NewCache(itn, nil),
rc: lookup,
expectID: id,
expectName: name,
expectErr: require.NoError,
name: "only id map with owner name and lookup",
protectedResource: name,
ins: inMock.NewCache(itn, nil),
rc: lookup,
expectID: id,
expectName: name,
expectErr: require.NoError,
expectNil: require.NotNil,
},
{
name: "both maps with owner id",
owner: id,
ins: inMock.NewCache(itn, nti),
rc: noLookup,
expectID: id,
expectName: name,
expectErr: require.NoError,
name: "both maps with owner id",
protectedResource: id,
ins: inMock.NewCache(itn, nti),
rc: noLookup,
expectID: id,
expectName: name,
expectErr: require.NoError,
expectNil: require.NotNil,
},
{
name: "both maps with owner name",
owner: name,
ins: inMock.NewCache(itn, nti),
rc: noLookup,
expectID: id,
expectName: name,
expectErr: require.NoError,
name: "both maps with owner name",
protectedResource: name,
ins: inMock.NewCache(itn, nti),
rc: noLookup,
expectID: id,
expectName: name,
expectErr: require.NoError,
expectNil: require.NotNil,
},
{
name: "non-matching maps with owner id",
owner: id,
name: "non-matching maps with owner id",
protectedResource: id,
ins: inMock.NewCache(
map[string]string{"foo": "bar"},
map[string]string{"fnords": "smarf"}),
@ -180,10 +192,11 @@ func (suite *ControllerUnitSuite) TestPopulateOwnerIDAndNamesFrom() {
expectID: "",
expectName: "",
expectErr: require.Error,
expectNil: require.Nil,
},
{
name: "non-matching with owner name",
owner: name,
name: "non-matching with owner name",
protectedResource: name,
ins: inMock.NewCache(
map[string]string{"foo": "bar"},
map[string]string{"fnords": "smarf"}),
@ -191,10 +204,11 @@ func (suite *ControllerUnitSuite) TestPopulateOwnerIDAndNamesFrom() {
expectID: "",
expectName: "",
expectErr: require.Error,
expectNil: require.Nil,
},
{
name: "non-matching maps with owner id and lookup",
owner: id,
name: "non-matching maps with owner id and lookup",
protectedResource: id,
ins: inMock.NewCache(
map[string]string{"foo": "bar"},
map[string]string{"fnords": "smarf"}),
@ -202,10 +216,11 @@ func (suite *ControllerUnitSuite) TestPopulateOwnerIDAndNamesFrom() {
expectID: id,
expectName: name,
expectErr: require.NoError,
expectNil: require.NotNil,
},
{
name: "non-matching with owner name and lookup",
owner: name,
name: "non-matching with owner name and lookup",
protectedResource: name,
ins: inMock.NewCache(
map[string]string{"foo": "bar"},
map[string]string{"fnords": "smarf"}),
@ -213,6 +228,7 @@ func (suite *ControllerUnitSuite) TestPopulateOwnerIDAndNamesFrom() {
expectID: id,
expectName: name,
expectErr: require.NoError,
expectNil: require.NotNil,
},
}
for _, test := range table {
@ -224,10 +240,16 @@ func (suite *ControllerUnitSuite) TestPopulateOwnerIDAndNamesFrom() {
ctrl := &Controller{ownerLookup: test.rc}
rID, rName, err := ctrl.PopulateProtectedResourceIDAndName(ctx, test.owner, test.ins)
resource, err := ctrl.PopulateProtectedResourceIDAndName(ctx, test.protectedResource, test.ins)
test.expectErr(t, err, clues.ToCore(err))
assert.Equal(t, test.expectID, rID, "id")
assert.Equal(t, test.expectName, rName, "name")
test.expectNil(t, resource)
if err != nil {
return
}
assert.Equal(t, test.expectID, resource.ID(), "id")
assert.Equal(t, test.expectName, resource.Name(), "name")
})
}
}
@ -1362,15 +1384,15 @@ func (suite *ControllerIntegrationSuite) TestBackup_CreatesPrefixCollections() {
start = time.Now()
)
id, name, err := backupCtrl.PopulateProtectedResourceIDAndName(ctx, backupSel.DiscreteOwner, nil)
resource, err := backupCtrl.PopulateProtectedResourceIDAndName(ctx, backupSel.DiscreteOwner, nil)
require.NoError(t, err, clues.ToCore(err))
backupSel.SetDiscreteOwnerIDName(id, name)
backupSel.SetDiscreteOwnerIDName(resource.ID(), resource.Name())
bpc := inject.BackupProducerConfig{
LastBackupVersion: version.NoBackup,
Options: control.DefaultOptions(),
ProtectedResource: inMock.NewProvider(id, name),
ProtectedResource: resource,
Selector: backupSel,
}

View File

@ -27,6 +27,7 @@ func (ctrl *Controller) ProduceExportCollections(
exportCfg control.ExportConfig,
opts control.Options,
dcs []data.RestoreCollection,
stats *data.ExportStats,
errs *fault.Bus,
) ([]export.Collectioner, error) {
ctx, end := diagnostics.Span(ctx, "m365:export")
@ -51,6 +52,7 @@ func (ctrl *Controller) ProduceExportCollections(
opts,
dcs,
deets,
stats,
errs)
case selectors.ServiceSharePoint:
expCollections, err = sharepoint.ProduceExportCollections(
@ -61,6 +63,7 @@ func (ctrl *Controller) ProduceExportCollections(
dcs,
ctrl.backupDriveIDNames,
deets,
stats,
errs)
case selectors.ServiceGroups:
expCollections, err = groups.ProduceExportCollections(
@ -72,6 +75,7 @@ func (ctrl *Controller) ProduceExportCollections(
ctrl.backupDriveIDNames,
ctrl.backupSiteIDWebURL,
deets,
stats,
errs)
default:

View File

@ -149,11 +149,12 @@ const limiterConsumptionCtxKey limiterConsumptionKey = "corsoGraphRateLimiterCon
const (
// https://learn.microsoft.com/en-us/sharepoint/dev/general-development
// /how-to-avoid-getting-throttled-or-blocked-in-sharepoint-online#application-throttling
defaultLC = 1
driveDefaultLC = 2
defaultLC = 1
// limit consumption rate for single-item GETs requests,
// or delta-based multi-item GETs.
// or delta-based multi-item GETs, or item content download requests.
SingleGetOrDeltaLC = 1
// delta queries without a delta token cost 2 units
DeltaNoTokenLC = 2
// limit consumption rate for anything permissions related
PermissionsLC = 5
)
@ -185,13 +186,7 @@ func ctxLimiterConsumption(ctx context.Context, defaultConsumption int) int {
// the next token set is available.
func QueueRequest(ctx context.Context) {
limiter := ctxLimiter(ctx)
defaultConsumed := defaultLC
if limiter == driveLimiter {
defaultConsumed = driveDefaultLC
}
consume := ctxLimiterConsumption(ctx, defaultConsumed)
consume := ctxLimiterConsumption(ctx, defaultLC)
if err := limiter.WaitN(ctx, consume); err != nil {
logger.CtxErr(ctx, err).Error("graph middleware waiting on the limiter")
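Note: the constants above assign a token cost to each request class (1 for single-item or delta-token GETs, 2 for delta queries without a token, 5 for permissions calls), and QueueRequest drains that many tokens from the shared limiter before the call proceeds. A minimal sketch of the same weighted-consumption idea using golang.org/x/time/rate; the limiter rate and costs here are illustrative stand-ins, not Corso's actual middleware configuration:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// Hypothetical limiter: 10 tokens/second with a burst of 10. Real values differ.
	limiter := rate.NewLimiter(rate.Limit(10), 10)
	ctx := context.Background()

	// Each request class consumes a different number of tokens, so cheap calls
	// (single GETs, delta queries with a token) pass through faster than
	// expensive ones (permissions lookups).
	costs := map[string]int{
		"single get / delta with token": 1, // SingleGetOrDeltaLC
		"delta without token":           2, // DeltaNoTokenLC
		"permissions":                   5, // PermissionsLC
	}

	start := time.Now()

	for name, cost := range costs {
		if err := limiter.WaitN(ctx, cost); err != nil {
			fmt.Println("limiter wait failed:", err)
			return
		}

		fmt.Printf("%-30s cost=%d elapsed=%s\n", name, cost, time.Since(start))
	}
}
```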

View File

@ -15,6 +15,7 @@ import (
"github.com/pkg/errors"
"github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/internal/common/str"
"github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/filters"
)
@ -50,6 +51,7 @@ const (
// nameAlreadyExists occurs when a request with
// @microsoft.graph.conflictBehavior=fail finds a conflicting file.
nameAlreadyExists errorCode = "nameAlreadyExists"
NotAllowed errorCode = "notAllowed"
noResolvedUsers errorCode = "noResolvedUsers"
QuotaExceeded errorCode = "ErrorQuotaExceeded"
RequestResourceNotFound errorCode = "Request_ResourceNotFound"
@ -61,6 +63,11 @@ const (
syncStateNotFound errorCode = "SyncStateNotFound"
)
// inner error codes
const (
ResourceLocked errorCode = "resourceLocked"
)
type errorMessage string
const (
@ -113,6 +120,11 @@ var (
// replies, no error should get returned.
ErrMultipleResultsMatchIdentifier = clues.New("multiple results match the identifier")
// ErrResourceLocked occurs when a resource has had its access locked.
// Example case: https://learn.microsoft.com/en-us/sharepoint/manage-lock-status
// This makes the resource inaccessible for any Corso operations.
ErrResourceLocked = clues.New("resource has been locked and must be unlocked by an administrator")
// ErrServiceNotEnabled identifies that a resource owner does not have
// access to a given service.
ErrServiceNotEnabled = clues.New("service is not enabled for that resource owner")
@ -124,6 +136,8 @@ var (
ErrTimeout = clues.New("communication timeout")
ErrResourceOwnerNotFound = clues.New("resource owner not found in tenant")
ErrTokenExpired = clues.New("jwt token expired")
)
func IsErrApplicationThrottled(err error) bool {
@ -224,7 +238,8 @@ func IsErrUnauthorized(err error) bool {
// TODO: refine this investigation. We don't currently know if
// a specific item download url expired, or if the full connection
// auth expired.
return clues.HasLabel(err, LabelStatus(http.StatusUnauthorized))
return clues.HasLabel(err, LabelStatus(http.StatusUnauthorized)) ||
errors.Is(err, ErrTokenExpired)
}
func IsErrItemAlreadyExistsConflict(err error) bool {
@ -264,6 +279,12 @@ func IsErrSiteNotFound(err error) bool {
return hasErrorMessage(err, requestedSiteCouldNotBeFound)
}
func IsErrResourceLocked(err error) bool {
return errors.Is(err, ErrResourceLocked) ||
hasInnerErrorCode(err, ResourceLocked) ||
hasErrorCode(err, NotAllowed)
}
// ---------------------------------------------------------------------------
// error parsers
// ---------------------------------------------------------------------------
@ -291,6 +312,34 @@ func hasErrorCode(err error, codes ...errorCode) bool {
return filters.Equal(cs).Compare(code)
}
func hasInnerErrorCode(err error, codes ...errorCode) bool {
if err == nil {
return false
}
var oDataError odataerrors.ODataErrorable
if !errors.As(err, &oDataError) {
return false
}
inner := oDataError.GetErrorEscaped().GetInnerError()
if inner == nil {
return false
}
code, err := str.AnyValueToString("code", inner.GetAdditionalData())
if err != nil {
return false
}
cs := make([]string, len(codes))
for i, c := range codes {
cs[i] = string(c)
}
return filters.Equal(cs).Compare(code)
}
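Note: hasInnerErrorCode digs one level deeper than hasErrorCode, pulling the "code" value out of the OData error's innerError additional data. For orientation, a stand-alone sketch that decodes a representative (not authoritative) Graph-style error body and reads both codes with encoding/json; the exact fields vary by API:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// A representative Graph-style error body; locked-resource failures carry the
// interesting code on innerError rather than the top-level error.
const body = `{
  "error": {
    "code": "notAllowed",
    "message": "Resource is locked.",
    "innerError": {
      "code": "resourceLocked"
    }
  }
}`

type graphError struct {
	Error struct {
		Code       string `json:"code"`
		Message    string `json:"message"`
		InnerError struct {
			Code string `json:"code"`
		} `json:"innerError"`
	} `json:"error"`
}

func main() {
	var ge graphError
	if err := json.Unmarshal([]byte(body), &ge); err != nil {
		fmt.Println("unmarshal:", err)
		return
	}

	// Mirrors the hasErrorCode / hasInnerErrorCode split: check the top-level
	// code first, then fall back to the inner code.
	fmt.Println("code:", ge.Error.Code)                  // notAllowed
	fmt.Println("inner code:", ge.Error.InnerError.Code) // resourceLocked
}
```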
// only use this as a last resort. Prefer the code or statuscode if possible.
func hasErrorMessage(err error, msgs ...errorMessage) bool {
if err == nil {

View File

@ -478,11 +478,16 @@ func (suite *GraphErrorsUnitSuite) TestIsErrUnauthorized() {
expect: assert.False,
},
{
name: "as",
name: "graph 401",
err: clues.Stack(assert.AnError).
Label(LabelStatus(http.StatusUnauthorized)),
expect: assert.True,
},
{
name: "token expired",
err: clues.Stack(assert.AnError, ErrTokenExpired),
expect: assert.True,
},
}
for _, test := range table {
suite.Run(test.name, func() {
@ -808,3 +813,57 @@ func (suite *GraphErrorsUnitSuite) TestIsErrItemNotFound() {
})
}
}
func (suite *GraphErrorsUnitSuite) TestIsErrResourceLocked() {
innerMatch := odErr("not-match")
merr := odataerrors.NewMainError()
inerr := odataerrors.NewInnerError()
inerr.SetAdditionalData(map[string]any{
"code": string(ResourceLocked),
})
merr.SetInnerError(inerr)
merr.SetCode(ptr.To("not-match"))
innerMatch.SetErrorEscaped(merr)
table := []struct {
name string
err error
expect assert.BoolAssertionFunc
}{
{
name: "nil",
err: nil,
expect: assert.False,
},
{
name: "non-matching",
err: assert.AnError,
expect: assert.False,
},
{
name: "non-matching oDataErr",
err: odErrMsg("InvalidRequest", "resource is locked"),
expect: assert.False,
},
{
name: "matching oDataErr code",
err: odErr(string(NotAllowed)),
expect: assert.True,
},
{
name: "matching oDataErr inner code",
err: innerMatch,
expect: assert.True,
},
{
name: "matching err sentinel",
err: ErrResourceLocked,
expect: assert.True,
},
}
for _, test := range table {
suite.Run(test.name, func() {
test.expect(suite.T(), IsErrResourceLocked(test.err))
})
}
}
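Note: the new sentinel and IsErrResourceLocked let callers detect locked resources whether the signal arrives as the sentinel itself, a top-level notAllowed code, or a resourceLocked inner-error code. A minimal, self-contained sketch of the sentinel-check pattern a consumer would use; the sentinel and wrapping below are local stand-ins, not the graph package's actual values:

```go
package main

import (
	"errors"
	"fmt"
)

// Stand-in for graph.ErrResourceLocked; illustrative only.
var errResourceLocked = errors.New("resource has been locked and must be unlocked by an administrator")

// backupSite simulates a call that fails because the site is locked. A real
// caller would receive a Graph odata error whose inner code is "resourceLocked";
// here we just wrap the sentinel directly.
func backupSite(siteID string) error {
	return fmt.Errorf("backing up site %s: %w", siteID, errResourceLocked)
}

func main() {
	err := backupSite("example-site")

	// errors.Is walks the wrap chain, so consumers can branch on the sentinel
	// without string matching.
	if errors.Is(err, errResourceLocked) {
		fmt.Println("resource is locked; ask an admin to unlock it:", err)
		return
	}

	fmt.Println("unexpected error:", err)
}
```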

View File

@ -82,7 +82,7 @@ func (hw httpWrapper) Request(
body io.Reader,
headers map[string]string,
) (*http.Response, error) {
req, err := http.NewRequest(method, url, body)
req, err := http.NewRequestWithContext(ctx, method, url, body)
if err != nil {
return nil, clues.Wrap(err, "new http request")
}
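Note: switching from http.NewRequest to http.NewRequestWithContext ties each outgoing request to the caller's context, so cancellation and deadlines propagate to the transport instead of the request running to completion on its own. A small standard-library-only sketch of the behavior; the URL and timeout are illustrative:

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"time"
)

func main() {
	// A short deadline so the example finishes quickly; real callers would
	// inherit the operation's context instead.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	// The request carries ctx, so the client aborts the call once the deadline
	// passes or the caller cancels.
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, "https://example.com", nil)
	if err != nil {
		fmt.Println("new request:", err)
		return
	}

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		// A context deadline surfaces here as a *url.Error wrapping
		// context.DeadlineExceeded.
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()

	fmt.Println("status:", resp.Status)
}
```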

View File

@ -57,7 +57,7 @@ func (mce MetadataCollectionEntry) toMetadataItem() (metadataItem, error) {
return metadataItem{}, clues.Wrap(err, "serializing metadata")
}
item, err := data.NewUnindexedPrefetchedItem(
item, err := data.NewPrefetchedItem(
io.NopCloser(buf),
mce.fileName,
time.Now())

View File

@ -70,7 +70,7 @@ func (suite *MetadataCollectionUnitSuite) TestItems() {
items := []metadataItem{}
for i := 0; i < len(itemNames); i++ {
item, err := data.NewUnindexedPrefetchedItem(
item, err := data.NewPrefetchedItem(
io.NopCloser(bytes.NewReader(itemData[i])),
itemNames[i],
time.Time{})

View File

@ -304,21 +304,21 @@ func checkContact(
// assert.Equal(t, expected.GetBusinessPhones(), got.GetBusinessPhones())
// TODO(ashmrtn): Remove this when we properly set and handle categories in
// addition to folders for contacts.
folders := colPath.Folder(false)
gotCategories := []string{}
// addition to folders for contacts. See #2785 and #3550.
// folders := colPath.Folder(false)
// gotCategories := []string{}
for _, cat := range got.GetCategories() {
// Don't add a category for the current folder since we didn't create the
// item with it and it throws off our comparisons.
if cat == folders {
continue
}
// for _, cat := range got.GetCategories() {
// // Don't add a category for the current folder since we didn't create the
// // item with it and it throws off our comparisons.
// if cat == folders {
// continue
// }
gotCategories = append(gotCategories, cat)
}
// gotCategories = append(gotCategories, cat)
// }
assert.ElementsMatch(t, expected.GetCategories(), gotCategories, "Categories")
// assert.ElementsMatch(t, expected.GetCategories(), gotCategories, "Categories")
// Skip ChangeKey as it's tied to this specific instance of the item.

View File

@ -90,6 +90,7 @@ func (ctrl Controller) ProduceExportCollections(
_ control.ExportConfig,
_ control.Options,
_ []data.RestoreCollection,
_ *data.ExportStats,
_ *fault.Bus,
) ([]export.Collectioner, error) {
return nil, ctrl.Err
@ -99,8 +100,7 @@ func (ctrl Controller) PopulateProtectedResourceIDAndName(
ctx context.Context,
protectedResource string, // input value, can be either id or name
ins idname.Cacher,
) (string, string, error) {
return ctrl.ProtectedResourceID,
ctrl.ProtectedResourceName,
) (idname.Provider, error) {
return idname.NewProvider(ctrl.ProtectedResourceID, ctrl.ProtectedResourceName),
ctrl.ProtectedResourceErr
}

View File

@ -93,7 +93,7 @@ func ProduceBackupCollections(
}
for _, s := range sites {
pr := idname.NewProvider(ptr.Val(s.GetId()), ptr.Val(s.GetName()))
pr := idname.NewProvider(ptr.Val(s.GetId()), ptr.Val(s.GetWebUrl()))
sbpc := inject.BackupProducerConfig{
LastBackupVersion: bpc.LastBackupVersion,
Options: bpc.Options,

View File

@ -29,6 +29,7 @@ func ProduceExportCollections(
backupDriveIDNames idname.Cacher,
backupSiteIDWebURL idname.Cacher,
deets *details.Builder,
stats *data.ExportStats,
errs *fault.Bus,
) ([]export.Collectioner, error) {
var (
@ -52,7 +53,8 @@ func ProduceExportCollections(
path.Builder{}.Append(folders...).String(),
[]data.RestoreCollection{restoreColl},
backupVersion,
exportCfg)
exportCfg,
stats)
case path.LibrariesCategory:
drivePath, err := path.ToDrivePath(restoreColl.FullPath())
if err != nil {
@ -91,7 +93,8 @@ func ProduceExportCollections(
coll = drive.NewExportCollection(
baseDir.String(),
[]data.RestoreCollection{restoreColl},
backupVersion)
backupVersion,
stats)
default:
el.AddRecoverable(
ctx,

View File

@ -7,6 +7,7 @@ import (
"strings"
"testing"
"github.com/alcionai/clues"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/suite"
@ -64,8 +65,8 @@ func (suite *ExportUnitSuite) TestExportRestoreCollections_messages() {
itemID = "itemID"
containerName = "channelID"
dii = groupMock.ItemInfo()
body = io.NopCloser(bytes.NewBufferString(
`{"displayname": "` + dii.Groups.ItemName + `"}`))
content = `{"displayname": "` + dii.Groups.ItemName + `"}`
body = io.NopCloser(bytes.NewBufferString(content))
exportCfg = control.ExportConfig{}
expectedPath = path.ChannelMessagesCategory.HumanString() + "/" + containerName
expectedItems = []export.Item{
@ -96,6 +97,8 @@ func (suite *ExportUnitSuite) TestExportRestoreCollections_messages() {
},
}
stats := data.ExportStats{}
ecs, err := ProduceExportCollections(
ctx,
int(version.Backup),
@ -105,6 +108,7 @@ func (suite *ExportUnitSuite) TestExportRestoreCollections_messages() {
nil,
nil,
nil,
&stats,
fault.New(true))
assert.NoError(t, err, "export collections error")
assert.Len(t, ecs, 1, "num of collections")
@ -113,7 +117,15 @@ func (suite *ExportUnitSuite) TestExportRestoreCollections_messages() {
fitems := []export.Item{}
size := 0
for item := range ecs[0].Items(ctx) {
b, err := io.ReadAll(item.Body)
assert.NoError(t, err, clues.ToCore(err))
// count up size for tests
size += len(b)
// have to nil out body, otherwise assert fails due to
// pointer memory location differences
item.Body = nil
@ -121,6 +133,11 @@ func (suite *ExportUnitSuite) TestExportRestoreCollections_messages() {
}
assert.Equal(t, expectedItems, fitems, "items")
expectedStats := data.ExportStats{}
expectedStats.UpdateBytes(path.ChannelMessagesCategory, int64(size))
expectedStats.UpdateResourceCount(path.ChannelMessagesCategory)
assert.Equal(t, expectedStats, stats, "stats")
}
func (suite *ExportUnitSuite) TestExportRestoreCollections_libraries() {
@ -182,6 +199,8 @@ func (suite *ExportUnitSuite) TestExportRestoreCollections_libraries() {
},
}
stats := data.ExportStats{}
ecs, err := ProduceExportCollections(
ctx,
int(version.Backup),
@ -191,6 +210,7 @@ func (suite *ExportUnitSuite) TestExportRestoreCollections_libraries() {
driveNameCache,
siteWebURLCache,
nil,
&stats,
fault.New(true))
assert.NoError(t, err, "export collections error")
assert.Len(t, ecs, 1, "num of collections")
@ -199,9 +219,24 @@ func (suite *ExportUnitSuite) TestExportRestoreCollections_libraries() {
fitems := []export.Item{}
size := 0
for item := range ecs[0].Items(ctx) {
// unwrap the body from stats reader
b, err := io.ReadAll(item.Body)
assert.NoError(t, err, clues.ToCore(err))
size += len(b)
bitem := io.NopCloser(bytes.NewBuffer(b))
item.Body = bitem
fitems = append(fitems, item)
}
assert.Equal(t, expectedItems, fitems, "items")
expectedStats := data.ExportStats{}
expectedStats.UpdateBytes(path.FilesCategory, int64(size))
expectedStats.UpdateResourceCount(path.FilesCategory)
assert.Equal(t, expectedStats, stats, "stats")
}
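Note: these tests read each exported item's body back out because the export collections wrap item bodies in a stats-tracking reader: bytes are tallied as the consumer reads, and a resource count is bumped per item. A rough, self-contained sketch of that wrapping-reader idea; the type and method names mirror what the tests exercise (UpdateBytes, UpdateResourceCount) but are local stand-ins, not the data package's implementation:

```go
package main

import (
	"fmt"
	"io"
	"strings"
	"sync"
)

// exportStats is a stand-in for the per-category counters the tests assert on.
type exportStats struct {
	mu     sync.Mutex
	bytes  map[string]int64
	counts map[string]int
}

func (s *exportStats) UpdateBytes(category string, n int64) {
	s.mu.Lock()
	defer s.mu.Unlock()

	if s.bytes == nil {
		s.bytes = map[string]int64{}
	}

	s.bytes[category] += n
}

func (s *exportStats) UpdateResourceCount(category string) {
	s.mu.Lock()
	defer s.mu.Unlock()

	if s.counts == nil {
		s.counts = map[string]int{}
	}

	s.counts[category]++
}

// statsReader counts bytes as the consumer drains the item body.
type statsReader struct {
	io.Reader
	category string
	stats    *exportStats
}

func (r *statsReader) Read(p []byte) (int, error) {
	n, err := r.Reader.Read(p)
	if n > 0 {
		r.stats.UpdateBytes(r.category, int64(n))
	}

	return n, err
}

func main() {
	stats := &exportStats{}

	// Wrap an item body; byte counts only accrue when the consumer reads it.
	body := &statsReader{Reader: strings.NewReader("hello export"), category: "files", stats: stats}
	stats.UpdateResourceCount("files")

	if _, err := io.Copy(io.Discard, body); err != nil {
		fmt.Println("read:", err)
		return
	}

	fmt.Printf("bytes=%v counts=%v\n", stats.bytes, stats.counts)
}
```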

View File

@ -51,7 +51,7 @@ func ProduceBackupCollections(
nc := drive.NewCollections(
drive.NewItemBackupHandler(ac.Drives(), bpc.ProtectedResource.ID(), scope),
tenant,
bpc.ProtectedResource.ID(),
bpc.ProtectedResource,
su,
bpc.Options)

View File

@ -30,6 +30,10 @@ func IsServiceEnabled(
return false, clues.Stack(graph.ErrResourceOwnerNotFound, err)
}
if graph.IsErrResourceLocked(err) {
return false, clues.Stack(graph.ErrResourceLocked, err)
}
return false, clues.Stack(err)
}

View File

@ -105,6 +105,17 @@ func (suite *EnabledUnitSuite) TestIsServiceEnabled() {
assert.Error(t, err, clues.ToCore(err))
},
},
{
name: "resource locked",
mock: func(ctx context.Context) getDefaultDriver {
odErr := odErrMsg(string(graph.NotAllowed), "resource")
return mockDGDD{nil, graph.Stack(ctx, odErr)}
},
expect: assert.False,
expectErr: func(t *testing.T, err error) {
assert.Error(t, err, clues.ToCore(err))
},
},
{
name: "arbitrary error",
mock: func(ctx context.Context) getDefaultDriver {

View File

@ -23,6 +23,7 @@ func ProduceExportCollections(
opts control.Options,
dcs []data.RestoreCollection,
deets *details.Builder,
stats *data.ExportStats,
errs *fault.Bus,
) ([]export.Collectioner, error) {
var (
@ -43,7 +44,8 @@ func ProduceExportCollections(
drive.NewExportCollection(
baseDir.String(),
[]data.RestoreCollection{dc},
backupVersion))
backupVersion,
stats))
}
return ec, el.Failure()

View File

@ -6,6 +6,7 @@ import (
"io"
"testing"
"github.com/alcionai/clues"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/suite"
@ -19,6 +20,7 @@ import (
"github.com/alcionai/corso/src/pkg/control"
"github.com/alcionai/corso/src/pkg/export"
"github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/path"
)
type ExportUnitSuite struct {
@ -245,15 +247,32 @@ func (suite *ExportUnitSuite) TestGetItems() {
ctx, flush := tester.NewContext(t)
defer flush()
stats := data.ExportStats{}
ec := drive.NewExportCollection(
"",
[]data.RestoreCollection{test.backingCollection},
test.version)
test.version,
&stats)
items := ec.Items(ctx)
count := 0
size := 0
fitems := []export.Item{}
for item := range items {
if item.Error == nil {
count++
}
if item.Body != nil {
b, err := io.ReadAll(item.Body)
assert.NoError(t, err, clues.ToCore(err))
size += len(b)
item.Body = io.NopCloser(bytes.NewBuffer(b))
}
fitems = append(fitems, item)
}
@ -268,6 +287,19 @@ func (suite *ExportUnitSuite) TestGetItems() {
assert.Equal(t, test.expectedItems[i].Body, item.Body, "body")
assert.ErrorIs(t, item.Error, test.expectedItems[i].Error)
}
var expectedStats data.ExportStats
if size+count > 0 { // stats are only initialized if something was exported
expectedStats = data.ExportStats{}
expectedStats.UpdateBytes(path.FilesCategory, int64(size))
for i := 0; i < count; i++ {
expectedStats.UpdateResourceCount(path.FilesCategory)
}
}
assert.Equal(t, expectedStats, stats, "stats")
})
}
}
@ -312,6 +344,8 @@ func (suite *ExportUnitSuite) TestExportRestoreCollections() {
},
}
stats := data.ExportStats{}
ecs, err := ProduceExportCollections(
ctx,
int(version.Backup),
@ -319,14 +353,30 @@ func (suite *ExportUnitSuite) TestExportRestoreCollections() {
control.DefaultOptions(),
dcs,
nil,
&stats,
fault.New(true))
assert.NoError(t, err, "export collections error")
assert.Len(t, ecs, 1, "num of collections")
fitems := []export.Item{}
size := 0
for item := range ecs[0].Items(ctx) {
// unwrap the body from stats reader
b, err := io.ReadAll(item.Body)
assert.NoError(t, err, clues.ToCore(err))
size += len(b)
bitem := io.NopCloser(bytes.NewBuffer(b))
item.Body = bitem
fitems = append(fitems, item)
}
assert.Equal(t, expectedItems, fitems, "items")
expectedStats := data.ExportStats{}
expectedStats.UpdateBytes(path.FilesCategory, int64(size))
expectedStats.UpdateResourceCount(path.FilesCategory)
assert.Equal(t, expectedStats, stats, "stats")
}

View File

@ -8,11 +8,14 @@ import (
"github.com/microsoftgraph/msgraph-sdk-go/drives"
"github.com/microsoftgraph/msgraph-sdk-go/models"
"github.com/alcionai/corso/src/internal/common/idname"
"github.com/alcionai/corso/src/internal/common/ptr"
odConsts "github.com/alcionai/corso/src/internal/m365/service/onedrive/consts"
"github.com/alcionai/corso/src/pkg/backup/details"
"github.com/alcionai/corso/src/pkg/control"
"github.com/alcionai/corso/src/pkg/path"
"github.com/alcionai/corso/src/pkg/services/m365/api"
apiMock "github.com/alcionai/corso/src/pkg/services/m365/api/mock"
)
// ---------------------------------------------------------------------------
@ -22,6 +25,8 @@ import (
type BackupHandler struct {
ItemInfo details.ItemInfo
DriveItemEnumeration EnumeratesDriveItemsDelta
GI GetsItem
GIP GetsItemPermission
@ -34,9 +39,9 @@ type BackupHandler struct {
CanonPathFn canonPather
CanonPathErr error
ResourceOwner string
Service path.ServiceType
Category path.CategoryType
ProtectedResource idname.Provider
Service path.ServiceType
Category path.CategoryType
DrivePagerV api.Pager[models.Driveable]
// driveID -> itemPager
@ -55,12 +60,13 @@ func DefaultOneDriveBH(resourceOwner string) *BackupHandler {
OneDrive: &details.OneDriveInfo{},
Extension: &details.ExtensionData{},
},
DriveItemEnumeration: EnumeratesDriveItemsDelta{},
GI: GetsItem{Err: clues.New("not defined")},
GIP: GetsItemPermission{Err: clues.New("not defined")},
PathPrefixFn: defaultOneDrivePathPrefixer,
MetadataPathPrefixFn: defaultOneDriveMetadataPathPrefixer,
CanonPathFn: defaultOneDriveCanonPather,
ResourceOwner: resourceOwner,
ProtectedResource: idname.NewProvider(resourceOwner, resourceOwner),
Service: path.OneDriveService,
Category: path.FilesCategory,
LocationIDFn: defaultOneDriveLocationIDer,
@ -80,7 +86,7 @@ func DefaultSharePointBH(resourceOwner string) *BackupHandler {
PathPrefixFn: defaultSharePointPathPrefixer,
MetadataPathPrefixFn: defaultSharePointMetadataPathPrefixer,
CanonPathFn: defaultSharePointCanonPather,
ResourceOwner: resourceOwner,
ProtectedResource: idname.NewProvider(resourceOwner, resourceOwner),
Service: path.SharePointService,
Category: path.LibrariesCategory,
LocationIDFn: defaultSharePointLocationIDer,
@ -90,7 +96,7 @@ func DefaultSharePointBH(resourceOwner string) *BackupHandler {
}
func (h BackupHandler) PathPrefix(tID, driveID string) (path.Path, error) {
pp, err := h.PathPrefixFn(tID, h.ResourceOwner, driveID)
pp, err := h.PathPrefixFn(tID, h.ProtectedResource.ID(), driveID)
if err != nil {
return nil, err
}
@ -99,7 +105,7 @@ func (h BackupHandler) PathPrefix(tID, driveID string) (path.Path, error) {
}
func (h BackupHandler) MetadataPathPrefix(tID string) (path.Path, error) {
pp, err := h.MetadataPathPrefixFn(tID, h.ResourceOwner)
pp, err := h.MetadataPathPrefixFn(tID, h.ProtectedResource.ID())
if err != nil {
return nil, err
}
@ -108,7 +114,7 @@ func (h BackupHandler) MetadataPathPrefix(tID string) (path.Path, error) {
}
func (h BackupHandler) CanonicalPath(pb *path.Builder, tID string) (path.Path, error) {
cp, err := h.CanonPathFn(pb, tID, h.ResourceOwner)
cp, err := h.CanonPathFn(pb, tID, h.ProtectedResource.ID())
if err != nil {
return nil, err
}
@ -124,10 +130,6 @@ func (h BackupHandler) NewDrivePager(string, []string) api.Pager[models.Driveabl
return h.DrivePagerV
}
func (h BackupHandler) NewItemPager(driveID string, _ string, _ []string) api.DeltaPager[models.DriveItemable] {
return h.ItemPagerV[driveID]
}
func (h BackupHandler) FormatDisplayPath(_ string, pb *path.Builder) string {
return "/" + pb.String()
}
@ -136,7 +138,13 @@ func (h BackupHandler) NewLocationIDer(driveID string, elems ...string) details.
return h.LocationIDFn(driveID, elems...)
}
func (h BackupHandler) AugmentItemInfo(details.ItemInfo, models.DriveItemable, int64, *path.Builder) details.ItemInfo {
func (h BackupHandler) AugmentItemInfo(
details.ItemInfo,
idname.Provider,
models.DriveItemable,
int64,
*path.Builder,
) details.ItemInfo {
return h.ItemInfo
}
@ -152,6 +160,18 @@ func (h *BackupHandler) Get(context.Context, string, map[string]string) (*http.R
return h.GetResps[c], h.GetErrs[c]
}
func (h BackupHandler) EnumerateDriveItemsDelta(
ctx context.Context,
driveID, prevDeltaLink string,
selectProps []string,
) ([]models.DriveItemable, api.DeltaUpdate, error) {
return h.DriveItemEnumeration.EnumerateDriveItemsDelta(
ctx,
driveID,
prevDeltaLink,
selectProps)
}
func (h BackupHandler) GetItem(ctx context.Context, _, _ string) (models.DriveItemable, error) {
return h.GI.GetItem(ctx, "", "")
}
@ -254,6 +274,66 @@ func (m GetsItem) GetItem(
return m.Item, m.Err
}
// ---------------------------------------------------------------------------
// Enumerates Drive Items
// ---------------------------------------------------------------------------
type EnumeratesDriveItemsDelta struct {
Items map[string][]models.DriveItemable
DeltaUpdate map[string]api.DeltaUpdate
Err map[string]error
}
func (edi EnumeratesDriveItemsDelta) EnumerateDriveItemsDelta(
_ context.Context,
driveID, _ string,
_ []string,
) (
[]models.DriveItemable,
api.DeltaUpdate,
error,
) {
return edi.Items[driveID], edi.DeltaUpdate[driveID], edi.Err[driveID]
}
func PagerResultToEDID(
m map[string][]apiMock.PagerResult[models.DriveItemable],
) EnumeratesDriveItemsDelta {
edi := EnumeratesDriveItemsDelta{
Items: map[string][]models.DriveItemable{},
DeltaUpdate: map[string]api.DeltaUpdate{},
Err: map[string]error{},
}
for driveID, results := range m {
var (
err error
items = []models.DriveItemable{}
deltaUpdate api.DeltaUpdate
)
for _, pr := range results {
items = append(items, pr.Values...)
if pr.DeltaLink != nil {
deltaUpdate = api.DeltaUpdate{URL: ptr.Val(pr.DeltaLink)}
}
if pr.Err != nil {
err = pr.Err
}
deltaUpdate.Reset = deltaUpdate.Reset || pr.ResetDelta
}
edi.Items[driveID] = items
edi.Err[driveID] = err
edi.DeltaUpdate[driveID] = deltaUpdate
}
return edi
}
// ---------------------------------------------------------------------------
// Get Item Permissioner
// ---------------------------------------------------------------------------
@ -308,6 +388,7 @@ func (h RestoreHandler) NewDrivePager(string, []string) api.Pager[models.Driveab
func (h *RestoreHandler) AugmentItemInfo(
details.ItemInfo,
idname.Provider,
models.DriveItemable,
int64,
*path.Builder,

View File

@ -109,7 +109,7 @@ func (suite *SharePointPageSuite) TestRestoreSinglePage() {
//nolint:lll
byteArray := spMock.Page("Byte Test")
pageData, err := data.NewUnindexedPrefetchedItem(
pageData, err := data.NewPrefetchedItem(
io.NopCloser(bytes.NewReader(byteArray)),
testName,
time.Now())

View File

@ -9,6 +9,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"github.com/alcionai/corso/src/internal/common/idname"
"github.com/alcionai/corso/src/internal/m365/collection/drive"
odConsts "github.com/alcionai/corso/src/internal/m365/service/onedrive/consts"
"github.com/alcionai/corso/src/internal/tester"
@ -90,12 +91,9 @@ func (suite *LibrariesBackupUnitSuite) TestUpdateCollections() {
var (
paths = map[string]string{}
newPaths = map[string]string{}
currPaths = map[string]string{}
excluded = map[string]struct{}{}
itemColls = map[string]map[string]string{
driveID: {},
}
collMap = map[string]map[string]*drive.Collection{
collMap = map[string]map[string]*drive.Collection{
driveID: {},
}
)
@ -103,21 +101,20 @@ func (suite *LibrariesBackupUnitSuite) TestUpdateCollections() {
c := drive.NewCollections(
drive.NewLibraryBackupHandler(api.Drives{}, siteID, test.scope, path.SharePointService),
tenantID,
siteID,
idname.NewProvider(siteID, siteID),
nil,
control.DefaultOptions())
c.CollectionMap = collMap
err := c.UpdateCollections(
_, err := c.UpdateCollections(
ctx,
driveID,
"General",
test.items,
paths,
newPaths,
currPaths,
excluded,
itemColls,
true,
fault.New(true))

View File

@ -26,6 +26,7 @@ func ProduceExportCollections(
dcs []data.RestoreCollection,
backupDriveIDNames idname.CacheBuilder,
deets *details.Builder,
stats *data.ExportStats,
errs *fault.Bus,
) ([]export.Collectioner, error) {
var (
@ -56,7 +57,8 @@ func ProduceExportCollections(
drive.NewExportCollection(
baseDir.String(),
[]data.RestoreCollection{dc},
backupVersion))
backupVersion,
stats))
}
return ec, el.Failure()

View File

@ -7,6 +7,7 @@ import (
"strings"
"testing"
"github.com/alcionai/clues"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/suite"
@ -98,6 +99,8 @@ func (suite *ExportUnitSuite) TestExportRestoreCollections() {
},
}
stats := data.ExportStats{}
ecs, err := ProduceExportCollections(
ctx,
int(version.Backup),
@ -106,6 +109,7 @@ func (suite *ExportUnitSuite) TestExportRestoreCollections() {
dcs,
cache,
nil,
&stats,
fault.New(true))
assert.NoError(t, err, "export collections error")
assert.Len(t, ecs, 1, "num of collections")
@ -113,9 +117,24 @@ func (suite *ExportUnitSuite) TestExportRestoreCollections() {
assert.Equal(t, expectedPath, ecs[0].BasePath(), "base dir")
fitems := []export.Item{}
size := 0
for item := range ecs[0].Items(ctx) {
// unwrap the body from stats reader
b, err := io.ReadAll(item.Body)
assert.NoError(t, err, clues.ToCore(err))
size += len(b)
bitem := io.NopCloser(bytes.NewBuffer(b))
item.Body = bitem
fitems = append(fitems, item)
}
assert.Equal(t, expectedItems, fitems, "items")
expectedStats := data.ExportStats{}
expectedStats.UpdateBytes(path.FilesCategory, int64(size))
expectedStats.UpdateResourceCount(path.FilesCategory)
assert.Equal(t, expectedStats, stats, "stats")
}

View File

@ -247,15 +247,6 @@ func (op *BackupOperation) Run(ctx context.Context) (err error) {
"incremental", op.incremental,
"disable_assist_backup", op.disableAssistBackup)
op.bus.Event(
ctx,
events.BackupStart,
map[string]any{
events.StartTime: startTime,
events.Service: op.Selectors.Service.String(),
events.BackupID: op.Results.BackupID,
})
defer func() {
if op.Errors.Failure() != nil {
op.bus.Event(

View File

@ -27,6 +27,7 @@ import (
"github.com/alcionai/corso/src/pkg/export"
"github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/logger"
"github.com/alcionai/corso/src/pkg/path"
"github.com/alcionai/corso/src/pkg/selectors"
"github.com/alcionai/corso/src/pkg/store"
)
@ -46,6 +47,7 @@ type ExportOperation struct {
Selectors selectors.Selector
ExportCfg control.ExportConfig
Version string
stats data.ExportStats
acct account.Account
ec inject.ExportConsumer
@ -72,6 +74,7 @@ func NewExportOperation(
Selectors: sel,
Version: "v0",
ec: ec,
stats: data.ExportStats{},
}
if err := op.validate(); err != nil {
return ExportOperation{}, err
@ -242,16 +245,6 @@ func (op *ExportOperation) do(
"backup_snapshot_id", bup.SnapshotID,
"backup_version", bup.Version)
op.bus.Event(
ctx,
events.ExportStart,
map[string]any{
events.StartTime: start,
events.BackupID: op.BackupID,
events.BackupCreateTime: bup.CreationTime,
events.ExportID: opStats.exportID,
})
observe.Message(ctx, fmt.Sprintf("Discovered %d items in backup %s to export", len(paths), op.BackupID))
kopiaComplete := observe.MessageWithCompletion(ctx, "Enumerating items in repository")
@ -270,7 +263,7 @@ func (op *ExportOperation) do(
opStats.resourceCount = 1
opStats.cs = dcs
expCollections, err := exportRestoreCollections(
expCollections, err := produceExportCollections(
ctx,
op.ec,
bup.Version,
@ -278,6 +271,9 @@ func (op *ExportOperation) do(
op.ExportCfg,
op.Options,
dcs,
// We also have opStats, but that tracks different data.
// Maybe we can look into merging them some time in the future.
&op.stats,
op.Errors)
if err != nil {
return nil, clues.Stack(err)
@ -333,11 +329,19 @@ func (op *ExportOperation) finalizeMetrics(
return op.Errors.Failure()
}
// GetStats returns the stats of the export operation. Only call this after
// the export collections have been read and processed, since the stats
// reflect the data that was read and processed up to that point.
func (op *ExportOperation) GetStats() map[path.CategoryType]data.KindStats {
return op.stats.GetStats()
}
// ---------------------------------------------------------------------------
// Exporter funcs
// ---------------------------------------------------------------------------
func exportRestoreCollections(
func produceExportCollections(
ctx context.Context,
ec inject.ExportConsumer,
backupVersion int,
@ -345,6 +349,7 @@ func exportRestoreCollections(
exportCfg control.ExportConfig,
opts control.Options,
dcs []data.RestoreCollection,
exportStats *data.ExportStats,
errs *fault.Bus,
) ([]export.Collectioner, error) {
complete := observe.MessageWithCompletion(ctx, "Preparing export")
@ -360,6 +365,7 @@ func exportRestoreCollections(
exportCfg,
opts,
dcs,
exportStats,
errs)
if err != nil {
return nil, clues.Wrap(err, "exporting collections")

View File

@ -34,7 +34,7 @@ func ControllerWithSelector(
t.FailNow()
}
id, name, err := ctrl.PopulateProtectedResourceIDAndName(ctx, sel.DiscreteOwner, ins)
resource, err := ctrl.PopulateProtectedResourceIDAndName(ctx, sel.DiscreteOwner, ins)
if !assert.NoError(t, err, clues.ToCore(err)) {
if onFail != nil {
onFail()
@ -43,7 +43,7 @@ func ControllerWithSelector(
t.FailNow()
}
sel = sel.SetDiscreteOwnerIDName(id, name)
sel = sel.SetDiscreteOwnerIDName(resource.ID(), resource.Name())
return ctrl, sel
}

View File

@ -48,9 +48,9 @@ func LogFaultErrors(ctx context.Context, fe *fault.Errors, prefix string) {
}
var (
log = logger.Ctx(ctx)
pfxMsg = prefix + ":"
li, ls, lr = len(fe.Items), len(fe.Skipped), len(fe.Recovered)
log = logger.Ctx(ctx)
pfxMsg = prefix + ":"
li, ls, lr, la = len(fe.Items), len(fe.Skipped), len(fe.Recovered), len(fe.Alerts)
)
if fe.Failure == nil && li+ls+lr == 0 {
@ -73,4 +73,8 @@ func LogFaultErrors(ctx context.Context, fe *fault.Errors, prefix string) {
for i, err := range fe.Recovered {
log.With("recovered_error", err).Errorf("%s recoverable error %d of %d: %s", pfxMsg, i+1, lr, err.Msg)
}
for i, alert := range fe.Alerts {
log.With("alert", alert).Infof("%s alert %d of %d: %s", pfxMsg, i+1, la, alert.Message)
}
}

View File

@ -88,6 +88,7 @@ type (
exportCfg control.ExportConfig,
opts control.Options,
dcs []data.RestoreCollection,
stats *data.ExportStats,
errs *fault.Bus,
) ([]export.Collectioner, error)
@ -109,10 +110,7 @@ type (
ctx context.Context,
owner string, // input value, can be either id or name
ins idname.Cacher,
) (
id, name string,
err error,
)
) (idname.Provider, error)
}
RepoMaintenancer interface {

View File

@ -57,13 +57,6 @@ func (op *MaintenanceOperation) Run(ctx context.Context) (err error) {
op.Results.StartedAt = time.Now()
op.bus.Event(
ctx,
events.MaintenanceStart,
map[string]any{
events.StartTime: op.Results.StartedAt,
})
defer func() {
if op.Errors.Failure() != nil {
op.bus.Event(

View File

@ -279,16 +279,6 @@ func (op *RestoreOperation) do(
"backup_snapshot_id", bup.SnapshotID,
"backup_version", bup.Version)
op.bus.Event(
ctx,
events.RestoreStart,
map[string]any{
events.StartTime: start,
events.BackupID: op.BackupID,
events.BackupCreateTime: bup.CreationTime,
events.RestoreID: opStats.restoreID,
})
observe.Message(ctx, fmt.Sprintf("Discovered %d items in backup %s to restore", len(paths), op.BackupID))
progressBar := observe.MessageWithCompletion(ctx, "Enumerating items in repository")
@ -375,12 +365,12 @@ func chooseRestoreResource(
return orig, nil
}
id, name, err := pprian.PopulateProtectedResourceIDAndName(
resource, err := pprian.PopulateProtectedResourceIDAndName(
ctx,
restoreCfg.ProtectedResource,
nil)
return idname.NewProvider(id, name), clues.Stack(err).OrNil()
return resource, clues.Stack(err).OrNil()
}
// ---------------------------------------------------------------------------

View File

@ -366,8 +366,6 @@ func (suite *RestoreOpIntegrationSuite) TestRestore_Run_errorNoBackup() {
require.Nil(t, ds, "restoreOp.Run() should not produce details")
assert.Zero(t, ro.Results.ResourceOwners, "resource owners")
assert.Zero(t, ro.Results.BytesRead, "bytes read")
// no restore start, because we'd need to find the backup first.
assert.Equal(t, 0, mb.TimesCalled[events.RestoreStart], "restore-start events")
assert.Equal(t, 1, mb.TimesCalled[events.CorsoError], "corso-error events")
assert.Equal(t, 1, mb.TimesCalled[events.RestoreEnd], "restore-end events")
}

View File

@ -76,28 +76,28 @@ func (suite *ExchangeBackupIntgSuite) TestBackup_Run_exchange() {
category path.CategoryType
metadataFiles [][]string
}{
// {
// name: "Mail",
// selector: func() *selectors.ExchangeBackup {
// sel := selectors.NewExchangeBackup([]string{suite.its.user.ID})
// sel.Include(sel.MailFolders([]string{api.MailInbox}, selectors.PrefixMatch()))
// sel.DiscreteOwner = suite.its.user.ID
{
name: "Mail",
selector: func() *selectors.ExchangeBackup {
sel := selectors.NewExchangeBackup([]string{suite.its.user.ID})
sel.Include(sel.MailFolders([]string{api.MailInbox}, selectors.PrefixMatch()))
sel.DiscreteOwner = suite.its.user.ID
// return sel
// },
// category: path.EmailCategory,
// metadataFiles: exchange.MetadataFileNames(path.EmailCategory),
// },
// {
// name: "Contacts",
// selector: func() *selectors.ExchangeBackup {
// sel := selectors.NewExchangeBackup([]string{suite.its.user.ID})
// sel.Include(sel.ContactFolders([]string{api.DefaultContacts}, selectors.PrefixMatch()))
// return sel
// },
// category: path.ContactsCategory,
// metadataFiles: exchange.MetadataFileNames(path.ContactsCategory),
// },
return sel
},
category: path.EmailCategory,
metadataFiles: MetadataFileNames(path.EmailCategory),
},
{
name: "Contacts",
selector: func() *selectors.ExchangeBackup {
sel := selectors.NewExchangeBackup([]string{suite.its.user.ID})
sel.Include(sel.ContactFolders([]string{api.DefaultContacts}, selectors.PrefixMatch()))
return sel
},
category: path.ContactsCategory,
metadataFiles: MetadataFileNames(path.ContactsCategory),
},
{
name: "Calendar Events",
selector: func() *selectors.ExchangeBackup {
@ -226,12 +226,8 @@ func (suite *ExchangeBackupIntgSuite) TestBackup_Run_exchange() {
assert.Equal(t, bo.Results.ResourceOwners, incBO.Results.ResourceOwners, "incremental backup resource owner")
assert.NoError(t, incBO.Errors.Failure(), "incremental non-recoverable error", clues.ToCore(bo.Errors.Failure()))
assert.Empty(t, incBO.Errors.Recovered(), "count incremental recoverable/iteration errors")
assert.Equal(t, 1, incMB.TimesCalled[events.BackupStart], "incremental backup-start events")
assert.Equal(t, 0, incMB.TimesCalled[events.CorsoError], "corso error events")
assert.Equal(t, 1, incMB.TimesCalled[events.BackupEnd], "incremental backup-end events")
assert.Equal(t,
incMB.CalledWith[events.BackupStart][0][events.BackupID],
incBO.Results.BackupID, "incremental backupID pre-declaration")
})
}
}
@ -877,11 +873,7 @@ func testExchangeContinuousBackups(suite *ExchangeBackupIntgSuite, toggles contr
// assert.Equal(t, test.nonMetaItemsWritten, incBO.Results.ItemsWritten, "non meta incremental items write")
assert.NoError(t, incBO.Errors.Failure(), "incremental non-recoverable error", clues.ToCore(incBO.Errors.Failure()))
assert.Empty(t, incBO.Errors.Recovered(), "incremental recoverable/iteration errors")
assert.Equal(t, 1, incMB.TimesCalled[events.BackupStart], "incremental backup-start events")
assert.Equal(t, 1, incMB.TimesCalled[events.BackupEnd], "incremental backup-end events")
assert.Equal(t,
incMB.CalledWith[events.BackupStart][0][events.BackupID],
bupID, "incremental backupID pre-declaration")
})
}
}

View File

@ -226,18 +226,18 @@ func (suite *GroupsRestoreNightlyIntgSuite) TestRestore_Run_groupsWithAdvancedOp
suite.its.group.RootSite.DriveRootFolderID)
}
func (suite *GroupsRestoreNightlyIntgSuite) TestRestore_Run_groupsAlternateProtectedResource() {
sel := selectors.NewGroupsBackup([]string{suite.its.group.ID})
sel.Include(selTD.GroupsBackupLibraryFolderScope(sel))
sel.Filter(sel.Library("documents"))
sel.DiscreteOwner = suite.its.group.ID
// func (suite *GroupsRestoreNightlyIntgSuite) TestRestore_Run_groupsAlternateProtectedResource() {
// sel := selectors.NewGroupsBackup([]string{suite.its.group.ID})
// sel.Include(selTD.GroupsBackupLibraryFolderScope(sel))
// sel.Filter(sel.Library("documents"))
// sel.DiscreteOwner = suite.its.group.ID
runDriveRestoreToAlternateProtectedResource(
suite.T(),
suite,
suite.its.ac,
sel.Selector,
suite.its.group.RootSite,
suite.its.secondaryGroup.RootSite,
suite.its.secondaryGroup.ID)
}
// runDriveRestoreToAlternateProtectedResource(
// suite.T(),
// suite,
// suite.its.ac,
// sel.Selector,
// suite.its.group.RootSite,
// suite.its.secondaryGroup.RootSite,
// suite.its.secondaryGroup.ID)
// }

View File

@ -224,11 +224,7 @@ func runAndCheckBackup(
assert.Equal(t, 1, bo.Results.ResourceOwners, "count of resource owners")
assert.NoError(t, bo.Errors.Failure(), "incremental non-recoverable error", clues.ToCore(bo.Errors.Failure()))
assert.Empty(t, bo.Errors.Recovered(), "incremental recoverable/iteration errors")
assert.Equal(t, 1, mb.TimesCalled[events.BackupStart], "backup-start events")
assert.Equal(t, 1, mb.TimesCalled[events.BackupEnd], "backup-end events")
assert.Equal(t,
mb.CalledWith[events.BackupStart][0][events.BackupID],
bo.Results.BackupID, "backupID pre-declaration")
}
func checkBackupIsInManifests(
@ -550,7 +546,7 @@ func ControllerWithSelector(
t.FailNow()
}
id, name, err := ctrl.PopulateProtectedResourceIDAndName(ctx, sel.DiscreteOwner, ins)
resource, err := ctrl.PopulateProtectedResourceIDAndName(ctx, sel.DiscreteOwner, ins)
if !assert.NoError(t, err, clues.ToCore(err)) {
if onFail != nil {
onFail(t, ctx)
@ -559,7 +555,7 @@ func ControllerWithSelector(
t.FailNow()
}
sel = sel.SetDiscreteOwnerIDName(id, name)
sel = sel.SetDiscreteOwnerIDName(resource.ID(), resource.Name())
return ctrl, sel
}

View File

@ -801,11 +801,7 @@ func runDriveIncrementalTest(
assert.NoError(t, incBO.Errors.Failure(), "incremental non-recoverable error", clues.ToCore(incBO.Errors.Failure()))
assert.Empty(t, incBO.Errors.Recovered(), "incremental recoverable/iteration errors")
assert.Equal(t, 1, incMB.TimesCalled[events.BackupStart], "incremental backup-start events")
assert.Equal(t, 1, incMB.TimesCalled[events.BackupEnd], "incremental backup-end events")
assert.Equal(t,
incMB.CalledWith[events.BackupStart][0][events.BackupID],
bupID, "incremental backupID pre-declaration")
})
}
}
@ -912,11 +908,7 @@ func (suite *OneDriveBackupIntgSuite) TestBackup_Run_oneDriveOwnerMigration() {
assert.LessOrEqual(t, 2, incBO.Results.ItemsRead, "items read")
assert.NoError(t, incBO.Errors.Failure(), "non-recoverable error", clues.ToCore(incBO.Errors.Failure()))
assert.Empty(t, incBO.Errors.Recovered(), "recoverable/iteration errors")
assert.Equal(t, 1, incMB.TimesCalled[events.BackupStart], "backup-start events")
assert.Equal(t, 1, incMB.TimesCalled[events.BackupEnd], "backup-end events")
assert.Equal(t,
incMB.CalledWith[events.BackupStart][0][events.BackupID],
incBO.Results.BackupID, "backupID pre-declaration")
bid := incBO.Results.BackupID
bup := &backup.Backup{}

View File

@ -205,7 +205,6 @@ func runAndCheckRestore(
assert.NotZero(t, ro.Results.ItemsRead, "count of items read")
assert.NotZero(t, ro.Results.BytesRead, "bytes read")
assert.Equal(t, 1, ro.Results.ResourceOwners, "count of resource owners")
assert.Equal(t, 1, mb.TimesCalled[events.RestoreStart], "restore-start events")
assert.Equal(t, 1, mb.TimesCalled[events.RestoreEnd], "restore-end events")
return deets

View File

@ -182,7 +182,7 @@ func collect(
return nil, clues.Wrap(err, "marshalling body").WithClues(ctx)
}
item, err := data.NewUnindexedPrefetchedItem(
item, err := data.NewPrefetchedItem(
io.NopCloser(bytes.NewReader(bs)),
col.itemName,
time.Now())

View File

@ -102,7 +102,7 @@ func New(
switch true {
case s.HasCause(fault.SkipMalware):
malware++
case s.HasCause(fault.SkipBigOneNote):
case s.HasCause(fault.SkipOneNote):
invalidONFile++
default:
otherSkips++

View File

@ -16,6 +16,7 @@ const (
ApplicationThrottled errEnum = "application-throttled"
BackupNotFound errEnum = "backup-not-found"
RepoAlreadyExists errEnum = "repository-already-exists"
ResourceNotAccessible errEnum = "resource-not-accessible"
ResourceOwnerNotFound errEnum = "resource-owner-not-found"
ServiceNotEnabled errEnum = "service-not-enabled"
)
@ -27,6 +28,7 @@ var internalToExternal = map[errEnum][]error{
ApplicationThrottled: {graph.ErrApplicationThrottled},
BackupNotFound: {repository.ErrorBackupNotFound},
RepoAlreadyExists: {repository.ErrorRepoAlreadyExists},
ResourceNotAccessible: {graph.ErrResourceLocked},
ResourceOwnerNotFound: {graph.ErrResourceOwnerNotFound},
ServiceNotEnabled: {graph.ErrServiceNotEnabled},
}
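Note: the internalToExternal table maps each externally visible enum to the internal sentinel errors it represents, so the public check only needs errors.Is against that list. A small illustrative sketch of the lookup pattern using local stand-in sentinels, not the actual package wiring:

```go
package main

import (
	"errors"
	"fmt"
)

type errEnum string

const (
	resourceNotAccessible errEnum = "resource-not-accessible"
	serviceNotEnabled     errEnum = "service-not-enabled"
)

// Stand-ins for the internal graph sentinels; illustrative only.
var (
	errResourceLocked    = errors.New("resource locked")
	errServiceNotEnabled = errors.New("service not enabled")
)

// internalToExternal mirrors the mapping style above: each external enum
// lists the internal sentinels that should match it.
var internalToExternal = map[errEnum][]error{
	resourceNotAccessible: {errResourceLocked},
	serviceNotEnabled:     {errServiceNotEnabled},
}

// is reports whether err matches any sentinel registered for the enum.
func is(err error, target errEnum) bool {
	for _, sentinel := range internalToExternal[target] {
		if errors.Is(err, sentinel) {
			return true
		}
	}

	return false
}

func main() {
	err := fmt.Errorf("checking site: %w", errResourceLocked)

	fmt.Println("resource not accessible?", is(err, resourceNotAccessible)) // true
	fmt.Println("service not enabled?", is(err, serviceNotEnabled))         // false
}
```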

View File

@ -29,6 +29,7 @@ func (suite *ErrUnitSuite) TestInternal() {
{BackupNotFound, []error{repository.ErrorBackupNotFound}},
{ServiceNotEnabled, []error{graph.ErrServiceNotEnabled}},
{ResourceOwnerNotFound, []error{graph.ErrResourceOwnerNotFound}},
{ResourceNotAccessible, []error{graph.ErrResourceLocked}},
}
for _, test := range table {
suite.Run(string(test.get), func() {
@ -46,6 +47,7 @@ func (suite *ErrUnitSuite) TestIs() {
{BackupNotFound, repository.ErrorBackupNotFound},
{ServiceNotEnabled, graph.ErrServiceNotEnabled},
{ResourceOwnerNotFound, graph.ErrResourceOwnerNotFound},
{ResourceNotAccessible, graph.ErrResourceLocked},
}
for _, test := range table {
suite.Run(string(test.target), func() {

Some files were not shown because too many files have changed in this diff.