Merge branch 'main' into sharepoint-restore-selectors
commit 341878b6e3
.github/workflows/ci.yml (vendored, 28 changes)
@@ -239,17 +239,29 @@ jobs:
     run:
       working-directory: src
     steps:
-      - name: Fail check
+      - name: Fail check if not repository_dispatch
        if: github.event_name != 'repository_dispatch'
        run: |
          echo "Workflow requires approval from a maintainer to run. It will be automatically rerun on approval."
          exit 1

+      - uses: marocchino/sticky-pull-request-comment@v2
+        if: github.event.client_payload.slash_command.args.named.sha != '' && contains(github.event.client_payload.pull_request.head.sha, github.event.client_payload.slash_command.args.named.sha)
+        with:
+          message: |
+            Workflow run sha specified via `ok-to-test` is not the latest commit on PR. Run canceled.
+
+      - name: Fail check if not head of PR
+        if: github.event.client_payload.slash_command.args.named.sha != '' && contains(github.event.client_payload.pull_request.head.sha, github.event.client_payload.slash_command.args.named.sha)
+        run: |
+          echo "Workflow run sha specified is not the latest commit on PR. Exiting."
+          exit 1
+
       # add comment to PR with link to workflow run
       - uses: marocchino/sticky-pull-request-comment@v2
        with:
          message: |
-            https://github.com/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID
+            Test suite run will be available at https://github.com/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID

       # Check out merge commit
       - name: Fork based /ok-to-test checkout
@@ -517,7 +529,7 @@ jobs:
          curl -L https://github.com/alcionai/corso/releases/download/${{ env.CORSO_VERSION }}/corso_${{ env.CORSO_VERSION }}_Linux_x86_64.tar.gz > corso.tar.gz
          tar -xf corso.tar.gz
          ./corso --help
-          ./corso --version 2>&1 | grep -E "^version: ${{ env.CORSO_VERSION }}$"
+          ./corso --version 2>&1 | grep -E "version: ${{ env.CORSO_VERSION }}$"
       - name: Validate arm64 binary artifacts
        uses: uraimo/run-on-arch-action@v2
        with:
@@ -531,7 +543,7 @@ jobs:
          curl -L https://github.com/alcionai/corso/releases/download/${{ env.CORSO_VERSION }}/corso_${{ env.CORSO_VERSION }}_Linux_arm64.tar.gz > corso.tar.gz
          tar -xf corso.tar.gz
          ./corso --help
-          ./corso --version 2>&1 | grep -E "^version: ${{ env.CORSO_VERSION }}$"
+          ./corso --version 2>&1 | grep -E "version: ${{ env.CORSO_VERSION }}$"

   Validate-Docker-Artifacts:
     needs: [Publish-Binary, Publish-Image, SetEnv]
@@ -549,11 +561,11 @@ jobs:
       - name: Validate amd64 container images
        run: |
          docker run --platform linux/amd64 ${{ env.IMAGE_NAME }}:${{ env.CORSO_VERSION }} --help
-          docker run --platform linux/amd64 ${{ env.IMAGE_NAME }}:${{ env.CORSO_VERSION }} --version | grep -E "^version: ${{ env.CORSO_VERSION }}$"
+          docker run --platform linux/amd64 ${{ env.IMAGE_NAME }}:${{ env.CORSO_VERSION }} --version | grep -E "version: ${{ env.CORSO_VERSION }}$"
       - name: Validate arm64 container images
        run: |
          docker run --platform linux/arm64 ${{ env.IMAGE_NAME }}:${{ env.CORSO_VERSION }} --help
-          docker run --platform linux/amd64 ${{ env.IMAGE_NAME }}:${{ env.CORSO_VERSION }} --version | grep -E "^version: ${{ env.CORSO_VERSION }}$"
+          docker run --platform linux/amd64 ${{ env.IMAGE_NAME }}:${{ env.CORSO_VERSION }} --version | grep -E "version: ${{ env.CORSO_VERSION }}$"

   Validate-MacOS-Artifacts:
     needs: [Publish-Binary, Publish-Image, SetEnv]
@@ -569,7 +581,7 @@ jobs:
          curl -L https://github.com/alcionai/corso/releases/download/${{ env.CORSO_VERSION }}/corso_${{ env.CORSO_VERSION }}_Darwin_x86_64.tar.gz > corso.tar.gz
          tar -xf corso.tar.gz
          ./corso --help
-          ./corso --version 2>&1 | grep -E "^version: ${{ env.CORSO_VERSION }}$"
+          ./corso --version 2>&1 | grep -E "version: ${{ env.CORSO_VERSION }}$"
       - name: Validate arm64 binary artifacts
        run: |
          set -ex
@@ -590,7 +602,7 @@ jobs:
          curl -L https://github.com/alcionai/corso/releases/download/${{ env.CORSO_VERSION }}/corso_${{ env.CORSO_VERSION }}_Windows_x86_64.zip -o corso.zip
          7z x corso.zip
          ./corso.exe --help
-          ./corso.exe --version 2>&1 | grep -E "^version: ${{ env.CORSO_VERSION }}$"
+          ./corso.exe --version 2>&1 | grep -E "version: ${{ env.CORSO_VERSION }}$"

   Publish-Website-Test:
     needs: [Test-Suite-Trusted, Linting, Website-Linting, SetEnv]
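Note: each validation hunk above drops the leading "^" anchor from the version grep. This commit also changes the CLI to print "Corso version: ..." (see the cli.go hunks further down), so the unanchored pattern presumably tolerates a prefix before "version:". A minimal Go sketch of the difference; the version string is illustrative, not taken from the commit:

    package main

    import (
        "fmt"
        "regexp"
    )

    func main() {
        anchored := regexp.MustCompile(`^version: v0\.2\.0$`)
        relaxed := regexp.MustCompile(`version: v0\.2\.0$`)

        // Hypothetical CLI output that prefixes the version line.
        out := "Corso version: v0.2.0"

        fmt.Println(anchored.MatchString(out)) // false: the prefix defeats ^
        fmt.Println(relaxed.MatchString(out))  // true: only the tail must match
    }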
.github/workflows/ok-to-test.yml (vendored, 3 changes)
@@ -19,7 +19,7 @@ jobs:
          private_key: ${{ secrets.PRIVATE_KEY }}

       - name: Slash Command Dispatch
-        uses: peter-evans/slash-command-dispatch@v1
+        uses: peter-evans/slash-command-dispatch@v3
        env:
          TOKEN: ${{ steps.generate_token.outputs.token }}
        with:
@@ -27,5 +27,4 @@ jobs:
          reaction-token: ${{ secrets.GITHUB_TOKEN }}
          issue-type: pull-request
          commands: ok-to-test
-          named-args: true
          permission: write
CHANGELOG.md (15 changes)
@@ -7,18 +7,26 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

 ## [Unreleased] (alpha)

+### Added
+
+- Document Corso's fault-tolerance and restartability features
+
+## [v0.2.0] (alpha) - 2023-1-29
+
 ### Fixed

 - Check if the user specified for an exchange backup operation has a mailbox.

 ### Changed

-- msgraph-beta-sdk-go replaces msgraph-sdk-go for new features. This can lead to long build times.
+- BetaClient introduced. Enables Corso to be able to interact with SharePoint Page objects. Package located `/internal/connector/graph/betasdk`
 - Handle case where user's drive has not been initialized
 - Inline attachments (e.g. copy/paste ) are discovered and backed up correctly ([#2163](https://github.com/alcionai/corso/issues/2163))
 - Guest and External users (for cloud accounts) and non-on-premise users (for systems that use on-prem AD syncs) are now excluded from backup and restore operations.
 - Remove the M365 license guid check in OneDrive backup which wasn't reliable.
+- Reduced extra socket consumption while downloading multiple drive files.
+- Extended timeout boundaries for exchange attachment downloads, reducing risk of cancellation on large files.
+- Identify all drives associated with a user or SharePoint site instead of just the results on the first page returned by Graph API.

 ## [v0.1.0] (alpha) - 2023-01-13

@@ -131,7 +139,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 - Miscellaneous
   - Optional usage statistics reporting ([RM-35](https://github.com/alcionai/corso-roadmap/issues/35))

-[Unreleased]: https://github.com/alcionai/corso/compare/v0.1.0...HEAD
+[Unreleased]: https://github.com/alcionai/corso/compare/v0.2.0...HEAD
+[v0.2.0]: https://github.com/alcionai/corso/compare/v0.1.0...v0.2.0
 [v0.1.0]: https://github.com/alcionai/corso/compare/v0.0.4...v0.1.0
 [v0.0.4]: https://github.com/alcionai/corso/compare/v0.0.3...v0.0.4
 [v0.0.3]: https://github.com/alcionai/corso/compare/v0.0.2...v0.0.3
@@ -81,7 +81,7 @@ func addSharePointCommands(cmd *cobra.Command) *cobra.Command {

    switch cmd.Use {
    case createCommand:
-       c, fs = utils.AddCommand(cmd, sharePointCreateCmd(), utils.HideCommand())
+       c, fs = utils.AddCommand(cmd, sharePointCreateCmd(), utils.MarkPreReleaseCommand())

        c.Use = c.Use + " " + sharePointServiceCommandCreateUseSuffix
        c.Example = sharePointServiceCommandCreateExamples
@@ -101,14 +101,14 @@ func addSharePointCommands(cmd *cobra.Command) *cobra.Command {
        options.AddOperationFlags(c)

    case listCommand:
-       c, fs = utils.AddCommand(cmd, sharePointListCmd(), utils.HideCommand())
+       c, fs = utils.AddCommand(cmd, sharePointListCmd(), utils.MarkPreReleaseCommand())

        fs.StringVar(&backupID,
            utils.BackupFN, "",
            "ID of the backup to retrieve.")

    case detailsCommand:
-       c, fs = utils.AddCommand(cmd, sharePointDetailsCmd())
+       c, fs = utils.AddCommand(cmd, sharePointDetailsCmd(), utils.MarkPreReleaseCommand())

        c.Use = c.Use + " " + sharePointServiceCommandDetailsUseSuffix
        c.Example = sharePointServiceCommandDetailsExamples
@@ -157,7 +157,7 @@ func addSharePointCommands(cmd *cobra.Command) *cobra.Command {
    // "Select backup details for items created after this datetime.")

    case deleteCommand:
-       c, fs = utils.AddCommand(cmd, sharePointDeleteCmd(), utils.HideCommand())
+       c, fs = utils.AddCommand(cmd, sharePointDeleteCmd(), utils.MarkPreReleaseCommand())

        c.Use = c.Use + " " + sharePointServiceCommandDeleteUseSuffix
        c.Example = sharePointServiceCommandDeleteExamples
@@ -210,7 +210,7 @@ func createSharePointCmd(cmd *cobra.Command, args []string) error {

    defer utils.CloseRepo(ctx, r)

-   gc, err := connector.NewGraphConnector(ctx, graph.LargeItemClient(), acct, connector.Sites)
+   gc, err := connector.NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), acct, connector.Sites)
    if err != nil {
        return Only(ctx, errors.Wrap(err, "Failed to connect to Microsoft APIs"))
    }
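Note: the recurring substitution in this commit, graph.LargeItemClient() becoming graph.HTTPClient(graph.NoTimeout()), suggests a generic option-configured HTTP client replacing a special-purpose constructor. The option names come from the diff; the sketch below only illustrates the net/http behavior the change relies on, with hypothetical plumbing:

    package main

    import (
        "net/http"
        "time"
    )

    // clientOpt mirrors the shape implied by graph.HTTPClient(graph.NoTimeout()).
    type clientOpt func(*http.Client)

    // noTimeout sets Timeout to zero; net/http treats 0 as "no total-request
    // timeout", which avoids cancelling large attachment or drive-item downloads.
    func noTimeout() clientOpt {
        return func(c *http.Client) { c.Timeout = 0 }
    }

    func httpClient(opts ...clientOpt) *http.Client {
        c := &http.Client{Timeout: 90 * time.Second} // hypothetical default
        for _, opt := range opts {
            opt(c)
        }
        return c
    }

    func main() {
        _ = httpClient(noTimeout()) // analogous to the call sites in the diff
    }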
@@ -15,6 +15,7 @@ import (
    "github.com/alcionai/corso/src/cli/print"
    "github.com/alcionai/corso/src/cli/repo"
    "github.com/alcionai/corso/src/cli/restore"
+   "github.com/alcionai/corso/src/cli/utils"
    "github.com/alcionai/corso/src/internal/observe"
    "github.com/alcionai/corso/src/internal/version"
    "github.com/alcionai/corso/src/pkg/logger"
@@ -31,7 +32,27 @@ var corsoCmd = &cobra.Command{
    Short: "Free, Secure, Open-Source Backup for M365.",
    Long:  `Free, Secure, and Open-Source Backup for Microsoft 365.`,
    RunE:  handleCorsoCmd,
-   PersistentPreRunE: config.InitFunc(),
+   PersistentPreRunE: preRun,
+}
+
+func preRun(cc *cobra.Command, args []string) error {
+   if err := config.InitFunc(cc, args); err != nil {
+       return err
+   }
+
+   log := logger.Ctx(cc.Context())
+
+   flags := utils.GetPopulatedFlags(cc)
+   flagSl := make([]string, 0, len(flags))
+
+   // currently only tracking flag names to avoid pii leakage.
+   for f := range flags {
+       flagSl = append(flagSl, f)
+   }
+
+   log.Infow("cli command", "command", cc.CommandPath(), "flags", flagSl, "version", version.CurrentVersion())
+
+   return nil
 }

 // Handler for flat calls to `corso`.
@@ -39,7 +60,7 @@ var corsoCmd = &cobra.Command{
 func handleCorsoCmd(cmd *cobra.Command, args []string) error {
    v, _ := cmd.Flags().GetBool("version")
    if v {
-       print.Outf(cmd.Context(), "Corso version: "+version.Version)
+       print.Outf(cmd.Context(), "Corso version: "+version.CurrentVersion())
        return nil
    }

@@ -62,7 +83,7 @@ func BuildCommandTree(cmd *cobra.Command) {
    cmd.PersistentFlags().SortFlags = false

    cmd.Flags().BoolP("version", "v", false, "current version info")
-   cmd.PersistentPostRunE = config.InitFunc()
+   cmd.PersistentPreRunE = preRun
    config.AddConfigFlags(cmd)
    logger.AddLoggingFlags(cmd)
    observe.AddProgressBarFlags(cmd)
@@ -85,6 +106,7 @@ func BuildCommandTree(cmd *cobra.Command) {

 // Handle builds and executes the cli processor.
 func Handle() {
+   //nolint:forbidigo
    ctx := config.Seed(context.Background())
    ctx = print.SetRootCmd(ctx, corsoCmd)
    observe.SeedWriter(ctx, print.StderrWriter(ctx), observe.PreloadFlags())
@@ -77,20 +77,18 @@ func AddConfigFlags(cmd *cobra.Command) {

 // InitFunc provides a func that lazily initializes viper and
 // verifies that the configuration was able to read a file.
-func InitFunc() func(*cobra.Command, []string) error {
-   return func(cmd *cobra.Command, args []string) error {
-       fp := configFilePathFlag
-       if len(fp) == 0 || fp == displayDefaultFP {
-           fp = configFilePath
-       }
-
-       err := initWithViper(GetViper(cmd.Context()), fp)
-       if err != nil {
-           return err
-       }
-
-       return Read(cmd.Context())
-   }
+func InitFunc(cmd *cobra.Command, args []string) error {
+   fp := configFilePathFlag
+   if len(fp) == 0 || fp == displayDefaultFP {
+       fp = configFilePath
+   }
+
+   err := initWithViper(GetViper(cmd.Context()), fp)
+   if err != nil {
+       return err
+   }
+
+   return Read(cmd.Context())
 }

 // initWithViper implements InitConfig, but takes in a viper
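Note: InitFunc changes from a factory that returns a cobra hook into the hook itself. The motivation is visible in the cli.go hunk above: preRun must be able to call config initialization directly and then do its own flag logging. A compressed sketch of the before/after shapes, with placeholder bodies:

    package main

    import "github.com/spf13/cobra"

    // Before: only usable as `cmd.PersistentPreRunE = initFuncFactory()`.
    func initFuncFactory() func(*cobra.Command, []string) error {
        return func(cmd *cobra.Command, args []string) error { return nil }
    }

    // After: the hook itself, so other hooks can chain onto it.
    func initFunc(cmd *cobra.Command, args []string) error { return nil }

    func preRun(cmd *cobra.Command, args []string) error {
        if err := initFunc(cmd, args); err != nil {
            return err
        }
        // additional work (e.g. logging flag names) runs after config init
        return nil
    }

    func main() {
        root := &cobra.Command{Use: "demo", PersistentPreRunE: preRun}
        _ = root.Execute()
    }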
@@ -35,7 +35,7 @@ func addSharePointCommands(cmd *cobra.Command) *cobra.Command {

    switch cmd.Use {
    case restoreCommand:
-       c, fs = utils.AddCommand(cmd, sharePointRestoreCmd(), utils.HideCommand())
+       c, fs = utils.AddCommand(cmd, sharePointRestoreCmd(), utils.MarkPreReleaseCommand())

        c.Use = c.Use + " " + sharePointServiceCommandUseSuffix

@@ -59,7 +59,8 @@ func HasNoFlagsAndShownHelp(cmd *cobra.Command) bool {
 }

 type cmdCfg struct {
    hidden bool
+   preRelese bool
 }

 type cmdOpt func(*cmdCfg)
@@ -76,6 +77,13 @@ func HideCommand() cmdOpt {
    }
 }

+func MarkPreReleaseCommand() cmdOpt {
+   return func(cc *cmdCfg) {
+       cc.hidden = true
+       cc.preRelese = true
+   }
+}
+
 // AddCommand adds a clone of the subCommand to the parent,
 // and returns both the clone and its pflags.
 func AddCommand(parent, c *cobra.Command, opts ...cmdOpt) (*cobra.Command, *pflag.FlagSet) {
@@ -85,6 +93,14 @@ func AddCommand(parent, c *cobra.Command, opts ...cmdOpt) (*cobra.Command, *pfla
    parent.AddCommand(c)
    c.Hidden = cc.hidden

+   if cc.preRelese {
+       // There is a default deprecated message that always shows so we do some terminal magic to overwrite it
+       c.Deprecated = "\n\033[1F\033[K" +
+           "==================================================================================================\n" +
+           "\tWARNING!!! THIS IS A PRE-RELEASE COMMAND THAT MAY NOT FUNCTION PROPERLY, OR AT ALL\n" +
+           "==================================================================================================\n"
+   }
+
    c.Flags().SortFlags = false

    return c, c.Flags()
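Note: MarkPreReleaseCommand leans on cobra's Deprecated field, which, when non-empty, makes cobra print `Command "<name>" is deprecated, <message>` before execution. The "\033[1F\033[K" prefix is ANSI for "move the cursor up one line and erase it", which visually removes that canned prefix so only the banner remains. A trimmed-down sketch (the banner text here is shortened, not the repo's):

    package main

    import "github.com/spf13/cobra"

    func main() {
        sub := &cobra.Command{
            Use:    "experimental",
            Hidden: true, // omitted from help output, still invocable
            // cobra prints this (after its own canned prefix) before running
            Deprecated: "\n\033[1F\033[K" +
                "\tWARNING: pre-release command; may not function properly\n",
            Run: func(*cobra.Command, []string) {},
        }

        root := &cobra.Command{Use: "demo"}
        root.AddCommand(sub)
        _ = root.Execute()
    }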
@@ -112,7 +112,7 @@ func getGCAndVerifyUser(ctx context.Context, userID string) (*connector.GraphCon
    }

    // build a graph connector
-   gc, err := connector.NewGraphConnector(ctx, graph.LargeItemClient(), acct, connector.Users)
+   gc, err := connector.NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), acct, connector.Users)
    if err != nil {
        return nil, account.Account{}, errors.Wrap(err, "connecting to graph api")
    }
@@ -178,7 +178,7 @@ func getGC(ctx context.Context) (*connector.GraphConnector, account.M365Config,
        return nil, m365Cfg, Only(ctx, errors.Wrap(err, "finding m365 account details"))
    }

-   gc, err := connector.NewGraphConnector(ctx, graph.LargeItemClient(), acct, connector.Users)
+   gc, err := connector.NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), acct, connector.Users)
    if err != nil {
        return nil, m365Cfg, Only(ctx, errors.Wrap(err, "connecting to graph API"))
    }
@@ -151,7 +151,12 @@ func purgeOneDriveFolders(
    uid string,
 ) error {
    getter := func(gs graph.Servicer, uid, prefix string) ([]purgable, error) {
-       cfs, err := onedrive.GetAllFolders(ctx, gs, uid, prefix)
+       pager, err := onedrive.PagerForSource(onedrive.OneDriveSource, gs, uid, nil)
+       if err != nil {
+           return nil, err
+       }
+
+       cfs, err := onedrive.GetAllFolders(ctx, gs, pager, prefix)
        if err != nil {
            return nil, err
        }
@@ -255,7 +260,7 @@ func getGC(ctx context.Context) (*connector.GraphConnector, error) {
    }

    // build a graph connector
-   gc, err := connector.NewGraphConnector(ctx, graph.LargeItemClient(), acct, connector.Users)
+   gc, err := connector.NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), acct, connector.Users)
    if err != nil {
        return nil, Only(ctx, errors.Wrap(err, "connecting to graph api"))
    }
@@ -5,7 +5,7 @@ go 1.19
 require (
    github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0
    github.com/alcionai/clues v0.0.0-20230120231953-1cf61dbafc40
-   github.com/aws/aws-sdk-go v1.44.187
+   github.com/aws/aws-sdk-go v1.44.190
    github.com/aws/aws-xray-sdk-go v1.8.0
    github.com/google/uuid v1.3.0
    github.com/hashicorp/go-multierror v1.1.1
@@ -13,6 +13,7 @@ require (
    github.com/microsoft/kiota-abstractions-go v0.16.0
    github.com/microsoft/kiota-authentication-azure-go v0.6.0
    github.com/microsoft/kiota-http-go v0.13.0
+   github.com/microsoft/kiota-serialization-form-go v0.2.0
    github.com/microsoft/kiota-serialization-json-go v0.7.2
    github.com/microsoftgraph/msgraph-sdk-go v0.53.0
    github.com/microsoftgraph/msgraph-sdk-go-core v0.33.0
@@ -40,7 +41,6 @@ require (
    github.com/fsnotify/fsnotify v1.6.0 // indirect
    github.com/hashicorp/hcl v1.0.0 // indirect
    github.com/magiconair/properties v1.8.7 // indirect
-   github.com/microsoft/kiota-serialization-form-go v0.2.0 // indirect
    github.com/mitchellh/mapstructure v1.5.0 // indirect
    github.com/pelletier/go-toml/v2 v2.0.6 // indirect
    github.com/spf13/afero v1.9.3 // indirect
@@ -84,7 +84,7 @@ require (
    github.com/mattn/go-runewidth v0.0.14 // indirect
    github.com/matttproud/golang_protobuf_extensions v1.0.2 // indirect
    github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
-   github.com/microsoft/kiota-serialization-text-go v0.6.0 // indirect
+   github.com/microsoft/kiota-serialization-text-go v0.6.0
    github.com/minio/md5-simd v1.1.2 // indirect
    github.com/minio/minio-go/v7 v7.0.45 // indirect
    github.com/minio/sha256-simd v1.0.0 // indirect
@@ -62,8 +62,8 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5
 github.com/alessio/shellescape v1.4.1 h1:V7yhSDDn8LP4lc4jS8pFkt0zCnzVJlG5JXy9BVKJUX0=
 github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY3JY=
 github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
-github.com/aws/aws-sdk-go v1.44.187 h1:D5CsRomPnlwDHJCanL2mtaLIcbhjiWxNh5j8zvaWdJA=
-github.com/aws/aws-sdk-go v1.44.187/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
+github.com/aws/aws-sdk-go v1.44.190 h1:QC+Pf/Ooj7Waf2obOPZbIQOqr00hy4h54j3ZK9mvHcc=
+github.com/aws/aws-sdk-go v1.44.190/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
 github.com/aws/aws-xray-sdk-go v1.8.0 h1:0xncHZ588wB/geLjbM/esoW3FOEThWy2TJyb4VXfLFY=
 github.com/aws/aws-xray-sdk-go v1.8.0/go.mod h1:7LKe47H+j3evfvS1+q0wzpoaGXGrF3mUsfM+thqVO+A=
 github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
@@ -6,6 +6,7 @@ import (
    "strings"

    "github.com/pkg/errors"
+   "golang.org/x/exp/maps"

    "github.com/alcionai/corso/src/internal/connector/discovery"
    "github.com/alcionai/corso/src/internal/connector/discovery/api"
@@ -35,27 +36,27 @@ func (gc *GraphConnector) DataCollections(
    sels selectors.Selector,
    metadata []data.Collection,
    ctrlOpts control.Options,
-) ([]data.Collection, error) {
+) ([]data.Collection, map[string]struct{}, error) {
    ctx, end := D.Span(ctx, "gc:dataCollections", D.Index("service", sels.Service.String()))
    defer end()

    err := verifyBackupInputs(sels, gc.GetUsers(), gc.GetSiteIDs())
    if err != nil {
-       return nil, err
+       return nil, nil, err
    }

    serviceEnabled, err := checkServiceEnabled(ctx, gc.Owners.Users(), path.ServiceType(sels.Service), sels.DiscreteOwner)
    if err != nil {
-       return nil, err
+       return nil, nil, err
    }

    if !serviceEnabled {
-       return []data.Collection{}, nil
+       return []data.Collection{}, nil, nil
    }

    switch sels.Service {
    case selectors.ServiceExchange:
-       colls, err := exchange.DataCollections(
+       colls, excludes, err := exchange.DataCollections(
            ctx,
            sels,
            metadata,
@@ -64,7 +65,7 @@ func (gc *GraphConnector) DataCollections(
            gc.UpdateStatus,
            ctrlOpts)
        if err != nil {
-           return nil, err
+           return nil, nil, err
        }

        for _, c := range colls {
@@ -79,13 +80,13 @@ func (gc *GraphConnector) DataCollections(
            }
        }

-       return colls, nil
+       return colls, excludes, nil

    case selectors.ServiceOneDrive:
        return gc.OneDriveDataCollections(ctx, sels, ctrlOpts)

    case selectors.ServiceSharePoint:
-       colls, err := sharepoint.DataCollections(
+       colls, excludes, err := sharepoint.DataCollections(
            ctx,
            gc.itemClient,
            sels,
@@ -94,17 +95,17 @@ func (gc *GraphConnector) DataCollections(
            gc,
            ctrlOpts)
        if err != nil {
-           return nil, err
+           return nil, nil, err
        }

        for range colls {
            gc.incrementAwaitingMessages()
        }

-       return colls, nil
+       return colls, excludes, nil

    default:
-       return nil, errors.Errorf("service %s not supported", sels.Service.String())
+       return nil, nil, errors.Errorf("service %s not supported", sels.Service.String())
    }
 }

@@ -182,15 +183,16 @@ func (gc *GraphConnector) OneDriveDataCollections(
    ctx context.Context,
    selector selectors.Selector,
    ctrlOpts control.Options,
-) ([]data.Collection, error) {
+) ([]data.Collection, map[string]struct{}, error) {
    odb, err := selector.ToOneDriveBackup()
    if err != nil {
-       return nil, errors.Wrap(err, "oneDriveDataCollection: parsing selector")
+       return nil, nil, errors.Wrap(err, "oneDriveDataCollection: parsing selector")
    }

    var (
        user        = selector.DiscreteOwner
        collections = []data.Collection{}
+       allExcludes = map[string]struct{}{}
        errs        error
    )

@@ -198,7 +200,7 @@ func (gc *GraphConnector) OneDriveDataCollections(
    for _, scope := range odb.Scopes() {
        logger.Ctx(ctx).With("user", user).Debug("Creating OneDrive collections")

-       odcs, err := onedrive.NewCollections(
+       odcs, excludes, err := onedrive.NewCollections(
            gc.itemClient,
            gc.credentials.AzureTenantID,
            user,
@@ -209,15 +211,17 @@ func (gc *GraphConnector) OneDriveDataCollections(
            ctrlOpts,
        ).Get(ctx)
        if err != nil {
-           return nil, support.WrapAndAppend(user, err, errs)
+           return nil, nil, support.WrapAndAppend(user, err, errs)
        }

        collections = append(collections, odcs...)
+
+       maps.Copy(allExcludes, excludes)
    }

    for range collections {
        gc.incrementAwaitingMessages()
    }

-   return collections, errs
+   return collections, allExcludes, errs
 }
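Note: DataCollections gains a second return value, a map[string]struct{} of items to exclude; the tests below only assert that it stays empty for non-incremental backups, so its exact key format isn't visible in this diff. The OneDrive path accumulates the per-scope maps with maps.Copy from golang.org/x/exp/maps, i.e. a plain key-overwriting merge:

    package main

    import (
        "fmt"

        "golang.org/x/exp/maps"
    )

    func main() {
        // Mirrors allExcludes / maps.Copy(allExcludes, excludes) in the diff.
        allExcludes := map[string]struct{}{}

        perScope := []map[string]struct{}{
            {"itemA": {}},              // hypothetical per-scope results
            {"itemB": {}, "itemA": {}}, // duplicate keys simply overwrite
        }

        for _, excludes := range perScope {
            maps.Copy(allExcludes, excludes)
        }

        fmt.Println(len(allExcludes)) // 2
    }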
@@ -44,7 +44,7 @@ func (suite *ConnectorDataCollectionIntegrationSuite) SetupSuite() {

    tester.MustGetEnvVars(suite.T(), tester.M365AcctCredEnvs...)

-   suite.connector = loadConnector(ctx, suite.T(), graph.LargeItemClient(), AllResources)
+   suite.connector = loadConnector(ctx, suite.T(), graph.HTTPClient(graph.NoTimeout()), AllResources)
    suite.user = tester.M365UserID(suite.T())
    suite.site = tester.M365SiteID(suite.T())

@@ -63,7 +63,7 @@ func (suite *ConnectorDataCollectionIntegrationSuite) TestExchangeDataCollection

    selUsers := []string{suite.user}

-   connector := loadConnector(ctx, suite.T(), graph.LargeItemClient(), Users)
+   connector := loadConnector(ctx, suite.T(), graph.HTTPClient(graph.NoTimeout()), Users)
    tests := []struct {
        name        string
        getSelector func(t *testing.T) selectors.Selector
@@ -99,7 +99,7 @@ func (suite *ConnectorDataCollectionIntegrationSuite) TestExchangeDataCollection

    for _, test := range tests {
        suite.T().Run(test.name, func(t *testing.T) {
-           collections, err := exchange.DataCollections(
+           collections, excludes, err := exchange.DataCollections(
                ctx,
                test.getSelector(t),
                nil,
@@ -108,6 +108,8 @@ func (suite *ConnectorDataCollectionIntegrationSuite) TestExchangeDataCollection
                control.Options{})
            require.NoError(t, err)

+           assert.Empty(t, excludes)
+
            for range collections {
                connector.incrementAwaitingMessages()
            }
@@ -139,7 +141,7 @@ func (suite *ConnectorDataCollectionIntegrationSuite) TestDataCollections_invali

    owners := []string{"snuffleupagus"}

-   connector := loadConnector(ctx, suite.T(), graph.LargeItemClient(), Users)
+   connector := loadConnector(ctx, suite.T(), graph.HTTPClient(graph.NoTimeout()), Users)
    tests := []struct {
        name        string
        getSelector func(t *testing.T) selectors.Selector
@@ -199,9 +201,10 @@ func (suite *ConnectorDataCollectionIntegrationSuite) TestDataCollections_invali

    for _, test := range tests {
        suite.T().Run(test.name, func(t *testing.T) {
-           collections, err := connector.DataCollections(ctx, test.getSelector(t), nil, control.Options{})
+           collections, excludes, err := connector.DataCollections(ctx, test.getSelector(t), nil, control.Options{})
            assert.Error(t, err)
            assert.Empty(t, collections)
+           assert.Empty(t, excludes)
        })
    }
 }
@@ -215,7 +218,7 @@ func (suite *ConnectorDataCollectionIntegrationSuite) TestSharePointDataCollecti

    selSites := []string{suite.site}

-   connector := loadConnector(ctx, suite.T(), graph.LargeItemClient(), Sites)
+   connector := loadConnector(ctx, suite.T(), graph.HTTPClient(graph.NoTimeout()), Sites)
    tests := []struct {
        name     string
        expected int
@@ -242,15 +245,17 @@ func (suite *ConnectorDataCollectionIntegrationSuite) TestSharePointDataCollecti

    for _, test := range tests {
        suite.T().Run(test.name, func(t *testing.T) {
-           collections, err := sharepoint.DataCollections(
+           collections, excludes, err := sharepoint.DataCollections(
                ctx,
-               graph.LargeItemClient(),
+               graph.HTTPClient(graph.NoTimeout()),
                test.getSelector(),
                connector.credentials.AzureTenantID,
                connector.Service,
                connector,
                control.Options{})
            require.NoError(t, err)
+           // Not expecting excludes as this isn't an incremental backup.
+           assert.Empty(t, excludes)

            for range collections {
                connector.incrementAwaitingMessages()
@@ -300,7 +305,7 @@ func (suite *ConnectorCreateSharePointCollectionIntegrationSuite) SetupSuite() {

    tester.MustGetEnvSets(suite.T(), tester.M365AcctCredEnvs)

-   suite.connector = loadConnector(ctx, suite.T(), graph.LargeItemClient(), Sites)
+   suite.connector = loadConnector(ctx, suite.T(), graph.HTTPClient(graph.NoTimeout()), Sites)
    suite.user = tester.M365UserID(suite.T())

    tester.LogTimeOfTest(suite.T())
@@ -313,16 +318,18 @@ func (suite *ConnectorCreateSharePointCollectionIntegrationSuite) TestCreateShar
    var (
        t       = suite.T()
        siteID  = tester.M365SiteID(t)
-       gc      = loadConnector(ctx, t, graph.LargeItemClient(), Sites)
+       gc      = loadConnector(ctx, t, graph.HTTPClient(graph.NoTimeout()), Sites)
        siteIDs = []string{siteID}
    )

    sel := selectors.NewSharePointBackup(siteIDs)
    sel.Include(sel.Libraries([]string{"foo"}, selectors.PrefixMatch()))

-   cols, err := gc.DataCollections(ctx, sel.Selector, nil, control.Options{})
+   cols, excludes, err := gc.DataCollections(ctx, sel.Selector, nil, control.Options{})
    require.NoError(t, err)
    assert.Len(t, cols, 1)
+   // No excludes yet as this isn't an incremental backup.
+   assert.Empty(t, excludes)

    for _, collection := range cols {
        t.Logf("Path: %s\n", collection.FullPath().String())
@@ -337,16 +344,18 @@ func (suite *ConnectorCreateSharePointCollectionIntegrationSuite) TestCreateShar
    var (
        t       = suite.T()
        siteID  = tester.M365SiteID(t)
-       gc      = loadConnector(ctx, t, graph.LargeItemClient(), Sites)
+       gc      = loadConnector(ctx, t, graph.HTTPClient(graph.NoTimeout()), Sites)
        siteIDs = []string{siteID}
    )

    sel := selectors.NewSharePointBackup(siteIDs)
    sel.Include(sel.Lists(selectors.Any(), selectors.PrefixMatch()))

-   cols, err := gc.DataCollections(ctx, sel.Selector, nil, control.Options{})
+   cols, excludes, err := gc.DataCollections(ctx, sel.Selector, nil, control.Options{})
    require.NoError(t, err)
    assert.Less(t, 0, len(cols))
+   // No excludes yet as this isn't an incremental backup.
+   assert.Empty(t, excludes)

    for _, collection := range cols {
        t.Logf("Path: %s\n", collection.FullPath().String())
src/internal/connector/discovery/api/beta_service.go (new file, 43 lines)
@@ -0,0 +1,43 @@
+package api
+
+import (
+   "github.com/alcionai/corso/src/internal/connector/graph/betasdk"
+   absser "github.com/microsoft/kiota-abstractions-go/serialization"
+   msgraphsdk "github.com/microsoftgraph/msgraph-sdk-go"
+   "github.com/pkg/errors"
+)
+
+// Service wraps BetaClient's functionality.
+// Abstraction created to comply loosely with graph.Servicer
+// methods for ease of switching between v1.0 and beta connnectors
+type Service struct {
+   client *betasdk.BetaClient
+}
+
+func (s Service) Client() *betasdk.BetaClient {
+   return s.client
+}
+
+func NewBetaService(adpt *msgraphsdk.GraphRequestAdapter) *Service {
+   return &Service{
+       client: betasdk.NewBetaClient(adpt),
+   }
+}
+
+// Seraialize writes an M365 parsable object into a byte array using the built-in
+// application/json writer within the adapter.
+func (s Service) Serialize(object absser.Parsable) ([]byte, error) {
+   writer, err := s.client.Adapter().
+       GetSerializationWriterFactory().
+       GetSerializationWriter("application/json")
+   if err != nil || writer == nil {
+       return nil, errors.Wrap(err, "creating json serialization writer")
+   }
+
+   err = writer.WriteObjectValue("", object)
+   if err != nil {
+       return nil, errors.Wrap(err, "writeObjecValue serialization")
+   }
+
+   return writer.GetSerializedContent()
+}
src/internal/connector/discovery/api/beta_service_test.go (new file, 49 lines)
@@ -0,0 +1,49 @@
+package api
+
+import (
+   "testing"
+
+   "github.com/stretchr/testify/assert"
+   "github.com/stretchr/testify/require"
+   "github.com/stretchr/testify/suite"
+
+   "github.com/alcionai/corso/src/internal/connector/graph"
+   "github.com/alcionai/corso/src/internal/connector/graph/betasdk/models"
+   "github.com/alcionai/corso/src/internal/tester"
+)
+
+type BetaUnitSuite struct {
+   suite.Suite
+}
+
+func TestBetaUnitSuite(t *testing.T) {
+   suite.Run(t, new(BetaUnitSuite))
+}
+
+func (suite *BetaUnitSuite) TestBetaService_Adapter() {
+   t := suite.T()
+   a := tester.NewM365Account(t)
+   m365, err := a.M365Config()
+   require.NoError(t, err)
+
+   adpt, err := graph.CreateAdapter(
+       m365.AzureTenantID,
+       m365.AzureClientID,
+       m365.AzureClientSecret,
+   )
+   require.NoError(t, err)
+
+   service := NewBetaService(adpt)
+   require.NotNil(t, service)
+
+   testPage := models.NewSitePage()
+   name := "testFile"
+   desc := "working with parsing"
+
+   testPage.SetName(&name)
+   testPage.SetDescription(&desc)
+
+   byteArray, err := service.Serialize(testPage)
+   assert.NotEmpty(t, byteArray)
+   assert.NoError(t, err)
+}
@@ -2,9 +2,11 @@ package api

 import (
    "context"
+   "strings"
    "time"

    "github.com/microsoft/kiota-abstractions-go/serialization"
+   "github.com/microsoftgraph/msgraph-sdk-go/models"
    "github.com/pkg/errors"

    "github.com/alcionai/corso/src/internal/connector/graph"
@@ -57,6 +59,11 @@ type Client struct {
    // The stable service is re-usable for any non-paged request.
    // This allows us to maintain performance across async requests.
    stable graph.Servicer
+
+   // The largeItem graph servicer is configured specifically for
+   // downloading large items. Specifically for use when handling
+   // attachments, and for no other use.
+   largeItem graph.Servicer
 }

 // NewClient produces a new exchange api client. Must be used in
@@ -67,27 +74,45 @@ func NewClient(creds account.M365Config) (Client, error) {
        return Client{}, err
    }

-   return Client{creds, s}, nil
+   li, err := newLargeItemService(creds)
+   if err != nil {
+       return Client{}, err
+   }
+
+   return Client{creds, s, li}, nil
 }

 // service generates a new service. Used for paged and other long-running
 // requests instead of the client's stable service, so that in-flight state
 // within the adapter doesn't get clobbered
 func (c Client) service() (*graph.Service, error) {
-   return newService(c.Credentials)
+   s, err := newService(c.Credentials)
+   return s, err
 }

 func newService(creds account.M365Config) (*graph.Service, error) {
-   adapter, err := graph.CreateAdapter(
+   a, err := graph.CreateAdapter(
+       creds.AzureTenantID,
+       creds.AzureClientID,
+       creds.AzureClientSecret)
+   if err != nil {
+       return nil, errors.Wrap(err, "generating no-timeout graph adapter")
+   }
+
+   return graph.NewService(a), nil
+}
+
+func newLargeItemService(creds account.M365Config) (*graph.Service, error) {
+   a, err := graph.CreateAdapter(
        creds.AzureTenantID,
        creds.AzureClientID,
        creds.AzureClientSecret,
-   )
+       graph.NoTimeout())
    if err != nil {
-       return nil, errors.Wrap(err, "generating graph api service client")
+       return nil, errors.Wrap(err, "generating no-timeout graph adapter")
    }

-   return graph.NewService(adapter), nil
+   return graph.NewService(a), nil
 }

 // ---------------------------------------------------------------------------
@@ -117,3 +142,14 @@ func orNow(t *time.Time) time.Time {

    return *t
 }
+
+func HasAttachments(body models.ItemBodyable) bool {
+   if body.GetContent() == nil || body.GetContentType() == nil ||
+       *body.GetContentType() == models.TEXT_BODYTYPE || len(*body.GetContent()) == 0 {
+       return false
+   }
+
+   content := *body.GetContent()
+
+   return strings.Contains(content, "src=\"cid:")
+}
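Note: HasAttachments flags inline (copy/pasted) attachments by looking for a cid: image source inside a non-text body; HTML mail embeds inline images by referencing a MIME part's Content-ID that way. The same heuristic restated without the Graph SDK types; the id echoes the stub HTML in the test hunk below:

    package main

    import (
        "fmt"
        "strings"
    )

    // hasInlineAttachment restates the check from the diff: a non-empty,
    // non-plaintext body that references an embedded part via src="cid:...".
    func hasInlineAttachment(isText bool, content string) bool {
        if isText || len(content) == 0 {
            return false
        }
        return strings.Contains(content, `src="cid:`)
    }

    func main() {
        html := `<img src="cid:85f4faa3-9851-40c7-ba0a-e63dce1185f9">`
        fmt.Println(hasInlineAttachment(false, html)) // true
        fmt.Println(hasInlineAttachment(true, "hi"))  // false
    }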
@@ -3,11 +3,14 @@ package api
 import (
    "testing"

+   "github.com/microsoftgraph/msgraph-sdk-go/models"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    "github.com/stretchr/testify/suite"

    "github.com/alcionai/corso/src/internal/connector/graph"
+   "github.com/alcionai/corso/src/internal/connector/mockconnector"
+   "github.com/alcionai/corso/src/internal/connector/support"
    "github.com/alcionai/corso/src/internal/tester"
    "github.com/alcionai/corso/src/pkg/account"
 )
@@ -190,3 +193,57 @@ func (suite *ExchangeServiceSuite) TestGraphQueryFunctions() {
        })
    }
 }
+
+//nolint:lll
+var stubHTMLContent = "<html><head>\r\n<meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\"><style type=\"text/css\" style=\"display:none\">\r\n<!--\r\np\r\n\t{margin-top:0;\r\n\tmargin-bottom:0}\r\n-->\r\n</style></head><body dir=\"ltr\"><div class=\"elementToProof\" style=\"font-family:Calibri,Arial,Helvetica,sans-serif; font-size:12pt; color:rgb(0,0,0); background-color:rgb(255,255,255)\">Happy New Year,</div><div class=\"elementToProof\" style=\"font-family:Calibri,Arial,Helvetica,sans-serif; font-size:12pt; color:rgb(0,0,0); background-color:rgb(255,255,255)\"><br></div><div class=\"elementToProof\" style=\"font-family:Calibri,Arial,Helvetica,sans-serif; font-size:12pt; color:rgb(0,0,0); background-color:rgb(255,255,255)\">In accordance with TPS report guidelines, there have been questions about how to address our activities SharePoint Cover page. Do you believe this is the best picture? </div><div class=\"elementToProof\" style=\"font-family:Calibri,Arial,Helvetica,sans-serif; font-size:12pt; color:rgb(0,0,0); background-color:rgb(255,255,255)\"><br></div><div class=\"elementToProof\" style=\"font-family:Calibri,Arial,Helvetica,sans-serif; font-size:12pt; color:rgb(0,0,0); background-color:rgb(255,255,255)\"><img class=\"FluidPluginCopy ContentPasted0 w-2070 h-1380\" size=\"5854817\" data-outlook-trace=\"F:1|T:1\" src=\"cid:85f4faa3-9851-40c7-ba0a-e63dce1185f9\" style=\"max-width:100%\"><br></div><div class=\"elementToProof\" style=\"font-family:Calibri,Arial,Helvetica,sans-serif; font-size:12pt; color:rgb(0,0,0); background-color:rgb(255,255,255)\"><br></div><div class=\"elementToProof\" style=\"font-family:Calibri,Arial,Helvetica,sans-serif; font-size:12pt; color:rgb(0,0,0); background-color:rgb(255,255,255)\">Let me know if this meets our culture requirements.</div><div class=\"elementToProof\" style=\"font-family:Calibri,Arial,Helvetica,sans-serif; font-size:12pt; color:rgb(0,0,0); background-color:rgb(255,255,255)\"><br></div><div class=\"elementToProof\" style=\"font-family:Calibri,Arial,Helvetica,sans-serif; font-size:12pt; color:rgb(0,0,0); background-color:rgb(255,255,255)\">Warm Regards,</div><div class=\"elementToProof\" style=\"font-family:Calibri,Arial,Helvetica,sans-serif; font-size:12pt; color:rgb(0,0,0); background-color:rgb(255,255,255)\"><br></div><div class=\"elementToProof\" style=\"font-family:Calibri,Arial,Helvetica,sans-serif; font-size:12pt; color:rgb(0,0,0); background-color:rgb(255,255,255)\">Dustin</div></body></html>"
+
+func (suite *ExchangeServiceSuite) TestHasAttachments() {
+   tests := []struct {
+       name          string
+       hasAttachment assert.BoolAssertionFunc
+       getBodyable   func(t *testing.T) models.ItemBodyable
+   }{
+       {
+           name:          "Mock w/out attachment",
+           hasAttachment: assert.False,
+           getBodyable: func(t *testing.T) models.ItemBodyable {
+               byteArray := mockconnector.GetMockMessageWithBodyBytes(
+                   "Test",
+                   "This is testing",
+                   "This is testing",
+               )
+               message, err := support.CreateMessageFromBytes(byteArray)
+               require.NoError(t, err)
+               return message.GetBody()
+           },
+       },
+       {
+           name:          "Mock w/ inline attachment",
+           hasAttachment: assert.True,
+           getBodyable: func(t *testing.T) models.ItemBodyable {
+               byteArray := mockconnector.GetMessageWithOneDriveAttachment("Test legacy")
+               message, err := support.CreateMessageFromBytes(byteArray)
+               require.NoError(t, err)
+               return message.GetBody()
+           },
+       },
+       {
+           name:          "Edge Case",
+           hasAttachment: assert.True,
+           getBodyable: func(t *testing.T) models.ItemBodyable {
+               body := models.NewItemBody()
+               body.SetContent(&stubHTMLContent)
+               cat := models.HTML_BODYTYPE
+               body.SetContentType(&cat)
+               return body
+           },
+       },
+   }
+
+   for _, test := range tests {
+       suite.T().Run(test.name, func(t *testing.T) {
+           found := HasAttachments(test.getBodyable(t))
+           test.hasAttachment(t, found)
+       })
+   }
+}
@@ -13,6 +13,7 @@ import (
    "github.com/pkg/errors"

    "github.com/alcionai/corso/src/internal/connector/graph"
+   "github.com/alcionai/corso/src/internal/connector/graph/api"
    "github.com/alcionai/corso/src/internal/connector/support"
    "github.com/alcionai/corso/src/pkg/backup/details"
 )
@@ -47,9 +48,8 @@ func (c Contacts) CreateContactFolder(
    return c.stable.Client().UsersById(user).ContactFolders().Post(ctx, requestBody, nil)
 }

-// DeleteContactFolder deletes the ContactFolder associated with the M365 ID if permissions are valid.
-// Errors returned if the function call was not successful.
-func (c Contacts) DeleteContactFolder(
+// DeleteContainer deletes the ContactFolder associated with the M365 ID if permissions are valid.
+func (c Contacts) DeleteContainer(
    ctx context.Context,
    user, folderID string,
 ) error {
@@ -173,7 +173,7 @@ type contactPager struct {
    options *users.ItemContactFoldersItemContactsDeltaRequestBuilderGetRequestConfiguration
 }

-func (p *contactPager) getPage(ctx context.Context) (pageLinker, error) {
+func (p *contactPager) getPage(ctx context.Context) (api.DeltaPageLinker, error) {
    return p.builder.Get(ctx, p.options)
 }

@@ -181,7 +181,7 @@ func (p *contactPager) setNext(nextLink string) {
    p.builder = users.NewItemContactFoldersItemContactsDeltaRequestBuilder(nextLink, p.gs.Adapter())
 }

-func (p *contactPager) valuesIn(pl pageLinker) ([]getIDAndAddtler, error) {
+func (p *contactPager) valuesIn(pl api.DeltaPageLinker) ([]getIDAndAddtler, error) {
    return toValues[models.Contactable](pl)
 }

@@ -215,7 +215,7 @@ func (c Contacts) GetAddedAndRemovedItemIDs(
    }
    // only return on error if it is NOT a delta issue.
    // on bad deltas we retry the call with the regular builder
-   if graph.IsErrInvalidDelta(err) == nil {
+   if !graph.IsErrInvalidDelta(err) {
        return nil, nil, DeltaUpdate{}, err
    }
@@ -14,6 +14,7 @@ import (
 
 	"github.com/alcionai/corso/src/internal/common"
 	"github.com/alcionai/corso/src/internal/connector/graph"
+	"github.com/alcionai/corso/src/internal/connector/graph/api"
 	"github.com/alcionai/corso/src/internal/connector/support"
 	"github.com/alcionai/corso/src/pkg/backup/details"
 	"github.com/alcionai/corso/src/pkg/logger"
@@ -49,9 +50,9 @@ func (c Events) CreateCalendar(
 	return c.stable.Client().UsersById(user).Calendars().Post(ctx, requestbody, nil)
 }
 
-// DeleteCalendar removes calendar from user's M365 account
+// DeleteContainer removes a calendar from user's M365 account
 // Reference: https://docs.microsoft.com/en-us/graph/api/calendar-delete?view=graph-rest-1.0&tabs=go
-func (c Events) DeleteCalendar(
+func (c Events) DeleteContainer(
 	ctx context.Context,
 	user, calendarID string,
 ) error {
@@ -85,12 +86,37 @@ func (c Events) GetItem(
 	ctx context.Context,
 	user, itemID string,
 ) (serialization.Parsable, *details.ExchangeInfo, error) {
-	evt, err := c.stable.Client().UsersById(user).EventsById(itemID).Get(ctx, nil)
+	event, err := c.stable.Client().UsersById(user).EventsById(itemID).Get(ctx, nil)
 	if err != nil {
 		return nil, nil, err
 	}
 
-	return evt, EventInfo(evt), nil
+	var errs *multierror.Error
+
+	if *event.GetHasAttachments() || HasAttachments(event.GetBody()) {
+		for count := 0; count < numberOfRetries; count++ {
+			attached, err := c.largeItem.
+				Client().
+				UsersById(user).
+				EventsById(itemID).
+				Attachments().
+				Get(ctx, nil)
+			if err == nil {
+				event.SetAttachments(attached.GetValue())
+				break
+			}
+
+			logger.Ctx(ctx).Debugw("retrying event attachment download", "err", err)
+			errs = multierror.Append(errs, err)
+		}
+
+		if err != nil {
+			logger.Ctx(ctx).Errorw("event attachment download exceeded maximum retries", "err", errs)
+			return nil, nil, support.WrapAndAppend(itemID, errors.Wrap(err, "download event attachment"), nil)
+		}
+	}
+
+	return event, EventInfo(event), nil
 }
 
 func (c Client) GetAllCalendarNamesForUser(
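The retry loop added to GetItem above records every failed attempt with hashicorp/go-multierror before logging the aggregate. Here is a self-contained sketch of that accumulation pattern, with the Graph attachments call replaced by a stub; fetchOnce and the retry count are illustrative stand-ins, not the connector's real API.

package main

import (
	"fmt"

	"github.com/hashicorp/go-multierror"
)

const numberOfRetries = 3

// fetchOnce stands in for the Graph attachments call.
func fetchOnce(attempt int) error {
	if attempt < numberOfRetries {
		return fmt.Errorf("attempt %d: transient failure", attempt)
	}
	return nil
}

func main() {
	var (
		errs *multierror.Error
		err  error
	)

	for count := 0; count < numberOfRetries; count++ {
		if err = fetchOnce(count + 1); err == nil {
			break
		}
		// Append is nil-safe: appending to a nil *multierror.Error starts a new list.
		errs = multierror.Append(errs, err)
	}

	// ErrorOrNil collapses an empty list to nil, so callers can treat it as a plain error.
	fmt.Println("attempt history:", errs.ErrorOrNil())
	fmt.Println("final result:", err)
}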
@@ -178,7 +204,7 @@ type eventPager struct {
 	options *users.ItemCalendarsItemEventsDeltaRequestBuilderGetRequestConfiguration
 }
 
-func (p *eventPager) getPage(ctx context.Context) (pageLinker, error) {
+func (p *eventPager) getPage(ctx context.Context) (api.DeltaPageLinker, error) {
 	resp, err := p.builder.Get(ctx, p.options)
 	return resp, err
 }
@@ -187,7 +213,7 @@ func (p *eventPager) setNext(nextLink string) {
 	p.builder = users.NewItemCalendarsItemEventsDeltaRequestBuilder(nextLink, p.gs.Adapter())
 }
 
-func (p *eventPager) valuesIn(pl pageLinker) ([]getIDAndAddtler, error) {
+func (p *eventPager) valuesIn(pl api.DeltaPageLinker) ([]getIDAndAddtler, error) {
 	return toValues[models.Eventable](pl)
 }
@@ -216,7 +242,7 @@ func (c Events) GetAddedAndRemovedItemIDs(
 	}
 	// only return on error if it is NOT a delta issue.
 	// on bad deltas we retry the call with the regular builder
-	if graph.IsErrInvalidDelta(err) == nil {
+	if !graph.IsErrInvalidDelta(err) {
 		return nil, nil, DeltaUpdate{}, err
 	}
@@ -249,8 +275,7 @@ func (c Events) GetAddedAndRemovedItemIDs(
 // Serialization
 // ---------------------------------------------------------------------------
 
-// Serialize retrieves attachment data identified by the event item, and then
-// serializes it into a byte slice.
+// Serialize transforms the event into a byte slice.
 func (c Events) Serialize(
 	ctx context.Context,
 	item serialization.Parsable,
@@ -268,31 +293,6 @@ func (c Events) Serialize(
 
 	defer writer.Close()
 
-	if *event.GetHasAttachments() || support.HasAttachments(event.GetBody()) {
-		// getting all the attachments might take a couple attempts due to filesize
-		var retriesErr error
-
-		for count := 0; count < numberOfRetries; count++ {
-			attached, err := c.stable.
-				Client().
-				UsersById(user).
-				EventsById(itemID).
-				Attachments().
-				Get(ctx, nil)
-			retriesErr = err
-
-			if err == nil {
-				event.SetAttachments(attached.GetValue())
-				break
-			}
-		}
-
-		if retriesErr != nil {
-			logger.Ctx(ctx).Debug("exceeded maximum retries")
-			return nil, support.WrapAndAppend(itemID, errors.Wrap(retriesErr, "attachment failed"), nil)
-		}
-	}
-
 	if err = writer.WriteObjectValue("", event); err != nil {
 		return nil, support.SetNonRecoverableError(errors.Wrap(err, itemID))
 	}
@@ -13,6 +13,7 @@ import (
 	"github.com/pkg/errors"
 
 	"github.com/alcionai/corso/src/internal/connector/graph"
+	"github.com/alcionai/corso/src/internal/connector/graph/api"
 	"github.com/alcionai/corso/src/internal/connector/support"
 	"github.com/alcionai/corso/src/pkg/backup/details"
 	"github.com/alcionai/corso/src/pkg/logger"
@@ -71,9 +72,9 @@ func (c Mail) CreateMailFolderWithParent(
 		Post(ctx, requestBody, nil)
 }
 
-// DeleteMailFolder removes a mail folder with the corresponding M365 ID from the user's M365 Exchange account
+// DeleteContainer removes a mail folder with the corresponding M365 ID from the user's M365 Exchange account
 // Reference: https://docs.microsoft.com/en-us/graph/api/mailfolder-delete?view=graph-rest-1.0&tabs=http
-func (c Mail) DeleteMailFolder(
+func (c Mail) DeleteContainer(
 	ctx context.Context,
 	user, folderID string,
 ) error {
@@ -97,7 +98,8 @@ func (c Mail) GetContainerByID(
 	return service.Client().UsersById(userID).MailFoldersById(dirID).Get(ctx, ofmf)
 }
 
-// GetItem retrieves a Messageable item.
+// GetItem retrieves a Messageable item. If the item contains an attachment, that
+// attachment is also downloaded.
 func (c Mail) GetItem(
 	ctx context.Context,
 	user, itemID string,
@@ -107,6 +109,31 @@ func (c Mail) GetItem(
 		return nil, nil, err
 	}
 
+	var errs *multierror.Error
+
+	if *mail.GetHasAttachments() || HasAttachments(mail.GetBody()) {
+		for count := 0; count < numberOfRetries; count++ {
+			attached, err := c.largeItem.
+				Client().
+				UsersById(user).
+				MessagesById(itemID).
+				Attachments().
+				Get(ctx, nil)
+			if err == nil {
+				mail.SetAttachments(attached.GetValue())
+				break
+			}
+
+			logger.Ctx(ctx).Debugw("retrying mail attachment download", "err", err)
+			errs = multierror.Append(errs, err)
+		}
+
+		if err != nil {
+			logger.Ctx(ctx).Errorw("mail attachment download exceeded maximum retries", "err", errs)
+			return nil, nil, support.WrapAndAppend(itemID, errors.Wrap(err, "downloading mail attachment"), nil)
+		}
+	}
+
 	return mail, MailInfo(mail), nil
 }
 
@@ -172,7 +199,7 @@ type mailPager struct {
 	options *users.ItemMailFoldersItemMessagesDeltaRequestBuilderGetRequestConfiguration
 }
 
-func (p *mailPager) getPage(ctx context.Context) (pageLinker, error) {
+func (p *mailPager) getPage(ctx context.Context) (api.DeltaPageLinker, error) {
 	return p.builder.Get(ctx, p.options)
 }
@@ -180,7 +207,7 @@ func (p *mailPager) setNext(nextLink string) {
 	p.builder = users.NewItemMailFoldersItemMessagesDeltaRequestBuilder(nextLink, p.gs.Adapter())
 }
 
-func (p *mailPager) valuesIn(pl pageLinker) ([]getIDAndAddtler, error) {
+func (p *mailPager) valuesIn(pl api.DeltaPageLinker) ([]getIDAndAddtler, error) {
 	return toValues[models.Messageable](pl)
 }
@@ -215,7 +242,7 @@ func (c Mail) GetAddedAndRemovedItemIDs(
 	}
 	// only return on error if it is NOT a delta issue.
 	// on bad deltas we retry the call with the regular builder
-	if graph.IsErrInvalidDelta(err) == nil {
+	if !graph.IsErrInvalidDelta(err) {
 		return nil, nil, DeltaUpdate{}, err
 	}
@@ -238,8 +265,7 @@ func (c Mail) GetAddedAndRemovedItemIDs(
 // Serialization
 // ---------------------------------------------------------------------------
 
-// Serialize retrieves attachment data identified by the mail item, and then
-// serializes it into a byte slice.
+// Serialize transforms the mail item into a byte slice.
 func (c Mail) Serialize(
 	ctx context.Context,
 	item serialization.Parsable,
@@ -257,32 +283,6 @@ func (c Mail) Serialize(
 
 	defer writer.Close()
 
-	if *msg.GetHasAttachments() || support.HasAttachments(msg.GetBody()) {
-		// getting all the attachments might take a couple attempts due to filesize
-		var retriesErr error
-
-		for count := 0; count < numberOfRetries; count++ {
-			attached, err := c.stable.
-				Client().
-				UsersById(user).
-				MessagesById(itemID).
-				Attachments().
-				Get(ctx, nil)
-			retriesErr = err
-
-			if err == nil {
-				msg.SetAttachments(attached.GetValue())
-				break
-			}
-		}
-
-		if retriesErr != nil {
-			logger.Ctx(ctx).Debug("exceeded maximum retries")
-			return nil, support.WrapAndAppend(itemID,
-				support.ConnectorStackErrorTraceWrap(retriesErr, "attachment Failed"), nil)
-		}
-	}
-
 	if err = writer.WriteObjectValue("", msg); err != nil {
 		return nil, support.SetNonRecoverableError(errors.Wrap(err, itemID))
 	}
@@ -6,6 +6,7 @@ import (
 	"github.com/pkg/errors"
 
 	"github.com/alcionai/corso/src/internal/connector/graph"
+	"github.com/alcionai/corso/src/internal/connector/graph/api"
 	"github.com/alcionai/corso/src/internal/connector/support"
 )
@@ -14,14 +15,9 @@ import (
 // ---------------------------------------------------------------------------
 
 type itemPager interface {
-	getPage(context.Context) (pageLinker, error)
+	getPage(context.Context) (api.DeltaPageLinker, error)
 	setNext(string)
-	valuesIn(pageLinker) ([]getIDAndAddtler, error)
-}
-
-type pageLinker interface {
-	GetOdataDeltaLink() *string
-	GetOdataNextLink() *string
+	valuesIn(api.DeltaPageLinker) ([]getIDAndAddtler, error)
 }
 
 type getIDAndAddtler interface {
@@ -72,11 +68,7 @@ func getItemsAddedAndRemovedFromContainer(
 		// get the next page of data, check for standard errors
 		resp, err := pager.getPage(ctx)
 		if err != nil {
-			if err := graph.IsErrDeletedInFlight(err); err != nil {
-				return nil, nil, deltaURL, err
-			}
-
-			if err := graph.IsErrInvalidDelta(err); err != nil {
+			if graph.IsErrDeletedInFlight(err) || graph.IsErrInvalidDelta(err) {
 				return nil, nil, deltaURL, err
 			}
@@ -102,24 +94,24 @@ func getItemsAddedAndRemovedFromContainer(
 			}
 		}
 
+		nextLink, delta := api.NextAndDeltaLink(resp)
+
 		// the deltaLink is kind of like a cursor for overall data state.
 		// once we run through pages of nextLinks, the last query will
 		// produce a deltaLink instead (if supported), which we'll use on
 		// the next backup to only get the changes since this run.
-		delta := resp.GetOdataDeltaLink()
-		if delta != nil && len(*delta) > 0 {
-			deltaURL = *delta
+		if len(delta) > 0 {
+			deltaURL = delta
 		}
 
 		// the nextLink is our page cursor within this query.
 		// if we have more data to retrieve, we'll have a
 		// nextLink instead of a deltaLink.
-		nextLink := resp.GetOdataNextLink()
-		if nextLink == nil || len(*nextLink) == 0 {
+		if len(nextLink) == 0 {
 			break
 		}
 
-		pager.setNext(*nextLink)
+		pager.setNext(nextLink)
 	}
 
 	return addedIDs, removedIDs, deltaURL, nil
|||||||
@ -167,10 +167,10 @@ func DataCollections(
|
|||||||
acct account.M365Config,
|
acct account.M365Config,
|
||||||
su support.StatusUpdater,
|
su support.StatusUpdater,
|
||||||
ctrlOpts control.Options,
|
ctrlOpts control.Options,
|
||||||
) ([]data.Collection, error) {
|
) ([]data.Collection, map[string]struct{}, error) {
|
||||||
eb, err := selector.ToExchangeBackup()
|
eb, err := selector.ToExchangeBackup()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "exchangeDataCollection: parsing selector")
|
return nil, nil, errors.Wrap(err, "exchangeDataCollection: parsing selector")
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@ -181,7 +181,7 @@ func DataCollections(
|
|||||||
|
|
||||||
cdps, err := parseMetadataCollections(ctx, metadata)
|
cdps, err := parseMetadataCollections(ctx, metadata)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, scope := range eb.Scopes() {
|
for _, scope := range eb.Scopes() {
|
||||||
@ -196,13 +196,15 @@ func DataCollections(
|
|||||||
ctrlOpts,
|
ctrlOpts,
|
||||||
su)
|
su)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, support.WrapAndAppend(user, err, errs)
|
return nil, nil, support.WrapAndAppend(user, err, errs)
|
||||||
}
|
}
|
||||||
|
|
||||||
collections = append(collections, dcs...)
|
collections = append(collections, dcs...)
|
||||||
}
|
}
|
||||||
|
|
||||||
return collections, errs
|
// Exchange does not require adding items to the global exclude list so always
|
||||||
|
// return nil.
|
||||||
|
return collections, nil, errs
|
||||||
}
|
}
|
||||||
|
|
||||||
func getterByType(ac api.Client, category path.CategoryType) (addedAndRemovedItemIDsGetter, error) {
|
func getterByType(ac api.Client, category path.CategoryType) (addedAndRemovedItemIDsGetter, error) {
|
||||||
@ -251,7 +253,10 @@ func createCollections(
|
|||||||
Credentials: creds,
|
Credentials: creds,
|
||||||
}
|
}
|
||||||
|
|
||||||
foldersComplete, closer := observe.MessageWithCompletion(ctx, observe.Bulletf("%s - %s", qp.Category, user))
|
foldersComplete, closer := observe.MessageWithCompletion(ctx, observe.Bulletf(
|
||||||
|
"%s - %s",
|
||||||
|
observe.Safe(qp.Category.String()),
|
||||||
|
observe.PII(user)))
|
||||||
defer closer()
|
defer closer()
|
||||||
defer close(foldersComplete)
|
defer close(foldersComplete)
|
||||||
|
|
||||||
|
|||||||
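The new second return value of DataCollections is a map[string]struct{} acting as a set of item IDs for a global exclude list; Exchange always returns nil here. For readers new to Go, the empty-struct map is the standard zero-payload set idiom, as this minimal snippet shows:

package main

import "fmt"

func main() {
	// map[string]struct{} stores membership only; struct{} occupies zero bytes.
	excluded := map[string]struct{}{}
	excluded["itemID-123"] = struct{}{}

	if _, ok := excluded["itemID-123"]; ok {
		fmt.Println("itemID-123 will be skipped during the backup merge")
	}
}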
@@ -173,6 +173,9 @@ func (col *Collection) streamItems(ctx context.Context) {
 		colProgress chan<- struct{}
 
 		user = col.user
+		log  = logger.Ctx(ctx).With(
+			"service", path.ExchangeService.String(),
+			"category", col.category.String())
 	)
 
 	defer func() {
@@ -183,9 +186,9 @@ func (col *Collection) streamItems(ctx context.Context) {
 		var closer func()
 		colProgress, closer = observe.CollectionProgress(
 			ctx,
-			user,
 			col.fullPath.Category().String(),
-			col.fullPath.Folder())
+			observe.PII(user),
+			observe.PII(col.fullPath.Folder()))
 
 		go closer()
@@ -251,58 +254,19 @@ func (col *Collection) streamItems(ctx context.Context) {
 				err  error
 			)
 
-			for i := 1; i <= numberOfRetries; i++ {
-				item, info, err = col.items.GetItem(ctx, user, id)
-				if err == nil {
-					break
-				}
-
-				// If the data is no longer available just return here and chalk it up
-				// as a success. There's no reason to retry and no way we can backup up
-				// enough information to restore the item anyway.
-				if e := graph.IsErrDeletedInFlight(err); e != nil {
-					atomic.AddInt64(&success, 1)
-					logger.Ctx(ctx).Infow(
-						"Graph reported item not found",
-						"error", e,
-						"service", path.ExchangeService.String(),
-						"category", col.category.String,
-					)
-
-					return
-				}
-
-				if i < numberOfRetries {
-					time.Sleep(time.Duration(3*(i+1)) * time.Second)
-				}
-			}
-
+			item, info, err = getItemWithRetries(ctx, user, id, col.items)
 			if err != nil {
 				// Don't report errors for deleted items as there's no way for us to
-				// back up data that is gone. Chalk them up as a "success" though since
-				// there's really nothing we can do and not reporting it will make the
-				// status code upset cause we won't have the same number of results as
-				// attempted items.
-				if e := graph.IsErrDeletedInFlight(err); e != nil {
+				// back up data that is gone. Record it as a "success", since there's
+				// nothing else we can do, and not reporting it will make the status
+				// investigation upset.
+				if graph.IsErrDeletedInFlight(err) {
 					atomic.AddInt64(&success, 1)
-					logger.Ctx(ctx).Infow(
-						"Graph reported item not found",
-						"error", e,
-						"service", path.ExchangeService.String(),
-						"category", col.category.String,
-					)
-
-					return
+					log.Infow("item not found", "err", err)
+				} else {
+					errUpdater(user, support.ConnectorStackErrorTraceWrap(err, "fetching item"))
 				}
 
-				errUpdater(user, support.ConnectorStackErrorTraceWrap(err, "fetching item"))
-
 				return
 			}
@@ -333,6 +297,42 @@ func (col *Collection) streamItems(ctx context.Context) {
 	wg.Wait()
 }
 
+// get an item while handling retry and backoff.
+func getItemWithRetries(
+	ctx context.Context,
+	userID, itemID string,
+	items itemer,
+) (serialization.Parsable, *details.ExchangeInfo, error) {
+	var (
+		item serialization.Parsable
+		info *details.ExchangeInfo
+		err  error
+	)
+
+	for i := 1; i <= numberOfRetries; i++ {
+		item, info, err = items.GetItem(ctx, userID, itemID)
+		if err == nil {
+			break
+		}
+
+		// If the data is no longer available just return here and chalk it up
+		// as a success. There's no reason to retry; it's gone. Let it go.
+		if graph.IsErrDeletedInFlight(err) {
+			return nil, nil, err
+		}
+
+		if i < numberOfRetries {
+			time.Sleep(time.Duration(3*(i+1)) * time.Second)
+		}
+	}
+
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return item, info, err
+}
+
 // terminatePopulateSequence is a utility function used to close a Collection's data channel
 // and to send the status update through the channel.
 func (col *Collection) finishPopulation(ctx context.Context, success int, totalBytes int64, errs error) {
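getItemWithRetries sleeps 3*(i+1) seconds between attempts, a linear backoff of 6s then 9s across the three tries, with no sleep after the final attempt. A standalone sketch of that schedule follows, with the Graph call replaced by a stub; the constant value and getOnce are assumptions for illustration.

package main

import (
	"errors"
	"fmt"
	"time"
)

const numberOfRetries = 3 // assumed default; the real constant lives elsewhere in the package

var errTransient = errors.New("transient graph failure")

// getOnce stands in for items.GetItem.
func getOnce(attempt int) error {
	if attempt < numberOfRetries {
		return errTransient
	}
	return nil
}

func main() {
	var err error

	for i := 1; i <= numberOfRetries; i++ {
		if err = getOnce(i); err == nil {
			break
		}

		if i < numberOfRetries {
			wait := time.Duration(3*(i+1)) * time.Second
			fmt.Printf("attempt %d failed; backing off %v\n", i, wait) // 6s, then 9s
			time.Sleep(wait)
		}
	}

	fmt.Println("final err:", err)
}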
@@ -10,23 +10,33 @@ import (
 	"github.com/stretchr/testify/require"
 	"github.com/stretchr/testify/suite"
 
+	"github.com/alcionai/corso/src/internal/common"
+	"github.com/alcionai/corso/src/internal/connector/graph"
 	"github.com/alcionai/corso/src/internal/data"
+	"github.com/alcionai/corso/src/internal/tester"
 	"github.com/alcionai/corso/src/pkg/backup/details"
 	"github.com/alcionai/corso/src/pkg/control"
 	"github.com/alcionai/corso/src/pkg/path"
 )
 
-type mockItemer struct{}
+type mockItemer struct {
+	getCount       int
+	serializeCount int
+	getErr         error
+	serializeErr   error
+}
 
-func (mi mockItemer) GetItem(
+func (mi *mockItemer) GetItem(
 	context.Context,
 	string, string,
 ) (serialization.Parsable, *details.ExchangeInfo, error) {
-	return nil, nil, nil
+	mi.getCount++
+	return nil, nil, mi.getErr
 }
 
-func (mi mockItemer) Serialize(context.Context, serialization.Parsable, string, string) ([]byte, error) {
-	return nil, nil
+func (mi *mockItemer) Serialize(context.Context, serialization.Parsable, string, string) ([]byte, error) {
+	mi.serializeCount++
+	return nil, mi.serializeErr
 }
 
 type ExchangeDataCollectionSuite struct {
@@ -153,10 +163,58 @@ func (suite *ExchangeDataCollectionSuite) TestNewCollection_state() {
 				"u",
 				test.curr, test.prev,
 				0,
-				mockItemer{}, nil,
+				&mockItemer{}, nil,
 				control.Options{},
 				false)
 			assert.Equal(t, test.expect, c.State())
 		})
 	}
 }
+
+func (suite *ExchangeDataCollectionSuite) TestGetItemWithRetries() {
+	table := []struct {
+		name           string
+		items          *mockItemer
+		expectErr      func(*testing.T, error)
+		expectGetCalls int
+	}{
+		{
+			name:  "happy",
+			items: &mockItemer{},
+			expectErr: func(t *testing.T, err error) {
+				assert.NoError(t, err)
+			},
+			expectGetCalls: 1,
+		},
+		{
+			name:  "an error",
+			items: &mockItemer{getErr: assert.AnError},
+			expectErr: func(t *testing.T, err error) {
+				assert.Error(t, err)
+			},
+			expectGetCalls: 3,
+		},
+		{
+			name: "deleted in flight",
+			items: &mockItemer{
+				getErr: graph.ErrDeletedInFlight{
+					Err: *common.EncapsulateError(assert.AnError),
+				},
+			},
+			expectErr: func(t *testing.T, err error) {
+				assert.True(t, graph.IsErrDeletedInFlight(err), "is ErrDeletedInFlight")
+			},
+			expectGetCalls: 1,
+		},
+	}
+	for _, test := range table {
+		suite.T().Run(test.name, func(t *testing.T) {
+			ctx, flush := tester.NewContext()
+			defer flush()
+
+			// itemer is mocked, so only the errors are configured atm.
+			_, _, err := getItemWithRetries(ctx, "userID", "itemID", test.items)
+			test.expectErr(t, err)
+		})
+	}
+}
@@ -76,7 +76,7 @@ func (suite *ExchangeRestoreSuite) TestRestoreContact() {
 
 	defer func() {
 		// Remove the folder containing contact prior to exiting test
-		err = suite.ac.Contacts().DeleteContactFolder(ctx, userID, folderID)
+		err = suite.ac.Contacts().DeleteContainer(ctx, userID, folderID)
 		assert.NoError(t, err)
 	}()
 
@@ -110,7 +110,7 @@ func (suite *ExchangeRestoreSuite) TestRestoreEvent() {
 
 	defer func() {
 		// Removes calendar containing events created during the test
-		err = suite.ac.Events().DeleteCalendar(ctx, userID, calendarID)
+		err = suite.ac.Events().DeleteContainer(ctx, userID, calendarID)
 		assert.NoError(t, err)
 	}()
 
@@ -124,6 +124,10 @@ func (suite *ExchangeRestoreSuite) TestRestoreEvent() {
 	assert.NotNil(t, info, "event item info")
 }
 
+type containerDeleter interface {
+	DeleteContainer(context.Context, string, string) error
+}
+
 // TestRestoreExchangeObject verifies path.Category usage for restored objects
 func (suite *ExchangeRestoreSuite) TestRestoreExchangeObject() {
 	a := tester.NewM365Account(suite.T())
@@ -133,20 +137,24 @@ func (suite *ExchangeRestoreSuite) TestRestoreExchangeObject() {
 	service, err := createService(m365)
 	require.NoError(suite.T(), err)
 
+	deleters := map[path.CategoryType]containerDeleter{
+		path.EmailCategory:    suite.ac.Mail(),
+		path.ContactsCategory: suite.ac.Contacts(),
+		path.EventsCategory:   suite.ac.Events(),
+	}
+
 	userID := tester.M365UserID(suite.T())
 	now := time.Now()
 	tests := []struct {
 		name        string
 		bytes       []byte
 		category    path.CategoryType
-		cleanupFunc func(context.Context, string, string) error
 		destination func(*testing.T, context.Context) string
 	}{
 		{
 			name:     "Test Mail",
 			bytes:    mockconnector.GetMockMessageBytes("Restore Exchange Object"),
 			category: path.EmailCategory,
-			cleanupFunc: suite.ac.Mail().DeleteMailFolder,
 			destination: func(t *testing.T, ctx context.Context) string {
 				folderName := "TestRestoreMailObject: " + common.FormatSimpleDateTime(now)
 				folder, err := suite.ac.Mail().CreateMailFolder(ctx, userID, folderName)
@@ -156,10 +164,9 @@ func (suite *ExchangeRestoreSuite) TestRestoreExchangeObject() {
 			},
 		},
 		{
 			name:     "Test Mail: One Direct Attachment",
 			bytes:    mockconnector.GetMockMessageWithDirectAttachment("Restore 1 Attachment"),
 			category: path.EmailCategory,
-			cleanupFunc: suite.ac.Mail().DeleteMailFolder,
 			destination: func(t *testing.T, ctx context.Context) string {
 				folderName := "TestRestoreMailwithAttachment: " + common.FormatSimpleDateTime(now)
 				folder, err := suite.ac.Mail().CreateMailFolder(ctx, userID, folderName)
@@ -169,10 +176,9 @@ func (suite *ExchangeRestoreSuite) TestRestoreExchangeObject() {
 			},
 		},
 		{
 			name:     "Test Mail: One Large Attachment",
 			bytes:    mockconnector.GetMockMessageWithLargeAttachment("Restore Large Attachment"),
 			category: path.EmailCategory,
-			cleanupFunc: suite.ac.Mail().DeleteMailFolder,
 			destination: func(t *testing.T, ctx context.Context) string {
 				folderName := "TestRestoreMailwithLargeAttachment: " + common.FormatSimpleDateTime(now)
 				folder, err := suite.ac.Mail().CreateMailFolder(ctx, userID, folderName)
@@ -182,10 +188,9 @@ func (suite *ExchangeRestoreSuite) TestRestoreExchangeObject() {
 			},
 		},
 		{
 			name:     "Test Mail: Two Attachments",
 			bytes:    mockconnector.GetMockMessageWithTwoAttachments("Restore 2 Attachments"),
 			category: path.EmailCategory,
-			cleanupFunc: suite.ac.Mail().DeleteMailFolder,
 			destination: func(t *testing.T, ctx context.Context) string {
 				folderName := "TestRestoreMailwithAttachments: " + common.FormatSimpleDateTime(now)
 				folder, err := suite.ac.Mail().CreateMailFolder(ctx, userID, folderName)
@@ -195,10 +200,9 @@ func (suite *ExchangeRestoreSuite) TestRestoreExchangeObject() {
 			},
 		},
 		{
 			name:     "Test Mail: Reference(OneDrive) Attachment",
 			bytes:    mockconnector.GetMessageWithOneDriveAttachment("Restore Reference(OneDrive) Attachment"),
 			category: path.EmailCategory,
-			cleanupFunc: suite.ac.Mail().DeleteMailFolder,
 			destination: func(t *testing.T, ctx context.Context) string {
 				folderName := "TestRestoreMailwithReferenceAttachment: " + common.FormatSimpleDateTime(now)
 				folder, err := suite.ac.Mail().CreateMailFolder(ctx, userID, folderName)
@@ -209,10 +213,9 @@ func (suite *ExchangeRestoreSuite) TestRestoreExchangeObject() {
 		},
 		// TODO: #884 - reinstate when able to specify root folder by name
 		{
 			name:     "Test Contact",
 			bytes:    mockconnector.GetMockContactBytes("Test_Omega"),
 			category: path.ContactsCategory,
-			cleanupFunc: suite.ac.Contacts().DeleteContactFolder,
 			destination: func(t *testing.T, ctx context.Context) string {
 				folderName := "TestRestoreContactObject: " + common.FormatSimpleDateTime(now)
 				folder, err := suite.ac.Contacts().CreateContactFolder(ctx, userID, folderName)
@@ -222,10 +225,9 @@ func (suite *ExchangeRestoreSuite) TestRestoreExchangeObject() {
 			},
 		},
 		{
 			name:     "Test Events",
 			bytes:    mockconnector.GetDefaultMockEventBytes("Restored Event Object"),
 			category: path.EventsCategory,
-			cleanupFunc: suite.ac.Events().DeleteCalendar,
 			destination: func(t *testing.T, ctx context.Context) string {
 				calendarName := "TestRestoreEventObject: " + common.FormatSimpleDateTime(now)
 				calendar, err := suite.ac.Events().CreateCalendar(ctx, userID, calendarName)
@@ -235,10 +237,9 @@ func (suite *ExchangeRestoreSuite) TestRestoreExchangeObject() {
 			},
 		},
 		{
 			name:     "Test Event with Attachment",
 			bytes:    mockconnector.GetMockEventWithAttachment("Restored Event Attachment"),
 			category: path.EventsCategory,
-			cleanupFunc: suite.ac.Events().DeleteCalendar,
 			destination: func(t *testing.T, ctx context.Context) string {
 				calendarName := "TestRestoreEventObject_" + common.FormatSimpleDateTime(now)
 				calendar, err := suite.ac.Events().CreateCalendar(ctx, userID, calendarName)
@@ -266,9 +267,7 @@ func (suite *ExchangeRestoreSuite) TestRestoreExchangeObject() {
 			)
 			assert.NoError(t, err, support.ConnectorStackErrorTrace(err))
 			assert.NotNil(t, info, "item info is populated")
-
-			cleanupError := test.cleanupFunc(ctx, userID, destination)
-			assert.NoError(t, cleanupError)
+			assert.NoError(t, deleters[test.category].DeleteContainer(ctx, userID, destination))
 		})
 	}
 }
@@ -93,8 +93,7 @@ func filterContainersAndFillCollections(
 
 		added, removed, newDelta, err := getter.GetAddedAndRemovedItemIDs(ctx, qp.ResourceOwner, cID, prevDelta)
 		if err != nil {
-			// note == nil check; only catches non-inFlight error cases.
-			if graph.IsErrDeletedInFlight(err) == nil {
+			if !graph.IsErrDeletedInFlight(err) {
 				errs = support.WrapAndAppend(qp.ResourceOwner, err, errs)
 				continue
 			}
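The `== nil` idiom this hunk removes made the predicate read backwards at call sites; IsErrDeletedInFlight now returns a bool, implemented with errors.As in the final hunk of this diff. A minimal standalone version of that classifier shape, with a deliberately simplified error type standing in for the connector's:

package main

import (
	"errors"
	"fmt"
)

// ErrDeletedInFlight mirrors the shape of the connector's error type.
type ErrDeletedInFlight struct{ error }

// IsErrDeletedInFlight reports whether err wraps an ErrDeletedInFlight.
func IsErrDeletedInFlight(err error) bool {
	e := ErrDeletedInFlight{}
	return errors.As(err, &e)
}

func main() {
	err := fmt.Errorf("fetch: %w", ErrDeletedInFlight{errors.New("item gone")})

	if !IsErrDeletedInFlight(err) {
		fmt.Println("real failure; record it")
		return
	}

	fmt.Println("item deleted in flight; skip quietly")
}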
@@ -374,7 +374,11 @@ func restoreCollection(
 		user = directory.ResourceOwner()
 	)
 
-	colProgress, closer := observe.CollectionProgress(ctx, user, category.String(), directory.Folder())
+	colProgress, closer := observe.CollectionProgress(
+		ctx,
+		category.String(),
+		observe.PII(user),
+		observe.PII(directory.Folder()))
 	defer closer()
 	defer close(colProgress)
30
src/internal/connector/graph/api/api.go
Normal file
@@ -0,0 +1,30 @@
+package api
+
+type PageLinker interface {
+	GetOdataNextLink() *string
+}
+
+type DeltaPageLinker interface {
+	PageLinker
+	GetOdataDeltaLink() *string
+}
+
+func NextLink(pl PageLinker) string {
+	next := pl.GetOdataNextLink()
+	if next == nil || len(*next) == 0 {
+		return ""
+	}
+
+	return *next
+}
+
+func NextAndDeltaLink(pl DeltaPageLinker) (string, string) {
+	next := NextLink(pl)
+
+	delta := pl.GetOdataDeltaLink()
+	if delta == nil || len(*delta) == 0 {
+		return next, ""
+	}
+
+	return next, *delta
+}
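NextAndDeltaLink centralizes the nil-and-empty checks on the *string links that every pager previously repeated inline. A small usage sketch, assuming this api package; fakeResp is a throwaway stand-in for a Graph delta response page:

package main

import (
	"fmt"

	"github.com/alcionai/corso/src/internal/connector/graph/api"
)

// fakeResp stands in for a Graph delta response page.
type fakeResp struct {
	next, delta *string
}

func (r fakeResp) GetOdataNextLink() *string  { return r.next }
func (r fakeResp) GetOdataDeltaLink() *string { return r.delta }

func main() {
	delta := "https://graph.example/delta?token=xyz"
	resp := fakeResp{delta: &delta} // final page: deltaLink only, no nextLink

	next, dl := api.NextAndDeltaLink(resp)
	fmt.Printf("next=%q delta=%q\n", next, dl) // next="" delta=".../delta?token=xyz"
}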
114
src/internal/connector/graph/api/api_test.go
Normal file
@@ -0,0 +1,114 @@
+package api_test
+
+import (
+	"strings"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/suite"
+
+	"github.com/alcionai/corso/src/internal/connector/graph/api"
+)
+
+type mockNextLink struct {
+	nextLink *string
+}
+
+func (l mockNextLink) GetOdataNextLink() *string {
+	return l.nextLink
+}
+
+type mockDeltaNextLink struct {
+	mockNextLink
+	deltaLink *string
+}
+
+func (l mockDeltaNextLink) GetOdataDeltaLink() *string {
+	return l.deltaLink
+}
+
+type testInput struct {
+	name         string
+	inputLink    *string
+	expectedLink string
+}
+
+// Needs to be var not const so we can take the address of it.
+var (
+	emptyLink = ""
+	link      = "foo"
+	link2     = "bar"
+
+	nextLinkInputs = []testInput{
+		{
+			name:         "empty",
+			inputLink:    &emptyLink,
+			expectedLink: "",
+		},
+		{
+			name:         "nil",
+			inputLink:    nil,
+			expectedLink: "",
+		},
+		{
+			name:         "non_empty",
+			inputLink:    &link,
+			expectedLink: link,
+		},
+	}
+)
+
+type APIUnitSuite struct {
+	suite.Suite
+}
+
+func TestAPIUnitSuite(t *testing.T) {
+	suite.Run(t, new(APIUnitSuite))
+}
+
+func (suite *APIUnitSuite) TestNextLink() {
+	for _, test := range nextLinkInputs {
+		suite.T().Run(test.name, func(t *testing.T) {
+			l := mockNextLink{nextLink: test.inputLink}
+			assert.Equal(t, test.expectedLink, api.NextLink(l))
+		})
+	}
+}
+
+func (suite *APIUnitSuite) TestNextAndDeltaLink() {
+	deltaTable := []testInput{
+		{
+			name:         "empty",
+			inputLink:    &emptyLink,
+			expectedLink: "",
+		},
+		{
+			name:         "nil",
+			inputLink:    nil,
+			expectedLink: "",
+		},
+		{
+			name: "non_empty",
+			// Use a different link so we can see if the results get swapped or something.
+			inputLink:    &link2,
+			expectedLink: link2,
+		},
+	}
+
+	for _, next := range nextLinkInputs {
+		for _, delta := range deltaTable {
+			name := strings.Join([]string{next.name, "next", delta.name, "delta"}, "_")
+
+			suite.T().Run(name, func(t *testing.T) {
+				l := mockDeltaNextLink{
+					mockNextLink: mockNextLink{nextLink: next.inputLink},
+					deltaLink:    delta.inputLink,
+				}
+				gotNext, gotDelta := api.NextAndDeltaLink(l)
+
+				assert.Equal(t, next.expectedLink, gotNext)
+				assert.Equal(t, delta.expectedLink, gotDelta)
+			})
+		}
+	}
+}
@@ -15,6 +15,7 @@ import (
 // Details on how the Code was generated is present in `kioter-lock.json`.
 // NOTE: kiota gen file is altered to indicate what files are included in the created
 //
+<<<<<<< HEAD
 // Beta files use an adapter that allows for ASync() request. This feature is disabled in main. Generic Kiota adapters do not support.
 // For the client, only calls that begin as client.SitesBy(siteID).Pages() have an endpoint.
 //
@@ -24,6 +25,19 @@ import (
 // Supported Call source are located within the sites subdirectory
 // Specifics on `betaClient.SitesById(siteID).Pages` are located: sites/site_item_request_builder.go
 //
+=======
+// Changes to Sites Directory:
+// Access files send requests with an adapter's with ASync() support.
+// This feature is not enabled in v1.0. Manually changed in remaining files.
+// Additionally, only calls that begin as client.SitesBy(siteID).Pages() have an endpoint.
+//
+// The use case specific to Pages(). All other requests should be routed to the /internal/connector/graph.Servicer
+// Specifics on `betaClient.SitesById(siteID).Pages` are located: sites/site_item_request_builder.go
+//
+// Required model files are identified as `modelFiles` in kiota-lock.json. Directory -> betasdk/models
+// Required access files are identified as `sitesFiles` in kiota-lock.json. Directory -> betasdk/sites
+//
+>>>>>>> main
 // BetaClient minimal msgraph-beta-sdk-go for connecting to msgraph-beta-sdk-go
 // for retrieving `SharePoint.Pages`. Code is generated from kiota.dev.
 // requestAdapter is registered with the following the serializers:
@@ -82,3 +96,8 @@ func (m *BetaClient) SitesById(id string) *i1a3c1a5501c5e41b7fd169f2d4c768dce9b0
 	}
 	return i1a3c1a5501c5e41b7fd169f2d4c768dce9b096ac28fb5431bf02afcc57295411.NewSiteItemRequestBuilderInternal(urlTplParams, m.requestAdapter)
 }
+
+// Adapter() helper method to export Adapter for iterating
+func (m *BetaClient) Adapter() *msgraphsdk.GraphRequestAdapter {
+	return m.requestAdapter
+}
@ -1,34 +1,131 @@
|
|||||||
{
|
{
|
||||||
"lockFileVersion": "1.0.0",
|
"lockFileVersion": "1.0.0",
|
||||||
"kiotaVersion": "0.10.0.0",
|
"kiotaVersion": "0.10.0.0",
|
||||||
"clientClassName": "BetaClient",
|
"clientClassName": "BetaClient",
|
||||||
"clientNamespaceName": "github.com/alcionai/corso/src/internal/connector/graph/betasdk",
|
"clientNamespaceName": "github.com/alcionai/corso/src/internal/connector/graph/betasdk",
|
||||||
"language": "Go",
|
"language": "Go",
|
||||||
"usesBackingStore": false,
|
"betaVersion": "0.53.0",
|
||||||
"includeAdditionalData": true,
|
"usesBackingStore": false,
|
||||||
"serializers": [
|
"includeAdditionalData": true,
|
||||||
"Microsoft.Kiota.Serialization.Json.JsonSerializationWriterFactory",
|
"serializers": [
|
||||||
"Microsoft.Kiota.Serialization.Text.TextSerializationWriterFactory",
|
"Microsoft.Kiota.Serialization.Json.JsonSerializationWriterFactory",
|
||||||
"Microsoft.Kiota.Serialization.Form.FormSerializationWriterFactory"
|
"Microsoft.Kiota.Serialization.Text.TextSerializationWriterFactory",
|
||||||
],
|
"Microsoft.Kiota.Serialization.Form.FormSerializationWriterFactory"
|
||||||
"deserializers": [
|
],
|
||||||
"Microsoft.Kiota.Serialization.Json.JsonParseNodeFactory",
|
"deserializers": [
|
||||||
"Microsoft.Kiota.Serialization.Text.TextParseNodeFactory",
|
"Microsoft.Kiota.Serialization.Json.JsonParseNodeFactory",
|
||||||
"Microsoft.Kiota.Serialization.Form.FormParseNodeFactory"
|
"Microsoft.Kiota.Serialization.Text.TextParseNodeFactory",
|
||||||
],
|
"Microsoft.Kiota.Serialization.Form.FormParseNodeFactory"
|
||||||
"structuredMimeTypes": [
|
],
|
||||||
"application/json",
|
"structuredMimeTypes": [
|
||||||
"text/plain",
|
"application/json",
|
||||||
"application/x-www-form-urlencoded"
|
"text/plain",
|
||||||
],
|
"application/x-www-form-urlencoded"
|
||||||
"includePatterns": [
|
],
|
||||||
"**/sites/**"
|
"includePatterns": [
|
||||||
],
|
"**/sites/**"
|
||||||
"excludePatterns": [
|
],
|
||||||
"**/admin/**",
|
"excludePatterns": [
|
||||||
"**/users/**",
|
"**/admin/**",
|
||||||
"**/groups/**",
|
"**/users/**",
|
||||||
"**/onenote/**"
|
"**/groups/**",
|
||||||
],
|
"**/onenote/**"
|
||||||
"disabledValidationRules": []
|
],
|
||||||
|
"sitesFiles": [
|
||||||
|
"count_request_builder.go",
|
||||||
|
"item_pages_count_request_builder.go",
|
||||||
|
"item_pages_item_canvas_layout_horizontal_sections_count_request_builder.go",
|
||||||
|
"item_pages_item_canvas_layout_horizontal_sections_horizontal_section_item_request_builder.go",
|
||||||
|
"item_pages_item_canvas_layout_horizontal_sections_item_columns_count_request_builder.go",
|
||||||
|
"item_pages_item_canvas_layout_horizontal_sections_item_columns_horizontal_section_column_item_request_builder.go",
|
||||||
|
"item_pages_item_canvas_layout_horizontal_sections_item_columns_item_webparts_count_request_builder.go",
|
||||||
|
"item_pages_item_canvas_layout_horizontal_sections_item_columns_item_webparts_item_get_position_of_web_part_request_builder.go",
|
||||||
|
"item_pages_item_canvas_layout_horizontal_sections_item_columns_item_webparts_request_builder.go",
|
||||||
|
"item_pages_item_canvas_layout_horizontal_sections_item_columns_item_webparts_web_part_item_request_builder.go",
|
||||||
|
"item_pages_item_canvas_layout_horizontal_sections_item_columns_request_builder.go",
|
||||||
|
"item_pages_item_canvas_layout_horizontal_sections_request_builder.go",
|
||||||
|
"item_pages_item_canvas_layout_request_builder.go",
|
||||||
|
"item_pages_item_canvas_layout_vertical_section_request_builder.go",
|
||||||
|
"item_pages_item_canvas_layout_vertical_section_webparts_count_request_builder.go",
|
||||||
|
"item_pages_item_canvas_layout_vertical_section_webparts_item_get_position_of_web_part_request_builder.go",
|
||||||
|
"item_pages_item_canvas_layout_vertical_section_webparts_request_builder.go",
|
||||||
|
"item_pages_item_canvas_layout_vertical_section_webparts_web_part_item_request_builder.go",
|
||||||
|
"item_pages_item_get_web_parts_by_position_post_request_body.go",
|
||||||
|
"item_pages_item_get_web_parts_by_position_post_request_bodyable.go",
|
||||||
|
"item_pages_item_get_web_parts_by_position_request_builder.go",
|
||||||
|
"item_pages_item_get_web_parts_by_position_response.go",
|
||||||
|
"item_pages_item_get_web_parts_by_position_responseable.go",
|
||||||
|
"item_pages_item_publish_request_builder.go",
|
||||||
|
"item_pages_item_web_parts_count_request_builder.go",
|
||||||
|
"item_pages_item_web_parts_item_get_position_of_web_part_request_builder.go",
|
||||||
|
"item_pages_item_web_parts_request_builder.go",
|
||||||
|
"item_pages_item_web_parts_web_part_item_request_builder.go",
|
||||||
|
"item_pages_request_builder.go",
|
||||||
|
"item_pages_site_page_item_request_builder.go",
|
||||||
|
"item_sites_count_request_builder.go",
|
||||||
|
"item_sites_site_item_request_builder.go",
|
||||||
|
"site_item_request_builder.go"
|
||||||
|
],
|
||||||
|
"modelFiles":[
|
||||||
|
"base_item.go",
|
||||||
|
"page_layout_type.go",
|
||||||
|
"standard_web_partable.go",
|
||||||
|
"canvas_layout.go",
|
||||||
|
"page_promotion_type.go",
|
||||||
|
"text_web_part.go",
|
||||||
|
"canvas_layoutable.go",
|
||||||
|
"publication_facet.go",
|
||||||
|
"text_web_part_collection_response.go",
|
||||||
|
"horizontal_section.go",
|
||||||
|
"publication_facetable.go",
|
||||||
|
"text_web_part_collection_responseable.go",
|
||||||
|
"horizontal_section_collection_response.go",
|
||||||
|
"reactions_facet.go",
|
||||||
|
"text_web_partable.go",
|
||||||
|
"horizontal_section_collection_responseable.go",
|
||||||
|
"reactions_facetable.go",
|
||||||
|
"title_area.go",
|
||||||
|
"horizontal_section_column.go",
|
||||||
|
"section_emphasis_type.go",
|
||||||
|
"title_area_layout_type.go",
|
||||||
|
"horizontal_section_column_collection_response.go",
|
||||||
|
"server_processed_content.go",
|
||||||
|
"title_area_text_alignment_type.go",
|
||||||
|
"horizontal_section_column_collection_responseable.go",
|
||||||
|
"server_processed_contentable.go",
|
||||||
|
"title_areaable.go",
|
||||||
|
"horizontal_section_columnable.go",
|
||||||
|
"site_access_type.go",
|
||||||
|
"vertical_section.go",
|
||||||
|
"horizontal_section_layout_type.go",
|
||||||
|
"site_page.go",
|
||||||
|
"vertical_sectionable.go",
|
||||||
|
"horizontal_sectionable.go",
|
||||||
|
"site_page_collection_response.go",
|
||||||
|
"web_part.go",
|
||||||
|
"meta_data_key_string_pair.go",
|
||||||
|
"site_page_collection_responseable.go",
|
||||||
|
"web_part_collection_response.go",
|
||||||
|
"meta_data_key_string_pair_collection_response.go",
|
||||||
|
"site_pageable.go",
|
||||||
|
"web_part_collection_responseable.go",
|
||||||
|
"meta_data_key_string_pair_collection_responseable.go",
|
||||||
|
"site_security_level.go",
|
||||||
|
"web_part_data.go",
|
||||||
|
"meta_data_key_string_pairable.go",
|
||||||
|
"site_settings.go",
|
||||||
|
"web_part_dataable.go",
|
||||||
|
"meta_data_key_value_pair.go",
|
||||||
|
"site_settingsable.go",
|
||||||
|
"web_part_position.go",
|
||||||
|
"meta_data_key_value_pair_collection_response.go",
|
||||||
|
"standard_web_part.go",
|
||||||
|
"web_part_positionable.go",
|
||||||
|
"meta_data_key_value_pair_collection_responseable.go",
|
||||||
|
"standard_web_part_collection_response.go",
|
||||||
|
"web_partable.go",
|
||||||
|
"meta_data_key_value_pairable.go",
|
||||||
|
"standard_web_part_collection_responseable.go"
|
||||||
|
],
|
||||||
|
"disabledValidationRules": []
|
||||||
}
|
}
|
||||||
|
|||||||
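Note: the include/exclude patterns above gate which API paths the code generator emits request builders for. As a rough illustration of how the two lists interact (a minimal sketch using a contains-based stand-in for the generator's real `**` glob matcher, which is not shown in this diff):

package main

import (
	"fmt"
	"strings"
)

// simplifiedMatch stands in for "**" glob matching: it only checks that
// the path contains the pattern's fixed segment, which is enough to
// illustrate how includes and excludes combine.
func simplifiedMatch(pattern, path string) bool {
	return strings.Contains(path, strings.Trim(pattern, "*"))
}

func shouldGenerate(path string, includes, excludes []string) bool {
	included := false
	for _, p := range includes {
		if simplifiedMatch(p, path) {
			included = true
			break
		}
	}
	if !included {
		return false
	}
	for _, p := range excludes {
		if simplifiedMatch(p, path) {
			return false
		}
	}
	return true
}

func main() {
	includes := []string{"**/sites/**"}
	excludes := []string{"**/admin/**", "**/users/**", "**/groups/**", "**/onenote/**"}

	fmt.Println(shouldGenerate("/sites/{site-id}/pages", includes, excludes))             // true
	fmt.Println(shouldGenerate("/sites/{site-id}/onenote/notebooks", includes, excludes)) // false
}

In other words, sites paths pass unless they fall under one of the excluded subtrees, which keeps the generated surface limited to the SharePoint pages/drives endpoints this change needs.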
@ -26,27 +26,32 @@ const (
 	errCodeMailboxNotEnabledForRESTAPI = "MailboxNotEnabledForRESTAPI"
 )
 
+var (
+	Err401Unauthorized = errors.New("401 unauthorized")
+	// normally the graph client will catch this for us, but in case we
+	// run our own client Do(), we need to translate it to a timeout type
+	// failure locally.
+	Err429TooManyRequests    = errors.New("429 too many requests")
+	Err503ServiceUnavailable = errors.New("503 Service Unavailable")
+)
+
 // The folder or item was deleted between the time we identified
 // it and when we tried to fetch data for it.
 type ErrDeletedInFlight struct {
 	common.Err
 }
 
-func IsErrDeletedInFlight(err error) error {
-	if asDeletedInFlight(err) {
-		return err
+func IsErrDeletedInFlight(err error) bool {
+	e := ErrDeletedInFlight{}
+	if errors.As(err, &e) {
+		return true
 	}
 
 	if hasErrorCode(err, errCodeItemNotFound, errCodeSyncFolderNotFound) {
-		return ErrDeletedInFlight{*common.EncapsulateError(err)}
+		return true
 	}
 
-	return nil
-}
-
-func asDeletedInFlight(err error) bool {
-	e := ErrDeletedInFlight{}
-	return errors.As(err, &e)
+	return false
 }
 
 // Delta tokens can be desynced or expired. In either case, the token
@ -56,21 +61,17 @@ type ErrInvalidDelta struct {
 	common.Err
 }
 
-func IsErrInvalidDelta(err error) error {
-	if asInvalidDelta(err) {
-		return err
+func IsErrInvalidDelta(err error) bool {
+	e := ErrInvalidDelta{}
+	if errors.As(err, &e) {
+		return true
 	}
 
 	if hasErrorCode(err, errCodeSyncStateNotFound, errCodeResyncRequired) {
-		return ErrInvalidDelta{*common.EncapsulateError(err)}
+		return true
 	}
 
-	return nil
-}
-
-func asInvalidDelta(err error) bool {
-	e := ErrInvalidDelta{}
-	return errors.As(err, &e)
+	return false
 }
 
 func IsErrExchangeMailFolderNotFound(err error) bool {
@ -85,23 +86,72 @@ type ErrTimeout struct {
 	common.Err
 }
 
-func IsErrTimeout(err error) error {
-	if asTimeout(err) {
-		return err
+func IsErrTimeout(err error) bool {
+	e := ErrTimeout{}
+	if errors.As(err, &e) {
+		return true
 	}
 
-	if isTimeoutErr(err) {
-		return ErrTimeout{*common.EncapsulateError(err)}
+	if errors.Is(err, context.DeadlineExceeded) || os.IsTimeout(err) {
+		return true
 	}
 
-	return nil
-}
-
-func asTimeout(err error) bool {
-	e := ErrTimeout{}
-	return errors.As(err, &e)
+	switch err := err.(type) {
+	case *url.Error:
+		return err.Timeout()
+	default:
+		return false
+	}
 }
+
+type ErrThrottled struct {
+	common.Err
+}
+
+func IsErrThrottled(err error) bool {
+	if errors.Is(err, Err429TooManyRequests) {
+		return true
+	}
+
+	e := ErrThrottled{}
+
+	return errors.As(err, &e)
+}
+
+type ErrUnauthorized struct {
+	common.Err
+}
+
+func IsErrUnauthorized(err error) bool {
+	// TODO: refine this investigation.  We don't currently know if
+	// a specific item download url expired, or if the full connection
+	// auth expired.
+	if errors.Is(err, Err401Unauthorized) {
+		return true
+	}
+
+	e := ErrUnauthorized{}
+
+	return errors.As(err, &e)
+}
+
+type ErrServiceUnavailable struct {
+	common.Err
+}
+
+func IsServiceUnavailable(err error) bool {
+	if errors.Is(err, Err503ServiceUnavailable) {
+		return true
+	}
+
+	e := ErrServiceUnavailable{}
+
+	return errors.As(err, &e)
+}
 
 // ---------------------------------------------------------------------------
 // error parsers
 // ---------------------------------------------------------------------------
@ -122,20 +172,3 @@ func hasErrorCode(err error, codes ...string) bool {
 
 	return slices.Contains(codes, *oDataError.GetError().GetCode())
 }
-
-// isTimeoutErr is used to determine if the Graph error returned is
-// because of Timeout. This is used to restrict retries to just
-// timeouts as other errors are handled within a middleware in the
-// client.
-func isTimeoutErr(err error) bool {
-	if errors.Is(err, context.DeadlineExceeded) || os.IsTimeout(err) {
-		return true
-	}
-
-	switch err := err.(type) {
-	case *url.Error:
-		return err.Timeout()
-	default:
-		return false
-	}
-}
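Note: switching the classifiers from error-wrapping to plain booleans simplifies retry decisions at the call sites. A minimal, self-contained sketch of that pattern (the errors and isTransient helper below are illustrative stand-ins for the graph.IsErr* helpers in this diff):

package main

import (
	"errors"
	"fmt"
)

var (
	errTimeout   = errors.New("timeout")
	errThrottled = errors.New("429 too many requests")
)

// isTransient mirrors the boolean-classifier style: transient failures
// are worth a retry, everything else fails fast.
func isTransient(err error) bool {
	return errors.Is(err, errTimeout) || errors.Is(err, errThrottled)
}

func fetchWithRetry(fetch func() error, maxRetries int) error {
	var err error
	for i := 0; i < maxRetries; i++ {
		if err = fetch(); err == nil || !isTransient(err) {
			break
		}
	}
	return err
}

func main() {
	calls := 0
	err := fetchWithRetry(func() error {
		calls++
		if calls < 3 {
			return errThrottled
		}
		return nil
	}, 5)
	fmt.Println(calls, err) // 3 <nil>
}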
@ -2,15 +2,28 @@ package graph
 
 import (
 	"context"
+	"net/http"
+	"net/http/httputil"
+	"os"
+	"time"
 
-	absser "github.com/microsoft/kiota-abstractions-go/serialization"
+	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
+	"github.com/microsoft/kiota-abstractions-go/serialization"
+	ka "github.com/microsoft/kiota-authentication-azure-go"
+	khttp "github.com/microsoft/kiota-http-go"
 	msgraphsdk "github.com/microsoftgraph/msgraph-sdk-go"
+	msgraphgocore "github.com/microsoftgraph/msgraph-sdk-go-core"
 	"github.com/pkg/errors"
 
 	"github.com/alcionai/corso/src/pkg/account"
+	"github.com/alcionai/corso/src/pkg/logger"
 	"github.com/alcionai/corso/src/pkg/path"
 )
 
+const (
+	logGraphRequestsEnvKey = "LOG_GRAPH_REQUESTS"
+)
+
 // AllMetadataFileNames produces the standard set of filenames used to store graph
 // metadata such as delta tokens and folderID->path references.
 func AllMetadataFileNames() []string {
@ -23,6 +36,10 @@ type QueryParams struct {
 	Credentials account.M365Config
 }
 
+// ---------------------------------------------------------------------------
+// Service Handler
+// ---------------------------------------------------------------------------
+
 var _ Servicer = &Service{}
 
 type Service struct {
@ -47,7 +64,7 @@ func (s Service) Client() *msgraphsdk.GraphServiceClient {
 
 // Serialize writes an M365 parsable object into a byte array using the built-in
 // application/json writer within the adapter.
-func (s Service) Serialize(object absser.Parsable) ([]byte, error) {
+func (s Service) Serialize(object serialization.Parsable) ([]byte, error) {
 	writer, err := s.adapter.GetSerializationWriterFactory().GetSerializationWriter("application/json")
 	if err != nil || writer == nil {
 		return nil, errors.Wrap(err, "creating json serialization writer")
@ -61,6 +78,90 @@ func (s Service) Serialize(object absser.Parsable) ([]byte, error) {
 	return writer.GetSerializedContent()
 }
 
+// ---------------------------------------------------------------------------
+// Helpers
+// ---------------------------------------------------------------------------
+
+type clientConfig struct {
+	noTimeout bool
+}
+
+type option func(*clientConfig)
+
+// populate constructs a clientConfig according to the provided options.
+func (c *clientConfig) populate(opts ...option) *clientConfig {
+	for _, opt := range opts {
+		opt(c)
+	}
+
+	return c
+}
+
+// apply updates the http.Client with the expected options.
+func (c *clientConfig) apply(hc *http.Client) {
+	if c.noTimeout {
+		hc.Timeout = 0
+	}
+}
+
+// NoTimeout sets the httpClient.Timeout to 0 (unlimited).
+// The resulting client isn't suitable for most queries, due to the
+// capacity for a call to persist forever. This configuration should
+// only be used when downloading very large files.
+func NoTimeout() option {
+	return func(c *clientConfig) {
+		c.noTimeout = true
+	}
+}
+
+// CreateAdapter uses provided credentials to log into M365 using the Kiota Azure
+// Library with the Azure identity package. An adapter object is a necessary
+// component for creating a *msgraphsdk.GraphServiceClient.
+func CreateAdapter(tenant, client, secret string, opts ...option) (*msgraphsdk.GraphRequestAdapter, error) {
+	// Client Provider: Uses Secret for access to tenant-level data
+	cred, err := azidentity.NewClientSecretCredential(tenant, client, secret, nil)
+	if err != nil {
+		return nil, errors.Wrap(err, "creating m365 client secret credentials")
+	}
+
+	auth, err := ka.NewAzureIdentityAuthenticationProviderWithScopes(
+		cred,
+		[]string{"https://graph.microsoft.com/.default"},
+	)
+	if err != nil {
+		return nil, errors.Wrap(err, "creating new AzureIdentityAuthentication")
+	}
+
+	httpClient := HTTPClient(opts...)
+
+	return msgraphsdk.NewGraphRequestAdapterWithParseNodeFactoryAndSerializationWriterFactoryAndHttpClient(
+		auth, nil, nil, httpClient)
+}
+
+// HTTPClient creates the httpClient with middlewares and timeout configured.
+//
+// Re-use of http clients is critical, or else we leak OS resources
+// and consume relatively unbound socket connections. It is important
+// to centralize this client to be passed downstream where api calls
+// can utilize it on a per-download basis.
+func HTTPClient(opts ...option) *http.Client {
+	clientOptions := msgraphsdk.GetDefaultClientOptions()
+	middlewares := msgraphgocore.GetDefaultMiddlewaresWithOptions(&clientOptions)
+	middlewares = append(middlewares, &LoggingMiddleware{})
+	httpClient := msgraphgocore.GetDefaultClient(&clientOptions, middlewares...)
+	httpClient.Timeout = time.Second * 90
+
+	(&clientConfig{}).
+		populate(opts...).
+		apply(httpClient)
+
+	return httpClient
+}
+
+// ---------------------------------------------------------------------------
+// Interfaces
+// ---------------------------------------------------------------------------
+
 type Servicer interface {
 	// Client() returns msgraph Service client that can be used to process and execute
 	// the majority of the queries to the M365 Backstore
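Note: the clientConfig/option/populate/apply quartet above is the standard functional-options pattern. This self-contained sketch mirrors it outside the msgraph dependencies, showing why callers can compose configuration without touching the client constructor's signature:

package main

import (
	"fmt"
	"net/http"
	"time"
)

type clientConfig struct{ noTimeout bool }

type option func(*clientConfig)

// NoTimeout flags the config for an unlimited http timeout.
func NoTimeout() option {
	return func(c *clientConfig) { c.noTimeout = true }
}

func (c *clientConfig) populate(opts ...option) *clientConfig {
	for _, opt := range opts {
		opt(c)
	}
	return c
}

func (c *clientConfig) apply(hc *http.Client) {
	if c.noTimeout {
		hc.Timeout = 0
	}
}

func main() {
	hc := &http.Client{Timeout: 90 * time.Second}
	(&clientConfig{}).populate(NoTimeout()).apply(hc)
	fmt.Println(hc.Timeout) // 0s: unlimited, reserved for very large downloads
}

The design choice here is that new knobs only require a new option constructor; every existing call site keeps compiling unchanged.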
@ -120,3 +221,78 @@ type ContainerResolver interface {
 	// Items returns the containers in the cache.
 	Items() []CachedContainer
 }
 
+// ---------------------------------------------------------------------------
+// Client Middleware
+// ---------------------------------------------------------------------------
+
+// LoggingMiddleware can be used to log the http request sent by the graph client
+type LoggingMiddleware struct{}
+
+func (handler *LoggingMiddleware) Intercept(
+	pipeline khttp.Pipeline,
+	middlewareIndex int,
+	req *http.Request,
+) (*http.Response, error) {
+	var (
+		ctx       = req.Context()
+		resp, err = pipeline.Next(req, middlewareIndex)
+	)
+
+	if resp == nil {
+		return resp, err
+	}
+
+	// Return immediately if the response is good (2xx).
+	// If api logging is toggled, log a body-less dump of the request/resp.
+	if (resp.StatusCode / 100) == 2 {
+		if logger.DebugAPI || os.Getenv(logGraphRequestsEnvKey) != "" {
+			respDump, _ := httputil.DumpResponse(resp, false)
+
+			metadata := []any{
+				"idx", middlewareIndex,
+				"method", req.Method,
+				"status", resp.Status,
+				"statusCode", resp.StatusCode,
+				"requestLen", req.ContentLength,
+				"url", req.URL,
+				"response", respDump,
+			}
+
+			logger.Ctx(ctx).Debugw("2xx graph api resp", metadata...)
+		}
+
+		return resp, err
+	}
+
+	// Log errors according to api debugging configurations.
+	// When debugging is toggled, every non-2xx is recorded with a response dump.
+	// Otherwise, throttling cases and other non-2xx responses are logged
+	// with a slimmer reference for telemetry/supportability purposes.
+	if logger.DebugAPI || os.Getenv(logGraphRequestsEnvKey) != "" {
+		respDump, _ := httputil.DumpResponse(resp, true)
+
+		metadata := []any{
+			"idx", middlewareIndex,
+			"method", req.Method,
+			"status", resp.Status,
+			"statusCode", resp.StatusCode,
+			"requestLen", req.ContentLength,
+			"url", req.URL,
+			"response", string(respDump),
+		}
+
+		logger.Ctx(ctx).Errorw("non-2xx graph api response", metadata...)
+	} else {
+		// special case for supportability: log all throttling cases.
+		if resp.StatusCode == http.StatusTooManyRequests {
+			logger.Ctx(ctx).Infow("graph api throttling", "method", req.Method, "url", req.URL)
+		}
+
+		if resp.StatusCode != http.StatusTooManyRequests && (resp.StatusCode/100) != 2 {
+			logger.Ctx(ctx).Infow("graph api error", "status", resp.Status, "method", req.Method, "url", req.URL)
+		}
+	}
+
+	return resp, err
+}
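Note: a kiota middleware behaves like a chained http.RoundTripper. This stdlib-only sketch shows the same interception idea without the khttp.Pipeline machinery; loggingTransport and its output line are illustrative, not corso's actual names:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

type loggingTransport struct{ next http.RoundTripper }

// RoundTrip forwards the request and, like the middleware above, emits a
// slim telemetry line for non-2xx responses.
func (t loggingTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	resp, err := t.next.RoundTrip(req)
	if resp != nil && resp.StatusCode/100 != 2 {
		fmt.Printf("non-2xx response: %s %s -> %s\n", req.Method, req.URL, resp.Status)
	}
	return resp, err
}

func main() {
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusTooManyRequests)
	}))
	defer srv.Close()

	client := &http.Client{Transport: loggingTransport{next: http.DefaultTransport}}
	resp, err := client.Get(srv.URL) // prints: non-2xx response: GET ... -> 429 Too Many Requests
	if err == nil {
		resp.Body.Close()
	}
}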
@ -1,125 +0,0 @@
-package graph
-
-import (
-	"net/http"
-	"net/http/httputil"
-	"os"
-	"time"
-
-	az "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
-	ka "github.com/microsoft/kiota-authentication-azure-go"
-	khttp "github.com/microsoft/kiota-http-go"
-	msgraphsdk "github.com/microsoftgraph/msgraph-sdk-go"
-	msgraphgocore "github.com/microsoftgraph/msgraph-sdk-go-core"
-	"github.com/pkg/errors"
-
-	"github.com/alcionai/corso/src/pkg/logger"
-)
-
-const (
-	logGraphRequestsEnvKey = "LOG_GRAPH_REQUESTS"
-)
-
-// CreateAdapter uses provided credentials to log into M365 using the Kiota Azure
-// Library with the Azure identity package. An adapter object is a necessary
-// component for creating a *msgraphsdk.GraphServiceClient.
-func CreateAdapter(tenant, client, secret string) (*msgraphsdk.GraphRequestAdapter, error) {
-	// Client Provider: Uses Secret for access to tenant-level data
-	cred, err := az.NewClientSecretCredential(tenant, client, secret, nil)
-	if err != nil {
-		return nil, errors.Wrap(err, "creating m365 client secret credentials")
-	}
-
-	auth, err := ka.NewAzureIdentityAuthenticationProviderWithScopes(
-		cred,
-		[]string{"https://graph.microsoft.com/.default"},
-	)
-	if err != nil {
-		return nil, errors.Wrap(err, "creating new AzureIdentityAuthentication")
-	}
-
-	httpClient := CreateHTTPClient()
-
-	return msgraphsdk.NewGraphRequestAdapterWithParseNodeFactoryAndSerializationWriterFactoryAndHttpClient(
-		auth, nil, nil, httpClient)
-}
-
-// CreateHTTPClient creates the httpClient with middlewares and timeout configured
-func CreateHTTPClient() *http.Client {
-	clientOptions := msgraphsdk.GetDefaultClientOptions()
-	middlewares := msgraphgocore.GetDefaultMiddlewaresWithOptions(&clientOptions)
-	middlewares = append(middlewares, &LoggingMiddleware{})
-	httpClient := msgraphgocore.GetDefaultClient(&clientOptions, middlewares...)
-	httpClient.Timeout = time.Second * 90
-
-	return httpClient
-}
-
-// LargeItemClient generates a client that's configured to handle
-// large file downloads. This client isn't suitable for other queries
-// due to loose restrictions on timeouts and such.
-//
-// Re-use of http clients is critical, or else we leak os resources
-// and consume relatively unbound socket connections. It is important
-// to centralize this client to be passed downstream where api calls
-// can utilize it on a per-download basis.
-//
-// TODO: this should get owned by an API client layer, not the GC itself.
-func LargeItemClient() *http.Client {
-	httpClient := CreateHTTPClient()
-	httpClient.Timeout = 0 // infinite timeout for pulling large files
-
-	return httpClient
-}
-
-// ---------------------------------------------------------------------------
-// Logging Middleware
-// ---------------------------------------------------------------------------
-
-// LoggingMiddleware can be used to log the http request sent by the graph client
-type LoggingMiddleware struct{}
-
-func (handler *LoggingMiddleware) Intercept(
-	pipeline khttp.Pipeline,
-	middlewareIndex int,
-	req *http.Request,
-) (*http.Response, error) {
-	var (
-		ctx       = req.Context()
-		resp, err = pipeline.Next(req, middlewareIndex)
-	)
-
-	if resp == nil {
-		return resp, err
-	}
-
-	if (resp.StatusCode / 100) == 2 {
-		return resp, err
-	}
-
-	// special case for supportability: log all throttling cases.
-	if resp.StatusCode == http.StatusTooManyRequests {
-		logger.Ctx(ctx).Infow("graph api throttling", "method", req.Method, "url", req.URL)
-	}
-
-	if resp.StatusCode != http.StatusTooManyRequests && (resp.StatusCode/100) != 2 {
-		logger.Ctx(ctx).Infow("graph api error", "method", req.Method, "url", req.URL)
-	}
-
-	if logger.DebugAPI || os.Getenv(logGraphRequestsEnvKey) != "" {
-		respDump, _ := httputil.DumpResponse(resp, true)
-
-		metadata := []any{
-			"method", req.Method,
-			"url", req.URL,
-			"requestLen", req.ContentLength,
-			"status", resp.Status,
-			"statusCode", resp.StatusCode,
-			"request", string(respDump),
-		}
-
-		logger.Ctx(ctx).Errorw("non-2xx graph api response", metadata...)
-	}
-
-	return resp, err
-}
@ -1,14 +1,15 @@
-package graph_test
+package graph
 
 import (
+	"net/http"
 	"testing"
+	"time"
 
 	"github.com/microsoftgraph/msgraph-sdk-go/models"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"github.com/stretchr/testify/suite"
 
-	"github.com/alcionai/corso/src/internal/connector/graph"
 	"github.com/alcionai/corso/src/internal/tester"
 	"github.com/alcionai/corso/src/pkg/account"
 )
@ -33,26 +34,54 @@ func (suite *GraphUnitSuite) SetupSuite() {
 
 func (suite *GraphUnitSuite) TestCreateAdapter() {
 	t := suite.T()
-	adpt, err := graph.CreateAdapter(
+	adpt, err := CreateAdapter(
 		suite.credentials.AzureTenantID,
 		suite.credentials.AzureClientID,
-		suite.credentials.AzureClientSecret,
-	)
+		suite.credentials.AzureClientSecret)
 
 	assert.NoError(t, err)
 	assert.NotNil(t, adpt)
 }
 
+func (suite *GraphUnitSuite) TestHTTPClient() {
+	table := []struct {
+		name  string
+		opts  []option
+		check func(*testing.T, *http.Client)
+	}{
+		{
+			name: "no options",
+			opts: []option{},
+			check: func(t *testing.T, c *http.Client) {
+				assert.Equal(t, 90*time.Second, c.Timeout, "default timeout")
+			},
+		},
+		{
+			name: "no timeout",
+			opts: []option{NoTimeout()},
+			check: func(t *testing.T, c *http.Client) {
+				assert.Equal(t, 0, int(c.Timeout), "unlimited timeout")
+			},
+		},
+	}
+	for _, test := range table {
+		suite.T().Run(test.name, func(t *testing.T) {
+			cli := HTTPClient(test.opts...)
+			assert.NotNil(t, cli)
+			test.check(t, cli)
+		})
+	}
+}
+
 func (suite *GraphUnitSuite) TestSerializationEndPoint() {
 	t := suite.T()
-	adpt, err := graph.CreateAdapter(
+	adpt, err := CreateAdapter(
 		suite.credentials.AzureTenantID,
 		suite.credentials.AzureClientID,
-		suite.credentials.AzureClientSecret,
-	)
+		suite.credentials.AzureClientSecret)
 	require.NoError(t, err)
 
-	serv := graph.NewService(adpt)
+	serv := NewService(adpt)
 	email := models.NewMessage()
 	subject := "TestSerializationEndPoint"
 	email.SetSubject(&subject)
@ -66,7 +66,7 @@ func (suite *DisconnectedGraphConnectorSuite) TestBadConnection() {
 
 	for _, test := range table {
 		suite.T().Run(test.name, func(t *testing.T) {
-			gc, err := NewGraphConnector(ctx, graph.LargeItemClient(), test.acct(t), Users)
+			gc, err := NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), test.acct(t), Users)
 			assert.Nil(t, gc, test.name+" failed")
 			assert.NotNil(t, err, test.name+" failed")
 		})
@ -156,7 +156,7 @@ func (suite *GraphConnectorIntegrationSuite) SetupSuite() {
 	tester.MustGetEnvSets(suite.T(), tester.M365AcctCredEnvs)
 
-	suite.connector = loadConnector(ctx, suite.T(), graph.LargeItemClient(), Users)
+	suite.connector = loadConnector(ctx, suite.T(), graph.HTTPClient(graph.NoTimeout()), Users)
 	suite.user = tester.M365UserID(suite.T())
 	suite.acct = tester.NewM365Account(suite.T())
@ -375,12 +375,11 @@ func runRestoreBackupTest(
 	t.Logf(
 		"Restoring collections to %s for resourceOwner(s) %v\n",
 		dest.ContainerName,
-		resourceOwners,
-	)
+		resourceOwners)
 
 	start := time.Now()
 
-	restoreGC := loadConnector(ctx, t, graph.LargeItemClient(), test.resource)
+	restoreGC := loadConnector(ctx, t, graph.HTTPClient(graph.NoTimeout()), test.resource)
 	restoreSel := getSelectorWith(t, test.service, resourceOwners, true)
 	deets, err := restoreGC.RestoreDataCollections(
 		ctx,
@ -394,8 +393,10 @@ func runRestoreBackupTest(
 	status := restoreGC.AwaitStatus()
 	runTime := time.Since(start)
 
-	assert.Equal(t, totalItems, status.ObjectCount, "status.ObjectCount")
-	assert.Equal(t, totalItems, status.Successful, "status.Successful")
+	assert.NoError(t, status.Err, "restored status.Err")
+	assert.Zero(t, status.ErrorCount, "restored status.ErrorCount")
+	assert.Equal(t, totalItems, status.ObjectCount, "restored status.ObjectCount")
+	assert.Equal(t, totalItems, status.Successful, "restored status.Successful")
 	assert.Len(
 		t,
 		deets.Entries,
@ -419,13 +420,15 @@ func runRestoreBackupTest(
 		})
 	}
 
-	backupGC := loadConnector(ctx, t, graph.LargeItemClient(), test.resource)
+	backupGC := loadConnector(ctx, t, graph.HTTPClient(graph.NoTimeout()), test.resource)
 	backupSel := backupSelectorForExpected(t, test.service, expectedDests)
 	t.Logf("Selective backup of %s\n", backupSel)
 
 	start = time.Now()
-	dcs, err := backupGC.DataCollections(ctx, backupSel, nil, control.Options{})
+	dcs, excludes, err := backupGC.DataCollections(ctx, backupSel, nil, control.Options{})
 	require.NoError(t, err)
+	// No excludes yet because this isn't an incremental backup.
+	assert.Empty(t, excludes)
 
 	t.Logf("Backup enumeration complete in %v\n", time.Since(start))
@ -434,8 +437,13 @@ func runRestoreBackupTest(
 	skipped := checkCollections(t, totalItems, expectedData, dcs)
 
 	status = backupGC.AwaitStatus()
-	assert.Equal(t, totalItems+skipped, status.ObjectCount, "status.ObjectCount")
-	assert.Equal(t, totalItems+skipped, status.Successful, "status.Successful")
+
+	assert.NoError(t, status.Err, "backup status.Err")
+	assert.Zero(t, status.ErrorCount, "backup status.ErrorCount")
+	assert.Equalf(t, totalItems+skipped, status.ObjectCount,
+		"backup status.ObjectCount; wanted %d items + %d skipped", totalItems, skipped)
+	assert.Equalf(t, totalItems+skipped, status.Successful,
+		"backup status.Successful; wanted %d items + %d skipped", totalItems, skipped)
 }
 
 func (suite *GraphConnectorIntegrationSuite) TestRestoreAndBackup() {
@ -870,7 +878,7 @@ func (suite *GraphConnectorIntegrationSuite) TestMultiFolderBackupDifferentNames
 		dest.ContainerName,
 	)
 
-	restoreGC := loadConnector(ctx, t, graph.LargeItemClient(), test.resource)
+	restoreGC := loadConnector(ctx, t, graph.HTTPClient(graph.NoTimeout()), test.resource)
 	deets, err := restoreGC.RestoreDataCollections(ctx, suite.acct, restoreSel, dest, collections)
 	require.NoError(t, err)
 	require.NotNil(t, deets)
@ -888,12 +896,14 @@ func (suite *GraphConnectorIntegrationSuite) TestMultiFolderBackupDifferentNames
 
 		// Run a backup and compare its output with what we put in.
 
-		backupGC := loadConnector(ctx, t, graph.LargeItemClient(), test.resource)
+		backupGC := loadConnector(ctx, t, graph.HTTPClient(graph.NoTimeout()), test.resource)
 		backupSel := backupSelectorForExpected(t, test.service, expectedDests)
 		t.Log("Selective backup of", backupSel)
 
-		dcs, err := backupGC.DataCollections(ctx, backupSel, nil, control.Options{})
+		dcs, excludes, err := backupGC.DataCollections(ctx, backupSel, nil, control.Options{})
 		require.NoError(t, err)
+		// No excludes yet because this isn't an incremental backup.
+		assert.Empty(t, excludes)
 
 		t.Log("Backup enumeration complete")
@ -907,3 +917,30 @@ func (suite *GraphConnectorIntegrationSuite) TestMultiFolderBackupDifferentNames
 		})
 	}
 }
+
+// TODO: this should only be run during smoke tests, not part of the standard CI.
+// That's why it's set aside instead of being included in the other test set.
+func (suite *GraphConnectorIntegrationSuite) TestRestoreAndBackup_largeMailAttachment() {
+	subjectText := "Test message for restore with large attachment"
+
+	test := restoreBackupInfo{
+		name:     "EmailsWithLargeAttachments",
+		service:  path.ExchangeService,
+		resource: Users,
+		collections: []colInfo{
+			{
+				pathElements: []string{"Inbox"},
+				category:     path.EmailCategory,
+				items: []itemInfo{
+					{
+						name:      "35mbAttachment",
+						data:      mockconnector.GetMockMessageWithSizedAttachment(subjectText, 35),
+						lookupKey: subjectText,
+					},
+				},
+			},
+		},
+	}
+
+	runRestoreBackupTest(suite.T(), suite.acct, test, suite.connector.tenant, []string{suite.user})
+}
@ -155,6 +155,41 @@ func GetMockMessageWith(
 	return []byte(message)
 }
 
+// GetMockMessageWithSizedAttachment returns a message with an attachment that contains n MB of data.
+// Max limit on n is 35 (imposed by exchange).
+// Serialized with: kiota-serialization-json-go v0.7.1
+func GetMockMessageWithSizedAttachment(subject string, n int) []byte {
+	// I know we said 35, but after base64 encoding, 24mb of base content
+	// bloats up to 34mb (35 balloons to 49). So we have to restrict n
+	// appropriately.
+	if n > 24 {
+		n = 24
+	}
+
+	//nolint:lll
+	messageFmt := "{\"id\":\"AAMkAGZmNjNlYjI3LWJlZWYtNGI4Mi04YjMyLTIxYThkNGQ4NmY1MwBGAAAAAADCNgjhM9QmQYWNcI7hCpPrBwDSEBNbUIB9RL6ePDeF3FIYAAAAAAEMAADSEBNbUIB9RL6ePDeF3FIYAAB4moqeAAA=\"," +
+		"\"@odata.type\":\"#microsoft.graph.message\",\"@odata.etag\":\"W/\\\"CQAAABYAAADSEBNbUIB9RL6ePDeF3FIYAAB3maFQ\\\"\",\"@odata.context\":\"https://graph.microsoft.com/v1.0/$metadata#users('a4a472f8-ccb0-43ec-bf52-3697a91b926c')/messages/$entity\",\"categories\":[]," +
+		"\"changeKey\":\"CQAAABYAAADSEBNbUIB9RL6ePDeF3FIYAAB3maFQ\",\"createdDateTime\":\"2022-09-29T17:39:06Z\",\"lastModifiedDateTime\":\"2022-09-29T17:39:08Z\"," +
+		"\"attachments\":[{\"id\":\"AAMkAGZmNjNlYjI3LWJlZWYtNGI4Mi04YjMyLTIxYThkNGQ4NmY1MwBGAAAAAADCNgjhM9QmQYWNcI7hCpPrBwDSEBNbUIB9RL6ePDeF3FIYAAAAAAEMAADSEBNbUIB9RL6ePDeF3FIYAAB4moqeAAABEgAQANMmZLFhjWJJj4X9mj8piqg=\",\"@odata.type\":\"#microsoft.graph.fileAttachment\",\"@odata.mediaContentType\":\"application/octet-stream\"," +
+		"\"contentType\":\"application/octet-stream\",\"isInline\":false,\"lastModifiedDateTime\":\"2022-09-29T17:39:06Z\",\"name\":\"database.db\",\"size\":%d," +
+		"\"contentBytes\":\"%s\"}]," +
+		"\"bccRecipients\":[],\"body\":{\"content\":\"<html><head>\\r\\n<meta http-equiv=\\\"Content-Type\\\" content=\\\"text/html; charset=utf-8\\\"><style type=\\\"text/css\\\" style=\\\"display:none\\\">\\r\\n<!--\\r\\np\\r\\n\\t{margin-top:0;\\r\\n\\tmargin-bottom:0}\\r\\n-->\\r\\n</style></head><body dir=\\\"ltr\\\"><div class=\\\"elementToProof\\\" style=\\\"font-family:Calibri,Arial,Helvetica,sans-serif; font-size:12pt; color:rgb(0,0,0)\\\"><span class=\\\"x_elementToProof ContentPasted0\\\" data-ogsc=\\\"rgb(0, 0, 0)\\\" data-ogsb=\\\"rgb(255, 255, 255)\\\" style=\\\"font-size:12pt; margin:0px; color:rgb(0,0,0)!important; background-color:rgb(255,255,255)!important\\\">Lidia,</span> <div class=\\\"x_elementToProof\\\" data-ogsc=\\\"rgb(0, 0, 0)\\\" data-ogsb=\\\"rgb(255, 255, 255)\\\" style=\\\"font-size:12pt; margin:0px; color:rgb(0,0,0)!important; background-color:rgb(255,255,255)!important\\\"><br class=\\\"ContentPasted0\\\"></div><div class=\\\"x_elementToProof ContentPasted0\\\" data-ogsc=\\\"rgb(0, 0, 0)\\\" data-ogsb=\\\"rgb(255, 255, 255)\\\" style=\\\"font-size:12pt; margin:0px; color:rgb(0,0,0)!important; background-color:rgb(255,255,255)!important\\\">I hope this message finds you well. I am researching a database construct for next quarter's review. SkyNet will<span class=\\\"ContentPasted0\\\"> </span><span data-ogsb=\\\"rgb(255, 255, 0)\\\" class=\\\"ContentPasted0\\\" style=\\\"margin:0px; background-color:rgb(255,255,0)!important\\\">not</span><span class=\\\"ContentPasted0\\\"> </span>be able to match our database process speeds if we utilize the formulae that are included. </div><div class=\\\"x_elementToProof\\\" data-ogsc=\\\"rgb(0, 0, 0)\\\" data-ogsb=\\\"rgb(255, 255, 255)\\\" style=\\\"font-size:12pt; margin:0px; color:rgb(0,0,0)!important; background-color:rgb(255,255,255)!important\\\"><br class=\\\"ContentPasted0\\\"></div><div class=\\\"x_elementToProof ContentPasted0\\\" data-ogsc=\\\"rgb(0, 0, 0)\\\" data-ogsb=\\\"rgb(255, 255, 255)\\\" style=\\\"font-size:12pt; margin:0px; color:rgb(0,0,0)!important; background-color:rgb(255,255,255)!important\\\">Please give me your thoughts on the implementation.</div><div class=\\\"x_elementToProof\\\" data-ogsc=\\\"rgb(0, 0, 0)\\\" data-ogsb=\\\"rgb(255, 255, 255)\\\" style=\\\"font-size:12pt; margin:0px; color:rgb(0,0,0)!important; background-color:rgb(255,255,255)!important\\\"><br class=\\\"ContentPasted0\\\"></div><div class=\\\"x_elementToProof ContentPasted0\\\" data-ogsc=\\\"rgb(0, 0, 0)\\\" data-ogsb=\\\"rgb(255, 255, 255)\\\" style=\\\"font-size:12pt; margin:0px; color:rgb(0,0,0)!important; background-color:rgb(255,255,255)!important\\\">Best,</div><div class=\\\"x_elementToProof\\\" data-ogsc=\\\"rgb(0, 0, 0)\\\" data-ogsb=\\\"rgb(255, 255, 255)\\\" style=\\\"font-size:12pt; margin:0px; color:rgb(0,0,0)!important; background-color:rgb(255,255,255)!important\\\"><br class=\\\"ContentPasted0\\\"></div><span class=\\\"x_elementToProof ContentPasted0\\\" data-ogsc=\\\"rgb(0, 0, 0)\\\" data-ogsb=\\\"rgb(255, 255, 255)\\\" style=\\\"font-size:12pt; margin:0px; color:rgb(0,0,0)!important; background-color:rgb(255,255,255)!important\\\">Dustin</span><br></div></body></html>\",\"contentType\":\"html\",\"@odata.type\":\"#microsoft.graph.itemBody\"}," +
+		"\"bodyPreview\":\"Lidia,\\r\\n\\r\\nI hope this message finds you well. I am researching a database construct for next quarter's review. SkyNet will not be able to match our database process speeds if we utilize the formulae that are included.\\r\\n\\r\\nPlease give me your thoughts on th\",\"ccRecipients\":[]," +
+		"\"conversationId\":\"AAQkAGZmNjNlYjI3LWJlZWYtNGI4Mi04YjMyLTIxYThkNGQ4NmY1MwAQANPFOcy_BapBghezTzIIldI=\",\"conversationIndex\":\"AQHY1Cpb08U5zL4FqkGCF7NPMgiV0g==\",\"flag\":{\"flagStatus\":\"notFlagged\",\"@odata.type\":\"#microsoft.graph.followupFlag\"}," +
+		"\"from\":{\"emailAddress\":{\"address\":\"dustina@8qzvrj.onmicrosoft.com\",\"name\":\"Dustin Abbot\",\"@odata.type\":\"#microsoft.graph.emailAddress\"},\"@odata.type\":\"#microsoft.graph.recipient\"},\"hasAttachments\":true,\"importance\":\"normal\",\"inferenceClassification\":\"focused\"," +
+		"\"internetMessageId\":\"<SJ0PR17MB56220C509D0006B8CC8FD952C3579@SJ0PR17MB5622.namprd17.prod.outlook.com>\",\"isDeliveryReceiptRequested\":false,\"isDraft\":false,\"isRead\":false,\"isReadReceiptRequested\":false," +
+		"\"parentFolderId\":\"AAMkAGZmNjNlYjI3LWJlZWYtNGI4Mi04YjMyLTIxYThkNGQ4NmY1MwAuAAAAAADCNgjhM9QmQYWNcI7hCpPrAQDSEBNbUIB9RL6ePDeF3FIYAAAAAAEMAAA=\",\"receivedDateTime\":\"2022-09-29T17:39:07Z\",\"replyTo\":[],\"sender\":{\"emailAddress\":{\"address\":\"dustina@8qzvrj.onmicrosoft.com\",\"name\":\"Dustin Abbot\"," +
+		"\"@odata.type\":\"#microsoft.graph.emailAddress\"},\"@odata.type\":\"#microsoft.graph.recipient\"},\"sentDateTime\":\"2022-09-29T17:39:02Z\"," +
+		"\"subject\":\"" + subject + "\",\"toRecipients\":[{\"emailAddress\":{\"address\":\"LidiaH@8qzvrj.onmicrosoft.com\",\"name\":\"Lidia Holloway\",\"@odata.type\":\"#microsoft.graph.emailAddress\"},\"@odata.type\":\"#microsoft.graph.recipient\"}]," +
+		"\"webLink\":\"https://outlook.office365.com/owa/?ItemID=AAMkAGZmNjNlYjI3LWJlZWYtNGI4Mi04YjMyLTIxYThkNGQ4NmY1MwBGAAAAAADCNgjhM9QmQYWNcI7hCpPrBwDSEBNbUIB9RL6ePDeF3FIYAAAAAAEMAADSEBNbUIB9RL6ePDeF3FIYAAB4moqeAAA%3D&exvsurl=1&viewmodel=ReadMessageItem\"}"
+
+	attachmentSize := n * 1024 * 1024 // n MB
+	attachmentBytes := make([]byte, attachmentSize)
+
+	// Attachment content bytes are base64 encoded
+	return []byte(fmt.Sprintf(messageFmt, attachmentSize, base64.StdEncoding.EncodeToString(attachmentBytes)))
+}
+
 // GetMockMessageWithDirectAttachment returns a message with inline attachment
 // Serialized with: kiota-serialization-json-go v0.7.1
 func GetMockMessageWithDirectAttachment(subject string) []byte {
@ -228,28 +263,7 @@ func GetMockMessageWithDirectAttachment(subject string) []byte {
 // used in GetMockMessageWithDirectAttachment
 // Serialized with: kiota-serialization-json-go v0.7.1
 func GetMockMessageWithLargeAttachment(subject string) []byte {
-	//nolint:lll
-	messageFmt := "{\"id\":\"AAMkAGZmNjNlYjI3LWJlZWYtNGI4Mi04YjMyLTIxYThkNGQ4NmY1MwBGAAAAAADCNgjhM9QmQYWNcI7hCpPrBwDSEBNbUIB9RL6ePDeF3FIYAAAAAAEMAADSEBNbUIB9RL6ePDeF3FIYAAB4moqeAAA=\"," +
-		"\"@odata.type\":\"#microsoft.graph.message\",\"@odata.etag\":\"W/\\\"CQAAABYAAADSEBNbUIB9RL6ePDeF3FIYAAB3maFQ\\\"\",\"@odata.context\":\"https://graph.microsoft.com/v1.0/$metadata#users('a4a472f8-ccb0-43ec-bf52-3697a91b926c')/messages/$entity\",\"categories\":[]," +
-		"\"changeKey\":\"CQAAABYAAADSEBNbUIB9RL6ePDeF3FIYAAB3maFQ\",\"createdDateTime\":\"2022-09-29T17:39:06Z\",\"lastModifiedDateTime\":\"2022-09-29T17:39:08Z\"," +
-		"\"attachments\":[{\"id\":\"AAMkAGZmNjNlYjI3LWJlZWYtNGI4Mi04YjMyLTIxYThkNGQ4NmY1MwBGAAAAAADCNgjhM9QmQYWNcI7hCpPrBwDSEBNbUIB9RL6ePDeF3FIYAAAAAAEMAADSEBNbUIB9RL6ePDeF3FIYAAB4moqeAAABEgAQANMmZLFhjWJJj4X9mj8piqg=\",\"@odata.type\":\"#microsoft.graph.fileAttachment\",\"@odata.mediaContentType\":\"application/octet-stream\"," +
-		"\"contentType\":\"application/octet-stream\",\"isInline\":false,\"lastModifiedDateTime\":\"2022-09-29T17:39:06Z\",\"name\":\"database.db\",\"size\":%d," +
-		"\"contentBytes\":\"%s\"}]," +
-		"\"bccRecipients\":[],\"body\":{\"content\":\"<html><head>\\r\\n<meta http-equiv=\\\"Content-Type\\\" content=\\\"text/html; charset=utf-8\\\"><style type=\\\"text/css\\\" style=\\\"display:none\\\">\\r\\n<!--\\r\\np\\r\\n\\t{margin-top:0;\\r\\n\\tmargin-bottom:0}\\r\\n-->\\r\\n</style></head><body dir=\\\"ltr\\\"><div class=\\\"elementToProof\\\" style=\\\"font-family:Calibri,Arial,Helvetica,sans-serif; font-size:12pt; color:rgb(0,0,0)\\\"><span class=\\\"x_elementToProof ContentPasted0\\\" data-ogsc=\\\"rgb(0, 0, 0)\\\" data-ogsb=\\\"rgb(255, 255, 255)\\\" style=\\\"font-size:12pt; margin:0px; color:rgb(0,0,0)!important; background-color:rgb(255,255,255)!important\\\">Lidia,</span> <div class=\\\"x_elementToProof\\\" data-ogsc=\\\"rgb(0, 0, 0)\\\" data-ogsb=\\\"rgb(255, 255, 255)\\\" style=\\\"font-size:12pt; margin:0px; color:rgb(0,0,0)!important; background-color:rgb(255,255,255)!important\\\"><br class=\\\"ContentPasted0\\\"></div><div class=\\\"x_elementToProof ContentPasted0\\\" data-ogsc=\\\"rgb(0, 0, 0)\\\" data-ogsb=\\\"rgb(255, 255, 255)\\\" style=\\\"font-size:12pt; margin:0px; color:rgb(0,0,0)!important; background-color:rgb(255,255,255)!important\\\">I hope this message finds you well. I am researching a database construct for next quarter's review. SkyNet will<span class=\\\"ContentPasted0\\\"> </span><span data-ogsb=\\\"rgb(255, 255, 0)\\\" class=\\\"ContentPasted0\\\" style=\\\"margin:0px; background-color:rgb(255,255,0)!important\\\">not</span><span class=\\\"ContentPasted0\\\"> </span>be able to match our database process speeds if we utilize the formulae that are included. </div><div class=\\\"x_elementToProof\\\" data-ogsc=\\\"rgb(0, 0, 0)\\\" data-ogsb=\\\"rgb(255, 255, 255)\\\" style=\\\"font-size:12pt; margin:0px; color:rgb(0,0,0)!important; background-color:rgb(255,255,255)!important\\\"><br class=\\\"ContentPasted0\\\"></div><div class=\\\"x_elementToProof ContentPasted0\\\" data-ogsc=\\\"rgb(0, 0, 0)\\\" data-ogsb=\\\"rgb(255, 255, 255)\\\" style=\\\"font-size:12pt; margin:0px; color:rgb(0,0,0)!important; background-color:rgb(255,255,255)!important\\\">Please give me your thoughts on the implementation.</div><div class=\\\"x_elementToProof\\\" data-ogsc=\\\"rgb(0, 0, 0)\\\" data-ogsb=\\\"rgb(255, 255, 255)\\\" style=\\\"font-size:12pt; margin:0px; color:rgb(0,0,0)!important; background-color:rgb(255,255,255)!important\\\"><br class=\\\"ContentPasted0\\\"></div><div class=\\\"x_elementToProof ContentPasted0\\\" data-ogsc=\\\"rgb(0, 0, 0)\\\" data-ogsb=\\\"rgb(255, 255, 255)\\\" style=\\\"font-size:12pt; margin:0px; color:rgb(0,0,0)!important; background-color:rgb(255,255,255)!important\\\">Best,</div><div class=\\\"x_elementToProof\\\" data-ogsc=\\\"rgb(0, 0, 0)\\\" data-ogsb=\\\"rgb(255, 255, 255)\\\" style=\\\"font-size:12pt; margin:0px; color:rgb(0,0,0)!important; background-color:rgb(255,255,255)!important\\\"><br class=\\\"ContentPasted0\\\"></div><span class=\\\"x_elementToProof ContentPasted0\\\" data-ogsc=\\\"rgb(0, 0, 0)\\\" data-ogsb=\\\"rgb(255, 255, 255)\\\" style=\\\"font-size:12pt; margin:0px; color:rgb(0,0,0)!important; background-color:rgb(255,255,255)!important\\\">Dustin</span><br></div></body></html>\",\"contentType\":\"html\",\"@odata.type\":\"#microsoft.graph.itemBody\"}," +
-		"\"bodyPreview\":\"Lidia,\\r\\n\\r\\nI hope this message finds you well. I am researching a database construct for next quarter's review. SkyNet will not be able to match our database process speeds if we utilize the formulae that are included.\\r\\n\\r\\nPlease give me your thoughts on th\",\"ccRecipients\":[]," +
-		"\"conversationId\":\"AAQkAGZmNjNlYjI3LWJlZWYtNGI4Mi04YjMyLTIxYThkNGQ4NmY1MwAQANPFOcy_BapBghezTzIIldI=\",\"conversationIndex\":\"AQHY1Cpb08U5zL4FqkGCF7NPMgiV0g==\",\"flag\":{\"flagStatus\":\"notFlagged\",\"@odata.type\":\"#microsoft.graph.followupFlag\"}," +
-		"\"from\":{\"emailAddress\":{\"address\":\"dustina@8qzvrj.onmicrosoft.com\",\"name\":\"Dustin Abbot\",\"@odata.type\":\"#microsoft.graph.emailAddress\"},\"@odata.type\":\"#microsoft.graph.recipient\"},\"hasAttachments\":true,\"importance\":\"normal\",\"inferenceClassification\":\"focused\"," +
-		"\"internetMessageId\":\"<SJ0PR17MB56220C509D0006B8CC8FD952C3579@SJ0PR17MB5622.namprd17.prod.outlook.com>\",\"isDeliveryReceiptRequested\":false,\"isDraft\":false,\"isRead\":false,\"isReadReceiptRequested\":false," +
-		"\"parentFolderId\":\"AAMkAGZmNjNlYjI3LWJlZWYtNGI4Mi04YjMyLTIxYThkNGQ4NmY1MwAuAAAAAADCNgjhM9QmQYWNcI7hCpPrAQDSEBNbUIB9RL6ePDeF3FIYAAAAAAEMAAA=\",\"receivedDateTime\":\"2022-09-29T17:39:07Z\",\"replyTo\":[],\"sender\":{\"emailAddress\":{\"address\":\"dustina@8qzvrj.onmicrosoft.com\",\"name\":\"Dustin Abbot\"," +
-		"\"@odata.type\":\"#microsoft.graph.emailAddress\"},\"@odata.type\":\"#microsoft.graph.recipient\"},\"sentDateTime\":\"2022-09-29T17:39:02Z\"," +
-		"\"subject\":\"" + subject + "\",\"toRecipients\":[{\"emailAddress\":{\"address\":\"LidiaH@8qzvrj.onmicrosoft.com\",\"name\":\"Lidia Holloway\",\"@odata.type\":\"#microsoft.graph.emailAddress\"},\"@odata.type\":\"#microsoft.graph.recipient\"}]," +
-		"\"webLink\":\"https://outlook.office365.com/owa/?ItemID=AAMkAGZmNjNlYjI3LWJlZWYtNGI4Mi04YjMyLTIxYThkNGQ4NmY1MwBGAAAAAADCNgjhM9QmQYWNcI7hCpPrBwDSEBNbUIB9RL6ePDeF3FIYAAAAAAEMAADSEBNbUIB9RL6ePDeF3FIYAAB4moqeAAA%3D&exvsurl=1&viewmodel=ReadMessageItem\"}"
-
-	attachmentSize := 3 * 1024 * 1024 // 3 MB
-	attachmentBytes := make([]byte, attachmentSize)
-
-	// Attachment content bytes are base64 encoded
-	return []byte(fmt.Sprintf(messageFmt, attachmentSize, base64.StdEncoding.EncodeToString([]byte(attachmentBytes))))
+	return GetMockMessageWithSizedAttachment(subject, 3)
 }
 
 // GetMessageWithOneDriveAttachment returns a message with an OneDrive attachment represented in bytes
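Note: the n > 24 cap falls out of base64 arithmetic. Base64 expands payloads by 4/3 (plus padding), so the encoded attachment must stay under Exchange's roughly 35mb request ceiling; the slightly larger figures in the code comment (34mb and 49mb) presumably also count the surrounding JSON overhead. A quick stdlib check:

package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	const mb = 1024 * 1024
	for _, n := range []int{24, 35} {
		// EncodedLen reports the exact base64 output size for n MB of raw bytes.
		enc := base64.StdEncoding.EncodedLen(n * mb)
		fmt.Printf("%dmb raw -> %.1fmb base64-encoded\n", n, float64(enc)/float64(mb))
	}
	// 24mb raw -> 32.0mb encoded (fits under the limit)
	// 35mb raw -> 46.7mb encoded (too large)
}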
105  src/internal/connector/onedrive/api/drive.go  Normal file
@ -0,0 +1,105 @@
+package api
+
+import (
+	"context"
+
+	"github.com/microsoftgraph/msgraph-sdk-go/models"
+	mssites "github.com/microsoftgraph/msgraph-sdk-go/sites"
+	msusers "github.com/microsoftgraph/msgraph-sdk-go/users"
+	"github.com/pkg/errors"
+
+	"github.com/alcionai/corso/src/internal/connector/graph"
+	"github.com/alcionai/corso/src/internal/connector/graph/api"
+)
+
+type userDrivePager struct {
+	gs      graph.Servicer
+	builder *msusers.ItemDrivesRequestBuilder
+	options *msusers.ItemDrivesRequestBuilderGetRequestConfiguration
+}
+
+func NewUserDrivePager(
+	gs graph.Servicer,
+	userID string,
+	fields []string,
+) *userDrivePager {
+	requestConfig := &msusers.ItemDrivesRequestBuilderGetRequestConfiguration{
+		QueryParameters: &msusers.ItemDrivesRequestBuilderGetQueryParameters{
+			Select: fields,
+		},
+	}
+
+	res := &userDrivePager{
+		gs:      gs,
+		options: requestConfig,
+		builder: gs.Client().UsersById(userID).Drives(),
+	}
+
+	return res
+}
+
+func (p *userDrivePager) GetPage(ctx context.Context) (api.PageLinker, error) {
+	return p.builder.Get(ctx, p.options)
+}
+
+func (p *userDrivePager) SetNext(link string) {
+	p.builder = msusers.NewItemDrivesRequestBuilder(link, p.gs.Adapter())
+}
+
+func (p *userDrivePager) ValuesIn(l api.PageLinker) ([]models.Driveable, error) {
+	page, ok := l.(interface{ GetValue() []models.Driveable })
+	if !ok {
+		return nil, errors.Errorf(
+			"response of type [%T] does not comply with GetValue() interface",
+			l,
+		)
+	}
+
+	return page.GetValue(), nil
+}
+
+type siteDrivePager struct {
+	gs      graph.Servicer
+	builder *mssites.ItemDrivesRequestBuilder
+	options *mssites.ItemDrivesRequestBuilderGetRequestConfiguration
+}
+
+func NewSiteDrivePager(
+	gs graph.Servicer,
+	siteID string,
+	fields []string,
+) *siteDrivePager {
+	requestConfig := &mssites.ItemDrivesRequestBuilderGetRequestConfiguration{
+		QueryParameters: &mssites.ItemDrivesRequestBuilderGetQueryParameters{
+			Select: fields,
+		},
+	}
+
+	res := &siteDrivePager{
+		gs:      gs,
+		options: requestConfig,
+		builder: gs.Client().SitesById(siteID).Drives(),
+	}
+
+	return res
+}
+
+func (p *siteDrivePager) GetPage(ctx context.Context) (api.PageLinker, error) {
+	return p.builder.Get(ctx, p.options)
+}
+
+func (p *siteDrivePager) SetNext(link string) {
+	p.builder = mssites.NewItemDrivesRequestBuilder(link, p.gs.Adapter())
+}
+
+func (p *siteDrivePager) ValuesIn(l api.PageLinker) ([]models.Driveable, error) {
+	page, ok := l.(interface{ GetValue() []models.Driveable })
+	if !ok {
+		return nil, errors.Errorf(
+			"response of type [%T] does not comply with GetValue() interface",
+			l,
+		)
+	}
+
+	return page.GetValue(), nil
+}
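Note: both pagers share a GetPage / SetNext / ValuesIn contract, where the caller loops until the response stops advertising a next link. A minimal sketch of that loop, using simplified stand-ins for the msgraph response models (the PageLinker here assumes the msgraph-style GetOdataNextLink accessor):

package main

import (
	"context"
	"fmt"
)

type PageLinker interface{ GetOdataNextLink() *string }

type page struct {
	values []string
	next   *string
}

func (p page) GetOdataNextLink() *string { return p.next }

// fakePager mimics userDrivePager/siteDrivePager against canned data.
type fakePager struct {
	pages []page
	idx   int
}

func (p *fakePager) GetPage(ctx context.Context) (PageLinker, error) { return p.pages[p.idx], nil }
func (p *fakePager) SetNext(link string)                             { p.idx++ }
func (p *fakePager) ValuesIn(l PageLinker) ([]string, error)         { return l.(page).values, nil }

func main() {
	next := "page2"
	pgr := &fakePager{pages: []page{
		{values: []string{"driveA"}, next: &next},
		{values: []string{"driveB"}},
	}}

	var drives []string
	ctx := context.Background()

	for {
		resp, err := pgr.GetPage(ctx)
		if err != nil {
			panic(err)
		}

		vs, _ := pgr.ValuesIn(resp)
		drives = append(drives, vs...)

		link := resp.GetOdataNextLink()
		if link == nil {
			break
		}
		pgr.SetNext(*link)
	}

	fmt.Println(drives) // [driveA driveB]
}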
@ -191,7 +191,7 @@ func (oc *Collection) populateItems(ctx context.Context) {
 	folderProgress, colCloser := observe.ProgressWithCount(
 		ctx,
 		observe.ItemQueueMsg,
-		"/"+parentPathString,
+		observe.PII("/"+parentPathString),
 		int64(len(oc.driveItems)))
 	defer colCloser()
 	defer close(folderProgress)
@@ -223,52 +223,89 @@ func (oc *Collection) populateItems(ctx context.Context) {
 			defer wg.Done()
 			defer func() { <-semaphoreCh }()
 
-			// Read the item
 			var (
+				itemID   = *item.GetId()
+				itemName = *item.GetName()
+				itemSize = *item.GetSize()
 				itemInfo details.ItemInfo
-				itemData io.ReadCloser
-				err      error
-			)
-
-			for i := 1; i <= maxRetries; i++ {
-				itemInfo, itemData, err = oc.itemReader(oc.itemClient, item)
-				if err == nil || graph.IsErrTimeout(err) == nil {
-					// retry on Timeout type errors, break otherwise.
-					break
-				}
-
-				if i < maxRetries {
-					time.Sleep(1 * time.Second)
-				}
-			}
-
-			if err != nil {
-				errUpdater(*item.GetId(), err)
-				return
-			}
-
-			var (
-				itemName string
-				itemSize int64
 			)
 
 			switch oc.source {
 			case SharePointSource:
+				itemInfo.SharePoint = sharePointItemInfo(item, itemSize)
 				itemInfo.SharePoint.ParentPath = parentPathString
-				itemName = itemInfo.SharePoint.ItemName
-				itemSize = itemInfo.SharePoint.Size
 			default:
+				itemInfo.OneDrive = oneDriveItemInfo(item, itemSize)
 				itemInfo.OneDrive.ParentPath = parentPathString
-				itemName = itemInfo.OneDrive.ItemName
-				itemSize = itemInfo.OneDrive.Size
 			}
 
+			// Construct a new lazy readCloser to feed to the collection consumer.
+			// This ensures that downloads won't be attempted unless that consumer
+			// attempts to read bytes. Assumption is that kopia will check things
+			// like file modtimes before attempting to read.
 			itemReader := lazy.NewLazyReadCloser(func() (io.ReadCloser, error) {
-				progReader, closer := observe.ItemProgress(ctx, itemData, observe.ItemBackupMsg, itemName, itemSize)
+				// Read the item
+				var (
+					itemData io.ReadCloser
+					err      error
+				)
+
+				for i := 1; i <= maxRetries; i++ {
+					_, itemData, err = oc.itemReader(oc.itemClient, item)
+					if err == nil {
+						break
+					}
+
+					if graph.IsErrUnauthorized(err) {
+						// assume unauthorized requests are a sign of an expired
+						// jwt token, and that we've overrun the available window
+						// to download the actual file. Re-downloading the item
+						// will refresh that download url.
+						di, diErr := getDriveItem(ctx, oc.service, oc.driveID, itemID)
+						if diErr != nil {
+							err = errors.Wrap(diErr, "retrieving expired item")
+							break
+						}
+
+						item = di
+
+						continue
+
+					} else if !graph.IsErrTimeout(err) && !graph.IsErrThrottled(err) && !graph.IsSericeUnavailable(err) {
+						// TODO: graphAPI will provides headers that state the duration to wait
+						// in order to succeed again. The one second sleep won't cut it here.
+						//
+						// for all non-timeout, non-unauth, non-throttling errors, do not retry
+						break
+					}
+
+					if i < maxRetries {
+						time.Sleep(1 * time.Second)
+					}
+				}
+
+				// check for errors following retries
+				if err != nil {
+					errUpdater(itemID, err)
+					return nil, err
+				}
+
+				// display/log the item download
+				progReader, closer := observe.ItemProgress(ctx, itemData, observe.ItemBackupMsg, observe.PII(itemName), itemSize)
 				go closer()
 
 				return progReader, nil
 			})
 
+			// This can cause inaccurate counts. Right now it counts all the items
+			// we intend to read. Errors within the lazy readCloser will create a
+			// conflict: an item is both successful and erroneous. But the async
+			// control to fix that is more error-prone than helpful.
+			//
+			// TODO: transform this into a stats bus so that async control of stats
+			// aggregation is handled at the backup level, not at the item iteration
+			// level.
+			//
 			// Item read successfully, add to collection
 			atomic.AddInt64(&itemsRead, 1)
 			// byteCount iteration
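The hunk above moves the download, and its retry handling, inside a lazy read-closer, so no bytes are fetched unless the consumer actually reads. The corso lazy package itself is not shown in this diff; the standalone sketch below shows one plausible shape such a reader can take (a sync.Once guarding a deferred open), under that assumption only.

package main

import (
	"fmt"
	"io"
	"strings"
	"sync"
)

// lazyReadCloser defers the expensive open (here standing in for a
// download) until the first Read, mirroring what lazy.NewLazyReadCloser
// provides in the diff above.
type lazyReadCloser struct {
	open func() (io.ReadCloser, error)
	once sync.Once
	rc   io.ReadCloser
	err  error
}

func (l *lazyReadCloser) Read(p []byte) (int, error) {
	l.once.Do(func() { l.rc, l.err = l.open() })
	if l.err != nil {
		return 0, l.err
	}

	return l.rc.Read(p)
}

func (l *lazyReadCloser) Close() error {
	if l.rc == nil {
		return nil // never opened, nothing to close
	}

	return l.rc.Close()
}

func main() {
	lrc := &lazyReadCloser{open: func() (io.ReadCloser, error) {
		fmt.Println("download happens now")
		return io.NopCloser(strings.NewReader("payload")), nil
	}}

	b, _ := io.ReadAll(lrc) // triggers the open on first Read
	fmt.Println(string(b))
	lrc.Close()
}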
@@ -62,17 +62,25 @@ func (suite *CollectionUnitTestSuite) TestCollection() {
 		now = time.Now()
 	)
 
+	type nst struct {
+		name string
+		size int64
+		time time.Time
+	}
+
 	table := []struct {
 		name         string
 		numInstances int
 		source       driveSource
 		itemReader   itemReaderFunc
+		itemDeets    nst
 		infoFrom     func(*testing.T, details.ItemInfo) (string, string)
 	}{
 		{
 			name:         "oneDrive, no duplicates",
 			numInstances: 1,
 			source:       OneDriveSource,
+			itemDeets:    nst{testItemName, 42, now},
 			itemReader: func(*http.Client, models.DriveItemable) (details.ItemInfo, io.ReadCloser, error) {
 				return details.ItemInfo{OneDrive: &details.OneDriveInfo{ItemName: testItemName, Modified: now}},
 					io.NopCloser(bytes.NewReader(testItemData)),
@@ -87,6 +95,7 @@ func (suite *CollectionUnitTestSuite) TestCollection() {
 			name:         "oneDrive, duplicates",
 			numInstances: 3,
 			source:       OneDriveSource,
+			itemDeets:    nst{testItemName, 42, now},
 			itemReader: func(*http.Client, models.DriveItemable) (details.ItemInfo, io.ReadCloser, error) {
 				return details.ItemInfo{OneDrive: &details.OneDriveInfo{ItemName: testItemName, Modified: now}},
 					io.NopCloser(bytes.NewReader(testItemData)),
@@ -101,6 +110,7 @@ func (suite *CollectionUnitTestSuite) TestCollection() {
 			name:         "sharePoint, no duplicates",
 			numInstances: 1,
 			source:       SharePointSource,
+			itemDeets:    nst{testItemName, 42, now},
 			itemReader: func(*http.Client, models.DriveItemable) (details.ItemInfo, io.ReadCloser, error) {
 				return details.ItemInfo{SharePoint: &details.SharePointInfo{ItemName: testItemName, Modified: now}},
 					io.NopCloser(bytes.NewReader(testItemData)),
@@ -115,6 +125,7 @@ func (suite *CollectionUnitTestSuite) TestCollection() {
 			name:         "sharePoint, duplicates",
 			numInstances: 3,
 			source:       SharePointSource,
+			itemDeets:    nst{testItemName, 42, now},
 			itemReader: func(*http.Client, models.DriveItemable) (details.ItemInfo, io.ReadCloser, error) {
 				return details.ItemInfo{SharePoint: &details.SharePointInfo{ItemName: testItemName, Modified: now}},
 					io.NopCloser(bytes.NewReader(testItemData)),
@@ -140,7 +151,7 @@ func (suite *CollectionUnitTestSuite) TestCollection() {
 			require.NoError(t, err)
 
 			coll := NewCollection(
-				graph.LargeItemClient(),
+				graph.HTTPClient(graph.NoTimeout()),
 				folderPath,
 				"drive-id",
 				suite,
@@ -153,6 +164,10 @@ func (suite *CollectionUnitTestSuite) TestCollection() {
 			// Set a item reader, add an item and validate we get the item back
 			mockItem := models.NewDriveItem()
 			mockItem.SetId(&testItemID)
+			mockItem.SetName(&test.itemDeets.name)
+			mockItem.SetSize(&test.itemDeets.size)
+			mockItem.SetCreatedDateTime(&test.itemDeets.time)
+			mockItem.SetLastModifiedDateTime(&test.itemDeets.time)
 
 			for i := 0; i < test.numInstances; i++ {
 				coll.Add(mockItem)
@@ -169,27 +184,26 @@ func (suite *CollectionUnitTestSuite) TestCollection() {
 
 			wg.Wait()
 
-			// Expect only 1 item
-			require.Len(t, readItems, 1)
-			require.Equal(t, 1, collStatus.ObjectCount)
-			require.Equal(t, 1, collStatus.Successful)
-
 			// Validate item info and data
 			readItem := readItems[0]
 			readItemInfo := readItem.(data.StreamInfo)
 
+			readData, err := io.ReadAll(readItem.ToReader())
+			require.NoError(t, err)
+			assert.Equal(t, testItemData, readData)
+
+			// Expect only 1 item
+			require.Len(t, readItems, 1)
+			require.Equal(t, 1, collStatus.ObjectCount, "items iterated")
+			require.Equal(t, 1, collStatus.Successful, "items successful")
+
 			assert.Equal(t, testItemName, readItem.UUID())
 
 			require.Implements(t, (*data.StreamModTime)(nil), readItem)
 			mt := readItem.(data.StreamModTime)
 			assert.Equal(t, now, mt.ModTime())
 
-			readData, err := io.ReadAll(readItem.ToReader())
-			require.NoError(t, err)
-
 			name, parentPath := test.infoFrom(t, readItemInfo.Info())
 
-			assert.Equal(t, testItemData, readData)
 			assert.Equal(t, testItemName, name)
 			assert.Equal(t, driveFolderPath, parentPath)
 		})
@@ -197,6 +211,12 @@ func (suite *CollectionUnitTestSuite) TestCollection() {
 }
 
 func (suite *CollectionUnitTestSuite) TestCollectionReadError() {
+	var (
+		name       = "name"
+		size int64 = 42
+		now        = time.Now()
+	)
+
 	table := []struct {
 		name   string
 		source driveSource
@@ -225,7 +245,7 @@ func (suite *CollectionUnitTestSuite) TestCollectionReadError() {
 			require.NoError(t, err)
 
 			coll := NewCollection(
-				graph.LargeItemClient(),
+				graph.HTTPClient(graph.NoTimeout()),
 				folderPath,
 				"fakeDriveID",
 				suite,
@@ -235,18 +255,27 @@ func (suite *CollectionUnitTestSuite) TestCollectionReadError() {
 
 			mockItem := models.NewDriveItem()
 			mockItem.SetId(&testItemID)
+			mockItem.SetName(&name)
+			mockItem.SetSize(&size)
+			mockItem.SetCreatedDateTime(&now)
+			mockItem.SetLastModifiedDateTime(&now)
 			coll.Add(mockItem)
 
 			coll.itemReader = func(*http.Client, models.DriveItemable) (details.ItemInfo, io.ReadCloser, error) {
 				return details.ItemInfo{}, nil, assert.AnError
 			}
 
-			coll.Items()
+			collItem, ok := <-coll.Items()
+			assert.True(t, ok)
+
+			_, err = io.ReadAll(collItem.ToReader())
+			assert.Error(t, err)
+
 			wg.Wait()
 
 			// Expect no items
-			require.Equal(t, 1, collStatus.ObjectCount)
-			require.Equal(t, 0, collStatus.Successful)
+			require.Equal(t, 1, collStatus.ObjectCount, "only one object should be counted")
+			require.Equal(t, 1, collStatus.Successful, "TODO: should be 0, but allowing 1 to reduce async management")
 		})
 	}
 }
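A consequence of the lazy reader, visible in the updated TestCollectionReadError above: enumerating coll.Items() now succeeds even when the item body is unreadable, and the failure only surfaces once the consumer reads. Consumers therefore check errors per read, roughly as in the hedged sketch below (coll and ToReader come from this diff; the loop itself is illustrative, not part of the commit).

for item := range coll.Items() {
	if _, err := io.ReadAll(item.ToReader()); err != nil {
		// the download failed inside the lazy reader; record it per item
	}
}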
@@ -92,12 +92,20 @@ func NewCollections(
 	}
 }
 
-// Retrieves drive data as set of `data.Collections`
-func (c *Collections) Get(ctx context.Context) ([]data.Collection, error) {
+// Retrieves drive data as set of `data.Collections` and a set of item names to
+// be excluded from the upcoming backup.
+func (c *Collections) Get(ctx context.Context) ([]data.Collection, map[string]struct{}, error) {
 	// Enumerate drives for the specified resourceOwner
-	drives, err := drives(ctx, c.service, c.resourceOwner, c.source)
+	pager, err := PagerForSource(c.source, c.service, c.resourceOwner, nil)
 	if err != nil {
-		return nil, err
+		return nil, nil, err
+	}
+
+	retry := c.source == OneDriveSource
+
+	drives, err := drives(ctx, pager, retry)
+	if err != nil {
+		return nil, nil, err
 	}
 
 	var (
@@ -126,7 +134,7 @@ func (c *Collections) Get(ctx context.Context) ([]data.Collection, error) {
 		c.UpdateCollections,
 	)
 	if err != nil {
-		return nil, err
+		return nil, nil, err
 	}
 
 	if len(delta) > 0 {
@@ -144,7 +152,7 @@ func (c *Collections) Get(ctx context.Context) ([]data.Collection, error) {
 		maps.Copy(excludedItems, excluded)
 	}
 
-	observe.Message(ctx, fmt.Sprintf("Discovered %d items to backup", c.NumItems))
+	observe.Message(ctx, observe.Safe(fmt.Sprintf("Discovered %d items to backup", c.NumItems)))
 
 	// Add an extra for the metadata collection.
 	collections := make([]data.Collection, 0, len(c.CollectionMap)+1)
@@ -178,7 +186,8 @@ func (c *Collections) Get(ctx context.Context) ([]data.Collection, error) {
 		collections = append(collections, metadata)
 	}
 
-	return collections, nil
+	// TODO(ashmrtn): Track and return the set of items to exclude.
+	return collections, nil, nil
 }
 
 // UpdateCollections initializes and adds the provided drive items to Collections
@@ -588,7 +588,7 @@ func (suite *OneDriveCollectionsSuite) TestUpdateCollections() {
 			outputFolderMap := map[string]string{}
 			maps.Copy(outputFolderMap, tt.inputFolderMap)
 			c := NewCollections(
-				graph.LargeItemClient(),
+				graph.HTTPClient(graph.NoTimeout()),
 				tenant,
 				user,
 				OneDriveSource,
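Collections.Get now returns an exclusion set alongside the collections, and Wrapper.BackupCollections gains a matching globalExcludeSet parameter later in this commit. A hedged sketch of the intended wiring (the identifiers come from this diff, but the surrounding call site is illustrative, not code from the commit):

cols, excludes, err := colls.Get(ctx)
if err != nil {
	return err
}

// excludes is a map[string]struct{} of item names to drop from the
// upcoming backup; pass it straight through to kopia.
stats, deets, prevPaths, err := w.BackupCollections(
	ctx, prevSnaps, cols, excludes, tags, true)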
@@ -10,11 +10,12 @@ import (
 	msdrives "github.com/microsoftgraph/msgraph-sdk-go/drives"
 	"github.com/microsoftgraph/msgraph-sdk-go/models"
 	"github.com/microsoftgraph/msgraph-sdk-go/models/odataerrors"
-	"github.com/microsoftgraph/msgraph-sdk-go/sites"
 	"github.com/pkg/errors"
 	"golang.org/x/exp/maps"
 
 	"github.com/alcionai/corso/src/internal/connector/graph"
+	gapi "github.com/alcionai/corso/src/internal/connector/graph/api"
+	"github.com/alcionai/corso/src/internal/connector/onedrive/api"
 	"github.com/alcionai/corso/src/internal/connector/support"
 	"github.com/alcionai/corso/src/pkg/logger"
 )
@@ -22,86 +23,106 @@ import (
 var errFolderNotFound = errors.New("folder not found")
 
 const (
+	getDrivesRetries = 3
+
 	// nextLinkKey is used to find the next link in a paged
 	// graph response
 	nextLinkKey             = "@odata.nextLink"
 	itemChildrenRawURLFmt   = "https://graph.microsoft.com/v1.0/drives/%s/items/%s/children"
 	itemByPathRawURLFmt     = "https://graph.microsoft.com/v1.0/drives/%s/items/%s:/%s"
 	itemNotFoundErrorCode   = "itemNotFound"
 	userMysiteURLNotFound   = "BadRequest Unable to retrieve user's mysite URL"
 	userMysiteNotFound      = "ResourceNotFound User's mysite not found"
+	contextDeadlineExceeded = "context deadline exceeded"
 )
 
-// Enumerates the drives for the specified user
-func drives(
-	ctx context.Context,
-	service graph.Servicer,
-	resourceOwner string,
+type drivePager interface {
+	GetPage(context.Context) (gapi.PageLinker, error)
+	SetNext(nextLink string)
+	ValuesIn(gapi.PageLinker) ([]models.Driveable, error)
+}
+
+func PagerForSource(
 	source driveSource,
-) ([]models.Driveable, error) {
+	servicer graph.Servicer,
+	resourceOwner string,
+	fields []string,
+) (drivePager, error) {
 	switch source {
 	case OneDriveSource:
-		return userDrives(ctx, service, resourceOwner)
+		return api.NewUserDrivePager(servicer, resourceOwner, fields), nil
 	case SharePointSource:
-		return siteDrives(ctx, service, resourceOwner)
+		return api.NewSiteDrivePager(servicer, resourceOwner, fields), nil
 	default:
 		return nil, errors.Errorf("unrecognized drive data source")
 	}
 }
 
-func siteDrives(ctx context.Context, service graph.Servicer, site string) ([]models.Driveable, error) {
-	options := &sites.ItemDrivesRequestBuilderGetRequestConfiguration{
-		QueryParameters: &sites.ItemDrivesRequestBuilderGetQueryParameters{
-			Select: []string{"id", "name", "weburl", "system"},
-		},
-	}
-
-	r, err := service.Client().SitesById(site).Drives().Get(ctx, options)
-	if err != nil {
-		return nil, errors.Wrapf(err, "failed to retrieve site drives. site: %s, details: %s",
-			site, support.ConnectorStackErrorTrace(err))
-	}
-
-	return r.GetValue(), nil
-}
-
-func userDrives(ctx context.Context, service graph.Servicer, user string) ([]models.Driveable, error) {
+func drives(
+	ctx context.Context,
+	pager drivePager,
+	retry bool,
+) ([]models.Driveable, error) {
 	var (
-		numberOfRetries = 3
-		r               models.DriveCollectionResponseable
 		err             error
+		page            gapi.PageLinker
+		numberOfRetries = getDrivesRetries
+		drives          = []models.Driveable{}
 	)
 
-	// Retry Loop for Drive retrieval. Request can timeout
-	for i := 0; i <= numberOfRetries; i++ {
-		r, err = service.Client().UsersById(user).Drives().Get(ctx, nil)
-		if err != nil {
-			detailedError := support.ConnectorStackErrorTrace(err)
-			if strings.Contains(detailedError, userMysiteURLNotFound) ||
-				strings.Contains(detailedError, userMysiteNotFound) {
-				logger.Ctx(ctx).Infof("User %s does not have a drive", user)
-				return make([]models.Driveable, 0), nil // no license
-			}
-
-			if strings.Contains(detailedError, "context deadline exceeded") && i < numberOfRetries {
-				time.Sleep(time.Duration(3*(i+1)) * time.Second)
-				continue
-			}
-
-			return nil, errors.Wrapf(
-				err,
-				"failed to retrieve user drives. user: %s, details: %s",
-				user,
-				detailedError,
-			)
-		}
-
-		break
+	if !retry {
+		numberOfRetries = 0
 	}
 
-	logger.Ctx(ctx).Debugf("Found %d drives for user %s", len(r.GetValue()), user)
-
-	return r.GetValue(), nil
+	// Loop through all pages returned by Graph API.
+	for {
+		// Retry Loop for Drive retrieval. Request can timeout
+		for i := 0; i <= numberOfRetries; i++ {
+			page, err = pager.GetPage(ctx)
+			if err != nil {
+				// Various error handling. May return an error or perform a retry.
+				detailedError := support.ConnectorStackErrorTrace(err)
+				if strings.Contains(detailedError, userMysiteURLNotFound) ||
+					strings.Contains(detailedError, userMysiteNotFound) {
+					logger.Ctx(ctx).Infof("resource owner does not have a drive")
+					return make([]models.Driveable, 0), nil // no license or drives.
+				}
+
+				if strings.Contains(detailedError, contextDeadlineExceeded) && i < numberOfRetries {
+					time.Sleep(time.Duration(3*(i+1)) * time.Second)
+					continue
+				}
+
+				return nil, errors.Wrapf(
+					err,
+					"failed to retrieve drives. details: %s",
+					detailedError,
+				)
+			}
+
+			// No error encountered, break the retry loop so we can extract results
+			// and see if there's another page to fetch.
+			break
+		}
+
+		tmp, err := pager.ValuesIn(page)
+		if err != nil {
+			return nil, errors.Wrap(err, "extracting drives from response")
+		}
+
+		drives = append(drives, tmp...)
+
+		nextLink := gapi.NextLink(page)
+		if len(nextLink) == 0 {
+			break
+		}
+
+		pager.SetNext(nextLink)
+	}
+
+	logger.Ctx(ctx).Debugf("Found %d drives", len(drives))
+
+	return drives, nil
 }
 
 // itemCollector functions collect the items found in a drive
@@ -284,10 +305,10 @@ func (op *Displayable) GetDisplayName() *string {
 func GetAllFolders(
 	ctx context.Context,
 	gs graph.Servicer,
-	userID string,
+	pager drivePager,
 	prefix string,
 ) ([]*Displayable, error) {
-	drives, err := drives(ctx, gs, userID, OneDriveSource)
+	drives, err := drives(ctx, pager, true)
 	if err != nil {
 		return nil, errors.Wrap(err, "getting OneDrive folders")
 	}
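Call sites that previously built drive lists in one shot now compose PagerForSource with the page-walking drives function above. A sketch of the resulting two-step call, assuming it sits inside the onedrive package (PagerForSource, drives, and OneDriveSource are package-local names from this diff; the function itself is illustrative):

func printDriveIDs(ctx context.Context, gs graph.Servicer, owner string) error {
	pager, err := PagerForSource(OneDriveSource, gs, owner, nil)
	if err != nil {
		return err
	}

	// retry=true opts into the context-deadline retry loop, matching the
	// OneDrive call sites in this commit.
	ds, err := drives(ctx, pager, true)
	if err != nil {
		return err
	}

	for _, d := range ds {
		if id := d.GetId(); id != nil {
			fmt.Println(*id)
		}
	}

	return nil
}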
@@ -1,21 +1,323 @@
 package onedrive
 
 import (
+	"context"
 	"strings"
 	"testing"
 
+	"github.com/google/uuid"
+	"github.com/microsoftgraph/msgraph-sdk-go/models"
+	"github.com/microsoftgraph/msgraph-sdk-go/models/odataerrors"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"github.com/stretchr/testify/suite"
 
 	"github.com/alcionai/corso/src/internal/common"
 	"github.com/alcionai/corso/src/internal/connector/graph"
+	"github.com/alcionai/corso/src/internal/connector/graph/api"
 	"github.com/alcionai/corso/src/internal/tester"
 	"github.com/alcionai/corso/src/pkg/control"
 	"github.com/alcionai/corso/src/pkg/logger"
 	"github.com/alcionai/corso/src/pkg/selectors"
 )
 
+type mockPageLinker struct {
+	link *string
+}
+
+func (pl *mockPageLinker) GetOdataNextLink() *string {
+	return pl.link
+}
+
+type pagerResult struct {
+	drives   []models.Driveable
+	nextLink *string
+	err      error
+}
+
+type mockDrivePager struct {
+	toReturn []pagerResult
+	getIdx   int
+}
+
+func (p *mockDrivePager) GetPage(context.Context) (api.PageLinker, error) {
+	if len(p.toReturn) <= p.getIdx {
+		return nil, assert.AnError
+	}
+
+	idx := p.getIdx
+	p.getIdx++
+
+	return &mockPageLinker{p.toReturn[idx].nextLink}, p.toReturn[idx].err
+}
+
+func (p *mockDrivePager) SetNext(string) {}
+
+func (p *mockDrivePager) ValuesIn(api.PageLinker) ([]models.Driveable, error) {
+	idx := p.getIdx
+	if idx > 0 {
+		// Return values lag by one since we increment in GetPage().
+		idx--
+	}
+
+	if len(p.toReturn) <= idx {
+		return nil, assert.AnError
+	}
+
+	return p.toReturn[idx].drives, nil
+}
+
+// Unit tests
+type OneDriveUnitSuite struct {
+	suite.Suite
+}
+
+func TestOneDriveUnitSuite(t *testing.T) {
+	suite.Run(t, new(OneDriveUnitSuite))
+}
+
+func (suite *OneDriveUnitSuite) TestDrives() {
+	numDriveResults := 4
+	emptyLink := ""
+	link := "foo"
+
+	// These errors won't be the "correct" format when compared to what graph
+	// returns, but they're close enough to have the same info when the inner
+	// details are extracted via support package.
+	tmp := userMysiteURLNotFound
+	tmpMySiteURLNotFound := odataerrors.NewMainError()
+	tmpMySiteURLNotFound.SetMessage(&tmp)
+
+	mySiteURLNotFound := odataerrors.NewODataError()
+	mySiteURLNotFound.SetError(tmpMySiteURLNotFound)
+
+	tmp2 := userMysiteNotFound
+	tmpMySiteNotFound := odataerrors.NewMainError()
+	tmpMySiteNotFound.SetMessage(&tmp2)
+
+	mySiteNotFound := odataerrors.NewODataError()
+	mySiteNotFound.SetError(tmpMySiteNotFound)
+
+	tmp3 := contextDeadlineExceeded
+	tmpDeadlineExceeded := odataerrors.NewMainError()
+	tmpDeadlineExceeded.SetMessage(&tmp3)
+
+	deadlineExceeded := odataerrors.NewODataError()
+	deadlineExceeded.SetError(tmpDeadlineExceeded)
+
+	resultDrives := make([]models.Driveable, 0, numDriveResults)
+
+	for i := 0; i < numDriveResults; i++ {
+		d := models.NewDrive()
+		id := uuid.NewString()
+		d.SetId(&id)
+
+		resultDrives = append(resultDrives, d)
+	}
+
+	tooManyRetries := make([]pagerResult, 0, getDrivesRetries+1)
+
+	for i := 0; i < getDrivesRetries+1; i++ {
+		tooManyRetries = append(tooManyRetries, pagerResult{
+			err: deadlineExceeded,
+		})
+	}
+
+	table := []struct {
+		name            string
+		pagerResults    []pagerResult
+		retry           bool
+		expectedErr     assert.ErrorAssertionFunc
+		expectedResults []models.Driveable
+	}{
+		{
+			name: "AllOneResultNilNextLink",
+			pagerResults: []pagerResult{
+				{
+					drives:   resultDrives,
+					nextLink: nil,
+					err:      nil,
+				},
+			},
+			retry:           false,
+			expectedErr:     assert.NoError,
+			expectedResults: resultDrives,
+		},
+		{
+			name: "AllOneResultEmptyNextLink",
+			pagerResults: []pagerResult{
+				{
+					drives:   resultDrives,
+					nextLink: &emptyLink,
+					err:      nil,
+				},
+			},
+			retry:           false,
+			expectedErr:     assert.NoError,
+			expectedResults: resultDrives,
+		},
+		{
+			name: "SplitResultsNilNextLink",
+			pagerResults: []pagerResult{
+				{
+					drives:   resultDrives[:numDriveResults/2],
+					nextLink: &link,
+					err:      nil,
+				},
+				{
+					drives:   resultDrives[numDriveResults/2:],
+					nextLink: nil,
+					err:      nil,
+				},
+			},
+			retry:           false,
+			expectedErr:     assert.NoError,
+			expectedResults: resultDrives,
+		},
+		{
+			name: "SplitResultsEmptyNextLink",
+			pagerResults: []pagerResult{
+				{
+					drives:   resultDrives[:numDriveResults/2],
+					nextLink: &link,
+					err:      nil,
+				},
+				{
+					drives:   resultDrives[numDriveResults/2:],
+					nextLink: &emptyLink,
+					err:      nil,
+				},
+			},
+			retry:           false,
+			expectedErr:     assert.NoError,
+			expectedResults: resultDrives,
+		},
+		{
+			name: "NonRetryableError",
+			pagerResults: []pagerResult{
+				{
+					drives:   resultDrives,
+					nextLink: &link,
+					err:      nil,
+				},
+				{
+					drives:   nil,
+					nextLink: nil,
+					err:      assert.AnError,
+				},
+			},
+			retry:           true,
+			expectedErr:     assert.Error,
+			expectedResults: nil,
+		},
+		{
+			name: "SiteURLNotFound",
+			pagerResults: []pagerResult{
+				{
+					drives:   nil,
+					nextLink: nil,
+					err:      mySiteURLNotFound,
+				},
+			},
+			retry:           true,
+			expectedErr:     assert.NoError,
+			expectedResults: nil,
+		},
+		{
+			name: "SiteNotFound",
+			pagerResults: []pagerResult{
+				{
+					drives:   nil,
+					nextLink: nil,
+					err:      mySiteNotFound,
+				},
+			},
+			retry:           true,
+			expectedErr:     assert.NoError,
+			expectedResults: nil,
+		},
+		{
+			name: "SplitResultsContextTimeoutWithRetries",
+			pagerResults: []pagerResult{
+				{
+					drives:   resultDrives[:numDriveResults/2],
+					nextLink: &link,
+					err:      nil,
+				},
+				{
+					drives:   nil,
+					nextLink: nil,
+					err:      deadlineExceeded,
+				},
+				{
+					drives:   resultDrives[numDriveResults/2:],
+					nextLink: &emptyLink,
+					err:      nil,
+				},
+			},
+			retry:           true,
+			expectedErr:     assert.NoError,
+			expectedResults: resultDrives,
+		},
+		{
+			name: "SplitResultsContextTimeoutNoRetries",
+			pagerResults: []pagerResult{
+				{
+					drives:   resultDrives[:numDriveResults/2],
+					nextLink: &link,
+					err:      nil,
+				},
+				{
+					drives:   nil,
+					nextLink: nil,
+					err:      deadlineExceeded,
+				},
+				{
+					drives:   resultDrives[numDriveResults/2:],
+					nextLink: &emptyLink,
+					err:      nil,
+				},
+			},
+			retry:           false,
+			expectedErr:     assert.Error,
+			expectedResults: nil,
+		},
+		{
+			name: "TooManyRetries",
+			pagerResults: append(
+				[]pagerResult{
+					{
+						drives:   resultDrives[:numDriveResults/2],
+						nextLink: &link,
+						err:      nil,
+					},
+				},
+				tooManyRetries...,
+			),
+			retry:           true,
+			expectedErr:     assert.Error,
+			expectedResults: nil,
+		},
+	}
+	for _, test := range table {
+		suite.T().Run(test.name, func(t *testing.T) {
+			ctx, flush := tester.NewContext()
+			defer flush()
+
+			pager := &mockDrivePager{
+				toReturn: test.pagerResults,
+			}
+
+			drives, err := drives(ctx, pager, test.retry)
+			test.expectedErr(t, err)
+
+			assert.ElementsMatch(t, test.expectedResults, drives)
+		})
+	}
+}
+
+// Integration tests
+
 type OneDriveSuite struct {
 	suite.Suite
 	userID string
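The mock pager above makes the paging loop testable without a live Graph tenant: each pagerResult stands in for one page, and ValuesIn deliberately lags GetPage by one index because drives() extracts values only after a successful fetch. A minimal single-page use, following the table pattern above (illustrative only, not an additional test in the commit):

pager := &mockDrivePager{
	toReturn: []pagerResult{
		{drives: resultDrives, nextLink: nil, err: nil},
	},
}

got, err := drives(ctx, pager, false)
require.NoError(t, err)
assert.ElementsMatch(t, resultDrives, got)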
@@ -44,7 +346,10 @@ func (suite *OneDriveSuite) TestCreateGetDeleteFolder() {
 	folderElements := []string{folderName1}
 	gs := loadTestService(t)
 
-	drives, err := drives(ctx, gs, suite.userID, OneDriveSource)
+	pager, err := PagerForSource(OneDriveSource, gs, suite.userID, nil)
+	require.NoError(t, err)
+
+	drives, err := drives(ctx, pager, true)
 	require.NoError(t, err)
 	require.NotEmpty(t, drives)
 
@@ -89,7 +394,10 @@ func (suite *OneDriveSuite) TestCreateGetDeleteFolder() {
 
 	for _, test := range table {
 		suite.T().Run(test.name, func(t *testing.T) {
-			allFolders, err := GetAllFolders(ctx, gs, suite.userID, test.prefix)
+			pager, err := PagerForSource(OneDriveSource, gs, suite.userID, nil)
+			require.NoError(t, err)
+
+			allFolders, err := GetAllFolders(ctx, gs, pager, test.prefix)
 			require.NoError(t, err)
 
 			foundFolderIDs := []string{}
@@ -146,8 +454,8 @@ func (suite *OneDriveSuite) TestOneDriveNewCollections() {
 			scope := selectors.
 				NewOneDriveBackup([]string{test.user}).
 				AllData()[0]
-			odcs, err := NewCollections(
-				graph.LargeItemClient(),
+			odcs, excludes, err := NewCollections(
+				graph.HTTPClient(graph.NoTimeout()),
 				creds.AzureTenantID,
 				test.user,
 				OneDriveSource,
@@ -157,6 +465,8 @@ func (suite *OneDriveSuite) TestOneDriveNewCollections() {
 				control.Options{},
 			).Get(ctx)
 			assert.NoError(t, err)
+			// Don't expect excludes as this isn't an incremental backup.
+			assert.Empty(t, excludes)
 
 			for _, entry := range odcs {
 				assert.NotEmpty(t, entry.FullPath())
@@ -25,6 +25,15 @@ const (
 	downloadURLKey = "@microsoft.graph.downloadUrl"
 )
 
+// generic drive item getter
+func getDriveItem(
+	ctx context.Context,
+	srv graph.Servicer,
+	driveID, itemID string,
+) (models.DriveItemable, error) {
+	return srv.Client().DrivesById(driveID).ItemsById(itemID).Get(ctx, nil)
+}
+
 // sharePointItemReader will return a io.ReadCloser for the specified item
 // It crafts this by querying M365 for a download URL for the item
 // and using a http client to initialize a reader
@@ -32,14 +41,9 @@ func sharePointItemReader(
 	hc *http.Client,
 	item models.DriveItemable,
 ) (details.ItemInfo, io.ReadCloser, error) {
-	url, ok := item.GetAdditionalData()[downloadURLKey].(*string)
-	if !ok {
-		return details.ItemInfo{}, nil, fmt.Errorf("failed to get url for %s", *item.GetName())
-	}
-
-	resp, err := hc.Get(*url)
+	resp, err := downloadItem(hc, item)
 	if err != nil {
-		return details.ItemInfo{}, nil, err
+		return details.ItemInfo{}, nil, errors.Wrap(err, "downloading item")
 	}
 
 	dii := details.ItemInfo{
@@ -56,24 +60,9 @@ func oneDriveItemReader(
 	hc *http.Client,
 	item models.DriveItemable,
 ) (details.ItemInfo, io.ReadCloser, error) {
-	url, ok := item.GetAdditionalData()[downloadURLKey].(*string)
-	if !ok {
-		return details.ItemInfo{}, nil, fmt.Errorf("failed to get url for %s", *item.GetName())
-	}
-
-	req, err := http.NewRequest(http.MethodGet, *url, nil)
-	if err != nil {
-		return details.ItemInfo{}, nil, err
-	}
-
-	// Decorate the traffic
-	//nolint:lll
-	// See https://learn.microsoft.com/en-us/sharepoint/dev/general-development/how-to-avoid-getting-throttled-or-blocked-in-sharepoint-online#how-to-decorate-your-http-traffic
-	req.Header.Set("User-Agent", "ISV|Alcion|Corso/"+version.Version)
-
-	resp, err := hc.Do(req)
-	if err != nil {
-		return details.ItemInfo{}, nil, err
+	resp, err := downloadItem(hc, item)
+	if err != nil {
+		return details.ItemInfo{}, nil, errors.Wrap(err, "downloading item")
 	}
 
 	dii := details.ItemInfo{
@@ -83,6 +72,46 @@ func oneDriveItemReader(
 	return dii, resp.Body, nil
 }
 
+func downloadItem(hc *http.Client, item models.DriveItemable) (*http.Response, error) {
+	url, ok := item.GetAdditionalData()[downloadURLKey].(*string)
+	if !ok {
+		return nil, fmt.Errorf("extracting file url: file %s", *item.GetId())
+	}
+
+	req, err := http.NewRequest(http.MethodGet, *url, nil)
+	if err != nil {
+		return nil, errors.Wrap(err, "new request")
+	}
+
+	//nolint:lll
+	// Decorate the traffic
+	// See https://learn.microsoft.com/en-us/sharepoint/dev/general-development/how-to-avoid-getting-throttled-or-blocked-in-sharepoint-online#how-to-decorate-your-http-traffic
+	req.Header.Set("User-Agent", "ISV|Alcion|Corso/"+version.Version)
+
+	resp, err := hc.Do(req)
+	if err != nil {
+		return nil, err
+	}
+
+	if (resp.StatusCode / 100) == 2 {
+		return resp, nil
+	}
+
+	if resp.StatusCode == http.StatusTooManyRequests {
+		return resp, graph.Err429TooManyRequests
+	}
+
+	if resp.StatusCode == http.StatusUnauthorized {
+		return resp, graph.Err401Unauthorized
+	}
+
+	if resp.StatusCode == http.StatusServiceUnavailable {
+		return resp, graph.Err503ServiceUnavailable
+	}
+
+	return resp, errors.New("non-2xx http response: " + resp.Status)
+}
+
 // oneDriveItemInfo will populate a details.OneDriveInfo struct
 // with properties from the drive item. ItemSize is specified
 // separately for restore processes because the local itemable
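downloadItem above maps throttling, auth, and availability status codes onto package-level sentinel errors so that callers (such as the retry loop in populateItems) can branch with errors.Is rather than re-parsing responses. A standalone sketch of that classification pattern follows; the sentinel values here are local stand-ins, since the real graph.Err* definitions are not part of this diff.

package main

import (
	"errors"
	"fmt"
	"net/http"
)

// Local stand-ins for the graph.Err429TooManyRequests-style sentinels
// referenced by downloadItem; defined here only to keep the sketch runnable.
var (
	errTooManyRequests    = errors.New("429 too many requests")
	errUnauthorized       = errors.New("401 unauthorized")
	errServiceUnavailable = errors.New("503 service unavailable")
)

// classify mirrors downloadItem's status handling: 2xx passes through,
// a few retryable statuses become sentinels, anything else is generic.
func classify(resp *http.Response) error {
	if resp.StatusCode/100 == 2 {
		return nil
	}

	switch resp.StatusCode {
	case http.StatusTooManyRequests:
		return errTooManyRequests
	case http.StatusUnauthorized:
		return errUnauthorized
	case http.StatusServiceUnavailable:
		return errServiceUnavailable
	}

	return errors.New("non-2xx http response: " + resp.Status)
}

func main() {
	err := classify(&http.Response{StatusCode: 401, Status: "401 Unauthorized"})
	fmt.Println(errors.Is(err, errUnauthorized)) // true
}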
@@ -75,7 +75,10 @@ func (suite *ItemIntegrationSuite) SetupSuite() {
 
 	suite.user = tester.SecondaryM365UserID(t)
 
-	odDrives, err := drives(ctx, suite, suite.user, OneDriveSource)
+	pager, err := PagerForSource(OneDriveSource, suite, suite.user, nil)
+	require.NoError(t, err)
+
+	odDrives, err := drives(ctx, pager, true)
 	require.NoError(t, err)
 	// Test Requirement 1: Need a drive
 	require.Greaterf(t, len(odDrives), 0, "user %s does not have a drive", suite.user)
@@ -126,7 +129,7 @@ func (suite *ItemIntegrationSuite) TestItemReader_oneDrive() {
 
 	// Read data for the file
 
-	itemInfo, itemData, err := oneDriveItemReader(graph.LargeItemClient(), driveItem)
+	itemInfo, itemData, err := oneDriveItemReader(graph.HTTPClient(graph.NoTimeout()), driveItem)
 	require.NoError(suite.T(), err)
 	require.NotNil(suite.T(), itemInfo.OneDrive)
 	require.NotEmpty(suite.T(), itemInfo.OneDrive.ItemName)
@@ -243,7 +243,7 @@ func restoreItem(
 	}
 
 	iReader := itemData.ToReader()
-	progReader, closer := observe.ItemProgress(ctx, iReader, observe.ItemRestoreMsg, itemName, ss.Size())
+	progReader, closer := observe.ItemProgress(ctx, iReader, observe.ItemRestoreMsg, observe.PII(itemName), ss.Size())
 
 	go closer()
 
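A pattern repeated through the remaining hunks: strings derived from user data are wrapped in observe.PII so progress output can redact them, while fixed literals are marked observe.Safe. The exact wrapper signatures are not part of this diff, so treat the shape below, copied from the sharepoint hunk that follows, as the only contract assumed here.

colProgress, closer := observe.CollectionProgress(
	ctx,
	sc.fullPath.Category().String(),
	observe.Safe("name"),              // fixed literal: safe to display
	observe.PII(sc.fullPath.Folder())) // user path: redactable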
@@ -158,9 +158,9 @@ func (sc *Collection) populate(ctx context.Context) {
 	// TODO: Insert correct ID for CollectionProgress
 	colProgress, closer := observe.CollectionProgress(
 		ctx,
-		"name",
 		sc.fullPath.Category().String(),
-		sc.fullPath.Folder())
+		observe.Safe("name"),
+		observe.PII(sc.fullPath.Folder()))
 	go closer()
 
 	defer func() {
@@ -31,10 +31,10 @@ func DataCollections(
 	serv graph.Servicer,
 	su statusUpdater,
 	ctrlOpts control.Options,
-) ([]data.Collection, error) {
+) ([]data.Collection, map[string]struct{}, error) {
 	b, err := selector.ToSharePointBackup()
 	if err != nil {
-		return nil, errors.Wrap(err, "sharePointDataCollection: parsing selector")
+		return nil, nil, errors.Wrap(err, "sharePointDataCollection: parsing selector")
 	}
 
 	var (
@@ -46,7 +46,8 @@ func DataCollections(
 	for _, scope := range b.Scopes() {
 		foldersComplete, closer := observe.MessageWithCompletion(ctx, observe.Bulletf(
 			"%s - %s",
-			scope.Category().PathType(), site))
+			observe.Safe(scope.Category().PathType().String()),
+			observe.PII(site)))
 		defer closer()
 		defer close(foldersComplete)
 
@@ -62,11 +63,11 @@ func DataCollections(
 				su,
 				ctrlOpts)
 			if err != nil {
-				return nil, support.WrapAndAppend(site, err, errs)
+				return nil, nil, support.WrapAndAppend(site, err, errs)
 			}
 
 		case path.LibrariesCategory:
-			spcs, err = collectLibraries(
+			spcs, _, err = collectLibraries(
 				ctx,
 				itemClient,
 				serv,
@@ -76,7 +77,7 @@ func DataCollections(
 				su,
 				ctrlOpts)
 			if err != nil {
-				return nil, support.WrapAndAppend(site, err, errs)
+				return nil, nil, support.WrapAndAppend(site, err, errs)
 			}
 		}
 
@@ -84,7 +85,7 @@ func DataCollections(
 		foldersComplete <- struct{}{}
 	}
 
-	return collections, errs
+	return collections, nil, errs
 }
 
 func collectLists(
@@ -133,7 +134,7 @@ func collectLibraries(
 	scope selectors.SharePointScope,
 	updater statusUpdater,
 	ctrlOpts control.Options,
-) ([]data.Collection, error) {
+) ([]data.Collection, map[string]struct{}, error) {
 	var (
 		collections = []data.Collection{}
 		errs        error
@@ -151,12 +152,12 @@ func collectLibraries(
 		updater.UpdateStatus,
 		ctrlOpts)
 
-	odcs, err := colls.Get(ctx)
+	odcs, excludes, err := colls.Get(ctx)
 	if err != nil {
-		return nil, support.WrapAndAppend(siteID, err, errs)
+		return nil, nil, support.WrapAndAppend(siteID, err, errs)
 	}
 
-	return append(collections, odcs...), errs
+	return append(collections, odcs...), excludes, errs
 }
 
 type folderMatcher struct {
@@ -92,7 +92,7 @@ func (suite *SharePointLibrariesSuite) TestUpdateCollections() {
 			newPaths := map[string]string{}
 			excluded := map[string]struct{}{}
 			c := onedrive.NewCollections(
-				graph.LargeItemClient(),
+				graph.HTTPClient(graph.NoTimeout()),
 				tenant,
 				site,
 				onedrive.SharePointSource,
@@ -1,8 +1,6 @@
 package support
 
 import (
-	"strings"
-
 	absser "github.com/microsoft/kiota-abstractions-go/serialization"
 	js "github.com/microsoft/kiota-serialization-json-go"
 	"github.com/microsoftgraph/msgraph-sdk-go/models"
@@ -73,14 +71,3 @@ func CreateListFromBytes(bytes []byte) (models.Listable, error) {
 
 	return list, nil
 }
-
-func HasAttachments(body models.ItemBodyable) bool {
-	if body.GetContent() == nil || body.GetContentType() == nil ||
-		*body.GetContentType() == models.TEXT_BODYTYPE || len(*body.GetContent()) == 0 {
-		return false
-	}
-
-	content := *body.GetContent()
-
-	return strings.Contains(content, "src=\"cid:")
-}
@@ -3,7 +3,6 @@ package support
 import (
 	"testing"
 
-	"github.com/microsoftgraph/msgraph-sdk-go/models"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"github.com/stretchr/testify/suite"
@@ -160,56 +159,3 @@ func (suite *DataSupportSuite) TestCreateListFromBytes() {
 		})
 	}
 }
-
-func (suite *DataSupportSuite) TestHasAttachments() {
-	tests := []struct {
-		name          string
-		hasAttachment assert.BoolAssertionFunc
-		getBodyable   func(t *testing.T) models.ItemBodyable
-	}{
-		{
-			name:          "Mock w/out attachment",
-			hasAttachment: assert.False,
-			getBodyable: func(t *testing.T) models.ItemBodyable {
-				byteArray := mockconnector.GetMockMessageWithBodyBytes(
-					"Test",
-					"This is testing",
-					"This is testing",
-				)
-				message, err := CreateMessageFromBytes(byteArray)
-				require.NoError(t, err)
-				return message.GetBody()
-			},
-		},
-		{
-			name:          "Mock w/ inline attachment",
-			hasAttachment: assert.True,
-			getBodyable: func(t *testing.T) models.ItemBodyable {
-				byteArray := mockconnector.GetMessageWithOneDriveAttachment("Test legacy")
-				message, err := CreateMessageFromBytes(byteArray)
-				require.NoError(t, err)
-				return message.GetBody()
-			},
-		},
-		{
-			name:          "Edge Case",
-			hasAttachment: assert.True,
-			getBodyable: func(t *testing.T) models.ItemBodyable {
-				//nolint:lll
-				content := "<html><head>\r\n<meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\"><style type=\"text/css\" style=\"display:none\">\r\n<!--\r\np\r\n\t{margin-top:0;\r\n\tmargin-bottom:0}\r\n-->\r\n</style></head><body dir=\"ltr\"><div class=\"elementToProof\" style=\"font-family:Calibri,Arial,Helvetica,sans-serif; font-size:12pt; color:rgb(0,0,0); background-color:rgb(255,255,255)\">Happy New Year,</div><div class=\"elementToProof\" style=\"font-family:Calibri,Arial,Helvetica,sans-serif; font-size:12pt; color:rgb(0,0,0); background-color:rgb(255,255,255)\"><br></div><div class=\"elementToProof\" style=\"font-family:Calibri,Arial,Helvetica,sans-serif; font-size:12pt; color:rgb(0,0,0); background-color:rgb(255,255,255)\">In accordance with TPS report guidelines, there have been questions about how to address our activities SharePoint Cover page. Do you believe this is the best picture? </div><div class=\"elementToProof\" style=\"font-family:Calibri,Arial,Helvetica,sans-serif; font-size:12pt; color:rgb(0,0,0); background-color:rgb(255,255,255)\"><br></div><div class=\"elementToProof\" style=\"font-family:Calibri,Arial,Helvetica,sans-serif; font-size:12pt; color:rgb(0,0,0); background-color:rgb(255,255,255)\"><img class=\"FluidPluginCopy ContentPasted0 w-2070 h-1380\" size=\"5854817\" data-outlook-trace=\"F:1|T:1\" src=\"cid:85f4faa3-9851-40c7-ba0a-e63dce1185f9\" style=\"max-width:100%\"><br></div><div class=\"elementToProof\" style=\"font-family:Calibri,Arial,Helvetica,sans-serif; font-size:12pt; color:rgb(0,0,0); background-color:rgb(255,255,255)\"><br></div><div class=\"elementToProof\" style=\"font-family:Calibri,Arial,Helvetica,sans-serif; font-size:12pt; color:rgb(0,0,0); background-color:rgb(255,255,255)\">Let me know if this meets our culture requirements.</div><div class=\"elementToProof\" style=\"font-family:Calibri,Arial,Helvetica,sans-serif; font-size:12pt; color:rgb(0,0,0); background-color:rgb(255,255,255)\"><br></div><div class=\"elementToProof\" style=\"font-family:Calibri,Arial,Helvetica,sans-serif; font-size:12pt; color:rgb(0,0,0); background-color:rgb(255,255,255)\">Warm Regards,</div><div class=\"elementToProof\" style=\"font-family:Calibri,Arial,Helvetica,sans-serif; font-size:12pt; color:rgb(0,0,0); background-color:rgb(255,255,255)\"><br></div><div class=\"elementToProof\" style=\"font-family:Calibri,Arial,Helvetica,sans-serif; font-size:12pt; color:rgb(0,0,0); background-color:rgb(255,255,255)\">Dustin</div></body></html>"
-				body := models.NewItemBody()
-				body.SetContent(&content)
-				cat := models.HTML_BODYTYPE
-				body.SetContentType(&cat)
-				return body
-			},
-		},
-	}
-
-	for _, test := range tests {
-		suite.T().Run(test.name, func(t *testing.T) {
-			found := HasAttachments(test.getBodyable(t))
-			test.hasAttachment(t, found)
-		})
-	}
-}
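The kopia hunk that follows adds debug logging to FinishedHashingFile by decoding each base64 path segment kopia hands back. A standalone sketch of just that decode step (the function and example values here are invented for illustration):

package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

// decodeSegments mirrors the logging-only decode in FinishedHashingFile:
// kopia reports base64-encoded path segments, which are decoded per
// segment before being written to the debug log.
func decodeSegments(fname string) []string {
	sl := strings.Split(fname, "/")

	for i := range sl {
		rdt, err := base64.StdEncoding.DecodeString(sl[i])
		if err != nil {
			continue // leave the segment as-is if it was not base64
		}

		sl[i] = string(rdt)
	}

	return sl
}

func main() {
	enc := base64.StdEncoding.EncodeToString([]byte("inbox"))
	fmt.Println(decodeSegments("tenant/" + enc)) // [tenant inbox]
}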
@ -3,10 +3,13 @@ package kopia
|
|||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
|
"encoding/base64"
|
||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"os"
|
"os"
|
||||||
"runtime/trace"
|
"runtime/trace"
|
||||||
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
"time"
|
"time"
|
||||||
@ -204,6 +207,19 @@ func (cp *corsoProgress) FinishedHashingFile(fname string, bs int64) {
|
|||||||
// Pass the call through as well so we don't break expected functionality.
|
// Pass the call through as well so we don't break expected functionality.
|
||||||
defer cp.UploadProgress.FinishedHashingFile(fname, bs)
|
defer cp.UploadProgress.FinishedHashingFile(fname, bs)
|
||||||
|
|
||||||
|
sl := strings.Split(fname, "/")
|
||||||
|
|
||||||
|
for i := range sl {
|
||||||
|
rdt, err := base64.StdEncoding.DecodeString(sl[i])
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println("f did not decode")
|
||||||
|
}
|
||||||
|
|
||||||
|
sl[i] = string(rdt)
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.Ctx(context.Background()).Debugw("finished hashing file", "path", sl[2:])
|
||||||
|
|
||||||
atomic.AddInt64(&cp.totalBytes, bs)
|
atomic.AddInt64(&cp.totalBytes, bs)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -119,6 +119,7 @@ func (w Wrapper) BackupCollections(
 	ctx context.Context,
 	previousSnapshots []IncrementalBase,
 	collections []data.Collection,
+	globalExcludeSet map[string]struct{},
 	tags map[string]string,
 	buildTreeWithBase bool,
 ) (*BackupStats, *details.Builder, map[string]path.Path, error) {
@@ -129,10 +130,6 @@ func (w Wrapper) BackupCollections(
 	ctx, end := D.Span(ctx, "kopia:backupCollections")
 	defer end()
 
-	// TODO(ashmrtn): Make this a parameter when actually enabling the global
-	// exclude set.
-	var globalExcludeSet map[string]struct{}
-
 	if len(collections) == 0 && len(globalExcludeSet) == 0 {
 		return &BackupStats{}, &details.Builder{}, nil, nil
 	}
@@ -266,6 +266,7 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections() {
 		suite.ctx,
 		prevSnaps,
 		collections,
+		nil,
 		tags,
 		true,
 	)
@@ -353,6 +354,7 @@ func (suite *KopiaIntegrationSuite) TestRestoreAfterCompressionChange() {
 		ctx,
 		nil,
 		[]data.Collection{dc1, dc2},
+		nil,
 		tags,
 		true,
 	)
@@ -435,6 +437,7 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_ReaderError() {
 		suite.ctx,
 		nil,
 		collections,
+		nil,
 		tags,
 		true,
 	)
@@ -447,6 +450,22 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_ReaderError() {
 	assert.False(t, stats.Incomplete)
 	// 5 file and 6 folder entries.
 	assert.Len(t, deets.Details().Entries, 5+6)
+
+	failedPath, err := suite.testPath2.Append(testFileName4, true)
+	require.NoError(t, err)
+
+	ic := i64counter{}
+
+	_, err = suite.w.RestoreMultipleItems(
+		suite.ctx,
+		string(stats.SnapshotID),
+		[]path.Path{failedPath},
+		&ic,
+	)
+	// Files that had an error shouldn't make a dir entry in kopia. If they do we
+	// may run into kopia-assisted incrementals issues because only mod time and
+	// not file size is checked for StreamingFiles.
+	assert.ErrorIs(t, err, ErrNotFound, "errored file is restorable")
 }
 
 type backedupFile struct {
@@ -480,6 +499,7 @@ func (suite *KopiaIntegrationSuite) TestBackupCollectionsHandlesNoCollections()
 		nil,
 		test.collections,
 		nil,
+		nil,
 		true,
 	)
 	require.NoError(t, err)
@@ -637,6 +657,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) SetupTest() {
 		suite.ctx,
 		nil,
 		collections,
+		nil,
 		tags,
 		false,
 	)
@@ -665,6 +686,136 @@ func (c *i64counter) Count(i int64) {
 	c.i += i
 }
 
+func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() {
+	reason := Reason{
+		ResourceOwner: testUser,
+		Service:       path.ExchangeService,
+		Category:      path.EmailCategory,
+	}
+
+	subtreePathTmp, err := path.Builder{}.Append("tmp").ToDataLayerExchangePathForCategory(
+		testTenant,
+		testUser,
+		path.EmailCategory,
+		false,
+	)
+	require.NoError(suite.T(), err)
+
+	subtreePath := subtreePathTmp.ToBuilder().Dir()
+
+	manifests, err := suite.w.FetchPrevSnapshotManifests(
+		suite.ctx,
+		[]Reason{reason},
+		nil,
+	)
+	require.NoError(suite.T(), err)
+	require.Len(suite.T(), manifests, 1)
+	require.Equal(suite.T(), suite.snapshotID, manifests[0].ID)
+
+	tags := map[string]string{}
+
+	for _, k := range reason.TagKeys() {
+		tags[k] = ""
+	}
+
+	table := []struct {
+		name                  string
+		excludeItem           bool
+		expectedCachedItems   int
+		expectedUncachedItems int
+		cols                  func() []data.Collection
+		backupIDCheck         require.ValueAssertionFunc
+		restoreCheck          assert.ErrorAssertionFunc
+	}{
+		{
+			name:                  "ExcludeItem",
+			excludeItem:           true,
+			expectedCachedItems:   len(suite.filesByPath) - 1,
+			expectedUncachedItems: 0,
+			cols: func() []data.Collection {
+				return nil
+			},
+			backupIDCheck: require.NotEmpty,
+			restoreCheck:  assert.Error,
+		},
+		{
+			name: "NoExcludeItemNoChanges",
+			// No snapshot should be made since there were no changes.
+			expectedCachedItems:   0,
+			expectedUncachedItems: 0,
+			cols: func() []data.Collection {
+				return nil
+			},
+			// Backup doesn't run.
+			backupIDCheck: require.Empty,
+		},
+		{
+			name:                  "NoExcludeItemWithChanges",
+			expectedCachedItems:   len(suite.filesByPath),
+			expectedUncachedItems: 1,
+			cols: func() []data.Collection {
+				c := mockconnector.NewMockExchangeCollection(
+					suite.testPath1,
+					1,
+				)
+				c.ColState = data.NotMovedState
+
+				return []data.Collection{c}
+			},
+			backupIDCheck: require.NotEmpty,
+			restoreCheck:  assert.NoError,
+		},
+	}
+
+	for _, test := range table {
+		suite.T().Run(test.name, func(t *testing.T) {
+			var excluded map[string]struct{}
+			if test.excludeItem {
+				excluded = map[string]struct{}{
+					suite.files[suite.testPath1.String()][0].itemPath.Item(): {},
+				}
+			}
+
+			stats, _, _, err := suite.w.BackupCollections(
+				suite.ctx,
+				[]IncrementalBase{
+					{
+						Manifest: manifests[0].Manifest,
+						SubtreePaths: []*path.Builder{
+							subtreePath,
+						},
+					},
+				},
+				test.cols(),
+				excluded,
+				tags,
+				true,
+			)
+			require.NoError(t, err)
+			assert.Equal(t, test.expectedCachedItems, stats.CachedFileCount)
+			assert.Equal(t, test.expectedUncachedItems, stats.UncachedFileCount)
+
+			test.backupIDCheck(t, stats.SnapshotID)
+
+			if len(stats.SnapshotID) == 0 {
+				return
+			}
+
+			ic := i64counter{}
+
+			_, err = suite.w.RestoreMultipleItems(
+				suite.ctx,
+				string(stats.SnapshotID),
+				[]path.Path{
+					suite.files[suite.testPath1.String()][0].itemPath,
+				},
+				&ic,
+			)
+			test.restoreCheck(t, err)
+		})
+	}
+}
+
 func (suite *KopiaSimpleRepoIntegrationSuite) TestRestoreMultipleItems() {
 	doesntExist, err := path.Builder{}.Append("subdir", "foo").ToDataLayerExchangePathForCategory(
 		testTenant,
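A rough sketch of how a caller might assemble the new exclude set. The item IDs below are hypothetical stand-ins; the only real requirement shown by TestBackupExcludeItem above is that keys are item names (path.Path.Item() values):

package main

import "fmt"

func main() {
	// Hypothetical IDs of items deleted since the base snapshot; real callers
	// would source these from delta query results.
	deleted := []string{"item-id-1", "item-id-2"}

	excluded := make(map[string]struct{}, len(deleted))
	for _, id := range deleted {
		excluded[id] = struct{}{}
	}

	// excluded is then handed to Wrapper.BackupCollections, which skips these
	// entries when carrying items forward from a base snapshot.
	fmt.Println(len(excluded)) // 2
}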
@@ -138,8 +138,9 @@ const (
 // Progress Updates
 
 // Message is used to display a progress message
-func Message(ctx context.Context, message string) {
-	logger.Ctx(ctx).Info(message)
+func Message(ctx context.Context, msg cleanable) {
+	logger.Ctx(ctx).Info(msg.clean())
+	message := msg.String()
 
 	if cfg.hidden() {
 		return
@@ -163,9 +164,15 @@ func Message(ctx context.Context, message string) {
 
 // MessageWithCompletion is used to display progress with a spinner
 // that switches to "done" when the completion channel is signalled
-func MessageWithCompletion(ctx context.Context, message string) (chan<- struct{}, func()) {
+func MessageWithCompletion(
+	ctx context.Context,
+	msg cleanable,
+) (chan<- struct{}, func()) {
+	clean := msg.clean()
+	message := msg.String()
+
 	log := logger.Ctx(ctx)
-	log.Info(message)
+	log.Info(clean)
 
 	completionCh := make(chan struct{}, 1)
 
@@ -201,7 +208,7 @@ func MessageWithCompletion(ctx context.Context, message string) (chan<- struct{}
 	}(completionCh)
 
 	wacb := waitAndCloseBar(bar, func() {
-		log.Info("done - " + message)
+		log.Info("done - " + clean)
 	})
 
 	return completionCh, wacb
@@ -217,10 +224,11 @@ func MessageWithCompletion(ctx context.Context, message string) (chan<- struct{}
 func ItemProgress(
 	ctx context.Context,
 	rc io.ReadCloser,
-	header, iname string,
+	header string,
+	iname cleanable,
 	totalBytes int64,
 ) (io.ReadCloser, func()) {
-	log := logger.Ctx(ctx).With("item", iname, "size", humanize.Bytes(uint64(totalBytes)))
+	log := logger.Ctx(ctx).With("item", iname.clean(), "size", humanize.Bytes(uint64(totalBytes)))
 	log.Debug(header)
 
 	if cfg.hidden() || rc == nil || totalBytes == 0 {
@@ -232,7 +240,7 @@ func ItemProgress(
 	barOpts := []mpb.BarOption{
 		mpb.PrependDecorators(
 			decor.Name(header, decor.WCSyncSpaceR),
-			decor.Name(iname, decor.WCSyncSpaceR),
+			decor.Name(iname.String(), decor.WCSyncSpaceR),
 			decor.CountersKibiByte(" %.1f/%.1f ", decor.WC{W: 8}),
 			decor.NewPercentage("%d ", decor.WC{W: 4}),
 		),
@@ -256,9 +264,14 @@ func ItemProgress(
 // of the specified count.
 // Each write to the provided channel counts as a single increment.
 // The caller is expected to close the channel.
-func ProgressWithCount(ctx context.Context, header, message string, count int64) (chan<- struct{}, func()) {
+func ProgressWithCount(
+	ctx context.Context,
+	header string,
+	message cleanable,
+	count int64,
+) (chan<- struct{}, func()) {
 	log := logger.Ctx(ctx)
-	lmsg := fmt.Sprintf("%s %s - %d", header, message, count)
+	lmsg := fmt.Sprintf("%s %s - %d", header, message.clean(), count)
 	log.Info(lmsg)
 
 	progressCh := make(chan struct{})
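Caller-side usage under the new signature, as the updated tests below exercise it; a sketch that assumes a non-sensitive message, hence observe.Safe:

package main

import (
	"context"

	"github.com/alcionai/corso/src/internal/observe"
)

func main() {
	ctx := context.Background()
	count := 3

	// header stays a plain string; the message must now satisfy cleanable,
	// so plain text gets wrapped in observe.Safe.
	ch, closer := observe.ProgressWithCount(ctx, "Restoring", observe.Safe("emails"), int64(count))

	for i := 0; i < count; i++ {
		ch <- struct{}{} // each write increments the bar once
	}

	close(ch) // the caller is expected to close the channel
	closer()
}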
@@ -281,7 +294,7 @@ func ProgressWithCount(ctx context.Context, header, message string, count int64)
 	barOpts := []mpb.BarOption{
 		mpb.PrependDecorators(
 			decor.Name(header, decor.WCSyncSpaceR),
-			decor.Name(message),
+			decor.Name(message.String()),
 			decor.Counters(0, " %d/%d "),
 		),
 	}
@@ -355,13 +368,17 @@ func makeSpinFrames(barWidth int) {
 // counts as a single increment. The caller is expected to close the channel.
 func CollectionProgress(
 	ctx context.Context,
-	user, category, dirName string,
+	category string,
+	user, dirName cleanable,
 ) (chan<- struct{}, func()) {
-	log := logger.Ctx(ctx).With("user", user, "category", category, "dir", dirName)
-	message := "Collecting " + dirName
+	log := logger.Ctx(ctx).With(
+		"user", user.clean(),
+		"category", category,
+		"dir", dirName.clean())
+	message := "Collecting Directory"
 	log.Info(message)
 
-	if cfg.hidden() || len(user) == 0 || len(dirName) == 0 {
+	if cfg.hidden() || len(user.String()) == 0 || len(dirName.String()) == 0 {
 		ch := make(chan struct{})
 
 		go func(ci <-chan struct{}) {
@@ -379,7 +396,7 @@ func CollectionProgress(
 	wg.Add(1)
 
 	barOpts := []mpb.BarOption{
-		mpb.PrependDecorators(decor.Name(category)),
+		mpb.PrependDecorators(decor.Name(string(category))),
 		mpb.AppendDecorators(
 			decor.CurrentNoUnit("%d - ", decor.WCSyncSpace),
 			decor.Name(fmt.Sprintf("%s - %s", user, dirName)),
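For comparison, a sketch of the new CollectionProgress call shape. The user and directory are wrapped as observe.PII on the assumption that they identify a person, so logs record "***" while the terminal bar still renders the real values:

package main

import (
	"context"

	"github.com/alcionai/corso/src/internal/observe"
)

func main() {
	ctx := context.Background()

	// category is now a plain string; user and dirName must be cleanable.
	progCh, closer := observe.CollectionProgress(
		ctx,
		"email",
		observe.PII("user@example.com"),
		observe.PII("Inbox"))

	progCh <- struct{}{} // one write per collected item
	close(progCh)
	closer()
}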
@@ -439,8 +456,65 @@ func waitAndCloseBar(bar *mpb.Bar, log func()) func() {
 // other funcs
 // ---------------------------------------------------------------------------
 
-// Bulletf prepends the message with "∙ ", and formats it.
-// Ex: Bulletf("%s", "foo") => "∙ foo"
-func Bulletf(template string, vs ...any) string {
-	return fmt.Sprintf("∙ "+template, vs...)
+const Bullet = "∙"
+
+// ---------------------------------------------------------------------------
+// PII redaction
+// ---------------------------------------------------------------------------
+
+type cleanable interface {
+	clean() string
+	String() string
+}
+
+type PII string
+
+func (p PII) clean() string {
+	return "***"
+}
+
+func (p PII) String() string {
+	return string(p)
+}
+
+type Safe string
+
+func (s Safe) clean() string {
+	return string(s)
+}
+
+func (s Safe) String() string {
+	return string(s)
+}
+
+type bulletPII struct {
+	tmpl string
+	vars []cleanable
+}
+
+func Bulletf(template string, vs ...cleanable) bulletPII {
+	return bulletPII{
+		tmpl: "∙ " + template,
+		vars: vs,
+	}
+}
+
+func (b bulletPII) clean() string {
+	vs := make([]any, 0, len(b.vars))
+
+	for _, v := range b.vars {
+		vs = append(vs, v.clean())
+	}
+
+	return fmt.Sprintf(b.tmpl, vs...)
+}
+
+func (b bulletPII) String() string {
+	vs := make([]any, 0, len(b.vars))
+
+	for _, v := range b.vars {
+		vs = append(vs, v.String())
+	}
+
+	return fmt.Sprintf(b.tmpl, vs...)
 }
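To make the two implementations concrete, an in-package sketch (it has to live in package observe, since clean() is unexported); the outputs in the comments follow directly from the definitions above:

package observe

import "fmt"

// exampleRedaction is illustrative only and not part of the diff.
func exampleRedaction() {
	p := PII("user@example.com")
	s := Safe("mail")

	fmt.Println(p.clean())  // "***", the log-safe form
	fmt.Println(p.String()) // "user@example.com", the display form

	b := Bulletf("%s %s", s, p)
	fmt.Println(b.clean())  // "∙ mail ***"
	fmt.Println(b.String()) // "∙ mail user@example.com"
}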
@@ -26,6 +26,12 @@ func TestObserveProgressUnitSuite(t *testing.T) {
 	suite.Run(t, new(ObserveProgressUnitSuite))
 }
 
+var (
+	tst        = observe.Safe("test")
+	testcat    = observe.Safe("testcat")
+	testertons = observe.Safe("testertons")
+)
+
 func (suite *ObserveProgressUnitSuite) TestItemProgress() {
 	ctx, flush := tester.NewContext()
 	defer flush()
@@ -47,7 +53,7 @@ func (suite *ObserveProgressUnitSuite) TestItemProgress() {
 		ctx,
 		io.NopCloser(bytes.NewReader(from)),
 		"folder",
-		"test",
+		tst,
 		100)
 	require.NotNil(t, prog)
 	require.NotNil(t, closer)
@@ -97,7 +103,7 @@ func (suite *ObserveProgressUnitSuite) TestCollectionProgress_unblockOnCtxCancel
 		observe.SeedWriter(context.Background(), nil, nil)
 	}()
 
-	progCh, closer := observe.CollectionProgress(ctx, "test", "testcat", "testertons")
+	progCh, closer := observe.CollectionProgress(ctx, "test", testcat, testertons)
 	require.NotNil(t, progCh)
 	require.NotNil(t, closer)
 
@@ -132,7 +138,7 @@ func (suite *ObserveProgressUnitSuite) TestCollectionProgress_unblockOnChannelCl
 		observe.SeedWriter(context.Background(), nil, nil)
 	}()
 
-	progCh, closer := observe.CollectionProgress(ctx, "test", "testcat", "testertons")
+	progCh, closer := observe.CollectionProgress(ctx, "test", testcat, testertons)
 	require.NotNil(t, progCh)
 	require.NotNil(t, closer)
 
@@ -164,7 +170,7 @@ func (suite *ObserveProgressUnitSuite) TestObserveProgress() {
 
 	message := "Test Message"
 
-	observe.Message(ctx, message)
+	observe.Message(ctx, observe.Safe(message))
 	observe.Complete()
 	require.NotEmpty(suite.T(), recorder.String())
 	require.Contains(suite.T(), recorder.String(), message)
@@ -185,7 +191,7 @@ func (suite *ObserveProgressUnitSuite) TestObserveProgressWithCompletion() {
 
 	message := "Test Message"
 
-	ch, closer := observe.MessageWithCompletion(ctx, message)
+	ch, closer := observe.MessageWithCompletion(ctx, observe.Safe(message))
 
 	// Trigger completion
 	ch <- struct{}{}
@@ -215,7 +221,7 @@ func (suite *ObserveProgressUnitSuite) TestObserveProgressWithChannelClosed() {
 
 	message := "Test Message"
 
-	ch, closer := observe.MessageWithCompletion(ctx, message)
+	ch, closer := observe.MessageWithCompletion(ctx, observe.Safe(message))
 
 	// Close channel without completing
 	close(ch)
@@ -247,7 +253,7 @@ func (suite *ObserveProgressUnitSuite) TestObserveProgressWithContextCancelled()
 
 	message := "Test Message"
 
-	_, closer := observe.MessageWithCompletion(ctx, message)
+	_, closer := observe.MessageWithCompletion(ctx, observe.Safe(message))
 
 	// cancel context
 	cancel()
@@ -278,7 +284,7 @@ func (suite *ObserveProgressUnitSuite) TestObserveProgressWithCount() {
 	message := "Test Message"
 	count := 3
 
-	ch, closer := observe.ProgressWithCount(ctx, header, message, int64(count))
+	ch, closer := observe.ProgressWithCount(ctx, header, observe.Safe(message), int64(count))
 
 	for i := 0; i < count; i++ {
 		ch <- struct{}{}
@@ -311,7 +317,7 @@ func (suite *ObserveProgressUnitSuite) TestObserveProgressWithCountChannelClosed
 	message := "Test Message"
 	count := 3
 
-	ch, closer := observe.ProgressWithCount(ctx, header, message, int64(count))
+	ch, closer := observe.ProgressWithCount(ctx, header, observe.Safe(message), int64(count))
 
 	close(ch)
 
@@ -44,7 +44,7 @@ type BackupOperation struct {
 
 // BackupResults aggregate the details of the result of the operation.
 type BackupResults struct {
-	stats.Errs
+	stats.Errs // deprecated in place of fault.Errors in the base operation.
 	stats.ReadWrites
 	stats.StartAndEndTime
 	BackupID model.StableID `json:"backupID"`
@@ -90,7 +90,6 @@ type backupStats struct {
 	k                 *kopia.BackupStats
 	gc                *support.ConnectorOperationStatus
 	resourceCount     int
-	started           bool
 	readErr, writeErr error
 }
 
@@ -228,7 +227,6 @@ func (op *BackupOperation) Run(ctx context.Context) (err error) {
 
 	// should always be 1, since backups are 1:1 with resourceOwners.
 	opStats.resourceCount = 1
-	opStats.started = true
 
 	return err
 }
@@ -256,14 +254,18 @@ func produceBackupDataCollections(
 	metadata []data.Collection,
 	ctrlOpts control.Options,
 ) ([]data.Collection, error) {
-	complete, closer := observe.MessageWithCompletion(ctx, "Discovering items to backup")
+	complete, closer := observe.MessageWithCompletion(ctx, observe.Safe("Discovering items to backup"))
 	defer func() {
 		complete <- struct{}{}
 		close(complete)
 		closer()
 	}()
 
-	return gc.DataCollections(ctx, sel, metadata, ctrlOpts)
+	// TODO(ashmrtn): When we're ready to wire up the global exclude list return
+	// all values.
+	cols, _, errs := gc.DataCollections(ctx, sel, metadata, ctrlOpts)
+
+	return cols, errs
 }
 
 // ---------------------------------------------------------------------------
@@ -275,6 +277,7 @@ type backuper interface {
 	ctx context.Context,
 	bases []kopia.IncrementalBase,
 	cs []data.Collection,
+	excluded map[string]struct{},
 	tags map[string]string,
 	buildTreeWithBase bool,
 ) (*kopia.BackupStats, *details.Builder, map[string]path.Path, error)
@@ -338,7 +341,7 @@ func consumeBackupDataCollections(
 	backupID model.StableID,
 	isIncremental bool,
 ) (*kopia.BackupStats, *details.Builder, map[string]path.Path, error) {
-	complete, closer := observe.MessageWithCompletion(ctx, "Backing up data")
+	complete, closer := observe.MessageWithCompletion(ctx, observe.Safe("Backing up data"))
 	defer func() {
 		complete <- struct{}{}
 		close(complete)
@@ -400,7 +403,33 @@ func consumeBackupDataCollections(
 		)
 	}
 
-	return bu.BackupCollections(ctx, bases, cs, tags, isIncremental)
+	kopiaStats, deets, itemsSourcedFromBase, err := bu.BackupCollections(
+		ctx,
+		bases,
+		cs,
+		nil,
+		tags,
+		isIncremental,
+	)
+
+	if kopiaStats.ErrorCount > 0 || kopiaStats.IgnoredErrorCount > 0 {
+		if err != nil {
+			err = errors.Wrapf(
+				err,
+				"kopia snapshot failed with %v catastrophic errors and %v ignored errors",
+				kopiaStats.ErrorCount,
+				kopiaStats.IgnoredErrorCount,
+			)
+		} else {
+			err = errors.Errorf(
+				"kopia snapshot failed with %v catastrophic errors and %v ignored errors",
+				kopiaStats.ErrorCount,
+				kopiaStats.IgnoredErrorCount,
+			)
+		}
+	}
+
+	return kopiaStats, deets, itemsSourcedFromBase, err
 }
 
 func matchesReason(reasons []kopia.Reason, p path.Path) bool {
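The wrap-or-create split above matters because Wrapf returns nil when its input error is nil. Assuming the errors package here is github.com/pkg/errors, a small sketch of why both branches are needed:

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

func main() {
	var err error // nil: kopia reported error counts but returned no error

	// errors.Wrapf(nil, ...) == nil, so wrapping alone would swallow the
	// failure; the Errorf branch manufactures an error instead.
	fmt.Println(errors.Wrapf(err, "kopia snapshot failed") == nil) // true
	fmt.Println(errors.Errorf(
		"kopia snapshot failed with %v catastrophic errors and %v ignored errors", 2, 1))
}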
@@ -531,9 +560,12 @@ func (op *BackupOperation) persistResults(
 ) error {
 	op.Results.StartedAt = started
 	op.Results.CompletedAt = time.Now()
+	op.Results.ReadErrors = opStats.readErr
+	op.Results.WriteErrors = opStats.writeErr
 
 	op.Status = Completed
-	if !opStats.started {
+
+	if opStats.readErr != nil || opStats.writeErr != nil {
 		op.Status = Failed
 
 		return multierror.Append(
@@ -546,9 +578,6 @@ func (op *BackupOperation) persistResults(
 		op.Status = NoData
 	}
 
-	op.Results.ReadErrors = opStats.readErr
-	op.Results.WriteErrors = opStats.writeErr
-
 	op.Results.BytesRead = opStats.k.TotalHashedBytes
 	op.Results.BytesUploaded = opStats.k.TotalUploadedBytes
 	op.Results.ItemsRead = opStats.gc.Successful
@@ -580,6 +609,7 @@ func (op *BackupOperation) createBackupModels(
 		op.Selectors,
 		op.Results.ReadWrites,
 		op.Results.StartAndEndTime,
+		op.Errors,
 	)
 
 	err = op.store.Put(ctx, model.BackupSchema, b)
@@ -153,6 +153,8 @@ func runAndCheckBackup(
 	assert.Less(t, int64(0), bo.Results.BytesRead, "bytes read")
 	assert.Less(t, int64(0), bo.Results.BytesUploaded, "bytes uploaded")
 	assert.Equal(t, 1, bo.Results.ResourceOwners, "count of resource owners")
+	assert.NoError(t, bo.Errors.Err(), "incremental non-recoverable error")
+	assert.Empty(t, bo.Errors.Errs(), "incremental recoverable/iteration errors")
 	assert.NoError(t, bo.Results.ReadErrors, "errors reading data")
 	assert.NoError(t, bo.Results.WriteErrors, "errors writing data")
 	assert.Equal(t, 1, mb.TimesCalled[events.BackupStart], "backup-start events")
@@ -616,6 +618,8 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_exchange() {
 	assert.Greater(t, bo.Results.BytesRead, incBO.Results.BytesRead, "incremental bytes read")
 	assert.Greater(t, bo.Results.BytesUploaded, incBO.Results.BytesUploaded, "incremental bytes uploaded")
 	assert.Equal(t, bo.Results.ResourceOwners, incBO.Results.ResourceOwners, "incremental backup resource owner")
+	assert.NoError(t, incBO.Errors.Err(), "incremental non-recoverable error")
+	assert.Empty(t, incBO.Errors.Errs(), "count incremental recoverable/iteration errors")
 	assert.NoError(t, incBO.Results.ReadErrors, "incremental read errors")
 	assert.NoError(t, incBO.Results.WriteErrors, "incremental write errors")
 	assert.Equal(t, 1, incMB.TimesCalled[events.BackupStart], "incremental backup-start events")
@@ -633,6 +637,8 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_exchangeIncrementals() {
 	ctx, flush := tester.NewContext()
 	defer flush()
 
+	tester.LogTimeOfTest(suite.T())
+
 	var (
 		t    = suite.T()
 		acct = tester.NewM365Account(t)
@@ -655,7 +661,7 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_exchangeIncrementals() {
 	m365, err := acct.M365Config()
 	require.NoError(t, err)
 
-	gc, err := connector.NewGraphConnector(ctx, graph.LargeItemClient(), acct, connector.Users)
+	gc, err := connector.NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), acct, connector.Users)
 	require.NoError(t, err)
 
 	ac, err := api.NewClient(m365)
@@ -803,7 +809,7 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_exchangeIncrementals() {
 		{
 			name: "move an email folder to a subfolder",
 			updateUserData: func(t *testing.T) {
-				// contacts cannot be sufoldered; this is an email-only change
+				// contacts and events cannot be sufoldered; this is an email-only change
 				toContainer := dataset[path.EmailCategory].dests[container1].containerID
 				fromContainer := dataset[path.EmailCategory].dests[container2].containerID
 
@@ -826,23 +832,22 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_exchangeIncrementals() {
 			updateUserData: func(t *testing.T) {
 				for category, d := range dataset {
 					containerID := d.dests[container2].containerID
-					cli := gc.Service.Client().UsersById(suite.user)
 
 					switch category {
 					case path.EmailCategory:
 						require.NoError(
 							t,
-							cli.MailFoldersById(containerID).Delete(ctx, nil),
+							ac.Mail().DeleteContainer(ctx, suite.user, containerID),
 							"deleting an email folder")
 					case path.ContactsCategory:
 						require.NoError(
 							t,
-							cli.ContactFoldersById(containerID).Delete(ctx, nil),
+							ac.Contacts().DeleteContainer(ctx, suite.user, containerID),
 							"deleting a contacts folder")
 					case path.EventsCategory:
 						require.NoError(
 							t,
-							cli.CalendarsById(containerID).Delete(ctx, nil),
+							ac.Events().DeleteContainer(ctx, suite.user, containerID),
 							"deleting a calendar")
 					}
 				}
@@ -923,19 +928,19 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_exchangeIncrementals() {
 						require.NoError(t, err, "updating contact folder name")
 
 					case path.EventsCategory:
-						ccf := cli.CalendarsById(containerID)
+						cbi := cli.CalendarsById(containerID)
 
-						body, err := ccf.Get(ctx, nil)
+						body, err := cbi.Get(ctx, nil)
 						require.NoError(t, err, "getting calendar")
 
 						body.SetName(&containerRename)
-						_, err = ccf.Patch(ctx, body, nil)
+						_, err = cbi.Patch(ctx, body, nil)
 						require.NoError(t, err, "updating calendar name")
 					}
 				}
 			},
-			itemsRead:    0,
-			itemsWritten: 4,
+			itemsRead:    0, // containers are not counted as reads
+			itemsWritten: 4, // two items per category
 		},
 		{
 			name: "add a new item",
@@ -1038,6 +1043,8 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_exchangeIncrementals() {
 			// +4 on read/writes to account for metadata: 1 delta and 1 path for each type.
 			assert.Equal(t, test.itemsWritten+4, incBO.Results.ItemsWritten, "incremental items written")
 			assert.Equal(t, test.itemsRead+4, incBO.Results.ItemsRead, "incremental items read")
+			assert.NoError(t, incBO.Errors.Err(), "incremental non-recoverable error")
+			assert.Empty(t, incBO.Errors.Errs(), "incremental recoverable/iteration errors")
 			assert.NoError(t, incBO.Results.ReadErrors, "incremental read errors")
 			assert.NoError(t, incBO.Results.WriteErrors, "incremental write errors")
 			assert.Equal(t, 1, incMB.TimesCalled[events.BackupStart], "incremental backup-start events")
@@ -95,6 +95,7 @@ func (mbu mockBackuper) BackupCollections(
 	ctx context.Context,
 	bases []kopia.IncrementalBase,
 	cs []data.Collection,
+	excluded map[string]struct{},
 	tags map[string]string,
 	buildTreeWithBase bool,
 ) (*kopia.BackupStats, *details.Builder, map[string]path.Path, error) {
@@ -372,7 +373,6 @@ func (suite *BackupOpSuite) TestBackupOperation_PersistResults() {
 			expectStatus: Completed,
 			expectErr:    assert.NoError,
 			stats: backupStats{
-				started:       true,
 				resourceCount: 1,
 				k: &kopia.BackupStats{
 					TotalFileCount: 1,
@@ -388,7 +388,7 @@ func (suite *BackupOpSuite) TestBackupOperation_PersistResults() {
 			expectStatus: Failed,
 			expectErr:    assert.Error,
 			stats: backupStats{
-				started: false,
+				readErr: assert.AnError,
 				k:       &kopia.BackupStats{},
 				gc:      &support.ConnectorOperationStatus{},
 			},
@@ -397,9 +397,8 @@ func (suite *BackupOpSuite) TestBackupOperation_PersistResults() {
 			expectStatus: NoData,
 			expectErr:    assert.NoError,
 			stats: backupStats{
-				started: true,
-				k:       &kopia.BackupStats{},
-				gc:      &support.ConnectorOperationStatus{},
+				k:  &kopia.BackupStats{},
+				gc: &support.ConnectorOperationStatus{},
 			},
 		},
 	}
@@ -421,11 +420,11 @@ func (suite *BackupOpSuite) TestBackupOperation_PersistResults() {
 
 			assert.Equal(t, test.expectStatus.String(), op.Status.String(), "status")
 			assert.Equal(t, test.stats.gc.Successful, op.Results.ItemsRead, "items read")
-			assert.Equal(t, test.stats.readErr, op.Results.ReadErrors, "read errors")
 			assert.Equal(t, test.stats.k.TotalFileCount, op.Results.ItemsWritten, "items written")
 			assert.Equal(t, test.stats.k.TotalHashedBytes, op.Results.BytesRead, "bytes read")
 			assert.Equal(t, test.stats.k.TotalUploadedBytes, op.Results.BytesUploaded, "bytes written")
 			assert.Equal(t, test.stats.resourceCount, op.Results.ResourceOwners, "resource owners")
+			assert.Equal(t, test.stats.readErr, op.Results.ReadErrors, "read errors")
 			assert.Equal(t, test.stats.writeErr, op.Results.WriteErrors, "write errors")
 			assert.Equal(t, now, op.Results.StartedAt, "started at")
 			assert.Less(t, now, op.Results.CompletedAt, "completed at")
@@ -13,6 +13,7 @@ import (
 	"github.com/alcionai/corso/src/internal/observe"
 	"github.com/alcionai/corso/src/pkg/account"
 	"github.com/alcionai/corso/src/pkg/control"
+	"github.com/alcionai/corso/src/pkg/fault"
 	"github.com/alcionai/corso/src/pkg/selectors"
 	"github.com/alcionai/corso/src/pkg/store"
 )
@@ -52,9 +53,11 @@ const (
 // Specific processes (eg: backups, restores, etc) are expected to wrap operation
 // with process specific details.
 type operation struct {
-	CreatedAt time.Time       `json:"createdAt"` // datetime of the operation's creation
-	Options   control.Options `json:"options"`
-	Status    opStatus        `json:"status"`
+	CreatedAt time.Time       `json:"createdAt"`
+	Errors    *fault.Errors   `json:"errors"`
+	Options   control.Options `json:"options"`
+	Status    opStatus        `json:"status"`
 
 	bus   events.Eventer
 	kopia *kopia.Wrapper
@@ -69,11 +72,14 @@ func newOperation(
 ) operation {
 	return operation{
 		CreatedAt: time.Now(),
+		Errors:    fault.New(opts.FailFast),
 		Options:   opts,
-		bus:       bus,
-		kopia:     kw,
-		store:     sw,
-		Status:    InProgress,
+
+		bus:   bus,
+		kopia: kw,
+		store: sw,
+
+		Status: InProgress,
 	}
 }
 
@@ -95,7 +101,7 @@ func connectToM365(
 	sel selectors.Selector,
 	acct account.Account,
 ) (*connector.GraphConnector, error) {
-	complete, closer := observe.MessageWithCompletion(ctx, "Connecting to M365")
+	complete, closer := observe.MessageWithCompletion(ctx, observe.Safe("Connecting to M365"))
 	defer func() {
 		complete <- struct{}{}
 		close(complete)
@@ -108,7 +114,7 @@ func connectToM365(
 		resource = connector.Sites
 	}
 
-	gc, err := connector.NewGraphConnector(ctx, graph.LargeItemClient(), acct, resource)
+	gc, err := connector.NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), acct, resource)
 	if err != nil {
 		return nil, err
 	}
@@ -44,7 +44,7 @@ type RestoreOperation struct {
 
 // RestoreResults aggregate the details of the results of the operation.
 type RestoreResults struct {
-	stats.Errs
+	stats.Errs // deprecated in place of fault.Errors in the base operation.
 	stats.ReadWrites
 	stats.StartAndEndTime
 }
@@ -89,7 +89,6 @@ type restoreStats struct {
 	gc            *support.ConnectorOperationStatus
 	bytesRead     *stats.ByteCounter
 	resourceCount int
-	started       bool
 	readErr, writeErr error
 
 	// a transient value only used to pair up start-end events.
@@ -143,10 +142,8 @@ func (op *RestoreOperation) Run(ctx context.Context) (restoreDetails *details.De
 		detailsStore,
 	)
 	if err != nil {
-		err = errors.Wrap(err, "restore")
-		opStats.readErr = err
-
-		return nil, err
+		opStats.readErr = errors.Wrap(err, "restore")
+		return nil, opStats.readErr
 	}
 
 	ctx = clues.Add(ctx, "resource_owner", bup.Selector.DiscreteOwner)
@@ -170,18 +167,16 @@ func (op *RestoreOperation) Run(ctx context.Context) (restoreDetails *details.De
 
 	ctx = clues.Add(ctx, "details_paths", len(paths))
 
-	observe.Message(ctx, fmt.Sprintf("Discovered %d items in backup %s to restore", len(paths), op.BackupID))
+	observe.Message(ctx, observe.Safe(fmt.Sprintf("Discovered %d items in backup %s to restore", len(paths), op.BackupID)))
 
-	kopiaComplete, closer := observe.MessageWithCompletion(ctx, "Enumerating items in repository")
+	kopiaComplete, closer := observe.MessageWithCompletion(ctx, observe.Safe("Enumerating items in repository"))
 	defer closer()
 	defer close(kopiaComplete)
 
 	dcs, err := op.kopia.RestoreMultipleItems(ctx, bup.SnapshotID, paths, opStats.bytesRead)
 	if err != nil {
-		err = errors.Wrap(err, "retrieving service data")
-		opStats.readErr = err
-
-		return nil, err
+		opStats.readErr = errors.Wrap(err, "retrieving service data")
+		return nil, opStats.readErr
 	}
 	kopiaComplete <- struct{}{}
 
@@ -196,7 +191,7 @@ func (op *RestoreOperation) Run(ctx context.Context) (restoreDetails *details.De
 		return nil, opStats.readErr
 	}
 
-	restoreComplete, closer := observe.MessageWithCompletion(ctx, "Restoring data")
+	restoreComplete, closer := observe.MessageWithCompletion(ctx, observe.Safe("Restoring data"))
 	defer closer()
 	defer close(restoreComplete)
 
@@ -207,14 +202,11 @@ func (op *RestoreOperation) Run(ctx context.Context) (restoreDetails *details.De
 		op.Destination,
 		dcs)
 	if err != nil {
-		err = errors.Wrap(err, "restoring service data")
-		opStats.writeErr = err
-
-		return nil, err
+		opStats.writeErr = errors.Wrap(err, "restoring service data")
+		return nil, opStats.writeErr
 	}
 	restoreComplete <- struct{}{}
 
-	opStats.started = true
 	opStats.gc = gc.AwaitStatus()
 
 	logger.Ctx(ctx).Debug(gc.PrintableStatus())
@@ -230,10 +222,12 @@ func (op *RestoreOperation) persistResults(
 ) error {
 	op.Results.StartedAt = started
 	op.Results.CompletedAt = time.Now()
+	op.Results.ReadErrors = opStats.readErr
+	op.Results.WriteErrors = opStats.writeErr
 
 	op.Status = Completed
 
-	if !opStats.started {
+	if opStats.readErr != nil || opStats.writeErr != nil {
 		op.Status = Failed
 
 		return multierror.Append(
@@ -246,9 +240,6 @@ func (op *RestoreOperation) persistResults(
 		op.Status = NoData
 	}
 
-	op.Results.ReadErrors = opStats.readErr
-	op.Results.WriteErrors = opStats.writeErr
-
 	op.Results.BytesRead = opStats.bytesRead.NumBytes
 	op.Results.ItemsRead = len(opStats.cs) // TODO: file count, not collection count
 	op.Results.ItemsWritten = opStats.gc.Successful
@@ -309,5 +300,9 @@ func formatDetailsForRestoration(
 		paths[i] = p
 	}
 
+	if errs != nil {
+		return nil, errs
+	}
+
 	return paths, nil
 }
@@ -57,7 +57,6 @@ func (suite *RestoreOpSuite) TestRestoreOperation_PersistResults() {
 			expectStatus: Completed,
 			expectErr:    assert.NoError,
 			stats: restoreStats{
-				started:       true,
 				resourceCount: 1,
 				bytesRead: &stats.ByteCounter{
 					NumBytes: 42,
@@ -73,7 +72,7 @@ func (suite *RestoreOpSuite) TestRestoreOperation_PersistResults() {
 			expectStatus: Failed,
 			expectErr:    assert.Error,
 			stats: restoreStats{
-				started:   false,
+				readErr:   assert.AnError,
 				bytesRead: &stats.ByteCounter{},
 				gc:        &support.ConnectorOperationStatus{},
 			},
@@ -82,7 +81,6 @@ func (suite *RestoreOpSuite) TestRestoreOperation_PersistResults() {
 			expectStatus: NoData,
 			expectErr:    assert.NoError,
 			stats: restoreStats{
-				started:   true,
 				bytesRead: &stats.ByteCounter{},
 				cs:        []data.Collection{},
 				gc:        &support.ConnectorOperationStatus{},
@@ -106,10 +104,10 @@ func (suite *RestoreOpSuite) TestRestoreOperation_PersistResults() {
 
 			assert.Equal(t, test.expectStatus.String(), op.Status.String(), "status")
 			assert.Equal(t, len(test.stats.cs), op.Results.ItemsRead, "items read")
-			assert.Equal(t, test.stats.readErr, op.Results.ReadErrors, "read errors")
 			assert.Equal(t, test.stats.gc.Successful, op.Results.ItemsWritten, "items written")
 			assert.Equal(t, test.stats.bytesRead.NumBytes, op.Results.BytesRead, "resource owners")
 			assert.Equal(t, test.stats.resourceCount, op.Results.ResourceOwners, "resource owners")
+			assert.Equal(t, test.stats.readErr, op.Results.ReadErrors, "read errors")
 			assert.Equal(t, test.stats.writeErr, op.Results.WriteErrors, "write errors")
 			assert.Equal(t, now, op.Results.StartedAt, "started at")
 			assert.Less(t, now, op.Results.CompletedAt, "completed at")
@@ -295,8 +293,10 @@ func (suite *RestoreOpIntegrationSuite) TestRestore_Run() {
 	assert.Less(t, 0, ro.Results.ItemsWritten, "restored items written")
 	assert.Less(t, int64(0), ro.Results.BytesRead, "bytes read")
 	assert.Equal(t, 1, ro.Results.ResourceOwners, "resource Owners")
-	assert.Zero(t, ro.Results.ReadErrors, "errors while reading restore data")
-	assert.Zero(t, ro.Results.WriteErrors, "errors while writing restore data")
+	assert.NoError(t, ro.Errors.Err(), "non-recoverable error")
+	assert.Empty(t, ro.Errors.Errs(), "recoverable errors")
+	assert.NoError(t, ro.Results.ReadErrors, "errors while reading restore data")
+	assert.NoError(t, ro.Results.WriteErrors, "errors while writing restore data")
 	assert.Equal(t, suite.numItems, ro.Results.ItemsWritten, "backup and restore wrote the same num of items")
 	assert.Equal(t, 1, mb.TimesCalled[events.RestoreStart], "restore-start events")
 	assert.Equal(t, 1, mb.TimesCalled[events.RestoreEnd], "restore-end events")
@@ -78,6 +78,7 @@ func (ss *streamStore) WriteBackupDetails(
 		nil,
 		[]data.Collection{dc},
 		nil,
+		nil,
 		false,
 	)
 	if err != nil {
@@ -29,11 +29,14 @@ func NewPrefixedS3Storage(t *testing.T) storage.Storage {
 	cfg, err := readTestConfig()
 	require.NoError(t, err, "configuring storage from test file")
 
+	prefix := testRepoRootPrefix + t.Name() + "-" + now
+	t.Logf("testing at s3 bucket [%s] prefix [%s]", cfg[TestCfgBucket], prefix)
+
 	st, err := storage.NewStorage(
 		storage.ProviderS3,
 		storage.S3Config{
 			Bucket: cfg[TestCfgBucket],
-			Prefix: testRepoRootPrefix + t.Name() + "-" + now,
+			Prefix: prefix,
 		},
 		storage.CommonConfig{
 			Corso: credentials.GetCorso(),
@@ -1,3 +1,29 @@
 package version
 
+import (
+	"os/exec"
+	"strings"
+)
+
 var Version = "dev"
+
+func CurrentVersion() string {
+	if len(Version) == 0 || Version == "dev" {
+		c, b := exec.Command("git", "describe", "--tag"), new(strings.Builder)
+		c.Stdout = b
+
+		if err := c.Run(); err != nil {
+			return "dev"
+		}
+
+		s := strings.TrimRight(b.String(), "\n")
+
+		if len(s) != 0 {
+			return "dev-" + s
+		}
+
+		return "dev"
+	}
+
+	return Version
+}
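A sketch of what the fallback above yields in practice; outputs depend on the local git state, and the tag name shown is invented:

package version

import "fmt"

// exampleCurrentVersion is illustrative only. Release builds presumably set
// Version at link time, so CurrentVersion returns it unchanged; dev builds
// shell out to `git describe --tag` and prefix the result.
func exampleCurrentVersion() {
	Version = "v0.2.0"
	fmt.Println(CurrentVersion()) // "v0.2.0"

	Version = "dev"
	// in a tagged git checkout: something like "dev-v0.1.0-5-gabc1234"
	// with git or tags unavailable: "dev"
	fmt.Println(CurrentVersion())
}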
@ -10,6 +10,7 @@ import (
|
|||||||
"github.com/alcionai/corso/src/internal/connector/support"
|
"github.com/alcionai/corso/src/internal/connector/support"
|
||||||
"github.com/alcionai/corso/src/internal/model"
|
"github.com/alcionai/corso/src/internal/model"
|
||||||
"github.com/alcionai/corso/src/internal/stats"
|
"github.com/alcionai/corso/src/internal/stats"
|
||||||
|
"github.com/alcionai/corso/src/pkg/fault"
|
||||||
"github.com/alcionai/corso/src/pkg/selectors"
|
"github.com/alcionai/corso/src/pkg/selectors"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -31,8 +32,11 @@ type Backup struct {
|
|||||||
// Selector used in this operation
|
// Selector used in this operation
|
||||||
Selector selectors.Selector `json:"selectors"`
|
Selector selectors.Selector `json:"selectors"`
|
||||||
|
|
||||||
|
// Errors contains all errors aggregated during a backup operation.
|
||||||
|
Errors fault.ErrorsData `json:"errors"`
|
||||||
|
|
||||||
// stats are embedded so that the values appear as top-level properties
|
// stats are embedded so that the values appear as top-level properties
|
||||||
stats.Errs
|
stats.Errs // Deprecated, replaced with Errors.
|
||||||
stats.ReadWrites
|
stats.ReadWrites
|
||||||
stats.StartAndEndTime
|
stats.StartAndEndTime
|
||||||
}
|
}
|
||||||
@ -46,6 +50,7 @@ func New(
 	selector selectors.Selector,
 	rw stats.ReadWrites,
 	se stats.StartAndEndTime,
+	errs *fault.Errors,
 ) *Backup {
 	return &Backup{
 		BaseModel: model.BaseModel{
@ -59,6 +64,7 @@ func New(
 		DetailsID:       detailsID,
 		Status:          status,
 		Selector:        selector,
+		Errors:          errs.Data(),
 		ReadWrites:      rw,
 		StartAndEndTime: se,
 	}
@ -102,7 +108,7 @@ type Printable struct {
 func (b Backup) MinimumPrintable() any {
 	return Printable{
 		ID:         b.ID,
-		ErrorCount: support.GetNumberOfErrors(b.ReadErrors) + support.GetNumberOfErrors(b.WriteErrors),
+		ErrorCount: b.errorCount(),
 		StartedAt:  b.StartedAt,
 		Status:     b.Status,
 		Version:    "0",
@ -125,8 +131,7 @@ func (b Backup) Headers() []string {
 // Values returns the values matching the Headers list for printing
 // out to a terminal in a columnar display.
 func (b Backup) Values() []string {
-	errCount := support.GetNumberOfErrors(b.ReadErrors) + support.GetNumberOfErrors(b.WriteErrors)
-	status := fmt.Sprintf("%s (%d errors)", b.Status, errCount)
+	status := fmt.Sprintf("%s (%d errors)", b.Status, b.errorCount())
 
 	return []string{
 		common.FormatTabularDisplayTime(b.StartedAt),
@ -135,3 +140,23 @@ func (b Backup) Values() []string {
 		b.Selector.DiscreteOwner,
 	}
 }
+
+func (b Backup) errorCount() int {
+	var errCount int
+
+	// current tracking
+	if b.ReadErrors != nil || b.WriteErrors != nil {
+		return support.GetNumberOfErrors(b.ReadErrors) + support.GetNumberOfErrors(b.WriteErrors)
+	}
+
+	// future tracking
+	if b.Errors.Err != nil || len(b.Errors.Errs) > 0 {
+		if b.Errors.Err != nil {
+			errCount++
+		}
+
+		errCount += len(b.Errors.Errs)
+	}
+
+	return errCount
+}
@ -13,6 +13,7 @@ import (
 	"github.com/alcionai/corso/src/internal/model"
 	"github.com/alcionai/corso/src/internal/stats"
 	"github.com/alcionai/corso/src/pkg/backup"
+	"github.com/alcionai/corso/src/pkg/fault"
 	"github.com/alcionai/corso/src/pkg/selectors"
 )
 
@ -40,6 +41,9 @@ func stubBackup(t time.Time) backup.Backup {
 		DetailsID: "details",
 		Status:    "status",
 		Selector:  sel.Selector,
+		Errors: fault.ErrorsData{
+			Errs: []error{errors.New("read"), errors.New("write")},
+		},
 		Errs: stats.Errs{
 			ReadErrors:  errors.New("1"),
 			WriteErrors: errors.New("1"),
@ -6,6 +6,7 @@ import (
 	"path/filepath"
 	"time"
 
+	"github.com/alcionai/clues"
 	"github.com/spf13/cobra"
 	"github.com/spf13/pflag"
 	"go.uber.org/zap"

@ -264,7 +265,7 @@ func Ctx(ctx context.Context) *zap.SugaredLogger {
 		return singleton(levelOf(llFlag), defaultLogLocation())
 	}
 
-	return l.(*zap.SugaredLogger)
+	return l.(*zap.SugaredLogger).With(clues.Slice(ctx)...)
 }
 
 // transforms the llevel flag value to a logLevel enum
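With this change, any key-value pairs stashed on the context via clues ride along on every log line pulled from that context. A hypothetical call site; `clues.Add` is assumed to attach values to the context, and the function below is illustrative, not part of this diff:

```go
package example

import (
	"context"

	"github.com/alcionai/clues"

	"github.com/alcionai/corso/src/pkg/logger"
)

// logWithContext shows the intended effect: Ctx decorates the returned
// logger with clues.Slice(ctx), so the log line below carries backup_id
// as a structured field.
func logWithContext(ctx context.Context, backupID string) {
	ctx = clues.Add(ctx, "backup_id", backupID)
	logger.Ctx(ctx).Infow("starting backup")
}
```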
@ -154,7 +154,7 @@ func Connect(
 	// their output getting clobbered (#1720)
 	defer observe.Complete()
 
-	complete, closer := observe.MessageWithCompletion(ctx, "Connecting to repository")
+	complete, closer := observe.MessageWithCompletion(ctx, observe.Safe("Connecting to repository"))
 	defer closer()
 	defer close(complete)
 
@ -114,6 +114,7 @@ func runLoadTest(
 	prefix, service string,
 	usersUnderTest []string,
 	bupSel, restSel selectors.Selector,
+	runRestore bool,
 ) {
 	//revive:enable:context-as-argument
 	t.Run(prefix+"_load_test_main", func(t *testing.T) {
@ -126,12 +127,33 @@ func runLoadTest(
 		runBackupListLoadTest(t, ctx, r, service, bid)
 		runBackupDetailsLoadTest(t, ctx, r, service, bid, usersUnderTest)
 
+		runRestoreLoadTest(t, ctx, r, prefix, service, bid, usersUnderTest, restSel, b, runRestore)
+	})
+}
+
+//revive:disable:context-as-argument
+func runRestoreLoadTest(
+	t *testing.T,
+	ctx context.Context,
+	r repository.Repository,
+	prefix, service, backupID string,
+	usersUnderTest []string,
+	restSel selectors.Selector,
+	bup operations.BackupOperation,
+	runRestore bool,
+) {
+	//revive:enable:context-as-argument
+	t.Run(prefix+"_load_test_restore", func(t *testing.T) {
+		if !runRestore {
+			t.Skip("restore load test is toggled off")
+		}
+
 		dest := tester.DefaultTestRestoreDestination()
 
-		rst, err := r.NewRestore(ctx, bid, restSel, dest)
+		rst, err := r.NewRestore(ctx, backupID, restSel, dest)
 		require.NoError(t, err)
 
-		runRestoreLoadTest(t, ctx, rst, service, b.Results.ItemsWritten, usersUnderTest)
+		doRestoreLoadTest(t, ctx, rst, service, bup.Results.ItemsWritten, usersUnderTest)
 	})
 }
 
@ -162,8 +184,10 @@ func runBackupLoadTest(
 		assert.Less(t, 0, b.Results.ItemsWritten, "items written")
 		assert.Less(t, int64(0), b.Results.BytesUploaded, "bytes uploaded")
 		assert.Equal(t, len(users), b.Results.ResourceOwners, "resource owners")
-		assert.Zero(t, b.Results.ReadErrors, "read errors")
-		assert.Zero(t, b.Results.WriteErrors, "write errors")
+		assert.NoError(t, b.Errors.Err(), "non-recoverable error")
+		assert.Empty(t, b.Errors.Errs(), "recoverable errors")
+		assert.NoError(t, b.Results.ReadErrors, "read errors")
+		assert.NoError(t, b.Results.WriteErrors, "write errors")
 	})
 }
 
@ -240,7 +264,7 @@ func runBackupDetailsLoadTest(
 }
 
 //revive:disable:context-as-argument
-func runRestoreLoadTest(
+func doRestoreLoadTest(
 	t *testing.T,
 	ctx context.Context,
 	r operations.RestoreOperation,
@ -268,8 +292,10 @@ func runRestoreLoadTest(
 		assert.Less(t, 0, r.Results.ItemsRead, "items read")
 		assert.Less(t, 0, r.Results.ItemsWritten, "items written")
 		assert.Equal(t, len(users), r.Results.ResourceOwners, "resource owners")
-		assert.Zero(t, r.Results.ReadErrors, "read errors")
-		assert.Zero(t, r.Results.WriteErrors, "write errors")
+		assert.NoError(t, r.Errors.Err(), "non-recoverable error")
+		assert.Empty(t, r.Errors.Errs(), "recoverable errors")
+		assert.NoError(t, r.Results.ReadErrors, "read errors")
+		assert.NoError(t, r.Results.WriteErrors, "write errors")
 		assert.Equal(t, expectItemCount, r.Results.ItemsWritten, "backup and restore wrote the same count of items")
 
 		ensureAllUsersInDetails(t, users, ds, "restore", name)
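The asserts above lean on the new fault package's split between a single non-recoverable error and a list of recoverable ones. A hedged sketch of the shape the `Err()`/`Errs()` accessors imply; illustrative only, not the actual `pkg/fault` source:

```go
package fault

// Errors aggregates failures gathered during a backup or restore.
// Sketch only; the real type in pkg/fault may differ.
type Errors struct {
	err  error   // non-recoverable: the operation cannot proceed
	errs []error // recoverable: recorded and skipped, the operation continues
}

// Err returns the non-recoverable failure, if any.
func (e *Errors) Err() error { return e.err }

// Errs returns the recoverable failures gathered so far.
func (e *Errors) Errs() []error { return e.errs }
```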
@ -408,6 +434,7 @@ func (suite *RepositoryLoadTestExchangeSuite) TestExchange() {
 		"all_users", "exchange",
 		suite.usersUnderTest,
 		sel, sel, // same selection for backup and restore
+		true,
 	)
 }
 
@ -456,6 +483,7 @@ func (suite *RepositoryIndividualLoadTestExchangeSuite) TestExchange() {
 		"single_user", "exchange",
 		suite.usersUnderTest,
 		sel, sel, // same selection for backup and restore
+		true,
 	)
 }
 
@ -504,6 +532,7 @@ func (suite *RepositoryLoadTestOneDriveSuite) TestOneDrive() {
 		"all_users", "one_drive",
 		suite.usersUnderTest,
 		sel, sel, // same selection for backup and restore
+		false,
 	)
 }
 
@ -523,7 +552,6 @@ func TestRepositoryIndividualLoadTestOneDriveSuite(t *testing.T) {
 
 func (suite *RepositoryIndividualLoadTestOneDriveSuite) SetupSuite() {
 	t := suite.T()
-	t.Skip("not running onedrive load tests atm")
 	t.Parallel()
 	suite.ctx, suite.repo, suite.acct, suite.st = initM365Repo(t)
 	suite.usersUnderTest = singleUserSet(t)
@ -548,6 +576,7 @@ func (suite *RepositoryIndividualLoadTestOneDriveSuite) TestOneDrive() {
 		"single_user", "one_drive",
 		suite.usersUnderTest,
 		sel, sel, // same selection for backup and restore
+		false,
 	)
 }
 
@ -596,6 +625,7 @@ func (suite *RepositoryLoadTestSharePointSuite) TestSharePoint() {
 		"all_sites", "share_point",
 		suite.sitesUnderTest,
 		sel, sel, // same selection for backup and restore
+		false,
 	)
 }
 
@ -640,5 +670,6 @@ func (suite *RepositoryIndividualLoadTestSharePointSuite) TestSharePoint() {
 		"single_site", "share_point",
 		suite.sitesUnderTest,
 		sel, sel, // same selection for backup and restore
+		false,
 	)
 }
@ -21,7 +21,7 @@ type User struct {
 // Users returns a list of users in the specified M365 tenant
 // TODO: Implement paging support
 func Users(ctx context.Context, m365Account account.Account) ([]*User, error) {
-	gc, err := connector.NewGraphConnector(ctx, graph.LargeItemClient(), m365Account, connector.Users)
+	gc, err := connector.NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), m365Account, connector.Users)
 	if err != nil {
 		return nil, errors.Wrap(err, "could not initialize M365 graph connection")
 	}
@ -77,7 +77,7 @@ func UserPNs(ctx context.Context, m365Account account.Account) ([]string, error)
 
 // SiteURLs returns a list of SharePoint site WebURLs in the specified M365 tenant
 func SiteURLs(ctx context.Context, m365Account account.Account) ([]string, error) {
-	gc, err := connector.NewGraphConnector(ctx, graph.LargeItemClient(), m365Account, connector.Sites)
+	gc, err := connector.NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), m365Account, connector.Sites)
 	if err != nil {
 		return nil, errors.Wrap(err, "could not initialize M365 graph connection")
 	}
@ -87,7 +87,7 @@ func SiteURLs(ctx context.Context, m365Account account.Account) ([]string, error
 
 // SiteURLs returns a list of SharePoint sites IDs in the specified M365 tenant
 func SiteIDs(ctx context.Context, m365Account account.Account) ([]string, error) {
-	gc, err := connector.NewGraphConnector(ctx, graph.LargeItemClient(), m365Account, connector.Sites)
+	gc, err := connector.NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), m365Account, connector.Sites)
 	if err != nil {
 		return nil, errors.Wrap(err, "could not initialize M365 graph connection")
 	}
@ -1,4 +1,4 @@
-FROM amazonlinux
+FROM amazonlinux:2
 RUN yum install -y unzip
 RUN curl -o daemon.zip https://s3.us-east-2.amazonaws.com/aws-xray-assets.us-east-2/xray-daemon/aws-xray-daemon-linux-3.x.zip
 RUN unzip daemon.zip && cp xray /usr/bin/xray
@ -19,10 +19,12 @@ If you don't have Go available, you can find installation instructions [here](ht
 This will generate a binary named `corso` in the directory where you run the build.
 
 :::note
-You can download binary artifacts of the latest commit from GitHub by
-navigating to the "Summary" page of the `Build/Release Corso` CI job
-that was run for that commit.
-You will find the artifacts at the bottom of the page.
+Prebuilt binary artifacts of the latest commit are available on GitHub.
+You can access them by navigating to the "Summary" page of
+the [`Build/Release Corso` CI job](https://github.com/alcionai/corso/actions/workflows/ci.yml?query=branch%3Amain)
+that was run for the latest commit on the `main` branch.
+The downloads will be available in the "Artifacts" section towards the
+bottom of the page.
 :::
 
 ### Building via Docker
website/docs/setup/fault-tolerance.md (new file, 48 lines)
@ -0,0 +1,48 @@
+# Fault tolerance
+
+Given the millions of objects found in a typical Microsoft 365 tenant,
+Corso is optimized for high-performance processing, hardened to
+tolerate transient failures and, most importantly, able to restart backups.
+
+Corso’s fault-tolerance architecture is motivated by the variable
+performance and throttling of Microsoft’s Graph API. Corso follows
+Microsoft’s recommended best practices (for example, [correctly decorating API
+traffic](https://learn.microsoft.com/en-us/sharepoint/dev/general-development/how-to-avoid-getting-throttled-or-blocked-in-sharepoint-online#how-to-decorate-your-http-traffic))
+and, in addition, implements a number of optimizations to improve
+backup and restore reliability.
+
+## Recovery from transient failures
+
+At the HTTP layer, Corso will retry requests (after an HTTP timeout,
+for example) and will respect the Graph API’s directives, such as the
+`retry-after` header, to back off when needed. This allows backups to
+succeed in the face of transient or temporary failures.
+
+## Restarting from permanent API failures
+
+The Graph API can, for internal reasons, exhibit extended periods of
+failures for particular Graph objects. In this scenario, bounded retries
+will be ineffective. Unless invoked with the
+fail-fast option, Corso will skip over these failing objects. For
+backups, it will move forward with backing up other objects belonging
+to the user and, for restores, it will continue trying to restore
+any remaining objects. If a multi-user backup is in progress (via `*`
+or by specifying multiple users with the `--user` argument), Corso will
+also continue processing backups for the remaining users. In both
+cases, Corso will exit with a non-zero exit code to reflect incomplete
+backups or restores.
+
+On subsequent backup attempts, Corso will try to
+minimize the work involved. If the previous backup was successful and
+Corso’s stored state tokens haven’t expired, it will use [delta
+queries](https://learn.microsoft.com/en-us/graph/delta-query-overview),
+wherever supported, to perform incremental backups.
+
+If the previous backup for a user had resulted in a failure, Corso
+uses a variety of fallback mechanisms to reduce the amount of data
+downloaded and the number of objects enumerated. For example, with
+OneDrive, Corso won’t redo downloads of data from Microsoft 365 or
+uploads of data to the Corso repository if it had successfully backed
+up that OneDrive file as part of a previously incomplete and failed
+backup. Even if the Graph API doesn’t allow Corso to skip
+downloading data, Corso can still skip re-uploading it to the repository.
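The transient-failure handling described in this new page reduces to bounded retries with server-directed backoff. A minimal Go sketch of that general pattern; illustrative only, not Corso's actual HTTP stack (retrying requests that carry bodies needs more care than shown here):

```go
package graphretry

import (
	"net/http"
	"strconv"
	"time"
)

// doWithRetry issues req up to maxTries times, sleeping between
// attempts. It prefers the server's Retry-After directive over its
// own exponential backoff. Sketch only.
func doWithRetry(c *http.Client, req *http.Request, maxTries int) (*http.Response, error) {
	var (
		resp *http.Response
		err  error
	)

	backoff := time.Second

	for i := 0; i < maxTries; i++ {
		resp, err = c.Do(req)
		// Done unless we hit a transport error, throttling, or a 5xx.
		if err == nil && resp.StatusCode != http.StatusTooManyRequests && resp.StatusCode < 500 {
			return resp, nil
		}

		wait := backoff

		if resp != nil {
			if ra := resp.Header.Get("Retry-After"); ra != "" {
				if secs, convErr := strconv.Atoi(ra); convErr == nil {
					wait = time.Duration(secs) * time.Second
				}
			}

			resp.Body.Close()
		}

		time.Sleep(wait)
		backoff *= 2 // grow the fallback delay between attempts
	}

	return resp, err
}
```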
@ -10,9 +10,15 @@ import TabItem from '@theme/TabItem';
 import TOCInline from '@theme/TOCInline';
 import {Version} from '@site/src/corsoEnv';
 
-A Corso [repository](../concepts#corso-concepts) stores encrypted copies of your backup data. Corso uses
+A Corso [repository](../concepts#corso-concepts) stores encrypted copies of a Microsoft 365 tenant's
+backup data. Each repository is configured to store data in an object storage bucket and, optionally,
+a user-specified prefix within the bucket. A repository is only meant to store a single tenant's data,
+but a single object storage bucket can contain multiple repositories if unique `--prefix` options are
+specified when initializing a repository.
+
+Within a repository, Corso uses
 AES256-GCM-HMAC-SHA256 to encrypt data at rest using keys that are derived from the repository passphrase.
-Data in flight is encrypted via TLS.
+Data in flight to and from the repository is encrypted via TLS.
 
 Repositories are supported on the following object storage systems:
 
@ -59,7 +59,7 @@ const config = {
         filename: 'sitemap.xml',
       },
       gtag: {
-        trackingID: 'G-YXBFPQZ05N',
+        trackingID: 'GTM-KM3XWPV',
       },
       theme: {
         customCss: require.resolve('./src/css/custom.scss'),
website/package-lock.json (generated, 1401 lines changed)
File diff suppressed because it is too large
@ -14,10 +14,10 @@
     "write-heading-ids": "docusaurus write-heading-ids"
   },
   "dependencies": {
-    "@docusaurus/core": "2.2.0",
-    "@docusaurus/plugin-google-gtag": "^2.2.0",
-    "@docusaurus/preset-classic": "2.2.0",
-    "@loadable/component": "^5.15.2",
+    "@docusaurus/core": "2.3.0",
+    "@docusaurus/plugin-google-gtag": "^2.3.0",
+    "@docusaurus/preset-classic": "2.3.0",
+    "@loadable/component": "^5.15.3",
     "@mdx-js/react": "^1.6.22",
     "animate.css": "^4.1.1",
     "clsx": "^1.2.1",

@ -35,8 +35,8 @@
     "wow.js": "^1.2.2"
   },
   "devDependencies": {
-    "@docusaurus/module-type-aliases": "2.2.0",
-    "@iconify/react": "^4.0.1",
+    "@docusaurus/module-type-aliases": "2.3.0",
+    "@iconify/react": "^4.1.0",
     "autoprefixer": "^10.4.13",
     "postcss": "^8.4.21",
     "tailwindcss": "^3.2.4"
@ -19,8 +19,8 @@ const sidebars = {
     'quickstart',
     {
       type: 'category',
-      label: 'Corso setup',
-      items: ['setup/concepts', 'setup/download', 'setup/m365-access', 'setup/configuration', 'setup/repos'],
+      label: 'Usage',
+      items: ['setup/concepts', 'setup/download', 'setup/m365-access', 'setup/configuration', 'setup/repos', 'setup/fault-tolerance'],
     },
     {
       type: 'category',
@ -36,4 +36,5 @@ Atlassian
 SLAs
 runbooks
 stdout
 stderr
+backoff