Merge branch 'main' of https://github.com/alcionai/corso into teamsDiscovery
commit 8e35c81197
72  .github/actions/backup-restore-test/action.yml (vendored)
@ -72,12 +72,78 @@ runs:

        cat /tmp/corsologs

    - name: Check ${{ inputs.service }} ${{ inputs.kind }}
    - name: Check restore ${{ inputs.service }} ${{ inputs.kind }}
      shell: bash
      working-directory: src
      env:
        SANITY_RESTORE_FOLDER: ${{ steps.restore.outputs.result }}
        SANITY_RESTORE_SERVICE: ${{ inputs.service }}
        SANITY_TEST_KIND: restore
        SANITY_TEST_FOLDER: ${{ steps.restore.outputs.result }}
        SANITY_TEST_SERVICE: ${{ inputs.service }}
        TEST_DATA: ${{ inputs.test-folder }}
        BASE_BACKUP: ${{ inputs.base-backup }}
      run: |
        CORSO_LOG_FILE=${{ inputs.log-dir }}/gotest-validate-${{ inputs.service }}-${{inputs.kind }}.log
        ./sanity-test

    - name: Export ${{ inputs.service }} ${{ inputs.kind }}
      id: export
      shell: bash
      working-directory: src
      if: ${{ inputs.service == 'onedrive' || inputs.service == 'sharepoint' }}
      run: |
        set -euo pipefail
        CORSO_LOG_FILE=${{ inputs.log-dir }}/gotest-restore-${{ inputs.service }}-${{inputs.kind }}.log
        ./corso export '${{ inputs.service }}' \
          /tmp/export-${{ inputs.service }}-${{inputs.kind }} \
          --no-stats \
          --hide-progress \
          ${{ inputs.export-args }} \
          --backup '${{ steps.backup.outputs.result }}'

        cat /tmp/corsologs

    - name: Check export ${{ inputs.service }} ${{ inputs.kind }}
      shell: bash
      working-directory: src
      if: ${{ inputs.service == 'onedrive' || inputs.service == 'sharepoint' }}
      env:
        SANITY_TEST_KIND: export
        SANITY_TEST_FOLDER: /tmp/export-${{ inputs.service }}-${{inputs.kind }}
        SANITY_TEST_SERVICE: ${{ inputs.service }}
        TEST_DATA: ${{ inputs.test-folder }}
        BASE_BACKUP: ${{ inputs.base-backup }}
      run: |
        CORSO_LOG_FILE=${{ inputs.log-dir }}/gotest-validate-${{ inputs.service }}-${{inputs.kind }}.log
        ./sanity-test

    - name: Export archive ${{ inputs.service }} ${{ inputs.kind }}
      id: export-archive
      shell: bash
      working-directory: src
      if: ${{ inputs.service == 'onedrive' }} # Export only available for OneDrive
      run: |
        set -euo pipefail
        CORSO_LOG_FILE=${{ inputs.log-dir }}/gotest-restore-${{ inputs.service }}-${{inputs.kind }}.log
        ./corso export '${{ inputs.service }}' \
          /tmp/export-${{ inputs.service }}-${{inputs.kind }}-archive \
          --no-stats \
          --hide-progress \
          --archive \
          ${{ inputs.export-args }} \
          --backup '${{ steps.backup.outputs.result }}'

        unzip /tmp/export-${{ inputs.service }}-${{inputs.kind }}-archive/*.zip \
          -d /tmp/export-${{ inputs.service }}-${{inputs.kind }}-unzipped
        cat /tmp/corsologs

    - name: Check archive export ${{ inputs.service }} ${{ inputs.kind }}
      shell: bash
      working-directory: src
      if: ${{ inputs.service == 'onedrive' }}
      env:
        SANITY_TEST_KIND: export
        SANITY_TEST_FOLDER: /tmp/export-${{ inputs.service }}-${{inputs.kind }}-unzipped
        SANITY_TEST_SERVICE: ${{ inputs.service }}
        TEST_DATA: ${{ inputs.test-folder }}
        BASE_BACKUP: ${{ inputs.base-backup }}
      run: |

@ -14,6 +14,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- SharePoint document libraries deleted after the last backup can now be restored.
- Restore requires the protected resource to have access to the service being restored.

### Added
- Added option to export data from OneDrive and SharePoint backups as individual files or as a single zip file.

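As an illustration of the new option (reusing the placeholder backup id from the command examples added in this change), the same backup can be exported either as individual files or as one zip archive:

corso export onedrive my-exports --backup 1234abcd-12ab-cd34-56de-1234abcd
corso export onedrive my-exports --backup 1234abcd-12ab-cd34-56de-1234abcd --archive
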
## [v0.11.1] (beta) - 2023-07-20

### Fixed

@ -13,6 +13,7 @@ import (
	"github.com/alcionai/corso/src/internal/tester/tconfig"
	"github.com/alcionai/corso/src/pkg/account"
	"github.com/alcionai/corso/src/pkg/control"
	ctrlRepo "github.com/alcionai/corso/src/pkg/control/repository"
	"github.com/alcionai/corso/src/pkg/repository"
	"github.com/alcionai/corso/src/pkg/storage"
	"github.com/alcionai/corso/src/pkg/storage/testdata"
@ -47,7 +48,12 @@ func prepM365Test(
	vpr, cfgFP := tconfig.MakeTempTestConfigClone(t, force)
	ctx = config.SetViper(ctx, vpr)

	repo, err := repository.Initialize(ctx, acct, st, control.DefaultOptions())
	repo, err := repository.Initialize(
		ctx,
		acct,
		st,
		control.DefaultOptions(),
		ctrlRepo.Retention{})
	require.NoError(t, err, clues.ToCore(err))

	return acct, st, repo, vpr, recorder, cfgFP

@ -11,6 +11,7 @@ import (

	"github.com/alcionai/corso/src/cli/backup"
	"github.com/alcionai/corso/src/cli/config"
	"github.com/alcionai/corso/src/cli/export"
	"github.com/alcionai/corso/src/cli/flags"
	"github.com/alcionai/corso/src/cli/help"
	"github.com/alcionai/corso/src/cli/print"
@ -53,7 +54,7 @@ func preRun(cc *cobra.Command, args []string) error {
	}

	avoidTheseCommands := []string{
		"corso", "env", "help", "backup", "details", "list", "restore", "delete", "repo", "init", "connect",
		"corso", "env", "help", "backup", "details", "list", "restore", "export", "delete", "repo", "init", "connect",
	}

	if len(logger.ResolvedLogFile) > 0 && !slices.Contains(avoidTheseCommands, cc.Use) {
@ -150,6 +151,7 @@ func BuildCommandTree(cmd *cobra.Command) {
	repo.AddCommands(cmd)
	backup.AddCommands(cmd)
	restore.AddCommands(cmd)
	export.AddCommands(cmd)
	help.AddCommands(cmd)
}

108  src/cli/export/export.go (new file)
@ -0,0 +1,108 @@
package export

import (
	"context"
	"errors"

	"github.com/alcionai/clues"
	"github.com/spf13/cobra"

	. "github.com/alcionai/corso/src/cli/print"
	"github.com/alcionai/corso/src/cli/repo"
	"github.com/alcionai/corso/src/cli/utils"
	"github.com/alcionai/corso/src/internal/common/dttm"
	"github.com/alcionai/corso/src/internal/data"
	"github.com/alcionai/corso/src/internal/observe"
	"github.com/alcionai/corso/src/pkg/control"
	"github.com/alcionai/corso/src/pkg/export"
	"github.com/alcionai/corso/src/pkg/selectors"
)

var exportCommands = []func(cmd *cobra.Command) *cobra.Command{
	addOneDriveCommands,
	addSharePointCommands,
}

// AddCommands attaches all `corso export * *` commands to the parent.
func AddCommands(cmd *cobra.Command) {
	exportC := exportCmd()
	cmd.AddCommand(exportC)

	for _, addExportTo := range exportCommands {
		addExportTo(exportC)
	}
}

const exportCommand = "export"

// The export category of commands.
// `corso export [<subcommand>] [<flag>...]`
func exportCmd() *cobra.Command {
	return &cobra.Command{
		Use:   exportCommand,
		Short: "Export your service data",
		Long:  `Export the data stored in one of your M365 services.`,
		RunE:  handleExportCmd,
		Args:  cobra.NoArgs,
	}
}

// Handler for flat calls to `corso export`.
// Produces the same output as `corso export --help`.
func handleExportCmd(cmd *cobra.Command, args []string) error {
	return cmd.Help()
}

func runExport(
	ctx context.Context,
	cmd *cobra.Command,
	args []string,
	ueco utils.ExportCfgOpts,
	sel selectors.Selector,
	backupID, serviceName string,
) error {
	r, _, _, _, err := utils.GetAccountAndConnect(ctx, sel.PathService(), repo.S3Overrides(cmd))
	if err != nil {
		return Only(ctx, err)
	}

	defer utils.CloseRepo(ctx, r)

	exportLocation := args[0]
	if len(exportLocation) == 0 {
		// This should not be possible, but adding it just in case.
		exportLocation = control.DefaultRestoreLocation + dttm.FormatNow(dttm.HumanReadableDriveItem)
	}

	Infof(ctx, "Exporting to folder %s", exportLocation)

	eo, err := r.NewExport(
		ctx,
		backupID,
		sel,
		utils.MakeExportConfig(ctx, ueco))
	if err != nil {
		return Only(ctx, clues.Wrap(err, "Failed to initialize "+serviceName+" export"))
	}

	expColl, err := eo.Run(ctx)
	if err != nil {
		if errors.Is(err, data.ErrNotFound) {
			return Only(ctx, clues.New("Backup or backup details missing for id "+backupID))
		}

		return Only(ctx, clues.Wrap(err, "Failed to run "+serviceName+" export"))
	}

	// It would be better to give a progress bar than a spinner, but we
	// don't have any way of knowing how many files are available as of now.
	diskWriteComplete := observe.MessageWithCompletion(ctx, "Writing data to disk")
	defer close(diskWriteComplete)

	err = export.ConsumeExportCollections(ctx, exportLocation, expColl, eo.Errors)
	if err != nil {
		return Only(ctx, err)
	}

	return nil
}

96  src/cli/export/onedrive.go (new file)
@ -0,0 +1,96 @@
package export

import (
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
	"github.com/spf13/pflag"

	"github.com/alcionai/corso/src/cli/flags"
	"github.com/alcionai/corso/src/cli/utils"
)

// called by export.go to map subcommands to provider-specific handling.
func addOneDriveCommands(cmd *cobra.Command) *cobra.Command {
	var (
		c  *cobra.Command
		fs *pflag.FlagSet
	)

	switch cmd.Use {
	case exportCommand:
		c, fs = utils.AddCommand(cmd, oneDriveExportCmd())

		c.Use = c.Use + " " + oneDriveServiceCommandUseSuffix

		// Flags addition ordering should follow the order we want them to appear in help and docs:
		// More generic (ex: --user) and more frequently used flags take precedence.
		fs.SortFlags = false

		flags.AddBackupIDFlag(c, true)
		flags.AddOneDriveDetailsAndRestoreFlags(c)
		flags.AddExportConfigFlags(c)
		flags.AddFailFastFlag(c)
		flags.AddCorsoPassphaseFlags(c)
		flags.AddAWSCredsFlags(c)
	}

	return c
}

const (
	oneDriveServiceCommand          = "onedrive"
	oneDriveServiceCommandUseSuffix = "--backup <backupId> <destination>"

	//nolint:lll
	oneDriveServiceCommandExportExamples = `# Export file with ID 98765abcdef in Bob's last backup (1234abcd...) to my-exports directory
corso export onedrive my-exports --backup 1234abcd-12ab-cd34-56de-1234abcd --file 98765abcdef

# Export files named "FY2021 Planning.xlsx" in "Documents/Finance Reports" to current directory
corso export onedrive . --backup 1234abcd-12ab-cd34-56de-1234abcd \
--file "FY2021 Planning.xlsx" --folder "Documents/Finance Reports"

# Export all files and folders in folder "Documents/Finance Reports" that were created before 2020 to my-exports
corso export onedrive my-exports --backup 1234abcd-12ab-cd34-56de-1234abcd
--folder "Documents/Finance Reports" --file-created-before 2020-01-01T00:00:00`
)

// `corso export onedrive [<flag>...] <destination>`
func oneDriveExportCmd() *cobra.Command {
	return &cobra.Command{
		Use:   oneDriveServiceCommand,
		Short: "Export M365 OneDrive service data",
		RunE:  exportOneDriveCmd,
		Args: func(cmd *cobra.Command, args []string) error {
			if len(args) != 1 {
				return errors.New("missing restore destination")
			}

			return nil
		},
		Example: oneDriveServiceCommandExportExamples,
	}
}

// processes a OneDrive service export.
func exportOneDriveCmd(cmd *cobra.Command, args []string) error {
	ctx := cmd.Context()

	if utils.HasNoFlagsAndShownHelp(cmd) {
		return nil
	}

	opts := utils.MakeOneDriveOpts(cmd)

	if flags.RunModeFV == flags.RunModeFlagTest {
		return nil
	}

	if err := utils.ValidateOneDriveRestoreFlags(flags.BackupIDFV, opts); err != nil {
		return err
	}

	sel := utils.IncludeOneDriveRestoreDataSelectors(opts)
	utils.FilterOneDriveRestoreInfoSelectors(sel, opts)

	return runExport(ctx, cmd, args, opts.ExportCfg, sel.Selector, flags.BackupIDFV, "OneDrive")
}

106  src/cli/export/onedrive_test.go (new file)
@ -0,0 +1,106 @@
|
||||
package export
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
"github.com/alcionai/clues"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/stretchr/testify/suite"
|
||||
|
||||
"github.com/alcionai/corso/src/cli/flags"
|
||||
"github.com/alcionai/corso/src/cli/utils"
|
||||
"github.com/alcionai/corso/src/cli/utils/testdata"
|
||||
"github.com/alcionai/corso/src/internal/tester"
|
||||
)
|
||||
|
||||
type OneDriveUnitSuite struct {
|
||||
tester.Suite
|
||||
}
|
||||
|
||||
func TestOneDriveUnitSuite(t *testing.T) {
|
||||
suite.Run(t, &OneDriveUnitSuite{Suite: tester.NewUnitSuite(t)})
|
||||
}
|
||||
|
||||
func (suite *OneDriveUnitSuite) TestAddOneDriveCommands() {
|
||||
expectUse := oneDriveServiceCommand + " " + oneDriveServiceCommandUseSuffix
|
||||
|
||||
table := []struct {
|
||||
name string
|
||||
use string
|
||||
expectUse string
|
||||
expectShort string
|
||||
expectRunE func(*cobra.Command, []string) error
|
||||
}{
|
||||
{"export onedrive", exportCommand, expectUse, oneDriveExportCmd().Short, exportOneDriveCmd},
|
||||
}
|
||||
for _, test := range table {
|
||||
suite.Run(test.name, func() {
|
||||
t := suite.T()
|
||||
|
||||
cmd := &cobra.Command{Use: test.use}
|
||||
|
||||
// normally a persistent flag from the root.
|
||||
// required to ensure a dry run.
|
||||
flags.AddRunModeFlag(cmd, true)
|
||||
|
||||
c := addOneDriveCommands(cmd)
|
||||
require.NotNil(t, c)
|
||||
|
||||
cmds := cmd.Commands()
|
||||
require.Len(t, cmds, 1)
|
||||
|
||||
child := cmds[0]
|
||||
assert.Equal(t, test.expectUse, child.Use)
|
||||
assert.Equal(t, test.expectShort, child.Short)
|
||||
tester.AreSameFunc(t, test.expectRunE, child.RunE)
|
||||
|
||||
cmd.SetArgs([]string{
|
||||
"onedrive",
|
||||
testdata.RestoreDestination,
|
||||
"--" + flags.RunModeFN, flags.RunModeFlagTest,
|
||||
"--" + flags.BackupFN, testdata.BackupInput,
|
||||
"--" + flags.FileFN, testdata.FlgInputs(testdata.FileNameInput),
|
||||
"--" + flags.FolderFN, testdata.FlgInputs(testdata.FolderPathInput),
|
||||
"--" + flags.FileCreatedAfterFN, testdata.FileCreatedAfterInput,
|
||||
"--" + flags.FileCreatedBeforeFN, testdata.FileCreatedBeforeInput,
|
||||
"--" + flags.FileModifiedAfterFN, testdata.FileModifiedAfterInput,
|
||||
"--" + flags.FileModifiedBeforeFN, testdata.FileModifiedBeforeInput,
|
||||
|
||||
"--" + flags.AWSAccessKeyFN, testdata.AWSAccessKeyID,
|
||||
"--" + flags.AWSSecretAccessKeyFN, testdata.AWSSecretAccessKey,
|
||||
"--" + flags.AWSSessionTokenFN, testdata.AWSSessionToken,
|
||||
|
||||
"--" + flags.CorsoPassphraseFN, testdata.CorsoPassphrase,
|
||||
|
||||
// bool flags
|
||||
"--" + flags.ArchiveFN,
|
||||
})
|
||||
|
||||
cmd.SetOut(new(bytes.Buffer)) // drop output
|
||||
cmd.SetErr(new(bytes.Buffer)) // drop output
|
||||
err := cmd.Execute()
|
||||
assert.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
opts := utils.MakeOneDriveOpts(cmd)
|
||||
assert.Equal(t, testdata.BackupInput, flags.BackupIDFV)
|
||||
|
||||
assert.ElementsMatch(t, testdata.FileNameInput, opts.FileName)
|
||||
assert.ElementsMatch(t, testdata.FolderPathInput, opts.FolderPath)
|
||||
assert.Equal(t, testdata.FileCreatedAfterInput, opts.FileCreatedAfter)
|
||||
assert.Equal(t, testdata.FileCreatedBeforeInput, opts.FileCreatedBefore)
|
||||
assert.Equal(t, testdata.FileModifiedAfterInput, opts.FileModifiedAfter)
|
||||
assert.Equal(t, testdata.FileModifiedBeforeInput, opts.FileModifiedBefore)
|
||||
|
||||
assert.Equal(t, testdata.Archive, opts.ExportCfg.Archive)
|
||||
|
||||
assert.Equal(t, testdata.AWSAccessKeyID, flags.AWSAccessKeyFV)
|
||||
assert.Equal(t, testdata.AWSSecretAccessKey, flags.AWSSecretAccessKeyFV)
|
||||
assert.Equal(t, testdata.AWSSessionToken, flags.AWSSessionTokenFV)
|
||||
|
||||
assert.Equal(t, testdata.CorsoPassphrase, flags.CorsoPassphraseFV)
|
||||
})
|
||||
}
|
||||
}
|
||||
100  src/cli/export/sharepoint.go (new file)
@ -0,0 +1,100 @@
|
||||
package export
|
||||
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/pflag"
|
||||
|
||||
"github.com/alcionai/corso/src/cli/flags"
|
||||
"github.com/alcionai/corso/src/cli/utils"
|
||||
)
|
||||
|
||||
// called by export.go to map subcommands to provider-specific handling.
|
||||
func addSharePointCommands(cmd *cobra.Command) *cobra.Command {
|
||||
var (
|
||||
c *cobra.Command
|
||||
fs *pflag.FlagSet
|
||||
)
|
||||
|
||||
switch cmd.Use {
|
||||
case exportCommand:
|
||||
c, fs = utils.AddCommand(cmd, sharePointExportCmd())
|
||||
|
||||
c.Use = c.Use + " " + sharePointServiceCommandUseSuffix
|
||||
|
||||
// Flags addition ordering should follow the order we want them to appear in help and docs:
|
||||
// More generic (ex: --user) and more frequently used flags take precedence.
|
||||
fs.SortFlags = false
|
||||
|
||||
flags.AddBackupIDFlag(c, true)
|
||||
flags.AddSharePointDetailsAndRestoreFlags(c)
|
||||
flags.AddExportConfigFlags(c)
|
||||
flags.AddFailFastFlag(c)
|
||||
flags.AddCorsoPassphaseFlags(c)
|
||||
flags.AddAWSCredsFlags(c)
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
const (
|
||||
sharePointServiceCommand = "sharepoint"
|
||||
sharePointServiceCommandUseSuffix = "--backup <backupId> <destination>"
|
||||
|
||||
//nolint:lll
|
||||
sharePointServiceCommandExportExamples = `# Export file with ID 98765abcdef in Bob's latest backup (1234abcd...) to my-exports directory
|
||||
corso export sharepoint --backup 1234abcd-12ab-cd34-56de-1234abcd --file 98765abcdef my-exports
|
||||
|
||||
# Export files named "ServerRenderTemplate.xsl" in the folder "Display Templates/Style Sheets" as an archive to the current directory
|
||||
corso export sharepoint --backup 1234abcd-12ab-cd34-56de-1234abcd \
|
||||
--file "ServerRenderTemplate.xsl" --folder "Display Templates/Style Sheets" --archive .
|
||||
|
||||
# Export all files in the folder "Display Templates/Style Sheets" that were created before 2020 to my-exports directory.
|
||||
corso export sharepoint --backup 1234abcd-12ab-cd34-56de-1234abcd
|
||||
--file-created-before 2020-01-01T00:00:00 --folder "Display Templates/Style Sheets" my-exports
|
||||
|
||||
# Export all files in the "Documents" library to current directory.
|
||||
corso export sharepoint --backup 1234abcd-12ab-cd34-56de-1234abcd
|
||||
--library Documents --folder "Display Templates/Style Sheets" .`
|
||||
)
|
||||
|
||||
// `corso export sharepoint [<flag>...] <destination>`
|
||||
func sharePointExportCmd() *cobra.Command {
|
||||
return &cobra.Command{
|
||||
Use: sharePointServiceCommand,
|
||||
Short: "Export M365 SharePoint service data",
|
||||
RunE: exportSharePointCmd,
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) != 1 {
|
||||
return errors.New("missing restore destination")
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
Example: sharePointServiceCommandExportExamples,
|
||||
}
|
||||
}
|
||||
|
||||
// processes a SharePoint service export.
|
||||
func exportSharePointCmd(cmd *cobra.Command, args []string) error {
|
||||
ctx := cmd.Context()
|
||||
|
||||
if utils.HasNoFlagsAndShownHelp(cmd) {
|
||||
return nil
|
||||
}
|
||||
|
||||
opts := utils.MakeSharePointOpts(cmd)
|
||||
|
||||
if flags.RunModeFV == flags.RunModeFlagTest {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := utils.ValidateSharePointRestoreFlags(flags.BackupIDFV, opts); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sel := utils.IncludeSharePointRestoreDataSelectors(ctx, opts)
|
||||
utils.FilterSharePointRestoreInfoSelectors(sel, opts)
|
||||
|
||||
return runExport(ctx, cmd, args, opts.ExportCfg, sel.Selector, flags.BackupIDFV, "SharePoint")
|
||||
}
|
||||
118  src/cli/export/sharepoint_test.go (new file)
@ -0,0 +1,118 @@
|
||||
package export
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
"github.com/alcionai/clues"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/stretchr/testify/suite"
|
||||
|
||||
"github.com/alcionai/corso/src/cli/flags"
|
||||
"github.com/alcionai/corso/src/cli/utils"
|
||||
"github.com/alcionai/corso/src/cli/utils/testdata"
|
||||
"github.com/alcionai/corso/src/internal/tester"
|
||||
)
|
||||
|
||||
type SharePointUnitSuite struct {
|
||||
tester.Suite
|
||||
}
|
||||
|
||||
func TestSharePointUnitSuite(t *testing.T) {
|
||||
suite.Run(t, &SharePointUnitSuite{Suite: tester.NewUnitSuite(t)})
|
||||
}
|
||||
|
||||
func (suite *SharePointUnitSuite) TestAddSharePointCommands() {
|
||||
expectUse := sharePointServiceCommand + " " + sharePointServiceCommandUseSuffix
|
||||
|
||||
table := []struct {
|
||||
name string
|
||||
use string
|
||||
expectUse string
|
||||
expectShort string
|
||||
expectRunE func(*cobra.Command, []string) error
|
||||
}{
|
||||
{"export sharepoint", exportCommand, expectUse, sharePointExportCmd().Short, exportSharePointCmd},
|
||||
}
|
||||
for _, test := range table {
|
||||
suite.Run(test.name, func() {
|
||||
t := suite.T()
|
||||
|
||||
cmd := &cobra.Command{Use: test.use}
|
||||
|
||||
// normally a persistent flag from the root.
|
||||
// required to ensure a dry run.
|
||||
flags.AddRunModeFlag(cmd, true)
|
||||
|
||||
c := addSharePointCommands(cmd)
|
||||
require.NotNil(t, c)
|
||||
|
||||
cmds := cmd.Commands()
|
||||
require.Len(t, cmds, 1)
|
||||
|
||||
child := cmds[0]
|
||||
assert.Equal(t, test.expectUse, child.Use)
|
||||
assert.Equal(t, test.expectShort, child.Short)
|
||||
tester.AreSameFunc(t, test.expectRunE, child.RunE)
|
||||
|
||||
cmd.SetArgs([]string{
|
||||
"sharepoint",
|
||||
testdata.RestoreDestination,
|
||||
"--" + flags.RunModeFN, flags.RunModeFlagTest,
|
||||
"--" + flags.BackupFN, testdata.BackupInput,
|
||||
"--" + flags.LibraryFN, testdata.LibraryInput,
|
||||
"--" + flags.FileFN, testdata.FlgInputs(testdata.FileNameInput),
|
||||
"--" + flags.FolderFN, testdata.FlgInputs(testdata.FolderPathInput),
|
||||
"--" + flags.FileCreatedAfterFN, testdata.FileCreatedAfterInput,
|
||||
"--" + flags.FileCreatedBeforeFN, testdata.FileCreatedBeforeInput,
|
||||
"--" + flags.FileModifiedAfterFN, testdata.FileModifiedAfterInput,
|
||||
"--" + flags.FileModifiedBeforeFN, testdata.FileModifiedBeforeInput,
|
||||
"--" + flags.ListItemFN, testdata.FlgInputs(testdata.ListItemInput),
|
||||
"--" + flags.ListFolderFN, testdata.FlgInputs(testdata.ListFolderInput),
|
||||
"--" + flags.PageFN, testdata.FlgInputs(testdata.PageInput),
|
||||
"--" + flags.PageFolderFN, testdata.FlgInputs(testdata.PageFolderInput),
|
||||
|
||||
"--" + flags.AWSAccessKeyFN, testdata.AWSAccessKeyID,
|
||||
"--" + flags.AWSSecretAccessKeyFN, testdata.AWSSecretAccessKey,
|
||||
"--" + flags.AWSSessionTokenFN, testdata.AWSSessionToken,
|
||||
|
||||
"--" + flags.CorsoPassphraseFN, testdata.CorsoPassphrase,
|
||||
|
||||
// bool flags
|
||||
"--" + flags.ArchiveFN,
|
||||
})
|
||||
|
||||
cmd.SetOut(new(bytes.Buffer)) // drop output
|
||||
cmd.SetErr(new(bytes.Buffer)) // drop output
|
||||
err := cmd.Execute()
|
||||
assert.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
opts := utils.MakeSharePointOpts(cmd)
|
||||
assert.Equal(t, testdata.BackupInput, flags.BackupIDFV)
|
||||
|
||||
assert.Equal(t, testdata.LibraryInput, opts.Library)
|
||||
assert.ElementsMatch(t, testdata.FileNameInput, opts.FileName)
|
||||
assert.ElementsMatch(t, testdata.FolderPathInput, opts.FolderPath)
|
||||
assert.Equal(t, testdata.FileCreatedAfterInput, opts.FileCreatedAfter)
|
||||
assert.Equal(t, testdata.FileCreatedBeforeInput, opts.FileCreatedBefore)
|
||||
assert.Equal(t, testdata.FileModifiedAfterInput, opts.FileModifiedAfter)
|
||||
assert.Equal(t, testdata.FileModifiedBeforeInput, opts.FileModifiedBefore)
|
||||
|
||||
assert.ElementsMatch(t, testdata.ListItemInput, opts.ListItem)
|
||||
assert.ElementsMatch(t, testdata.ListFolderInput, opts.ListFolder)
|
||||
|
||||
assert.ElementsMatch(t, testdata.PageInput, opts.Page)
|
||||
assert.ElementsMatch(t, testdata.PageFolderInput, opts.PageFolder)
|
||||
|
||||
assert.Equal(t, testdata.Archive, opts.ExportCfg.Archive)
|
||||
|
||||
assert.Equal(t, testdata.AWSAccessKeyID, flags.AWSAccessKeyFV)
|
||||
assert.Equal(t, testdata.AWSSecretAccessKey, flags.AWSSecretAccessKeyFV)
|
||||
assert.Equal(t, testdata.AWSSessionToken, flags.AWSSessionTokenFV)
|
||||
|
||||
assert.Equal(t, testdata.CorsoPassphrase, flags.CorsoPassphraseFV)
|
||||
})
|
||||
}
|
||||
}
|
||||
15  src/cli/flags/export.go (new file)
@ -0,0 +1,15 @@
package flags

import (
	"github.com/spf13/cobra"
)

const ArchiveFN = "archive"

var ArchiveFV bool

// AddExportConfigFlags adds the export config flag set.
func AddExportConfigFlags(cmd *cobra.Command) {
	fs := cmd.Flags()
	fs.BoolVar(&ArchiveFV, ArchiveFN, false, "Export data as an archive instead of individual files")
}

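Below is a minimal sketch of how this flag set can be attached to a command and read back after parsing. It assumes the corso module is importable; the "export-demo" command name is made up for illustration.

package main

import (
	"fmt"

	"github.com/spf13/cobra"

	"github.com/alcionai/corso/src/cli/flags"
)

func main() {
	// Attach the export config flags (currently just --archive) to a throwaway command.
	cmd := &cobra.Command{
		Use: "export-demo",
		RunE: func(cc *cobra.Command, args []string) error {
			// After parsing, the archive choice is exposed through the package-level value.
			fmt.Println("archive requested:", flags.ArchiveFV)
			return nil
		},
	}

	flags.AddExportConfigFlags(cmd)
	cmd.SetArgs([]string{"--archive"})

	_ = cmd.Execute()
}
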
@ -15,6 +15,7 @@ import (
|
||||
"github.com/alcionai/corso/src/cli/utils"
|
||||
"github.com/alcionai/corso/src/internal/events"
|
||||
"github.com/alcionai/corso/src/pkg/account"
|
||||
rep "github.com/alcionai/corso/src/pkg/control/repository"
|
||||
"github.com/alcionai/corso/src/pkg/credentials"
|
||||
"github.com/alcionai/corso/src/pkg/repository"
|
||||
"github.com/alcionai/corso/src/pkg/storage"
|
||||
@ -158,7 +159,13 @@ func initS3Cmd(cmd *cobra.Command, args []string) error {
|
||||
return Only(ctx, clues.Wrap(err, "Failed to parse m365 account config"))
|
||||
}
|
||||
|
||||
r, err := repository.Initialize(ctx, cfg.Account, cfg.Storage, opt)
|
||||
// TODO(ashmrtn): Wire to flags for retention during repo init.
|
||||
r, err := repository.Initialize(
|
||||
ctx,
|
||||
cfg.Account,
|
||||
cfg.Storage,
|
||||
opt,
|
||||
rep.Retention{})
|
||||
if err != nil {
|
||||
if succeedIfExists && errors.Is(err, repository.ErrorRepoAlreadyExists) {
|
||||
return nil
|
||||
|
||||
@ -16,6 +16,7 @@ import (
|
||||
"github.com/alcionai/corso/src/internal/tester/tconfig"
|
||||
"github.com/alcionai/corso/src/pkg/account"
|
||||
"github.com/alcionai/corso/src/pkg/control"
|
||||
ctrlRepo "github.com/alcionai/corso/src/pkg/control/repository"
|
||||
"github.com/alcionai/corso/src/pkg/repository"
|
||||
storeTD "github.com/alcionai/corso/src/pkg/storage/testdata"
|
||||
)
|
||||
@ -200,7 +201,12 @@ func (suite *S3E2ESuite) TestConnectS3Cmd() {
|
||||
ctx = config.SetViper(ctx, vpr)
|
||||
|
||||
// init the repo first
|
||||
_, err = repository.Initialize(ctx, account.Account{}, st, control.DefaultOptions())
|
||||
_, err = repository.Initialize(
|
||||
ctx,
|
||||
account.Account{},
|
||||
st,
|
||||
control.DefaultOptions(),
|
||||
ctrlRepo.Retention{})
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
// then test it
|
||||
|
||||
@ -20,6 +20,7 @@ import (
|
||||
"github.com/alcionai/corso/src/internal/tester/tconfig"
|
||||
"github.com/alcionai/corso/src/pkg/account"
|
||||
"github.com/alcionai/corso/src/pkg/control"
|
||||
ctrlRepo "github.com/alcionai/corso/src/pkg/control/repository"
|
||||
"github.com/alcionai/corso/src/pkg/path"
|
||||
"github.com/alcionai/corso/src/pkg/repository"
|
||||
"github.com/alcionai/corso/src/pkg/selectors"
|
||||
@ -83,7 +84,12 @@ func (suite *RestoreExchangeE2ESuite) SetupSuite() {
|
||||
)
|
||||
|
||||
// init the repo first
|
||||
suite.repo, err = repository.Initialize(ctx, suite.acct, suite.st, control.Options{})
|
||||
suite.repo, err = repository.Initialize(
|
||||
ctx,
|
||||
suite.acct,
|
||||
suite.st,
|
||||
control.Options{},
|
||||
ctrlRepo.Retention{})
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
suite.backupOps = make(map[path.CategoryType]operations.BackupOperation)
|
||||
|
||||
@ -34,7 +34,7 @@ func (suite *SharePointUnitSuite) TestAddSharePointCommands() {
|
||||
expectShort string
|
||||
expectRunE func(*cobra.Command, []string) error
|
||||
}{
|
||||
{"restore onedrive", restoreCommand, expectUse, sharePointRestoreCmd().Short, restoreSharePointCmd},
|
||||
{"restore sharepoint", restoreCommand, expectUse, sharePointRestoreCmd().Short, restoreSharePointCmd},
|
||||
}
|
||||
for _, test := range table {
|
||||
suite.Run(test.name, func() {
|
||||
|
||||
38  src/cli/utils/export_config.go (new file)
@ -0,0 +1,38 @@
package utils

import (
	"context"

	"github.com/spf13/cobra"

	"github.com/alcionai/corso/src/cli/flags"
	"github.com/alcionai/corso/src/pkg/control"
)

type ExportCfgOpts struct {
	Archive bool

	Populated flags.PopulatedFlags
}

func makeExportCfgOpts(cmd *cobra.Command) ExportCfgOpts {
	return ExportCfgOpts{
		Archive: flags.ArchiveFV,

		// populated contains the list of flags that appear in the
		// command, according to pflags. Use this to differentiate
		// between an "empty" and a "missing" value.
		Populated: flags.GetPopulatedFlags(cmd),
	}
}

func MakeExportConfig(
	ctx context.Context,
	opts ExportCfgOpts,
) control.ExportConfig {
	exportCfg := control.DefaultExportConfig()

	exportCfg.Archive = opts.Archive

	return exportCfg
}

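A small usage sketch in Go's testable-example style (hypothetical, not a file in this change), showing that the archive choice on ExportCfgOpts is carried through MakeExportConfig into the control.ExportConfig that runExport hands to r.NewExport:

package utils_test

import (
	"context"
	"fmt"

	"github.com/alcionai/corso/src/cli/utils"
)

func ExampleMakeExportConfig() {
	// In the CLI this value is built by makeExportCfgOpts from the parsed --archive flag.
	opts := utils.ExportCfgOpts{Archive: true}

	cfg := utils.MakeExportConfig(context.Background(), opts)
	fmt.Println(cfg.Archive)
	// Output: true
}
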
54  src/cli/utils/export_config_test.go (new file)
@ -0,0 +1,54 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/suite"
|
||||
|
||||
"github.com/alcionai/corso/src/cli/flags"
|
||||
"github.com/alcionai/corso/src/internal/tester"
|
||||
"github.com/alcionai/corso/src/pkg/control"
|
||||
)
|
||||
|
||||
type ExportCfgUnitSuite struct {
|
||||
tester.Suite
|
||||
}
|
||||
|
||||
func TestExportCfgUnitSuite(t *testing.T) {
|
||||
suite.Run(t, &ExportCfgUnitSuite{Suite: tester.NewUnitSuite(t)})
|
||||
}
|
||||
|
||||
func (suite *ExportCfgUnitSuite) TestMakeExportConfig() {
|
||||
rco := &ExportCfgOpts{Archive: true}
|
||||
|
||||
table := []struct {
|
||||
name string
|
||||
populated flags.PopulatedFlags
|
||||
expect control.ExportConfig
|
||||
}{
|
||||
{
|
||||
name: "archive populated",
|
||||
populated: flags.PopulatedFlags{
|
||||
flags.ArchiveFN: {},
|
||||
},
|
||||
expect: control.ExportConfig{
|
||||
Archive: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, test := range table {
|
||||
suite.Run(test.name, func() {
|
||||
t := suite.T()
|
||||
|
||||
ctx, flush := tester.NewContext(t)
|
||||
defer flush()
|
||||
|
||||
opts := *rco
|
||||
opts.Populated = test.populated
|
||||
|
||||
result := MakeExportConfig(ctx, opts)
|
||||
assert.Equal(t, test.expect.Archive, result.Archive)
|
||||
})
|
||||
}
|
||||
}
|
||||
@ -19,6 +19,7 @@ type OneDriveOpts struct {
|
||||
FileModifiedBefore string
|
||||
|
||||
RestoreCfg RestoreCfgOpts
|
||||
ExportCfg ExportCfgOpts
|
||||
|
||||
Populated flags.PopulatedFlags
|
||||
}
|
||||
@ -35,6 +36,7 @@ func MakeOneDriveOpts(cmd *cobra.Command) OneDriveOpts {
|
||||
FileModifiedBefore: flags.FileModifiedBeforeFV,
|
||||
|
||||
RestoreCfg: makeRestoreCfgOpts(cmd),
|
||||
ExportCfg: makeExportCfgOpts(cmd),
|
||||
|
||||
// populated contains the list of flags that appear in the
|
||||
// command, according to pflags. Use this to differentiate
|
||||
|
||||
@ -32,6 +32,7 @@ type SharePointOpts struct {
|
||||
Page []string
|
||||
|
||||
RestoreCfg RestoreCfgOpts
|
||||
ExportCfg ExportCfgOpts
|
||||
|
||||
Populated flags.PopulatedFlags
|
||||
}
|
||||
@ -56,6 +57,7 @@ func MakeSharePointOpts(cmd *cobra.Command) SharePointOpts {
|
||||
PageFolder: flags.PageFolderFV,
|
||||
|
||||
RestoreCfg: makeRestoreCfgOpts(cmd),
|
||||
ExportCfg: makeExportCfgOpts(cmd),
|
||||
|
||||
// populated contains the list of flags that appear in the
|
||||
// command, according to pflags. Use this to differentiate
|
||||
|
||||
4  src/cli/utils/testdata/flags.go (vendored)
@ -51,6 +51,8 @@ var (

	DeltaPageSize = "deltaPageSize"

	Archive = true

	AzureClientID     = "testAzureClientId"
	AzureTenantID     = "testAzureTenantId"
	AzureClientSecret = "testAzureClientSecret"
@ -60,4 +62,6 @@ var (
	AWSSessionToken = "testAWSSessionToken"

	CorsoPassphrase = "testCorsoPassphrase"

	RestoreDestination = "test-restore-destination"
)

@ -7,15 +7,122 @@ import (
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/alcionai/clues"
|
||||
"github.com/spf13/cobra"
|
||||
"golang.org/x/exp/maps"
|
||||
|
||||
"github.com/alcionai/corso/src/cli/config"
|
||||
"github.com/alcionai/corso/src/cli/utils"
|
||||
"github.com/alcionai/corso/src/pkg/logger"
|
||||
"github.com/alcionai/corso/src/pkg/path"
|
||||
"github.com/alcionai/corso/src/pkg/repository"
|
||||
"github.com/alcionai/corso/src/pkg/store"
|
||||
)
|
||||
|
||||
// deleteBackups connects to the repository and deletes all backups for
|
||||
// service that are at least deletionDays old. Returns the IDs of all backups
|
||||
// that were deleted.
|
||||
func deleteBackups(
|
||||
ctx context.Context,
|
||||
service path.ServiceType,
|
||||
deletionDays int,
|
||||
) ([]string, error) {
|
||||
ctx = clues.Add(ctx, "cutoff_days", deletionDays)
|
||||
|
||||
r, _, _, _, err := utils.GetAccountAndConnect(ctx, service, nil)
|
||||
if err != nil {
|
||||
return nil, clues.Wrap(err, "connecting to account").WithClues(ctx)
|
||||
}
|
||||
|
||||
defer r.Close(ctx)
|
||||
|
||||
backups, err := r.BackupsByTag(ctx, store.Service(service))
|
||||
if err != nil {
|
||||
return nil, clues.Wrap(err, "listing backups").WithClues(ctx)
|
||||
}
|
||||
|
||||
var (
|
||||
deleted []string
|
||||
cutoff = time.Now().Add(-time.Hour * 24 * time.Duration(deletionDays))
|
||||
)
|
||||
|
||||
for _, backup := range backups {
|
||||
if backup.StartAndEndTime.CompletedAt.Before(cutoff) {
|
||||
if err := r.DeleteBackup(ctx, backup.ID.String()); err != nil {
|
||||
return nil, clues.Wrap(
|
||||
err,
|
||||
"deleting backup").
|
||||
With("backup_id", backup.ID).
|
||||
WithClues(ctx)
|
||||
}
|
||||
|
||||
deleted = append(deleted, backup.ID.String())
|
||||
logAndPrint(ctx, "Deleted backup %s", backup.ID.String())
|
||||
}
|
||||
}
|
||||
|
||||
return deleted, nil
|
||||
}
|
||||
|
||||
// pitrListBackups connects to the repository at the given point in time and
|
||||
// lists the backups for service. It then checks the list of backups contains
|
||||
// the backups in backupIDs.
|
||||
//
|
||||
//nolint:unused
|
||||
//lint:ignore U1000 Waiting for full support.
|
||||
func pitrListBackups(
|
||||
ctx context.Context,
|
||||
service path.ServiceType,
|
||||
pitr time.Time,
|
||||
backupIDs []string,
|
||||
) error {
|
||||
if len(backupIDs) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
ctx = clues.Add(ctx, "pitr_time", pitr, "search_backups", backupIDs)
|
||||
|
||||
// TODO(ashmrtn): This may be moved into CLI layer at some point when we add
|
||||
// flags for opening a repo at a point in time.
|
||||
cfg, err := config.GetConfigRepoDetails(ctx, true, true, nil)
|
||||
if err != nil {
|
||||
return clues.Wrap(err, "getting config info")
|
||||
}
|
||||
|
||||
opts := utils.ControlWithConfig(cfg)
|
||||
opts.Repo.ViewTimestamp = &pitr
|
||||
|
||||
r, err := repository.Connect(ctx, cfg.Account, cfg.Storage, cfg.RepoID, opts)
|
||||
if err != nil {
|
||||
return clues.Wrap(err, "connecting to repo").WithClues(ctx)
|
||||
}
|
||||
|
||||
defer r.Close(ctx)
|
||||
|
||||
backups, err := r.BackupsByTag(ctx, store.Service(service))
|
||||
if err != nil {
|
||||
return clues.Wrap(err, "listing backups").WithClues(ctx)
|
||||
}
|
||||
|
||||
bups := map[string]struct{}{}
|
||||
|
||||
for _, backup := range backups {
|
||||
bups[backup.ID.String()] = struct{}{}
|
||||
}
|
||||
|
||||
ctx = clues.Add(ctx, "found_backups", maps.Keys(bups))
|
||||
|
||||
for _, backupID := range backupIDs {
|
||||
if _, ok := bups[backupID]; !ok {
|
||||
return clues.New("looking for backup").
|
||||
With("search_backup_id", backupID).
|
||||
WithClues(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func main() {
|
||||
var (
|
||||
service path.ServiceType
|
||||
@ -39,31 +146,16 @@ func main() {
|
||||
fatal(cc.Context(), "unknown service", nil)
|
||||
}
|
||||
|
||||
r, _, _, _, err := utils.GetAccountAndConnect(cc.Context(), service, nil)
|
||||
if err != nil {
|
||||
fatal(cc.Context(), "unable to connect account", err)
|
||||
}
|
||||
|
||||
defer r.Close(cc.Context())
|
||||
|
||||
backups, err := r.BackupsByTag(cc.Context(), store.Service(service))
|
||||
if err != nil {
|
||||
fatal(cc.Context(), "unable to find backups", err)
|
||||
}
|
||||
ctx := clues.Add(cc.Context(), "service", service)
|
||||
|
||||
days, err := strconv.Atoi(os.Getenv("DELETION_DAYS"))
|
||||
if err != nil {
|
||||
fatal(cc.Context(), "invalid no of days provided", nil)
|
||||
fatal(ctx, "invalid number of days provided", nil)
|
||||
}
|
||||
|
||||
for _, backup := range backups {
|
||||
if backup.StartAndEndTime.CompletedAt.Before(time.Now().AddDate(0, 0, -days)) {
|
||||
if err := r.DeleteBackup(cc.Context(), backup.ID.String()); err != nil {
|
||||
fatal(cc.Context(), "deleting backup", err)
|
||||
}
|
||||
|
||||
logAndPrint(cc.Context(), "Deleted backup %s", backup.ID.String())
|
||||
}
|
||||
_, err = deleteBackups(ctx, service, days)
|
||||
if err != nil {
|
||||
fatal(cc.Context(), "deleting backups", clues.Stack(err))
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
6  src/cmd/sanity_test/common/common.go (new file)
@ -0,0 +1,6 @@
|
||||
package common
|
||||
|
||||
type PermissionInfo struct {
|
||||
EntityID string
|
||||
Roles []string
|
||||
}
|
||||
82  src/cmd/sanity_test/common/utils.go (new file)
@ -0,0 +1,82 @@
|
||||
package common
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/alcionai/corso/src/internal/common/dttm"
|
||||
"github.com/alcionai/corso/src/pkg/logger"
|
||||
)
|
||||
|
||||
func Assert(
|
||||
ctx context.Context,
|
||||
passes func() bool,
|
||||
header string,
|
||||
expect, current any,
|
||||
) {
|
||||
if passes() {
|
||||
return
|
||||
}
|
||||
|
||||
header = "Error: " + header
|
||||
expected := fmt.Sprintf("* Expected: %+v", expect)
|
||||
got := fmt.Sprintf("* Current: %+v", current)
|
||||
|
||||
logger.Ctx(ctx).Info(strings.Join([]string{header, expected, got}, " "))
|
||||
|
||||
fmt.Println(header)
|
||||
fmt.Println(expected)
|
||||
fmt.Println(got)
|
||||
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
func Fatal(ctx context.Context, msg string, err error) {
|
||||
logger.CtxErr(ctx, err).Error("test failure: " + msg)
|
||||
fmt.Println(msg+": ", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
func MustGetTimeFromName(ctx context.Context, name string) (time.Time, bool) {
|
||||
t, err := dttm.ExtractTime(name)
|
||||
if err != nil && !errors.Is(err, dttm.ErrNoTimeString) {
|
||||
Fatal(ctx, "extracting time from name: "+name, err)
|
||||
}
|
||||
|
||||
return t, !errors.Is(err, dttm.ErrNoTimeString)
|
||||
}
|
||||
|
||||
func IsWithinTimeBound(ctx context.Context, bound, check time.Time, hasTime bool) bool {
|
||||
if hasTime {
|
||||
if bound.Before(check) {
|
||||
logger.Ctx(ctx).
|
||||
With("boundary_time", bound, "check_time", check).
|
||||
Info("skipping restore folder: not older than time bound")
|
||||
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func FilterSlice(sl []string, remove string) []string {
|
||||
r := []string{}
|
||||
|
||||
for _, s := range sl {
|
||||
if !strings.EqualFold(s, remove) {
|
||||
r = append(r, s)
|
||||
}
|
||||
}
|
||||
|
||||
return r
|
||||
}
|
||||
|
||||
func LogAndPrint(ctx context.Context, tmpl string, vs ...any) {
|
||||
logger.Ctx(ctx).Infof(tmpl, vs...)
|
||||
fmt.Printf(tmpl+"\n", vs...)
|
||||
}
|
||||
88  src/cmd/sanity_test/export/onedrive.go (new file)
@ -0,0 +1,88 @@
|
||||
package export
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/alcionai/clues"
|
||||
msgraphsdk "github.com/microsoftgraph/msgraph-sdk-go"
|
||||
|
||||
"github.com/alcionai/corso/src/cmd/sanity_test/common"
|
||||
"github.com/alcionai/corso/src/cmd/sanity_test/restore"
|
||||
"github.com/alcionai/corso/src/internal/common/ptr"
|
||||
)
|
||||
|
||||
func CheckOneDriveExport(
|
||||
ctx context.Context,
|
||||
client *msgraphsdk.GraphServiceClient,
|
||||
userID, folderName, dataFolder string,
|
||||
) {
|
||||
drive, err := client.
|
||||
Users().
|
||||
ByUserId(userID).
|
||||
Drive().
|
||||
Get(ctx, nil)
|
||||
if err != nil {
|
||||
common.Fatal(ctx, "getting the drive:", err)
|
||||
}
|
||||
|
||||
// map itemID -> item size
|
||||
var (
|
||||
fileSizes = make(map[string]int64)
|
||||
exportFileSizes = make(map[string]int64)
|
||||
startTime = time.Now()
|
||||
)
|
||||
|
||||
err = filepath.Walk(folderName, func(path string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return clues.Stack(err)
|
||||
}
|
||||
|
||||
if info.IsDir() {
|
||||
return nil
|
||||
}
|
||||
|
||||
relPath, err := filepath.Rel(folderName, path)
|
||||
if err != nil {
|
||||
return clues.Stack(err)
|
||||
}
|
||||
|
||||
exportFileSizes[relPath] = info.Size()
|
||||
if startTime.After(info.ModTime()) {
|
||||
startTime = info.ModTime()
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
fmt.Println("Error walking the path:", err)
|
||||
}
|
||||
|
||||
_ = restore.PopulateDriveDetails(
|
||||
ctx,
|
||||
client,
|
||||
ptr.Val(drive.GetId()),
|
||||
folderName,
|
||||
dataFolder,
|
||||
fileSizes,
|
||||
map[string][]common.PermissionInfo{},
|
||||
startTime)
|
||||
|
||||
for fileName, expected := range fileSizes {
|
||||
common.LogAndPrint(ctx, "checking for file: %s", fileName)
|
||||
|
||||
got := exportFileSizes[fileName]
|
||||
|
||||
common.Assert(
|
||||
ctx,
|
||||
func() bool { return expected == got },
|
||||
fmt.Sprintf("different file size: %s", fileName),
|
||||
expected,
|
||||
got)
|
||||
}
|
||||
|
||||
fmt.Println("Success")
|
||||
}
|
||||
88  src/cmd/sanity_test/export/sharepoint.go (new file)
@ -0,0 +1,88 @@
|
||||
package export
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/alcionai/clues"
|
||||
msgraphsdk "github.com/microsoftgraph/msgraph-sdk-go"
|
||||
|
||||
"github.com/alcionai/corso/src/cmd/sanity_test/common"
|
||||
"github.com/alcionai/corso/src/cmd/sanity_test/restore"
|
||||
"github.com/alcionai/corso/src/internal/common/ptr"
|
||||
)
|
||||
|
||||
func CheckSharePointExport(
|
||||
ctx context.Context,
|
||||
client *msgraphsdk.GraphServiceClient,
|
||||
siteID, folderName, dataFolder string,
|
||||
) {
|
||||
drive, err := client.
|
||||
Sites().
|
||||
BySiteId(siteID).
|
||||
Drive().
|
||||
Get(ctx, nil)
|
||||
if err != nil {
|
||||
common.Fatal(ctx, "getting the drive:", err)
|
||||
}
|
||||
|
||||
// map itemID -> item size
|
||||
var (
|
||||
fileSizes = make(map[string]int64)
|
||||
exportFileSizes = make(map[string]int64)
|
||||
startTime = time.Now()
|
||||
)
|
||||
|
||||
err = filepath.Walk(folderName, func(path string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return clues.Stack(err)
|
||||
}
|
||||
|
||||
if info.IsDir() {
|
||||
return nil
|
||||
}
|
||||
|
||||
relPath, err := filepath.Rel(folderName, path)
|
||||
if err != nil {
|
||||
return clues.Stack(err)
|
||||
}
|
||||
|
||||
exportFileSizes[relPath] = info.Size()
|
||||
if startTime.After(info.ModTime()) {
|
||||
startTime = info.ModTime()
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
fmt.Println("Error walking the path:", err)
|
||||
}
|
||||
|
||||
_ = restore.PopulateDriveDetails(
|
||||
ctx,
|
||||
client,
|
||||
ptr.Val(drive.GetId()),
|
||||
folderName,
|
||||
dataFolder,
|
||||
fileSizes,
|
||||
map[string][]common.PermissionInfo{},
|
||||
startTime)
|
||||
|
||||
for fileName, expected := range fileSizes {
|
||||
common.LogAndPrint(ctx, "checking for file: %s", fileName)
|
||||
|
||||
got := exportFileSizes[fileName]
|
||||
|
||||
common.Assert(
|
||||
ctx,
|
||||
func() bool { return expected == got },
|
||||
fmt.Sprintf("different file size: %s", fileName),
|
||||
expected,
|
||||
got)
|
||||
}
|
||||
|
||||
fmt.Println("Success")
|
||||
}
|
||||
219  src/cmd/sanity_test/restore/exchange.go (new file)
@ -0,0 +1,219 @@
|
||||
package restore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
stdpath "path"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/alcionai/clues"
|
||||
msgraphsdk "github.com/microsoftgraph/msgraph-sdk-go"
|
||||
"github.com/microsoftgraph/msgraph-sdk-go/models"
|
||||
"github.com/microsoftgraph/msgraph-sdk-go/users"
|
||||
|
||||
"github.com/alcionai/corso/src/cmd/sanity_test/common"
|
||||
"github.com/alcionai/corso/src/internal/common/ptr"
|
||||
"github.com/alcionai/corso/src/pkg/filters"
|
||||
)
|
||||
|
||||
// CheckEmailRestoration verifies that the email count in the restored folder matches
// the email count in the actual M365 account.
|
||||
func CheckEmailRestoration(
|
||||
ctx context.Context,
|
||||
client *msgraphsdk.GraphServiceClient,
|
||||
testUser, folderName, dataFolder, baseBackupFolder string,
|
||||
startTime time.Time,
|
||||
) {
|
||||
var (
|
||||
restoreFolder models.MailFolderable
|
||||
itemCount = make(map[string]int32)
|
||||
restoreItemCount = make(map[string]int32)
|
||||
builder = client.Users().ByUserId(testUser).MailFolders()
|
||||
)
|
||||
|
||||
for {
|
||||
result, err := builder.Get(ctx, nil)
|
||||
if err != nil {
|
||||
common.Fatal(ctx, "getting mail folders", err)
|
||||
}
|
||||
|
||||
values := result.GetValue()
|
||||
|
||||
for _, v := range values {
|
||||
itemName := ptr.Val(v.GetDisplayName())
|
||||
|
||||
if itemName == folderName {
|
||||
restoreFolder = v
|
||||
continue
|
||||
}
|
||||
|
||||
if itemName == dataFolder || itemName == baseBackupFolder {
|
||||
// otherwise, recursively aggregate all child folders.
|
||||
getAllMailSubFolders(ctx, client, testUser, v, itemName, dataFolder, itemCount)
|
||||
|
||||
itemCount[itemName] = ptr.Val(v.GetTotalItemCount())
|
||||
}
|
||||
}
|
||||
|
||||
link, ok := ptr.ValOK(result.GetOdataNextLink())
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
|
||||
builder = users.NewItemMailFoldersRequestBuilder(link, client.GetAdapter())
|
||||
}
|
||||
|
||||
folderID := ptr.Val(restoreFolder.GetId())
|
||||
folderName = ptr.Val(restoreFolder.GetDisplayName())
|
||||
ctx = clues.Add(
|
||||
ctx,
|
||||
"restore_folder_id", folderID,
|
||||
"restore_folder_name", folderName)
|
||||
|
||||
childFolder, err := client.
|
||||
Users().
|
||||
ByUserId(testUser).
|
||||
MailFolders().
|
||||
ByMailFolderId(folderID).
|
||||
ChildFolders().
|
||||
Get(ctx, nil)
|
||||
if err != nil {
|
||||
common.Fatal(ctx, "getting restore folder child folders", err)
|
||||
}
|
||||
|
||||
for _, fld := range childFolder.GetValue() {
|
||||
restoreDisplayName := ptr.Val(fld.GetDisplayName())
|
||||
|
||||
// check if folder is the data folder we loaded or the base backup to verify
|
||||
// the incremental backup worked fine
|
||||
if strings.EqualFold(restoreDisplayName, dataFolder) || strings.EqualFold(restoreDisplayName, baseBackupFolder) {
|
||||
count, _ := ptr.ValOK(fld.GetTotalItemCount())
|
||||
|
||||
restoreItemCount[restoreDisplayName] = count
|
||||
checkAllSubFolder(ctx, client, fld, testUser, restoreDisplayName, dataFolder, restoreItemCount)
|
||||
}
|
||||
}
|
||||
|
||||
verifyEmailData(ctx, restoreItemCount, itemCount)
|
||||
}
|
||||
|
||||
func verifyEmailData(ctx context.Context, restoreMessageCount, messageCount map[string]int32) {
|
||||
for fldName, expected := range messageCount {
|
||||
got := restoreMessageCount[fldName]
|
||||
|
||||
common.Assert(
|
||||
ctx,
|
||||
func() bool { return expected == got },
|
||||
fmt.Sprintf("Restore item counts do not match: %s", fldName),
|
||||
expected,
|
||||
got)
|
||||
}
|
||||
}
|
||||
|
||||
// getAllMailSubFolders will recursively check for all subfolders and get the corresponding
// email count.
|
||||
func getAllMailSubFolders(
|
||||
ctx context.Context,
|
||||
client *msgraphsdk.GraphServiceClient,
|
||||
testUser string,
|
||||
r models.MailFolderable,
|
||||
parentFolder,
|
||||
dataFolder string,
|
||||
messageCount map[string]int32,
|
||||
) {
|
||||
var (
|
||||
folderID = ptr.Val(r.GetId())
|
||||
count int32 = 99
|
||||
options = &users.ItemMailFoldersItemChildFoldersRequestBuilderGetRequestConfiguration{
|
||||
QueryParameters: &users.ItemMailFoldersItemChildFoldersRequestBuilderGetQueryParameters{
|
||||
Top: &count,
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
ctx = clues.Add(ctx, "parent_folder_id", folderID)
|
||||
|
||||
childFolder, err := client.
|
||||
Users().
|
||||
ByUserId(testUser).
|
||||
MailFolders().
|
||||
ByMailFolderId(folderID).
|
||||
ChildFolders().
|
||||
Get(ctx, options)
|
||||
if err != nil {
|
||||
common.Fatal(ctx, "getting mail subfolders", err)
|
||||
}
|
||||
|
||||
for _, child := range childFolder.GetValue() {
|
||||
var (
|
||||
childDisplayName = ptr.Val(child.GetDisplayName())
|
||||
childFolderCount = ptr.Val(child.GetChildFolderCount())
|
||||
//nolint:forbidigo
|
||||
fullFolderName = stdpath.Join(parentFolder, childDisplayName)
|
||||
)
|
||||
|
||||
if filters.PathContains([]string{dataFolder}).Compare(fullFolderName) {
|
||||
messageCount[fullFolderName] = ptr.Val(child.GetTotalItemCount())
|
||||
// recursively check for subfolders
|
||||
if childFolderCount > 0 {
|
||||
parentFolder := fullFolderName
|
||||
|
||||
getAllMailSubFolders(ctx, client, testUser, child, parentFolder, dataFolder, messageCount)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// checkAllSubFolder will recursively traverse inside the restore folder and
|
||||
// verify that data matched in all subfolders
|
||||
func checkAllSubFolder(
|
||||
ctx context.Context,
|
||||
client *msgraphsdk.GraphServiceClient,
|
||||
r models.MailFolderable,
|
||||
testUser,
|
||||
parentFolder,
|
||||
dataFolder string,
|
||||
restoreMessageCount map[string]int32,
|
||||
) {
|
||||
var (
|
||||
folderID = ptr.Val(r.GetId())
|
||||
count int32 = 99
|
||||
options = &users.ItemMailFoldersItemChildFoldersRequestBuilderGetRequestConfiguration{
|
||||
QueryParameters: &users.ItemMailFoldersItemChildFoldersRequestBuilderGetQueryParameters{
|
||||
Top: &count,
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
childFolder, err := client.
|
||||
Users().
|
||||
ByUserId(testUser).
|
||||
MailFolders().
|
||||
ByMailFolderId(folderID).
|
||||
ChildFolders().
|
||||
Get(ctx, options)
|
||||
if err != nil {
|
||||
common.Fatal(ctx, "getting mail subfolders", err)
|
||||
}
|
||||
|
||||
for _, child := range childFolder.GetValue() {
|
||||
var (
|
||||
childDisplayName = ptr.Val(child.GetDisplayName())
|
||||
//nolint:forbidigo
|
||||
fullFolderName = stdpath.Join(parentFolder, childDisplayName)
|
||||
)
|
||||
|
||||
if filters.PathContains([]string{dataFolder}).Compare(fullFolderName) {
|
||||
childTotalCount, _ := ptr.ValOK(child.GetTotalItemCount())
|
||||
restoreMessageCount[fullFolderName] = childTotalCount
|
||||
}
|
||||
|
||||
childFolderCount := ptr.Val(child.GetChildFolderCount())
|
||||
|
||||
if childFolderCount > 0 {
|
||||
parentFolder := fullFolderName
|
||||
checkAllSubFolder(ctx, client, child, testUser, parentFolder, dataFolder, restoreMessageCount)
|
||||
}
|
||||
}
|
||||
}
|
||||
369  src/cmd/sanity_test/restore/onedrive.go (new file)
@ -0,0 +1,369 @@
|
||||
package restore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/alcionai/clues"
|
||||
msgraphsdk "github.com/microsoftgraph/msgraph-sdk-go"
|
||||
"golang.org/x/exp/slices"
|
||||
|
||||
"github.com/alcionai/corso/src/cmd/sanity_test/common"
|
||||
"github.com/alcionai/corso/src/internal/common/ptr"
|
||||
"github.com/alcionai/corso/src/pkg/path"
|
||||
)
|
||||
|
||||
const (
|
||||
owner = "owner"
|
||||
)
|
||||
|
||||
func CheckOneDriveRestoration(
|
||||
ctx context.Context,
|
||||
client *msgraphsdk.GraphServiceClient,
|
||||
userID, folderName, dataFolder string,
|
||||
startTime time.Time,
|
||||
) {
|
||||
drive, err := client.
|
||||
Users().
|
||||
ByUserId(userID).
|
||||
Drive().
|
||||
Get(ctx, nil)
|
||||
if err != nil {
|
||||
common.Fatal(ctx, "getting the drive:", err)
|
||||
}
|
||||
|
||||
checkDriveRestoration(
|
||||
ctx,
|
||||
client,
|
||||
path.OneDriveService,
|
||||
folderName,
|
||||
ptr.Val(drive.GetId()),
|
||||
ptr.Val(drive.GetName()),
|
||||
dataFolder,
|
||||
startTime,
|
||||
false)
|
||||
}
|
||||
|
||||
func checkDriveRestoration(
|
||||
ctx context.Context,
|
||||
client *msgraphsdk.GraphServiceClient,
|
||||
service path.ServiceType,
|
||||
folderName,
|
||||
driveID,
|
||||
driveName,
|
||||
dataFolder string,
|
||||
startTime time.Time,
|
||||
skipPermissionTest bool,
|
||||
) {
|
||||
var (
|
||||
// map itemID -> item size
|
||||
fileSizes = make(map[string]int64)
|
||||
// map itemID -> permission id -> []permission roles
|
||||
folderPermissions = make(map[string][]common.PermissionInfo)
|
||||
restoreFile = make(map[string]int64)
|
||||
restoredFolderPermissions = make(map[string][]common.PermissionInfo)
|
||||
)
|
||||
|
||||
ctx = clues.Add(ctx, "drive_id", driveID, "drive_name", driveName)
|
||||
|
||||
restoreFolderID := PopulateDriveDetails(
|
||||
ctx,
|
||||
client,
|
||||
driveID,
|
||||
folderName,
|
||||
dataFolder,
|
||||
fileSizes,
|
||||
folderPermissions,
|
||||
startTime)
|
||||
|
||||
getRestoredDrive(ctx, client, driveID, restoreFolderID, restoreFile, restoredFolderPermissions, startTime)
|
||||
|
||||
checkRestoredDriveItemPermissions(
|
||||
ctx,
|
||||
service,
|
||||
skipPermissionTest,
|
||||
folderPermissions,
|
||||
restoredFolderPermissions)
|
||||
|
||||
for fileName, expected := range fileSizes {
|
||||
common.LogAndPrint(ctx, "checking for file: %s", fileName)
|
||||
|
||||
got := restoreFile[fileName]
|
||||
|
||||
common.Assert(
|
||||
ctx,
|
||||
func() bool { return expected == got },
|
||||
fmt.Sprintf("different file size: %s", fileName),
|
||||
expected,
|
||||
got)
|
||||
}
|
||||
|
||||
fmt.Println("Success")
|
||||
}
|
||||
|
||||
func PopulateDriveDetails(
|
||||
ctx context.Context,
|
||||
client *msgraphsdk.GraphServiceClient,
|
||||
driveID, folderName, dataFolder string,
|
||||
fileSizes map[string]int64,
|
||||
folderPermissions map[string][]common.PermissionInfo,
|
||||
startTime time.Time,
|
||||
) string {
|
||||
var restoreFolderID string
|
||||
|
||||
response, err := client.
|
||||
Drives().
|
||||
ByDriveId(driveID).
|
||||
Items().
|
||||
ByDriveItemId("root").
|
||||
Children().
|
||||
Get(ctx, nil)
|
||||
if err != nil {
|
||||
common.Fatal(ctx, "getting drive by id", err)
|
||||
}
|
||||
|
||||
for _, driveItem := range response.GetValue() {
|
||||
var (
|
||||
itemID = ptr.Val(driveItem.GetId())
|
||||
itemName = ptr.Val(driveItem.GetName())
|
||||
)
|
||||
|
||||
if itemName == folderName {
|
||||
restoreFolderID = itemID
|
||||
continue
|
||||
}
|
||||
|
||||
if itemName != dataFolder {
|
||||
common.LogAndPrint(ctx, "test data for folder: %s", dataFolder)
|
||||
continue
|
||||
}
|
||||
|
||||
// if it's a file check the size
|
||||
if driveItem.GetFile() != nil {
|
||||
fileSizes[itemName] = ptr.Val(driveItem.GetSize())
|
||||
}
|
||||
|
||||
if driveItem.GetFolder() == nil && driveItem.GetPackage() == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// currently we don't restore blank folders.
|
||||
// skip permission check for empty folders
|
||||
if ptr.Val(driveItem.GetFolder().GetChildCount()) == 0 {
|
||||
common.LogAndPrint(ctx, "skipped empty folder: %s", itemName)
|
||||
continue
|
||||
}
|
||||
|
||||
folderPermissions[itemName] = permissionIn(ctx, client, driveID, itemID)
|
||||
getOneDriveChildFolder(ctx, client, driveID, itemID, itemName, fileSizes, folderPermissions, startTime)
|
||||
}
|
||||
|
||||
return restoreFolderID
|
||||
}
|
||||
|
||||
func checkRestoredDriveItemPermissions(
|
||||
ctx context.Context,
|
||||
service path.ServiceType,
|
||||
skip bool,
|
||||
folderPermissions map[string][]common.PermissionInfo,
|
||||
restoredFolderPermissions map[string][]common.PermissionInfo,
|
||||
) {
|
||||
if skip {
|
||||
return
|
||||
}
|
||||
|
||||
/**
|
||||
TODO: replace this check with testElementsMatch
|
||||
from internal/connecter/graph_connector_helper_test.go
|
||||
**/
|
||||
|
||||
for folderName, permissions := range folderPermissions {
|
||||
common.LogAndPrint(ctx, "checking for folder: %s", folderName)
|
||||
|
||||
restoreFolderPerm := restoredFolderPermissions[folderName]
|
||||
|
||||
if len(permissions) < 1 {
|
||||
common.LogAndPrint(ctx, "no permissions found in: %s", folderName)
|
||||
continue
|
||||
}
|
||||
|
||||
permCheck := func() bool { return len(permissions) == len(restoreFolderPerm) }
|
||||
|
||||
if service == path.SharePointService {
|
||||
permCheck = func() bool { return len(permissions) <= len(restoreFolderPerm) }
|
||||
}
|
||||
|
||||
common.Assert(
|
||||
ctx,
|
||||
permCheck,
|
||||
fmt.Sprintf("wrong number of restored permissions: %s", folderName),
|
||||
permissions,
|
||||
restoreFolderPerm)
|
||||
|
||||
for _, perm := range permissions {
|
||||
eqID := func(pi common.PermissionInfo) bool { return strings.EqualFold(pi.EntityID, perm.EntityID) }
|
||||
i := slices.IndexFunc(restoreFolderPerm, eqID)
|
||||
|
||||
common.Assert(
|
||||
ctx,
|
||||
func() bool { return i >= 0 },
|
||||
fmt.Sprintf("permission was restored in: %s", folderName),
|
||||
perm.EntityID,
|
||||
restoreFolderPerm)
|
||||
|
||||
// permissions should be sorted, so a by-index comparison works
|
||||
restored := restoreFolderPerm[i]
|
||||
|
||||
common.Assert(
|
||||
ctx,
|
||||
func() bool { return slices.Equal(perm.Roles, restored.Roles) },
|
||||
fmt.Sprintf("different roles restored: %s", folderName),
|
||||
perm.Roles,
|
||||
restored.Roles)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func getOneDriveChildFolder(
|
||||
ctx context.Context,
|
||||
client *msgraphsdk.GraphServiceClient,
|
||||
driveID, itemID, parentName string,
|
||||
fileSizes map[string]int64,
|
||||
folderPermission map[string][]common.PermissionInfo,
|
||||
startTime time.Time,
|
||||
) {
|
||||
response, err := client.Drives().ByDriveId(driveID).Items().ByDriveItemId(itemID).Children().Get(ctx, nil)
|
||||
if err != nil {
|
||||
common.Fatal(ctx, "getting child folder", err)
|
||||
}
|
||||
|
||||
for _, driveItem := range response.GetValue() {
|
||||
var (
|
||||
itemID = ptr.Val(driveItem.GetId())
|
||||
itemName = ptr.Val(driveItem.GetName())
|
||||
fullName = parentName + "/" + itemName
|
||||
)
|
||||
|
||||
folderTime, hasTime := common.MustGetTimeFromName(ctx, itemName)
|
||||
if !common.IsWithinTimeBound(ctx, startTime, folderTime, hasTime) {
|
||||
continue
|
||||
}
|
||||
|
||||
// if it's a file check the size
|
||||
if driveItem.GetFile() != nil {
|
||||
fileSizes[fullName] = ptr.Val(driveItem.GetSize())
|
||||
}
|
||||
|
||||
if driveItem.GetFolder() == nil && driveItem.GetPackage() == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// currently we don't restore blank folders.
|
||||
// skip permission check for empty folders
|
||||
if ptr.Val(driveItem.GetFolder().GetChildCount()) == 0 {
|
||||
common.LogAndPrint(ctx, "skipped empty folder: %s", fullName)
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
folderPermission[fullName] = permissionIn(ctx, client, driveID, itemID)
|
||||
getOneDriveChildFolder(ctx, client, driveID, itemID, fullName, fileSizes, folderPermission, startTime)
|
||||
}
|
||||
}
|
||||
|
||||
func getRestoredDrive(
|
||||
ctx context.Context,
|
||||
client *msgraphsdk.GraphServiceClient,
|
||||
driveID, restoreFolderID string,
|
||||
restoreFile map[string]int64,
|
||||
restoreFolder map[string][]common.PermissionInfo,
|
||||
startTime time.Time,
|
||||
) {
|
||||
restored, err := client.
|
||||
Drives().
|
||||
ByDriveId(driveID).
|
||||
Items().
|
||||
ByDriveItemId(restoreFolderID).
|
||||
Children().
|
||||
Get(ctx, nil)
|
||||
if err != nil {
|
||||
common.Fatal(ctx, "getting child folder", err)
|
||||
}
|
||||
|
||||
for _, item := range restored.GetValue() {
|
||||
var (
|
||||
itemID = ptr.Val(item.GetId())
|
||||
itemName = ptr.Val(item.GetName())
|
||||
itemSize = ptr.Val(item.GetSize())
|
||||
)
|
||||
|
||||
if item.GetFile() != nil {
|
||||
restoreFile[itemName] = itemSize
|
||||
continue
|
||||
}
|
||||
|
||||
if item.GetFolder() == nil && item.GetPackage() == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
restoreFolder[itemName] = permissionIn(ctx, client, driveID, itemID)
|
||||
getOneDriveChildFolder(ctx, client, driveID, itemID, itemName, restoreFile, restoreFolder, startTime)
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// permission helpers
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
func permissionIn(
|
||||
ctx context.Context,
|
||||
client *msgraphsdk.GraphServiceClient,
|
||||
driveID, itemID string,
|
||||
) []common.PermissionInfo {
|
||||
pi := []common.PermissionInfo{}
|
||||
|
||||
pcr, err := client.
|
||||
Drives().
|
||||
ByDriveId(driveID).
|
||||
Items().
|
||||
ByDriveItemId(itemID).
|
||||
Permissions().
|
||||
Get(ctx, nil)
|
||||
if err != nil {
|
||||
common.Fatal(ctx, "getting permission", err)
|
||||
}
|
||||
|
||||
for _, perm := range pcr.GetValue() {
|
||||
if perm.GetGrantedToV2() == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
var (
|
||||
gv2 = perm.GetGrantedToV2()
|
||||
permInfo = common.PermissionInfo{}
|
||||
entityID string
|
||||
)
|
||||
|
||||
// TODO: replace with filterUserPermissions in onedrive item.go
|
||||
if gv2.GetUser() != nil {
|
||||
entityID = ptr.Val(gv2.GetUser().GetId())
|
||||
} else if gv2.GetGroup() != nil {
|
||||
entityID = ptr.Val(gv2.GetGroup().GetId())
|
||||
}
|
||||
|
||||
roles := common.FilterSlice(perm.GetRoles(), owner)
|
||||
for _, role := range roles {
|
||||
permInfo.EntityID = entityID
|
||||
permInfo.Roles = append(permInfo.Roles, role)
|
||||
}
|
||||
|
||||
if len(roles) > 0 {
|
||||
slices.Sort(permInfo.Roles)
|
||||
pi = append(pi, permInfo)
|
||||
}
|
||||
}
|
||||
|
||||
return pi
|
||||
}
|
||||
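For reference, a minimal standalone sketch of the role filtering that permissionIn applies through common.FilterSlice. The role list below is illustrative only and is not taken from this change; the import path is the one used by the restore files in this diff.

package main

import (
	"fmt"

	"github.com/alcionai/corso/src/cmd/sanity_test/common"
)

func main() {
	// FilterSlice drops case-insensitive matches of the second argument, so
	// the "owner" role never participates in the permission comparison.
	roles := common.FilterSlice([]string{"Owner", "read", "write"}, "owner")
	fmt.Println(roles) // [read write]
}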
39
src/cmd/sanity_test/restore/sharepoint.go
Normal file
@ -0,0 +1,39 @
package restore

import (
	"context"
	"time"

	msgraphsdk "github.com/microsoftgraph/msgraph-sdk-go"

	"github.com/alcionai/corso/src/cmd/sanity_test/common"
	"github.com/alcionai/corso/src/internal/common/ptr"
	"github.com/alcionai/corso/src/pkg/path"
)

func CheckSharePointRestoration(
	ctx context.Context,
	client *msgraphsdk.GraphServiceClient,
	siteID, userID, folderName, dataFolder string,
	startTime time.Time,
) {
	drive, err := client.
		Sites().
		BySiteId(siteID).
		Drive().
		Get(ctx, nil)
	if err != nil {
		common.Fatal(ctx, "getting the drive:", err)
	}

	checkDriveRestoration(
		ctx,
		client,
		path.SharePointService,
		folderName,
		ptr.Val(drive.GetId()),
		ptr.Val(drive.GetName()),
		dataFolder,
		startTime,
		true)
}
@ -2,45 +2,21 @@ package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
stdpath "path"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/alcionai/clues"
|
||||
msgraphsdk "github.com/microsoftgraph/msgraph-sdk-go"
|
||||
"github.com/microsoftgraph/msgraph-sdk-go/models"
|
||||
"github.com/microsoftgraph/msgraph-sdk-go/users"
|
||||
"golang.org/x/exp/slices"
|
||||
|
||||
"github.com/alcionai/corso/src/internal/common/dttm"
|
||||
"github.com/alcionai/corso/src/internal/common/ptr"
|
||||
"github.com/alcionai/corso/src/cmd/sanity_test/common"
|
||||
"github.com/alcionai/corso/src/cmd/sanity_test/export"
|
||||
"github.com/alcionai/corso/src/cmd/sanity_test/restore"
|
||||
"github.com/alcionai/corso/src/internal/m365/graph"
|
||||
"github.com/alcionai/corso/src/internal/tester/tconfig"
|
||||
"github.com/alcionai/corso/src/pkg/filters"
|
||||
"github.com/alcionai/corso/src/pkg/logger"
|
||||
"github.com/alcionai/corso/src/pkg/path"
|
||||
)
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// types, consts, etc
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
type permissionInfo struct {
|
||||
entityID string
|
||||
roles []string
|
||||
}
|
||||
|
||||
const (
|
||||
owner = "owner"
|
||||
)
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// main
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
func main() {
|
||||
ls := logger.Settings{
|
||||
File: logger.GetLogFile(""),
|
||||
@ -60,16 +36,16 @@ func main() {
|
||||
os.Getenv("AZURE_CLIENT_ID"),
|
||||
os.Getenv("AZURE_CLIENT_SECRET"))
|
||||
if err != nil {
|
||||
fatal(ctx, "creating adapter", err)
|
||||
common.Fatal(ctx, "creating adapter", err)
|
||||
}
|
||||
|
||||
var (
|
||||
client = msgraphsdk.NewGraphServiceClient(adapter)
|
||||
testUser = tconfig.GetM365UserID(ctx)
|
||||
testSite = tconfig.GetM365SiteID(ctx)
|
||||
testService = os.Getenv("SANITY_RESTORE_SERVICE")
|
||||
folder = strings.TrimSpace(os.Getenv("SANITY_RESTORE_FOLDER"))
|
||||
startTime, _ = mustGetTimeFromName(ctx, folder)
|
||||
testKind = os.Getenv("SANITY_TEST_KIND") // restore or export (cli arg?)
|
||||
testService = os.Getenv("SANITY_TEST_SERVICE")
|
||||
folder = strings.TrimSpace(os.Getenv("SANITY_TEST_FOLDER"))
|
||||
dataFolder = os.Getenv("TEST_DATA")
|
||||
baseBackupFolder = os.Getenv("BASE_BACKUP")
|
||||
)
|
||||
@ -78,664 +54,35 @@ func main() {
|
||||
ctx,
|
||||
"resource_owner", testUser,
|
||||
"service", testService,
|
||||
"sanity_restore_folder", folder,
|
||||
"start_time", startTime.Format(time.RFC3339Nano))
|
||||
"sanity_restore_folder", folder)
|
||||
|
||||
logger.Ctx(ctx).Info("starting sanity test check")
|
||||
|
||||
switch testService {
|
||||
case "exchange":
|
||||
checkEmailRestoration(ctx, client, testUser, folder, dataFolder, baseBackupFolder, startTime)
|
||||
case "onedrive":
|
||||
checkOneDriveRestoration(ctx, client, testUser, folder, dataFolder, startTime)
|
||||
case "sharepoint":
|
||||
checkSharePointRestoration(ctx, client, testSite, testUser, folder, dataFolder, startTime)
|
||||
switch testKind {
|
||||
case "restore":
|
||||
startTime, _ := common.MustGetTimeFromName(ctx, folder)
|
||||
clues.Add(ctx, "sanity_restore_start_time", startTime.Format(time.RFC3339))
|
||||
|
||||
switch testService {
|
||||
case "exchange":
|
||||
restore.CheckEmailRestoration(ctx, client, testUser, folder, dataFolder, baseBackupFolder, startTime)
|
||||
case "onedrive":
|
||||
restore.CheckOneDriveRestoration(ctx, client, testUser, folder, dataFolder, startTime)
|
||||
case "sharepoint":
|
||||
restore.CheckSharePointRestoration(ctx, client, testSite, testUser, folder, dataFolder, startTime)
|
||||
default:
|
||||
common.Fatal(ctx, "unknown service for restore sanity tests", nil)
|
||||
}
|
||||
case "export":
|
||||
switch testService {
|
||||
case "onedrive":
|
||||
export.CheckOneDriveExport(ctx, client, testUser, folder, dataFolder)
|
||||
case "sharepoint":
|
||||
export.CheckSharePointExport(ctx, client, testSite, folder, dataFolder)
|
||||
default:
|
||||
common.Fatal(ctx, "unknown service for export sanity tests", nil)
|
||||
}
|
||||
default:
|
||||
fatal(ctx, "no service specified", nil)
|
||||
common.Fatal(ctx, "unknown test kind (expected restore or export)", nil)
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// exchange
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// checkEmailRestoration verifies that the email count in the restored folder matches the
|
||||
// email count in the actual m365 account
|
||||
func checkEmailRestoration(
|
||||
ctx context.Context,
|
||||
client *msgraphsdk.GraphServiceClient,
|
||||
testUser, folderName, dataFolder, baseBackupFolder string,
|
||||
startTime time.Time,
|
||||
) {
|
||||
var (
|
||||
restoreFolder models.MailFolderable
|
||||
itemCount = make(map[string]int32)
|
||||
restoreItemCount = make(map[string]int32)
|
||||
builder = client.Users().ByUserId(testUser).MailFolders()
|
||||
)
|
||||
|
||||
for {
|
||||
result, err := builder.Get(ctx, nil)
|
||||
if err != nil {
|
||||
fatal(ctx, "getting mail folders", err)
|
||||
}
|
||||
|
||||
values := result.GetValue()
|
||||
|
||||
for _, v := range values {
|
||||
itemName := ptr.Val(v.GetDisplayName())
|
||||
|
||||
if itemName == folderName {
|
||||
restoreFolder = v
|
||||
continue
|
||||
}
|
||||
|
||||
if itemName == dataFolder || itemName == baseBackupFolder {
|
||||
// recursively aggregate item counts from all child folders.
|
||||
getAllMailSubFolders(ctx, client, testUser, v, itemName, dataFolder, itemCount)
|
||||
|
||||
itemCount[itemName] = ptr.Val(v.GetTotalItemCount())
|
||||
}
|
||||
}
|
||||
|
||||
link, ok := ptr.ValOK(result.GetOdataNextLink())
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
|
||||
builder = users.NewItemMailFoldersRequestBuilder(link, client.GetAdapter())
|
||||
}
|
||||
|
||||
folderID := ptr.Val(restoreFolder.GetId())
|
||||
folderName = ptr.Val(restoreFolder.GetDisplayName())
|
||||
ctx = clues.Add(
|
||||
ctx,
|
||||
"restore_folder_id", folderID,
|
||||
"restore_folder_name", folderName)
|
||||
|
||||
childFolder, err := client.
|
||||
Users().
|
||||
ByUserId(testUser).
|
||||
MailFolders().
|
||||
ByMailFolderId(folderID).
|
||||
ChildFolders().
|
||||
Get(ctx, nil)
|
||||
if err != nil {
|
||||
fatal(ctx, "getting restore folder child folders", err)
|
||||
}
|
||||
|
||||
for _, fld := range childFolder.GetValue() {
|
||||
restoreDisplayName := ptr.Val(fld.GetDisplayName())
|
||||
|
||||
// check if folder is the data folder we loaded or the base backup to verify
|
||||
// the incremental backup worked fine
|
||||
if strings.EqualFold(restoreDisplayName, dataFolder) || strings.EqualFold(restoreDisplayName, baseBackupFolder) {
|
||||
count, _ := ptr.ValOK(fld.GetTotalItemCount())
|
||||
|
||||
restoreItemCount[restoreDisplayName] = count
|
||||
checkAllSubFolder(ctx, client, fld, testUser, restoreDisplayName, dataFolder, restoreItemCount)
|
||||
}
|
||||
}
|
||||
|
||||
verifyEmailData(ctx, restoreItemCount, itemCount)
|
||||
}
|
||||
|
||||
func verifyEmailData(ctx context.Context, restoreMessageCount, messageCount map[string]int32) {
|
||||
for fldName, expected := range messageCount {
|
||||
got := restoreMessageCount[fldName]
|
||||
|
||||
assert(
|
||||
ctx,
|
||||
func() bool { return expected == got },
|
||||
fmt.Sprintf("Restore item counts do not match: %s", fldName),
|
||||
expected,
|
||||
got)
|
||||
}
|
||||
}
|
||||
|
||||
// getAllMailSubFolders will recursively check for all subfolders and get the corresponding
|
||||
// email count.
|
||||
func getAllMailSubFolders(
|
||||
ctx context.Context,
|
||||
client *msgraphsdk.GraphServiceClient,
|
||||
testUser string,
|
||||
r models.MailFolderable,
|
||||
parentFolder,
|
||||
dataFolder string,
|
||||
messageCount map[string]int32,
|
||||
) {
|
||||
var (
|
||||
folderID = ptr.Val(r.GetId())
|
||||
count int32 = 99
|
||||
options = &users.ItemMailFoldersItemChildFoldersRequestBuilderGetRequestConfiguration{
|
||||
QueryParameters: &users.ItemMailFoldersItemChildFoldersRequestBuilderGetQueryParameters{
|
||||
Top: &count,
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
ctx = clues.Add(ctx, "parent_folder_id", folderID)
|
||||
|
||||
childFolder, err := client.
|
||||
Users().
|
||||
ByUserId(testUser).
|
||||
MailFolders().
|
||||
ByMailFolderId(folderID).
|
||||
ChildFolders().
|
||||
Get(ctx, options)
|
||||
if err != nil {
|
||||
fatal(ctx, "getting mail subfolders", err)
|
||||
}
|
||||
|
||||
for _, child := range childFolder.GetValue() {
|
||||
var (
|
||||
childDisplayName = ptr.Val(child.GetDisplayName())
|
||||
childFolderCount = ptr.Val(child.GetChildFolderCount())
|
||||
//nolint:forbidigo
|
||||
fullFolderName = stdpath.Join(parentFolder, childDisplayName)
|
||||
)
|
||||
|
||||
if filters.PathContains([]string{dataFolder}).Compare(fullFolderName) {
|
||||
messageCount[fullFolderName] = ptr.Val(child.GetTotalItemCount())
|
||||
// recursively check for subfolders
|
||||
if childFolderCount > 0 {
|
||||
parentFolder := fullFolderName
|
||||
|
||||
getAllMailSubFolders(ctx, client, testUser, child, parentFolder, dataFolder, messageCount)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
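A brief sketch of the filters.PathContains check used above, assuming, as the name and usage suggest, that Compare reports whether the candidate path contains the configured element. The "test-data" value is illustrative and not taken from this diff.

package main

import "github.com/alcionai/corso/src/pkg/filters"

// countsTowardComparison mirrors the check above: a folder's fully-joined
// name is counted only when it contains the test data folder element.
func countsTowardComparison(fullFolderName string) bool {
	return filters.PathContains([]string{"test-data"}).Compare(fullFolderName)
}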
|
||||
// checkAllSubFolder will recursively traverse inside the restore folder and
|
||||
// verify that the data matches in all subfolders
|
||||
func checkAllSubFolder(
|
||||
ctx context.Context,
|
||||
client *msgraphsdk.GraphServiceClient,
|
||||
r models.MailFolderable,
|
||||
testUser,
|
||||
parentFolder,
|
||||
dataFolder string,
|
||||
restoreMessageCount map[string]int32,
|
||||
) {
|
||||
var (
|
||||
folderID = ptr.Val(r.GetId())
|
||||
count int32 = 99
|
||||
options = &users.ItemMailFoldersItemChildFoldersRequestBuilderGetRequestConfiguration{
|
||||
QueryParameters: &users.ItemMailFoldersItemChildFoldersRequestBuilderGetQueryParameters{
|
||||
Top: &count,
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
childFolder, err := client.
|
||||
Users().
|
||||
ByUserId(testUser).
|
||||
MailFolders().
|
||||
ByMailFolderId(folderID).
|
||||
ChildFolders().
|
||||
Get(ctx, options)
|
||||
if err != nil {
|
||||
fatal(ctx, "getting mail subfolders", err)
|
||||
}
|
||||
|
||||
for _, child := range childFolder.GetValue() {
|
||||
var (
|
||||
childDisplayName = ptr.Val(child.GetDisplayName())
|
||||
//nolint:forbidigo
|
||||
fullFolderName = stdpath.Join(parentFolder, childDisplayName)
|
||||
)
|
||||
|
||||
if filters.PathContains([]string{dataFolder}).Compare(fullFolderName) {
|
||||
childTotalCount, _ := ptr.ValOK(child.GetTotalItemCount())
|
||||
restoreMessageCount[fullFolderName] = childTotalCount
|
||||
}
|
||||
|
||||
childFolderCount := ptr.Val(child.GetChildFolderCount())
|
||||
|
||||
if childFolderCount > 0 {
|
||||
parentFolder := fullFolderName
|
||||
checkAllSubFolder(ctx, client, child, testUser, parentFolder, dataFolder, restoreMessageCount)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// oneDrive
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
func checkOneDriveRestoration(
|
||||
ctx context.Context,
|
||||
client *msgraphsdk.GraphServiceClient,
|
||||
userID, folderName, dataFolder string,
|
||||
startTime time.Time,
|
||||
) {
|
||||
drive, err := client.
|
||||
Users().
|
||||
ByUserId(userID).
|
||||
Drive().
|
||||
Get(ctx, nil)
|
||||
if err != nil {
|
||||
fatal(ctx, "getting the drive:", err)
|
||||
}
|
||||
|
||||
checkDriveRestoration(
|
||||
ctx,
|
||||
client,
|
||||
path.OneDriveService,
|
||||
folderName,
|
||||
ptr.Val(drive.GetId()),
|
||||
ptr.Val(drive.GetName()),
|
||||
dataFolder,
|
||||
startTime,
|
||||
false)
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// sharePoint
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
func checkSharePointRestoration(
|
||||
ctx context.Context,
|
||||
client *msgraphsdk.GraphServiceClient,
|
||||
siteID, userID, folderName, dataFolder string,
|
||||
startTime time.Time,
|
||||
) {
|
||||
drive, err := client.
|
||||
Sites().
|
||||
BySiteId(siteID).
|
||||
Drive().
|
||||
Get(ctx, nil)
|
||||
if err != nil {
|
||||
fatal(ctx, "getting the drive:", err)
|
||||
}
|
||||
|
||||
checkDriveRestoration(
|
||||
ctx,
|
||||
client,
|
||||
path.SharePointService,
|
||||
folderName,
|
||||
ptr.Val(drive.GetId()),
|
||||
ptr.Val(drive.GetName()),
|
||||
dataFolder,
|
||||
startTime,
|
||||
true)
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// shared drive tests
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
func checkDriveRestoration(
|
||||
ctx context.Context,
|
||||
client *msgraphsdk.GraphServiceClient,
|
||||
service path.ServiceType,
|
||||
folderName,
|
||||
driveID,
|
||||
driveName,
|
||||
dataFolder string,
|
||||
startTime time.Time,
|
||||
skipPermissionTest bool,
|
||||
) {
|
||||
var (
|
||||
// map itemID -> item size
|
||||
fileSizes = make(map[string]int64)
|
||||
// map itemID -> permission id -> []permission roles
|
||||
folderPermissions = make(map[string][]permissionInfo)
|
||||
restoreFile = make(map[string]int64)
|
||||
restoredFolderPermissions = make(map[string][]permissionInfo)
|
||||
)
|
||||
|
||||
var restoreFolderID string
|
||||
|
||||
ctx = clues.Add(ctx, "drive_id", driveID, "drive_name", driveName)
|
||||
|
||||
response, err := client.
|
||||
Drives().
|
||||
ByDriveId(driveID).
|
||||
Items().
|
||||
ByDriveItemId("root").
|
||||
Children().
|
||||
Get(ctx, nil)
|
||||
if err != nil {
|
||||
fatal(ctx, "getting drive by id", err)
|
||||
}
|
||||
|
||||
for _, driveItem := range response.GetValue() {
|
||||
var (
|
||||
itemID = ptr.Val(driveItem.GetId())
|
||||
itemName = ptr.Val(driveItem.GetName())
|
||||
)
|
||||
|
||||
if itemName == folderName {
|
||||
restoreFolderID = itemID
|
||||
continue
|
||||
}
|
||||
|
||||
if itemName != dataFolder {
|
||||
logAndPrint(ctx, "test data for folder: %s", dataFolder)
|
||||
continue
|
||||
}
|
||||
|
||||
// if it's a file check the size
|
||||
if driveItem.GetFile() != nil {
|
||||
fileSizes[itemName] = ptr.Val(driveItem.GetSize())
|
||||
}
|
||||
|
||||
if driveItem.GetFolder() == nil && driveItem.GetPackage() == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// currently we don't restore blank folders.
|
||||
// skip permission check for empty folders
|
||||
if ptr.Val(driveItem.GetFolder().GetChildCount()) == 0 {
|
||||
logAndPrint(ctx, "skipped empty folder: %s", itemName)
|
||||
continue
|
||||
}
|
||||
|
||||
folderPermissions[itemName] = permissionIn(ctx, client, driveID, itemID)
|
||||
getOneDriveChildFolder(ctx, client, driveID, itemID, itemName, fileSizes, folderPermissions, startTime)
|
||||
}
|
||||
|
||||
getRestoredDrive(ctx, client, driveID, restoreFolderID, restoreFile, restoredFolderPermissions, startTime)
|
||||
|
||||
checkRestoredDriveItemPermissions(
|
||||
ctx,
|
||||
service,
|
||||
skipPermissionTest,
|
||||
folderPermissions,
|
||||
restoredFolderPermissions)
|
||||
|
||||
for fileName, expected := range fileSizes {
|
||||
logAndPrint(ctx, "checking for file: %s", fileName)
|
||||
|
||||
got := restoreFile[fileName]
|
||||
|
||||
assert(
|
||||
ctx,
|
||||
func() bool { return expected == got },
|
||||
fmt.Sprintf("different file size: %s", fileName),
|
||||
expected,
|
||||
got)
|
||||
}
|
||||
|
||||
fmt.Println("Success")
|
||||
}
|
||||
|
||||
func checkRestoredDriveItemPermissions(
|
||||
ctx context.Context,
|
||||
service path.ServiceType,
|
||||
skip bool,
|
||||
folderPermissions map[string][]permissionInfo,
|
||||
restoredFolderPermissions map[string][]permissionInfo,
|
||||
) {
|
||||
if skip {
|
||||
return
|
||||
}
|
||||
|
||||
/**
|
||||
TODO: replace this check with testElementsMatch
|
||||
from internal/connecter/graph_connector_helper_test.go
|
||||
**/
|
||||
|
||||
for folderName, permissions := range folderPermissions {
|
||||
logAndPrint(ctx, "checking for folder: %s", folderName)
|
||||
|
||||
restoreFolderPerm := restoredFolderPermissions[folderName]
|
||||
|
||||
if len(permissions) < 1 {
|
||||
logAndPrint(ctx, "no permissions found in: %s", folderName)
|
||||
continue
|
||||
}
|
||||
|
||||
permCheck := func() bool { return len(permissions) == len(restoreFolderPerm) }
|
||||
|
||||
if service == path.SharePointService {
|
||||
permCheck = func() bool { return len(permissions) <= len(restoreFolderPerm) }
|
||||
}
|
||||
|
||||
assert(
|
||||
ctx,
|
||||
permCheck,
|
||||
fmt.Sprintf("wrong number of restored permissions: %s", folderName),
|
||||
permissions,
|
||||
restoreFolderPerm)
|
||||
|
||||
for _, perm := range permissions {
|
||||
eqID := func(pi permissionInfo) bool { return strings.EqualFold(pi.entityID, perm.entityID) }
|
||||
i := slices.IndexFunc(restoreFolderPerm, eqID)
|
||||
|
||||
assert(
|
||||
ctx,
|
||||
func() bool { return i >= 0 },
|
||||
fmt.Sprintf("permission was restored in: %s", folderName),
|
||||
perm.entityID,
|
||||
restoreFolderPerm)
|
||||
|
||||
// permissions should be sorted, so a by-index comparison works
|
||||
restored := restoreFolderPerm[i]
|
||||
|
||||
assert(
|
||||
ctx,
|
||||
func() bool { return slices.Equal(perm.roles, restored.roles) },
|
||||
fmt.Sprintf("different roles restored: %s", folderName),
|
||||
perm.roles,
|
||||
restored.roles)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func getOneDriveChildFolder(
|
||||
ctx context.Context,
|
||||
client *msgraphsdk.GraphServiceClient,
|
||||
driveID, itemID, parentName string,
|
||||
fileSizes map[string]int64,
|
||||
folderPermission map[string][]permissionInfo,
|
||||
startTime time.Time,
|
||||
) {
|
||||
response, err := client.Drives().ByDriveId(driveID).Items().ByDriveItemId(itemID).Children().Get(ctx, nil)
|
||||
if err != nil {
|
||||
fatal(ctx, "getting child folder", err)
|
||||
}
|
||||
|
||||
for _, driveItem := range response.GetValue() {
|
||||
var (
|
||||
itemID = ptr.Val(driveItem.GetId())
|
||||
itemName = ptr.Val(driveItem.GetName())
|
||||
fullName = parentName + "/" + itemName
|
||||
)
|
||||
|
||||
folderTime, hasTime := mustGetTimeFromName(ctx, itemName)
|
||||
if !isWithinTimeBound(ctx, startTime, folderTime, hasTime) {
|
||||
continue
|
||||
}
|
||||
|
||||
// if it's a file check the size
|
||||
if driveItem.GetFile() != nil {
|
||||
fileSizes[fullName] = ptr.Val(driveItem.GetSize())
|
||||
}
|
||||
|
||||
if driveItem.GetFolder() == nil && driveItem.GetPackage() == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// currently we don't restore blank folders.
|
||||
// skip permission check for empty folders
|
||||
if ptr.Val(driveItem.GetFolder().GetChildCount()) == 0 {
|
||||
logAndPrint(ctx, "skipped empty folder: %s", fullName)
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
folderPermission[fullName] = permissionIn(ctx, client, driveID, itemID)
|
||||
getOneDriveChildFolder(ctx, client, driveID, itemID, fullName, fileSizes, folderPermission, startTime)
|
||||
}
|
||||
}
|
||||
|
||||
func getRestoredDrive(
|
||||
ctx context.Context,
|
||||
client *msgraphsdk.GraphServiceClient,
|
||||
driveID, restoreFolderID string,
|
||||
restoreFile map[string]int64,
|
||||
restoreFolder map[string][]permissionInfo,
|
||||
startTime time.Time,
|
||||
) {
|
||||
restored, err := client.
|
||||
Drives().
|
||||
ByDriveId(driveID).
|
||||
Items().
|
||||
ByDriveItemId(restoreFolderID).
|
||||
Children().
|
||||
Get(ctx, nil)
|
||||
if err != nil {
|
||||
fatal(ctx, "getting child folder", err)
|
||||
}
|
||||
|
||||
for _, item := range restored.GetValue() {
|
||||
var (
|
||||
itemID = ptr.Val(item.GetId())
|
||||
itemName = ptr.Val(item.GetName())
|
||||
itemSize = ptr.Val(item.GetSize())
|
||||
)
|
||||
|
||||
if item.GetFile() != nil {
|
||||
restoreFile[itemName] = itemSize
|
||||
continue
|
||||
}
|
||||
|
||||
if item.GetFolder() == nil && item.GetPackage() == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
restoreFolder[itemName] = permissionIn(ctx, client, driveID, itemID)
|
||||
getOneDriveChildFolder(ctx, client, driveID, itemID, itemName, restoreFile, restoreFolder, startTime)
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// permission helpers
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
func permissionIn(
|
||||
ctx context.Context,
|
||||
client *msgraphsdk.GraphServiceClient,
|
||||
driveID, itemID string,
|
||||
) []permissionInfo {
|
||||
pi := []permissionInfo{}
|
||||
|
||||
pcr, err := client.
|
||||
Drives().
|
||||
ByDriveId(driveID).
|
||||
Items().
|
||||
ByDriveItemId(itemID).
|
||||
Permissions().
|
||||
Get(ctx, nil)
|
||||
if err != nil {
|
||||
fatal(ctx, "getting permission", err)
|
||||
}
|
||||
|
||||
for _, perm := range pcr.GetValue() {
|
||||
if perm.GetGrantedToV2() == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
var (
|
||||
gv2 = perm.GetGrantedToV2()
|
||||
permInfo = permissionInfo{}
|
||||
entityID string
|
||||
)
|
||||
|
||||
// TODO: replace with filterUserPermissions in onedrive item.go
|
||||
if gv2.GetUser() != nil {
|
||||
entityID = ptr.Val(gv2.GetUser().GetId())
|
||||
} else if gv2.GetGroup() != nil {
|
||||
entityID = ptr.Val(gv2.GetGroup().GetId())
|
||||
}
|
||||
|
||||
roles := filterSlice(perm.GetRoles(), owner)
|
||||
for _, role := range roles {
|
||||
permInfo.entityID = entityID
|
||||
permInfo.roles = append(permInfo.roles, role)
|
||||
}
|
||||
|
||||
if len(roles) > 0 {
|
||||
slices.Sort(permInfo.roles)
|
||||
pi = append(pi, permInfo)
|
||||
}
|
||||
}
|
||||
|
||||
return pi
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Helpers
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
func fatal(ctx context.Context, msg string, err error) {
|
||||
logger.CtxErr(ctx, err).Error("test failure: " + msg)
|
||||
fmt.Println(msg+": ", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
func mustGetTimeFromName(ctx context.Context, name string) (time.Time, bool) {
|
||||
t, err := dttm.ExtractTime(name)
|
||||
if err != nil && !errors.Is(err, dttm.ErrNoTimeString) {
|
||||
fatal(ctx, "extracting time from name: "+name, err)
|
||||
}
|
||||
|
||||
return t, !errors.Is(err, dttm.ErrNoTimeString)
|
||||
}
|
||||
|
||||
func isWithinTimeBound(ctx context.Context, bound, check time.Time, hasTime bool) bool {
|
||||
if hasTime {
|
||||
if bound.Before(check) {
|
||||
logger.Ctx(ctx).
|
||||
With("boundary_time", bound, "check_time", check).
|
||||
Info("skipping restore folder: not older than time bound")
|
||||
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func filterSlice(sl []string, remove string) []string {
|
||||
r := []string{}
|
||||
|
||||
for _, s := range sl {
|
||||
if !strings.EqualFold(s, remove) {
|
||||
r = append(r, s)
|
||||
}
|
||||
}
|
||||
|
||||
return r
|
||||
}
|
||||
|
||||
func assert(
|
||||
ctx context.Context,
|
||||
passes func() bool,
|
||||
header string,
|
||||
expect, current any,
|
||||
) {
|
||||
if passes() {
|
||||
return
|
||||
}
|
||||
|
||||
header = "Error: " + header
|
||||
expected := fmt.Sprintf("* Expected: %+v", expect)
|
||||
got := fmt.Sprintf("* Current: %+v", current)
|
||||
|
||||
logger.Ctx(ctx).Info(strings.Join([]string{header, expected, got}, " "))
|
||||
|
||||
fmt.Println(header)
|
||||
fmt.Println(expected)
|
||||
fmt.Println(got)
|
||||
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
func logAndPrint(ctx context.Context, tmpl string, vs ...any) {
|
||||
logger.Ctx(ctx).Infof(tmpl, vs...)
|
||||
fmt.Printf(tmpl+"\n", vs...)
|
||||
}
|
||||
|
||||
82
src/cmd/sanity_test/utils/utils.go
Normal file
82
src/cmd/sanity_test/utils/utils.go
Normal file
@ -0,0 +1,82 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/alcionai/corso/src/internal/common/dttm"
|
||||
"github.com/alcionai/corso/src/pkg/logger"
|
||||
)
|
||||
|
||||
func Assert(
|
||||
ctx context.Context,
|
||||
passes func() bool,
|
||||
header string,
|
||||
expect, current any,
|
||||
) {
|
||||
if passes() {
|
||||
return
|
||||
}
|
||||
|
||||
header = "Error: " + header
|
||||
expected := fmt.Sprintf("* Expected: %+v", expect)
|
||||
got := fmt.Sprintf("* Current: %+v", current)
|
||||
|
||||
logger.Ctx(ctx).Info(strings.Join([]string{header, expected, got}, " "))
|
||||
|
||||
fmt.Println(header)
|
||||
fmt.Println(expected)
|
||||
fmt.Println(got)
|
||||
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
func Fatal(ctx context.Context, msg string, err error) {
|
||||
logger.CtxErr(ctx, err).Error("test failure: " + msg)
|
||||
fmt.Println(msg+": ", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
func MustGetTimeFromName(ctx context.Context, name string) (time.Time, bool) {
|
||||
t, err := dttm.ExtractTime(name)
|
||||
if err != nil && !errors.Is(err, dttm.ErrNoTimeString) {
|
||||
Fatal(ctx, "extracting time from name: "+name, err)
|
||||
}
|
||||
|
||||
return t, !errors.Is(err, dttm.ErrNoTimeString)
|
||||
}
|
||||
|
||||
func IsWithinTimeBound(ctx context.Context, bound, check time.Time, hasTime bool) bool {
|
||||
if hasTime {
|
||||
if bound.Before(check) {
|
||||
logger.Ctx(ctx).
|
||||
With("boundary_time", bound, "check_time", check).
|
||||
Info("skipping restore folder: not older than time bound")
|
||||
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func FilterSlice(sl []string, remove string) []string {
|
||||
r := []string{}
|
||||
|
||||
for _, s := range sl {
|
||||
if !strings.EqualFold(s, remove) {
|
||||
r = append(r, s)
|
||||
}
|
||||
}
|
||||
|
||||
return r
|
||||
}
|
||||
|
||||
func LogAndPrint(ctx context.Context, tmpl string, vs ...any) {
|
||||
logger.Ctx(ctx).Infof(tmpl, vs...)
|
||||
fmt.Printf(tmpl+"\n", vs...)
|
||||
}
|
||||
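A minimal sketch, not part of the commit, of how these helpers compose in a sanity check. It imports the sanity_test common package, which exposes the same helpers under the import path used by the restore files in this diff; the folder name and counts are illustrative.

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/alcionai/corso/src/cmd/sanity_test/common"
)

func main() {
	ctx := context.Background()
	folder := "sanity-restore-folder" // illustrative name; may or may not embed a timestamp

	// Folder names may embed a timestamp; when they do, anything newer than
	// the chosen bound is skipped so unrelated test runs don't interfere.
	created, hasTime := common.MustGetTimeFromName(ctx, folder)
	if !common.IsWithinTimeBound(ctx, time.Now(), created, hasTime) {
		return
	}

	common.LogAndPrint(ctx, "checking folder: %s", folder)

	expected, got := 3, 3

	// Assert logs an expected/current pair and exits the process on mismatch.
	common.Assert(
		ctx,
		func() bool { return expected == got },
		fmt.Sprintf("item counts do not match: %s", folder),
		expected,
		got)
}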
@ -6,9 +6,9 @@ replace github.com/kopia/kopia => github.com/alcionai/kopia v0.12.2-0.2023071323
|
||||
|
||||
require (
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0
|
||||
github.com/alcionai/clues v0.0.0-20230630194723-e24d7940e07a
|
||||
github.com/alcionai/clues v0.0.0-20230728164842-7dc4795a43e4
|
||||
github.com/armon/go-metrics v0.4.1
|
||||
github.com/aws/aws-sdk-go v1.44.308
|
||||
github.com/aws/aws-sdk-go v1.44.311
|
||||
github.com/aws/aws-xray-sdk-go v1.8.1
|
||||
github.com/cenkalti/backoff/v4 v4.2.1
|
||||
github.com/google/uuid v1.3.0
|
||||
|
||||
@ -53,8 +53,8 @@ github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1o
|
||||
github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
|
||||
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8=
|
||||
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo=
|
||||
github.com/alcionai/clues v0.0.0-20230630194723-e24d7940e07a h1:mtJyeK/FhArTn06M5Lfgxk/GWnu8yqCGNN1BY16vjaA=
|
||||
github.com/alcionai/clues v0.0.0-20230630194723-e24d7940e07a/go.mod h1:MLEWSZ0cjEMg6hiGCRvE7AtrOhs7deBcm7ZrJBpfGRM=
|
||||
github.com/alcionai/clues v0.0.0-20230728164842-7dc4795a43e4 h1:husF7eAYw2HEzgjfAmNy+ZLzyztJV2SyoUngSUo829Y=
|
||||
github.com/alcionai/clues v0.0.0-20230728164842-7dc4795a43e4/go.mod h1:MLEWSZ0cjEMg6hiGCRvE7AtrOhs7deBcm7ZrJBpfGRM=
|
||||
github.com/alcionai/kopia v0.12.2-0.20230713235606-4c85869e9377 h1:w50/aVU+zRP5lvE86TSSCCYrrEyuXOlJA06R5RdTS8Y=
|
||||
github.com/alcionai/kopia v0.12.2-0.20230713235606-4c85869e9377/go.mod h1:WH725ws0BYpZpTkVh4uqFHHPiiJuirl1Cm73jv5RYyA=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
@ -66,8 +66,8 @@ github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY
|
||||
github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
|
||||
github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
|
||||
github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
|
||||
github.com/aws/aws-sdk-go v1.44.308 h1:XKu+76UHsD5LaiU2Zb1q42uWakw80Az7x39jJXXahos=
|
||||
github.com/aws/aws-sdk-go v1.44.308/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
|
||||
github.com/aws/aws-sdk-go v1.44.311 h1:60i8hyVMOXqabKJQPCq4qKRBQ6hRafI/WOcDxGM+J7Q=
|
||||
github.com/aws/aws-sdk-go v1.44.311/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
|
||||
github.com/aws/aws-xray-sdk-go v1.8.1 h1:O4pXV+hnCskaamGsZnFpzHyAmgPGusBMN6i7nnsy0Fo=
|
||||
github.com/aws/aws-xray-sdk-go v1.8.1/go.mod h1:wMmVYzej3sykAttNBkXQHK/+clAPWTOrPiajEk7Cp3A=
|
||||
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
|
||||
|
||||
@ -78,6 +78,10 @@ func (u SafeURL) Format(fs fmt.State, _ rune) {
|
||||
fmt.Fprint(fs, u.Conceal())
|
||||
}
|
||||
|
||||
func (u SafeURL) PlainString() string {
|
||||
return u.URL
|
||||
}
|
||||
|
||||
// String complies with Stringer to ensure the Conceal() version
|
||||
// of the url is printed anytime it gets transformed to a string.
|
||||
func (u SafeURL) String() string {
|
||||
|
||||
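A short sketch of the intended difference between the two accessors, written as if from within the same package (the package name isn't shown in this hunk). The URL value is illustrative, and any other SafeURL fields are left at their zero values.

u := SafeURL{URL: "https://example.com/drive/items?token=abc"} // illustrative value

fmt.Println(u)               // String() prints the Conceal()'d form, safe for logs
fmt.Println(u.PlainString()) // the raw URL, for callers that need it verbatim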
@ -74,7 +74,11 @@ func NewConn(s storage.Storage) *conn {
|
||||
}
|
||||
}
|
||||
|
||||
func (w *conn) Initialize(ctx context.Context, opts repository.Options) error {
|
||||
func (w *conn) Initialize(
|
||||
ctx context.Context,
|
||||
opts repository.Options,
|
||||
retentionOpts repository.Retention,
|
||||
) error {
|
||||
bst, err := blobStoreByProvider(ctx, opts, w.storage)
|
||||
if err != nil {
|
||||
return clues.Wrap(err, "initializing storage")
|
||||
@ -86,8 +90,23 @@ func (w *conn) Initialize(ctx context.Context, opts repository.Options) error {
|
||||
return clues.Stack(err).WithClues(ctx)
|
||||
}
|
||||
|
||||
// todo - issue #75: nil here should be a storage.NewRepoOptions()
|
||||
if err = repo.Initialize(ctx, bst, nil, cfg.CorsoPassphrase); err != nil {
|
||||
rOpts := retention.NewOpts()
|
||||
if err := rOpts.Set(retentionOpts); err != nil {
|
||||
return clues.Wrap(err, "setting retention configuration").WithClues(ctx)
|
||||
}
|
||||
|
||||
blobCfg, _, err := rOpts.AsConfigs(ctx)
|
||||
if err != nil {
|
||||
return clues.Stack(err)
|
||||
}
|
||||
|
||||
// Minimal config for retention if caller requested it.
|
||||
kopiaOpts := repo.NewRepositoryOptions{
|
||||
RetentionMode: blobCfg.RetentionMode,
|
||||
RetentionPeriod: blobCfg.RetentionPeriod,
|
||||
}
|
||||
|
||||
if err = repo.Initialize(ctx, bst, &kopiaOpts, cfg.CorsoPassphrase); err != nil {
|
||||
if errors.Is(err, repo.ErrAlreadyInitialized) {
|
||||
return clues.Stack(ErrorRepoAlreadyExists, err).WithClues(ctx)
|
||||
}
|
||||
@ -111,7 +130,10 @@ func (w *conn) Initialize(ctx context.Context, opts repository.Options) error {
|
||||
return clues.Stack(err).WithClues(ctx)
|
||||
}
|
||||
|
||||
return nil
|
||||
// Calling with all parameters here will set extend object locks for
|
||||
// maintenance. Parameters for actual retention should have been set during
|
||||
// initialization and won't be updated again.
|
||||
return clues.Stack(w.setRetentionParameters(ctx, retentionOpts)).OrNil()
|
||||
}
|
||||
|
||||
func (w *conn) Connect(ctx context.Context, opts repository.Options) error {
|
||||
|
||||
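For context, the integration tests later in this diff exercise the new parameter as sketched below. st is assumed to be an already-configured storage.Storage (the tests build one with storeTD.NewPrefixedS3Storage); passing repository.Retention{} initializes the repo without object locking.

k := NewConn(st)

err := k.Initialize(
	ctx,
	repository.Options{},
	repository.Retention{
		Mode:     ptr.To(repository.GovernanceRetention),
		Duration: ptr.To(48 * time.Hour),
		Extend:   ptr.To(true),
	})
if err != nil {
	return clues.Wrap(err, "initializing repo with retention")
}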
@ -7,12 +7,15 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/alcionai/clues"
|
||||
"github.com/kopia/kopia/repo"
|
||||
"github.com/kopia/kopia/repo/blob"
|
||||
"github.com/kopia/kopia/snapshot"
|
||||
"github.com/kopia/kopia/snapshot/policy"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/stretchr/testify/suite"
|
||||
|
||||
"github.com/alcionai/corso/src/internal/common/ptr"
|
||||
"github.com/alcionai/corso/src/internal/tester"
|
||||
"github.com/alcionai/corso/src/pkg/control/repository"
|
||||
"github.com/alcionai/corso/src/pkg/storage"
|
||||
@ -26,7 +29,7 @@ func openKopiaRepo(
|
||||
st := storeTD.NewPrefixedS3Storage(t)
|
||||
|
||||
k := NewConn(st)
|
||||
if err := k.Initialize(ctx, repository.Options{}); err != nil {
|
||||
if err := k.Initialize(ctx, repository.Options{}, repository.Retention{}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@ -82,13 +85,13 @@ func (suite *WrapperIntegrationSuite) TestRepoExistsError() {
|
||||
st := storeTD.NewPrefixedS3Storage(t)
|
||||
k := NewConn(st)
|
||||
|
||||
err := k.Initialize(ctx, repository.Options{})
|
||||
err := k.Initialize(ctx, repository.Options{}, repository.Retention{})
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
err = k.Close(ctx)
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
err = k.Initialize(ctx, repository.Options{})
|
||||
err = k.Initialize(ctx, repository.Options{}, repository.Retention{})
|
||||
assert.Error(t, err, clues.ToCore(err))
|
||||
assert.ErrorIs(t, err, ErrorRepoAlreadyExists)
|
||||
}
|
||||
@ -103,7 +106,7 @@ func (suite *WrapperIntegrationSuite) TestBadProviderErrors() {
|
||||
st.Provider = storage.ProviderUnknown
|
||||
k := NewConn(st)
|
||||
|
||||
err := k.Initialize(ctx, repository.Options{})
|
||||
err := k.Initialize(ctx, repository.Options{}, repository.Retention{})
|
||||
assert.Error(t, err, clues.ToCore(err))
|
||||
}
|
||||
|
||||
@ -413,7 +416,7 @@ func (suite *WrapperIntegrationSuite) TestSetUserAndHost() {
|
||||
st := storeTD.NewPrefixedS3Storage(t)
|
||||
k := NewConn(st)
|
||||
|
||||
err := k.Initialize(ctx, opts)
|
||||
err := k.Initialize(ctx, opts, repository.Retention{})
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
kopiaOpts := k.ClientOptions()
|
||||
@ -453,3 +456,72 @@ func (suite *WrapperIntegrationSuite) TestSetUserAndHost() {
|
||||
err = k.Close(ctx)
|
||||
assert.NoError(t, err, clues.ToCore(err))
|
||||
}
|
||||
|
||||
// ---------------
|
||||
// integration tests that require object locking to be enabled on the bucket.
|
||||
// ---------------
|
||||
type ConnRetentionIntegrationSuite struct {
|
||||
tester.Suite
|
||||
}
|
||||
|
||||
func TestConnRetentionIntegrationSuite(t *testing.T) {
|
||||
suite.Run(t, &ConnRetentionIntegrationSuite{
|
||||
Suite: tester.NewRetentionSuite(
|
||||
t,
|
||||
[][]string{storeTD.AWSStorageCredEnvs},
|
||||
),
|
||||
})
|
||||
}
|
||||
|
||||
// Test that providing retention changes only the retention values relative to
|
||||
// the default values that kopia uses.
|
||||
func (suite *ConnRetentionIntegrationSuite) TestInitWithAndWithoutRetention() {
|
||||
t := suite.T()
|
||||
|
||||
ctx, flush := tester.NewContext(t)
|
||||
defer flush()
|
||||
|
||||
st1 := storeTD.NewPrefixedS3Storage(t)
|
||||
|
||||
k1 := NewConn(st1)
|
||||
err := k1.Initialize(ctx, repository.Options{}, repository.Retention{})
|
||||
require.NoError(t, err, "initializing repo 1: %v", clues.ToCore(err))
|
||||
|
||||
st2 := storeTD.NewPrefixedS3Storage(t)
|
||||
|
||||
k2 := NewConn(st2)
|
||||
err = k2.Initialize(
|
||||
ctx,
|
||||
repository.Options{},
|
||||
repository.Retention{
|
||||
Mode: ptr.To(repository.GovernanceRetention),
|
||||
Duration: ptr.To(time.Hour * 48),
|
||||
Extend: ptr.To(true),
|
||||
})
|
||||
require.NoError(t, err, "initializing repo 2: %v", clues.ToCore(err))
|
||||
|
||||
dr1, ok := k1.Repository.(repo.DirectRepository)
|
||||
require.True(t, ok, "getting direct repo 1")
|
||||
|
||||
dr2, ok := k2.Repository.(repo.DirectRepository)
|
||||
require.True(t, ok, "getting direct repo 2")
|
||||
|
||||
format1 := dr1.FormatManager().ScrubbedContentFormat()
|
||||
format2 := dr2.FormatManager().ScrubbedContentFormat()
|
||||
|
||||
assert.Equal(t, format1, format2)
|
||||
|
||||
blobCfg1, err := dr1.FormatManager().BlobCfgBlob()
|
||||
require.NoError(t, err, "getting blob config 1: %v", clues.ToCore(err))
|
||||
|
||||
blobCfg2, err := dr2.FormatManager().BlobCfgBlob()
|
||||
require.NoError(t, err, "getting retention config 2: %v", clues.ToCore(err))
|
||||
|
||||
assert.NotEqual(t, blobCfg1, blobCfg2)
|
||||
|
||||
// Check to make sure retention not enabled unexpectedly.
|
||||
checkRetentionParams(t, ctx, k1, blob.RetentionMode(""), 0, assert.False)
|
||||
|
||||
// Some checks to make sure retention was fully initialized as expected.
|
||||
checkRetentionParams(t, ctx, k2, blob.Governance, time.Hour*48, assert.True)
|
||||
}
|
||||
|
||||
@ -808,7 +808,7 @@ func openConnAndModelStore(
|
||||
st := storeTD.NewPrefixedS3Storage(t)
|
||||
c := NewConn(st)
|
||||
|
||||
err := c.Initialize(ctx, repository.Options{})
|
||||
err := c.Initialize(ctx, repository.Options{}, repository.Retention{})
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
defer func() {
|
||||
|
||||
@ -28,6 +28,7 @@ import (
|
||||
"github.com/alcionai/corso/src/internal/m365/graph"
|
||||
"github.com/alcionai/corso/src/internal/m365/graph/metadata"
|
||||
"github.com/alcionai/corso/src/pkg/backup/details"
|
||||
"github.com/alcionai/corso/src/pkg/count"
|
||||
"github.com/alcionai/corso/src/pkg/fault"
|
||||
"github.com/alcionai/corso/src/pkg/logger"
|
||||
"github.com/alcionai/corso/src/pkg/path"
|
||||
@ -251,7 +252,9 @@ func (cp *corsoProgress) FinishedHashingFile(fname string, bs int64) {
|
||||
sl[i] = string(rdt)
|
||||
}
|
||||
|
||||
logger.Ctx(context.Background()).Debugw("finished hashing file", "path", sl[2:])
|
||||
logger.Ctx(cp.ctx).Debugw(
|
||||
"finished hashing file",
|
||||
"path", clues.Hide(path.Elements(sl[2:])))
|
||||
|
||||
atomic.AddInt64(&cp.totalBytes, bs)
|
||||
}
|
||||
@ -441,12 +444,12 @@ func streamBaseEntries(
|
||||
|
||||
ctx = clues.Add(
|
||||
ctx,
|
||||
"current_item_path", curPath,
|
||||
"longest_prefix", longest)
|
||||
"current_directory_path", curPath,
|
||||
"longest_prefix", path.LoggableDir(longest))
|
||||
|
||||
err := dir.IterateEntries(ctx, func(innerCtx context.Context, entry fs.Entry) error {
|
||||
if err := innerCtx.Err(); err != nil {
|
||||
return err
|
||||
return clues.Stack(err).WithClues(ctx)
|
||||
}
|
||||
|
||||
// Don't walk subdirectories in this function.
|
||||
@ -463,7 +466,9 @@ func streamBaseEntries(
|
||||
|
||||
entName, err := decodeElement(entry.Name())
|
||||
if err != nil {
|
||||
return clues.Wrap(err, "decoding entry name: "+entry.Name())
|
||||
return clues.Wrap(err, "decoding entry name").
|
||||
WithClues(ctx).
|
||||
With("entry_name", entry.Name())
|
||||
}
|
||||
|
||||
// This entry was marked as deleted by a service that can't tell us the
|
||||
@ -475,7 +480,7 @@ func streamBaseEntries(
|
||||
// For now assuming that item IDs don't need escaping.
|
||||
itemPath, err := curPath.AppendItem(entName)
|
||||
if err != nil {
|
||||
return clues.Wrap(err, "getting full item path for base entry")
|
||||
return clues.Wrap(err, "getting full item path for base entry").WithClues(ctx)
|
||||
}
|
||||
|
||||
// We need the previous path so we can find this item in the base snapshot's
|
||||
@ -484,7 +489,7 @@ func streamBaseEntries(
|
||||
// to look for.
|
||||
prevItemPath, err := prevPath.AppendItem(entName)
|
||||
if err != nil {
|
||||
return clues.Wrap(err, "getting previous full item path for base entry")
|
||||
return clues.Wrap(err, "getting previous full item path for base entry").WithClues(ctx)
|
||||
}
|
||||
|
||||
// Meta files aren't in backup details since it's the set of items the user
|
||||
@ -508,13 +513,15 @@ func streamBaseEntries(
|
||||
}
|
||||
|
||||
if err := ctr(ctx, entry); err != nil {
|
||||
return clues.Wrap(err, "executing callback on item").With("item_path", itemPath)
|
||||
return clues.Wrap(err, "executing callback on item").
|
||||
WithClues(ctx).
|
||||
With("item_path", itemPath)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return clues.Wrap(err, "traversing items in base snapshot directory")
|
||||
return clues.Wrap(err, "traversing items in base snapshot directory").WithClues(ctx)
|
||||
}
|
||||
|
||||
return nil
|
||||
@ -825,7 +832,9 @@ func inflateCollectionTree(
|
||||
}
|
||||
|
||||
if node.collection != nil && node.collection.State() == data.NotMovedState {
|
||||
return nil, nil, clues.New("conflicting states for collection").With("changed_path", p)
|
||||
return nil, nil, clues.New("conflicting states for collection").
|
||||
WithClues(ctx).
|
||||
With("changed_path", p)
|
||||
}
|
||||
}
|
||||
|
||||
@ -852,13 +861,14 @@ func traverseBaseDir(
|
||||
expectedDirPath *path.Builder,
|
||||
dir fs.Directory,
|
||||
roots map[string]*treeMap,
|
||||
stats *count.Bus,
|
||||
) error {
|
||||
ctx = clues.Add(ctx,
|
||||
"old_dir_path", oldDirPath,
|
||||
"expected_dir_path", expectedDirPath)
|
||||
|
||||
if depth >= maxInflateTraversalDepth {
|
||||
return clues.New("base snapshot tree too tall")
|
||||
return clues.New("base snapshot tree too tall").WithClues(ctx)
|
||||
}
|
||||
|
||||
// Wrapper base64 encodes all file and folder names to avoid issues with
|
||||
@ -866,7 +876,9 @@ func traverseBaseDir(
|
||||
// from kopia we need to do the decoding here.
|
||||
dirName, err := decodeElement(dir.Name())
|
||||
if err != nil {
|
||||
return clues.Wrap(err, "decoding base directory name").With("dir_name", dir.Name())
|
||||
return clues.Wrap(err, "decoding base directory name").
|
||||
WithClues(ctx).
|
||||
With("dir_name", clues.Hide(dir.Name()))
|
||||
}
|
||||
|
||||
// Form the path this directory would be at if the hierarchy remained the same
|
||||
@ -885,14 +897,29 @@ func traverseBaseDir(
|
||||
currentPath = currentPath.Append(dirName)
|
||||
}
|
||||
|
||||
var explicitMention bool
|
||||
|
||||
if upb, ok := updatedPaths[oldDirPath.String()]; ok {
|
||||
// This directory was deleted.
|
||||
if upb == nil {
|
||||
currentPath = nil
|
||||
|
||||
stats.Inc(statDel)
|
||||
} else {
|
||||
// This directory was moved/renamed and the new location is in upb.
|
||||
// This directory was explicitly mentioned and the new (possibly
|
||||
// unchanged) location is in upb.
|
||||
currentPath = upb.ToBuilder()
|
||||
|
||||
// Below we check if the collection was marked as new or DoNotMerge which
|
||||
// disables merging behavior. That means we can't directly update stats
|
||||
// here else we'll miss delta token refreshes and whatnot. Instead note
|
||||
// that we did see the path explicitly so it's not counted as a recursive
|
||||
// operation.
|
||||
explicitMention = true
|
||||
}
|
||||
} else if currentPath == nil {
|
||||
// Just stats tracking stuff.
|
||||
stats.Inc(statRecursiveDel)
|
||||
}
|
||||
|
||||
ctx = clues.Add(ctx, "new_path", currentPath)
|
||||
@ -920,10 +947,11 @@ func traverseBaseDir(
|
||||
oldDirPath,
|
||||
currentPath,
|
||||
dEntry,
|
||||
roots)
|
||||
roots,
|
||||
stats)
|
||||
})
|
||||
if err != nil {
|
||||
return clues.Wrap(err, "traversing base directory")
|
||||
return clues.Wrap(err, "traversing base directory").WithClues(ctx)
|
||||
}
|
||||
|
||||
// We only need to add this base directory to the tree we're building if it
|
||||
@ -940,7 +968,7 @@ func traverseBaseDir(
|
||||
// in the if-block though as that is an optimization.
|
||||
node := getTreeNode(roots, currentPath.Elements())
|
||||
if node == nil {
|
||||
return clues.New("getting tree node")
|
||||
return clues.New("getting tree node").WithClues(ctx)
|
||||
}
|
||||
|
||||
// Now that we have the node we need to check if there is a collection
|
||||
@ -950,17 +978,28 @@ func traverseBaseDir(
|
||||
// directories. The expected usecase for this is delta token expiry in M365.
|
||||
if node.collection != nil &&
|
||||
(node.collection.DoNotMergeItems() || node.collection.State() == data.NewState) {
|
||||
stats.Inc(statSkipMerge)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Just stats tracking stuff.
|
||||
if oldDirPath.String() == currentPath.String() {
|
||||
stats.Inc(statNoMove)
|
||||
} else if explicitMention {
|
||||
stats.Inc(statMove)
|
||||
} else {
|
||||
stats.Inc(statRecursiveMove)
|
||||
}
|
||||
|
||||
curP, err := path.FromDataLayerPath(currentPath.String(), false)
|
||||
if err != nil {
|
||||
return clues.New("converting current path to path.Path")
|
||||
return clues.New("converting current path to path.Path").WithClues(ctx)
|
||||
}
|
||||
|
||||
oldP, err := path.FromDataLayerPath(oldDirPath.String(), false)
|
||||
if err != nil {
|
||||
return clues.New("converting old path to path.Path")
|
||||
return clues.New("converting old path to path.Path").WithClues(ctx)
|
||||
}
|
||||
|
||||
node.baseDir = dir
|
||||
@ -993,6 +1032,24 @@ func logBaseInfo(ctx context.Context, m ManifestEntry) {
|
||||
"base_backup_id", mbID)
|
||||
}
|
||||
|
||||
const (
|
||||
// statNoMove denotes a directory that wasn't moved at all.
|
||||
statNoMove = "directories_not_moved"
|
||||
// statMove denotes a directory that was explicitly moved.
|
||||
statMove = "directories_explicitly_moved"
|
||||
// statRecursiveMove denotes a directory that moved because one or more of
|
||||
// its ancestors moved and it wasn't explicitly mentioned.
|
||||
statRecursiveMove = "directories_recursively_moved"
|
||||
// statDel denotes a directory that was explicitly deleted.
|
||||
statDel = "directories_explicitly_deleted"
|
||||
// statRecursiveDel denotes a directory that was deleted because one or more
|
||||
// of its ancestors was deleted and it wasn't explicitly mentioned.
|
||||
statRecursiveDel = "directories_recursively_deleted"
|
||||
// statSkipMerge denotes the number of directories that weren't merged because
|
||||
// they were marked either DoNotMerge or New.
|
||||
statSkipMerge = "directories_skipped_merging"
|
||||
)
|
||||
|
||||
func inflateBaseTree(
|
||||
ctx context.Context,
|
||||
loader snapshotLoader,
|
||||
@ -1058,10 +1115,13 @@ func inflateBaseTree(
|
||||
// The prefix is the tenant/service/owner/category set, which remains
|
||||
// otherwise unchecked in tree inflation below this point.
|
||||
newSubtreePath := subtreePath.ToBuilder()
|
||||
|
||||
if p, ok := updatedPaths[subtreePath.String()]; ok {
|
||||
newSubtreePath = p.ToBuilder()
|
||||
}
|
||||
|
||||
stats := count.New()
|
||||
|
||||
if err = traverseBaseDir(
|
||||
ictx,
|
||||
0,
|
||||
@ -1070,9 +1130,19 @@ func inflateBaseTree(
|
||||
newSubtreePath.Dir(),
|
||||
subtreeDir,
|
||||
roots,
|
||||
stats,
|
||||
); err != nil {
|
||||
return clues.Wrap(err, "traversing base snapshot").WithClues(ictx)
|
||||
}
|
||||
|
||||
logger.Ctx(ctx).Infow(
|
||||
"merge subtree stats",
|
||||
statNoMove, stats.Get(statNoMove),
|
||||
statMove, stats.Get(statMove),
|
||||
statRecursiveMove, stats.Get(statRecursiveMove),
|
||||
statDel, stats.Get(statDel),
|
||||
statRecursiveDel, stats.Get(statRecursiveDel),
|
||||
statSkipMerge, stats.Get(statSkipMerge))
|
||||
}
|
||||
|
||||
return nil
|
||||
@ -1124,7 +1194,7 @@ func inflateDirTree(
}

if len(roots) > 1 {
return nil, clues.New("multiple root directories")
return nil, clues.New("multiple root directories").WithClues(ctx)
}

var res fs.Directory

@ -324,7 +324,7 @@ func (w Wrapper) makeSnapshotWithRoot(
// Telling kopia to always flush may hide other errors if it fails while
// flushing the write session (hence logging above).
if err != nil {
return nil, clues.Wrap(err, "kopia backup")
return nil, clues.Wrap(err, "kopia backup").WithClues(ctx)
}

res := manifestToStats(man, progress, bc)
@ -369,7 +369,7 @@ func getDir(
encodeElements(dirPath.PopFront().Elements()...))
if err != nil {
if isErrEntryNotFound(err) {
err = clues.Stack(data.ErrNotFound, err)
err = clues.Stack(data.ErrNotFound, err).WithClues(ctx)
}

return nil, clues.Wrap(err, "getting nested object handle").WithClues(ctx)
@ -487,7 +487,7 @@ func (w Wrapper) ProduceRestoreCollections(
// load it here.
snapshotRoot, err := w.getSnapshotRoot(ctx, snapshotID)
if err != nil {
return nil, clues.Wrap(err, "loading snapshot root")
return nil, clues.Wrap(err, "loading snapshot root").WithClues(ctx)
}

var (
@ -507,8 +507,8 @@ func (w Wrapper) ProduceRestoreCollections(
// items from a single directory instance lower down.
ictx := clues.Add(
ctx,
"item_path", itemPaths.StoragePath.String(),
"restore_path", itemPaths.RestorePath.String())
"item_path", itemPaths.StoragePath,
"restore_path", itemPaths.RestorePath)

parentStoragePath, err := itemPaths.StoragePath.Dir()
if err != nil {
@ -552,7 +552,7 @@ func (w Wrapper) ProduceRestoreCollections(
// then load the items from the directory.
res, err := loadDirsAndItems(ctx, snapshotRoot, bcounter, dirsToItems, errs)
if err != nil {
return nil, clues.Wrap(err, "loading items")
return nil, clues.Wrap(err, "loading items").WithClues(ctx)
}

return res, el.Failure()
@ -610,12 +610,12 @@ func (w Wrapper) RepoMaintenance(
) error {
kopiaSafety, err := translateSafety(opts.Safety)
if err != nil {
return clues.Wrap(err, "identifying safety level")
return clues.Wrap(err, "identifying safety level").WithClues(ctx)
}

mode, err := translateMode(opts.Type)
if err != nil {
return clues.Wrap(err, "identifying maintenance mode")
return clues.Wrap(err, "identifying maintenance mode").WithClues(ctx)
}

currentOwner := w.c.ClientOptions().UsernameAtHost()

@ -41,7 +41,8 @@ func (ctrl *Controller) ProduceExportCollections(
)

switch sels.Service {
case selectors.ServiceOneDrive:
case selectors.ServiceOneDrive, selectors.ServiceSharePoint:
// OneDrive and SharePoint can share the code to create collections
expCollections, err = onedrive.ProduceExportCollections(
ctx,
backupVersion,

@ -133,6 +133,7 @@ func Complete() {
const (
ItemBackupMsg = "Backing up item"
ItemRestoreMsg = "Restoring item"
ItemExportMsg = "Exporting item"
ItemQueueMsg = "Queuing items"
)

@ -281,6 +282,51 @@ func ItemProgress(
return bar.ProxyReader(rc), abort
}

// ItemSpinner is similar to ItemProgress, but for use in cases where
// we don't know the file size but want to show progress.
func ItemSpinner(
ctx context.Context,
rc io.ReadCloser,
header string,
iname any,
) (io.ReadCloser, func()) {
plain := plainString(iname)
log := logger.Ctx(ctx).With("item", iname)
log.Debug(header)

if cfg.hidden() || rc == nil {
defer log.Debug("done - " + header)
return rc, func() {}
}

wg.Add(1)

barOpts := []mpb.BarOption{
mpb.PrependDecorators(
decor.Name(header, decor.WCSyncSpaceR),
decor.Name(plain, decor.WCSyncSpaceR),
decor.CurrentKibiByte(" %.1f", decor.WC{W: 8})),
}

if !cfg.keepBarsAfterComplete {
barOpts = append(barOpts, mpb.BarRemoveOnComplete())
}

bar := progress.New(-1, mpb.NopStyle(), barOpts...)

go waitAndCloseBar(bar, func() {
// might be overly chatty, we can remove if needed.
log.Debug("done - " + header)
})()

abort := func() {
bar.SetTotal(-1, true)
bar.Abort(true)
}

return bar.ProxyReader(rc), abort
}

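ItemSpinner mirrors ItemProgress for readers whose length isn't known up front. A hedged usage sketch, modeled on the export writer added later in this change; copyWithSpinner, dst, and name are placeholders:

// Sketch: wrap an unknown-length reader with a spinner while copying it out.
func copyWithSpinner(ctx context.Context, rc io.ReadCloser, name string, dst io.Writer) error {
    // ItemSpinner returns a proxy reader plus a closer that finalizes the bar.
    pr, pclose := observe.ItemSpinner(ctx, rc, observe.ItemExportMsg, clues.Hide(name))

    defer rc.Close()
    defer pclose()

    _, err := io.Copy(dst, pr)

    return err
}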
// ProgressWithCount tracks the display of a bar that tracks the completion
// of the specified count.
// Each write to the provided channel counts as a single increment.
@ -517,8 +563,8 @@ func (b bulletf) String() string {
// observe progress bar. Logged values should only use
// the fmt %v to ensure Concealers hide PII.
func plainString(v any) string {
if ps, ok := v.(clues.PlainStringer); ok {
return ps.PlainString()
if c, ok := v.(clues.Concealer); ok {
return c.PlainString()
}

return fmt.Sprintf("%v", v)

@ -40,7 +40,7 @@ func (suite *MaintenanceOpIntegrationSuite) TestRepoMaintenance() {
ctx, flush := tester.NewContext(t)
defer flush()

err := k.Initialize(ctx, repository.Options{})
err := k.Initialize(ctx, repository.Options{}, repository.Retention{})
require.NoError(t, err, clues.ToCore(err))

kw, err := kopia.NewWrapper(k)

@ -243,7 +243,7 @@ func (suite *RestoreOpIntegrationSuite) SetupSuite() {

suite.acct = tconfig.NewM365Account(t)

err := k.Initialize(ctx, repository.Options{})
err := k.Initialize(ctx, repository.Options{}, repository.Retention{})
require.NoError(t, err, clues.ToCore(err))

suite.kopiaCloser = func(ctx context.Context) {

77
src/internal/operations/retention_config.go
Normal file
@ -0,0 +1,77 @@
package operations

import (
"context"
"time"

"github.com/alcionai/clues"

"github.com/alcionai/corso/src/internal/common/crash"
"github.com/alcionai/corso/src/internal/events"
"github.com/alcionai/corso/src/internal/kopia"
"github.com/alcionai/corso/src/internal/stats"
"github.com/alcionai/corso/src/pkg/control"
"github.com/alcionai/corso/src/pkg/control/repository"
"github.com/alcionai/corso/src/pkg/count"
)

// RetentionConfigOperation wraps an operation with retention-config-specific props.
type RetentionConfigOperation struct {
operation
Results RetentionConfigResults
rcOpts repository.Retention
}

// RetentionConfigResults aggregate the details of the results of the operation.
type RetentionConfigResults struct {
stats.StartAndEndTime
}

// NewRetentionConfigOperation constructs and validates an operation to change
// retention parameters.
func NewRetentionConfigOperation(
ctx context.Context,
opts control.Options,
kw *kopia.Wrapper,
rcOpts repository.Retention,
bus events.Eventer,
) (RetentionConfigOperation, error) {
op := RetentionConfigOperation{
operation: newOperation(opts, bus, count.New(), kw, nil),
rcOpts: rcOpts,
}

// Don't run validation because we don't populate the model store.

return op, nil
}

func (op *RetentionConfigOperation) Run(ctx context.Context) (err error) {
defer func() {
if crErr := crash.Recovery(ctx, recover(), "retention_config"); crErr != nil {
err = crErr
}
}()

op.Results.StartedAt = time.Now()

// TODO(ashmrtn): Send telemetry?

return op.do(ctx)
}

func (op *RetentionConfigOperation) do(ctx context.Context) error {
defer func() {
op.Results.CompletedAt = time.Now()
}()

err := op.operation.kopia.SetRetentionParameters(ctx, op.rcOpts)
if err != nil {
op.Status = Failed
return clues.Wrap(err, "running retention config operation")
}

op.Status = Completed

return nil
}
74
src/internal/operations/retention_config_test.go
Normal file
@ -0,0 +1,74 @@
package operations

import (
"testing"

"github.com/alcionai/clues"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"

"github.com/alcionai/corso/src/internal/common/ptr"
evmock "github.com/alcionai/corso/src/internal/events/mock"
"github.com/alcionai/corso/src/internal/kopia"
"github.com/alcionai/corso/src/internal/tester"
"github.com/alcionai/corso/src/pkg/control"
"github.com/alcionai/corso/src/pkg/control/repository"
storeTD "github.com/alcionai/corso/src/pkg/storage/testdata"
)

type RetentionConfigOpIntegrationSuite struct {
tester.Suite
}

func TestRetentionConfigOpIntegrationSuite(t *testing.T) {
suite.Run(t, &RetentionConfigOpIntegrationSuite{
Suite: tester.NewIntegrationSuite(
t,
[][]string{storeTD.AWSStorageCredEnvs}),
})
}

func (suite *RetentionConfigOpIntegrationSuite) TestRepoRetentionConfig() {
var (
t = suite.T()
// need to initialize the repository before we can test connecting to it.
st = storeTD.NewPrefixedS3Storage(t)
k = kopia.NewConn(st)
)

ctx, flush := tester.NewContext(t)
defer flush()

err := k.Initialize(ctx, repository.Options{}, repository.Retention{})
require.NoError(t, err, clues.ToCore(err))

kw, err := kopia.NewWrapper(k)
// kopiaRef comes with a count of 1 and Wrapper bumps it again so safe
// to close here.
k.Close(ctx)

require.NoError(t, err, clues.ToCore(err))

defer kw.Close(ctx)

// Only set the extend locks parameter, as other retention options require a bucket
// with object locking enabled. There are more complete tests in the kopia
// package.
rco, err := NewRetentionConfigOperation(
ctx,
control.DefaultOptions(),
kw,
repository.Retention{
Extend: ptr.To(true),
},
evmock.NewBus())
require.NoError(t, err, clues.ToCore(err))

err = rco.Run(ctx)
assert.NoError(t, err, clues.ToCore(err))
assert.Equal(t, Completed, rco.Status)
assert.NotZero(t, rco.Results.StartedAt)
assert.NotZero(t, rco.Results.CompletedAt)
assert.NotEqual(t, rco.Results.StartedAt, rco.Results.CompletedAt)
}
@ -889,6 +889,26 @@ func (suite *ExchangeRestoreIntgSuite) SetupSuite() {
suite.its = newIntegrationTesterSetup(suite.T())
}

type clientItemPager interface {
GetItemsInContainerByCollisionKeyer[string]
GetItemIDsInContainer(
ctx context.Context,
userID, containerID string,
) (map[string]struct{}, error)
GetContainerByName(
ctx context.Context,
userID, parentContainerID, containerName string,
) (graph.Container, error)
GetItemsInContainerByCollisionKey(
ctx context.Context,
userID, containerID string,
) (map[string]string, error)
CreateContainer(
ctx context.Context,
userID, parentContainerID, containerName string,
) (graph.Container, error)
}

func (suite *ExchangeRestoreIntgSuite) TestRestore_Run_exchangeWithAdvancedOptions() {
t := suite.T()

@ -921,25 +941,28 @@ func (suite *ExchangeRestoreIntgSuite) TestRestore_Run_exchangeWithAdvancedOptio
require.NoError(t, err, clues.ToCore(err))

var (
restoreCfg = ctrlTD.DefaultRestoreConfig("exchange_adv_restore")
sel = rsel.Selector
userID = sel.ID()
cIDs = map[path.CategoryType]string{
path.ContactsCategory: "",
path.EmailCategory: "",
path.EventsCategory: "",
restoreCfg = ctrlTD.DefaultRestoreConfig("exchange_adv_restore")
sel = rsel.Selector
userID = sel.ID()
countItemsInRestore int

itemIDs = map[path.CategoryType]map[string]struct{}{}
collisionKeys = map[path.CategoryType]map[string]string{}
containerIDs = map[path.CategoryType]string{}
parentContainerIDs = map[path.CategoryType]string{
path.EmailCategory: api.MsgFolderRoot,
}
parentContainerNames = map[path.CategoryType][]string{
path.EmailCategory: {api.MailInbox},
path.ContactsCategory: {},
path.EventsCategory: {},
}

testCategories = map[path.CategoryType]clientItemPager{
path.ContactsCategory: suite.its.ac.Contacts(),
path.EmailCategory: suite.its.ac.Mail(),
// path.EventsCategory: suite.its.ac.Events(),
}
collKeys = map[path.CategoryType]map[string]string{}
countContactsInRestore int
acCont = suite.its.ac.Contacts()
contactIDs map[string]struct{}
countEmailsInRestore int
acMail = suite.its.ac.Mail()
mailIDs map[string]struct{}
countItemsInRestore int
// countEventsInRestore int
// acEvts = suite.its.ac.Events()
// eventIDs = []string{}
)

// initial restore
@ -971,61 +994,27 @@ func (suite *ExchangeRestoreIntgSuite) TestRestore_Run_exchangeWithAdvancedOptio
// get all files in folder, use these as the base
// set of files to compare against.

// --- contacts
for cat, ac := range testCategories {
suite.Run(cat.String(), func() {
t := suite.T()

contGC, err := acCont.GetContainerByName(ctx, userID, "", restoreCfg.Location)
require.NoError(t, err, clues.ToCore(err))
ctx, flush := tester.NewContext(t)
defer flush()

cIDs[path.ContactsCategory] = ptr.Val(contGC.GetId())
containers := append([]string{restoreCfg.Location}, parentContainerNames[cat]...)

collKeys[path.ContactsCategory], err = acCont.GetItemsInContainerByCollisionKey(
ctx,
userID,
cIDs[path.ContactsCategory])
require.NoError(t, err, clues.ToCore(err))
countContactsInRestore = len(collKeys[path.ContactsCategory])
t.Log(countContactsInRestore, "contacts restored")
itemIDs[cat], collisionKeys[cat], containerIDs[cat] = getCollKeysAndItemIDs(
t,
ctx,
ac,
userID,
parentContainerIDs[cat],
containers...)

contactIDs, err = acCont.GetItemIDsInContainer(ctx, userID, cIDs[path.ContactsCategory])
require.NoError(t, err, clues.ToCore(err))
countItemsInRestore += len(collisionKeys[cat])
})
}

// --- events

// gc, err = acEvts.GetContainerByName(ctx, userID, "", restoreCfg.Location)
// require.NoError(t, err, clues.ToCore(err))

// restoredContainerID[path.EventsCategory] = ptr.Val(gc.GetId())

// collKeys[path.EventsCategory], err = acEvts.GetItemsInContainerByCollisionKey(
// ctx,
// userID,
// cIDs[path.EventsCategory])
// require.NoError(t, err, clues.ToCore(err))
// countEventsInRestore = len(collKeys[path.EventsCategory])
// t.Log(countContactsInRestore, "events restored")

mailGC, err := acMail.GetContainerByName(ctx, userID, api.MsgFolderRoot, restoreCfg.Location)
require.NoError(t, err, clues.ToCore(err))

mailGC, err = acMail.GetContainerByName(ctx, userID, ptr.Val(mailGC.GetId()), api.MailInbox)
require.NoError(t, err, clues.ToCore(err))

cIDs[path.EmailCategory] = ptr.Val(mailGC.GetId())

// --- mail

collKeys[path.EmailCategory], err = acMail.GetItemsInContainerByCollisionKey(
ctx,
userID,
cIDs[path.EmailCategory])
require.NoError(t, err, clues.ToCore(err))
countEmailsInRestore = len(collKeys[path.EmailCategory])
t.Log(countContactsInRestore, "emails restored")

mailIDs, err = acMail.GetItemIDsInContainer(ctx, userID, cIDs[path.EmailCategory])
require.NoError(t, err, clues.ToCore(err))

countItemsInRestore = countContactsInRestore + countEmailsInRestore // + countEventsInRestore
checkRestoreCounts(t, ctr1, 0, 0, countItemsInRestore)
})

@ -1062,43 +1051,30 @@ func (suite *ExchangeRestoreIntgSuite) TestRestore_Run_exchangeWithAdvancedOptio

checkRestoreCounts(t, ctr2, countItemsInRestore, 0, 0)

// --- contacts
result := map[string]string{}

// get all files in folder, use these as the base
// set of files to compare against.
result := filterCollisionKeyResults(
t,
ctx,
userID,
cIDs[path.ContactsCategory],
GetItemsInContainerByCollisionKeyer[string](acCont),
collKeys[path.ContactsCategory])
for cat, ac := range testCategories {
suite.Run(cat.String(), func() {
t := suite.T()

currentContactIDs, err := acCont.GetItemIDsInContainer(ctx, userID, cIDs[path.ContactsCategory])
require.NoError(t, err, clues.ToCore(err))
ctx, flush := tester.NewContext(t)
defer flush()

assert.Equal(t, contactIDs, currentContactIDs, "ids are equal")
m := filterCollisionKeyResults(
t,
ctx,
userID,
containerIDs[cat],
GetItemsInContainerByCollisionKeyer[string](ac),
collisionKeys[cat])
maps.Copy(result, m)

// --- events
currentIDs, err := ac.GetItemIDsInContainer(ctx, userID, containerIDs[cat])
require.NoError(t, err, clues.ToCore(err))

// m = checkCollisionKeyResults(t, ctx, userID, cIDs[path.EventsCategory], acEvts, collKeys[path.EventsCategory])
// maps.Copy(result, m)

// --- mail

m := filterCollisionKeyResults(
t,
ctx,
userID,
cIDs[path.EmailCategory],
GetItemsInContainerByCollisionKeyer[string](acMail),
collKeys[path.EmailCategory])
maps.Copy(result, m)

currentMailIDs, err := acMail.GetItemIDsInContainer(ctx, userID, cIDs[path.EmailCategory])
require.NoError(t, err, clues.ToCore(err))

assert.Equal(t, mailIDs, currentMailIDs, "ids are equal")
assert.Equal(t, itemIDs[cat], currentIDs, "ids are equal")
})
}

assert.Len(t, result, 0, "no new items should get added")
})
@ -1136,60 +1112,40 @@ func (suite *ExchangeRestoreIntgSuite) TestRestore_Run_exchangeWithAdvancedOptio
}
}

assert.Len(
t,
filtEnts,
countItemsInRestore,
"every item should have been replaced")

// --- contacts

result := filterCollisionKeyResults(
t,
ctx,
userID,
cIDs[path.ContactsCategory],
GetItemsInContainerByCollisionKeyer[string](acCont),
collKeys[path.ContactsCategory])

currentContactIDs, err := acCont.GetItemIDsInContainer(ctx, userID, cIDs[path.ContactsCategory])
require.NoError(t, err, clues.ToCore(err))

assert.Equal(t, len(contactIDs), len(currentContactIDs), "count of ids are equal")
for orig := range contactIDs {
assert.NotContains(t, currentContactIDs, orig, "original item should not exist after replacement")
}

contactIDs = currentContactIDs

// --- events

// m = checkCollisionKeyResults(t, ctx, userID, cIDs[path.EventsCategory], acEvts, collKeys[path.EventsCategory])
// maps.Copy(result, m)

// --- mail

m := filterCollisionKeyResults(
t,
ctx,
userID,
cIDs[path.EmailCategory],
GetItemsInContainerByCollisionKeyer[string](acMail),
collKeys[path.EmailCategory])
maps.Copy(result, m)
assert.Len(t, filtEnts, countItemsInRestore, "every item should have been replaced")

checkRestoreCounts(t, ctr3, 0, countItemsInRestore, 0)

currentMailIDs, err := acMail.GetItemIDsInContainer(ctx, userID, cIDs[path.EmailCategory])
require.NoError(t, err, clues.ToCore(err))
result := map[string]string{}

assert.Equal(t, len(mailIDs), len(currentMailIDs), "count of ids are equal")
for orig := range mailIDs {
assert.NotContains(t, currentMailIDs, orig, "original item should not exist after replacement")
for cat, ac := range testCategories {
suite.Run(cat.String(), func() {
t := suite.T()

ctx, flush := tester.NewContext(t)
defer flush()

m := filterCollisionKeyResults(
t,
ctx,
userID,
containerIDs[cat],
GetItemsInContainerByCollisionKeyer[string](ac),
collisionKeys[cat])
maps.Copy(result, m)

currentIDs, err := ac.GetItemIDsInContainer(ctx, userID, containerIDs[cat])
require.NoError(t, err, clues.ToCore(err))

assert.Equal(t, len(itemIDs[cat]), len(currentIDs), "count of ids are equal")
for orig := range itemIDs[cat] {
assert.NotContains(t, currentIDs, orig, "original item should not exist after replacement")
}

itemIDs[cat] = currentIDs
})
}

mailIDs = currentMailIDs

assert.Len(t, result, 0, "all items should have been replaced")
})

@ -1226,45 +1182,35 @@ func (suite *ExchangeRestoreIntgSuite) TestRestore_Run_exchangeWithAdvancedOptio
}
}

assert.Len(
t,
filtEnts,
countItemsInRestore,
"every item should have been copied")
assert.Len(t, filtEnts, countItemsInRestore, "every item should have been copied")

checkRestoreCounts(t, ctr4, 0, 0, countItemsInRestore)

result := filterCollisionKeyResults(
t,
ctx,
userID,
cIDs[path.ContactsCategory],
GetItemsInContainerByCollisionKeyer[string](acCont),
collKeys[path.ContactsCategory])
result := map[string]string{}

currentContactIDs, err := acCont.GetItemIDsInContainer(ctx, userID, cIDs[path.ContactsCategory])
require.NoError(t, err, clues.ToCore(err))
for cat, ac := range testCategories {
suite.Run(cat.String(), func() {
t := suite.T()

assert.Equal(t, 2*len(contactIDs), len(currentContactIDs), "count of ids should be double from before")
assert.Subset(t, maps.Keys(currentContactIDs), maps.Keys(contactIDs), "original item should exist after copy")
ctx, flush := tester.NewContext(t)
defer flush()

// m = checkCollisionKeyResults(t, ctx, userID, cIDs[path.EventsCategory], acEvts, collKeys[path.EventsCategory])
// maps.Copy(result, m)
m := filterCollisionKeyResults(
t,
ctx,
userID,
containerIDs[cat],
GetItemsInContainerByCollisionKeyer[string](ac),
collisionKeys[cat])
maps.Copy(result, m)

m := filterCollisionKeyResults(
t,
ctx,
userID,
cIDs[path.EmailCategory],
GetItemsInContainerByCollisionKeyer[string](acMail),
collKeys[path.EmailCategory])
maps.Copy(result, m)
currentIDs, err := ac.GetItemIDsInContainer(ctx, userID, containerIDs[cat])
require.NoError(t, err, clues.ToCore(err))

currentMailIDs, err := acMail.GetItemIDsInContainer(ctx, userID, cIDs[path.EmailCategory])
require.NoError(t, err, clues.ToCore(err))

assert.Equal(t, 2*len(mailIDs), len(currentMailIDs), "count of ids should be double from before")
assert.Subset(t, maps.Keys(currentMailIDs), maps.Keys(mailIDs), "original item should exist after copy")
assert.Equal(t, 2*len(itemIDs[cat]), len(currentIDs), "count of ids should be double from before")
assert.Subset(t, maps.Keys(currentIDs), maps.Keys(itemIDs[cat]), "original item should exist after copy")
})
}

// TODO: we have the option of modifying copy creations in exchange
// so that the results don't collide. But we haven't made that
@ -1344,7 +1290,7 @@ func (suite *ExchangeRestoreIntgSuite) TestRestore_Run_exchangeAlternateProtecte

// --- contacts
cat := path.ContactsCategory
userItemIDs[cat], userCollisionKeys[cat] = getCollKeysAndItemIDs(
userItemIDs[cat], userCollisionKeys[cat], _ = getCollKeysAndItemIDs(
t,
ctx,
acCont,
@ -1354,7 +1300,7 @@ func (suite *ExchangeRestoreIntgSuite) TestRestore_Run_exchangeAlternateProtecte

// --- events
// cat = path.EventsCategory
// userItemIDs[cat], userCollisionKeys[cat] = getCollKeysAndItemIDs(
// userItemIDs[cat], userCollisionKeys[cat], _ = getCollKeysAndItemIDs(
// t,
// ctx,
// acEvts,
@ -1364,7 +1310,7 @@ func (suite *ExchangeRestoreIntgSuite) TestRestore_Run_exchangeAlternateProtecte

// --- mail
cat = path.EmailCategory
userItemIDs[cat], userCollisionKeys[cat] = getCollKeysAndItemIDs(
userItemIDs[cat], userCollisionKeys[cat], _ = getCollKeysAndItemIDs(
t,
ctx,
acMail,
@ -1400,7 +1346,7 @@ func (suite *ExchangeRestoreIntgSuite) TestRestore_Run_exchangeAlternateProtecte

// --- contacts
cat = path.ContactsCategory
secondaryItemIDs[cat], secondaryCollisionKeys[cat] = getCollKeysAndItemIDs(
secondaryItemIDs[cat], secondaryCollisionKeys[cat], _ = getCollKeysAndItemIDs(
t,
ctx,
acCont,
@ -1410,7 +1356,7 @@ func (suite *ExchangeRestoreIntgSuite) TestRestore_Run_exchangeAlternateProtecte

// --- events
// cat = path.EventsCategory
// secondaryItemIDs[cat], secondaryCollisionKeys[cat] = getCollKeysAndItemIDs(
// secondaryItemIDs[cat], secondaryCollisionKeys[cat], _ = getCollKeysAndItemIDs(
// t,
// ctx,
// acEvts,
@ -1420,7 +1366,7 @@ func (suite *ExchangeRestoreIntgSuite) TestRestore_Run_exchangeAlternateProtecte

// --- mail
cat = path.EmailCategory
secondaryItemIDs[cat], secondaryCollisionKeys[cat] = getCollKeysAndItemIDs(
secondaryItemIDs[cat], secondaryCollisionKeys[cat], _ = getCollKeysAndItemIDs(
t,
ctx,
acMail,
@ -1436,52 +1382,31 @@ func (suite *ExchangeRestoreIntgSuite) TestRestore_Run_exchangeAlternateProtecte
}
}

type GetItemsKeysAndContainerByNameer interface {
GetItemIDsInContainer(
ctx context.Context,
userID, containerID string,
) (map[string]struct{}, error)
GetContainerByName(
ctx context.Context,
userID, parentContainerID, containerName string,
) (graph.Container, error)
GetItemsInContainerByCollisionKey(
ctx context.Context,
userID, containerID string,
) (map[string]string, error)
}

func getCollKeysAndItemIDs(
t *testing.T,
ctx context.Context, //revive:disable-line:context-as-argument
gikacbn GetItemsKeysAndContainerByNameer,
cip clientItemPager,
userID, parentContainerID string,
containerNames ...string,
) (map[string]struct{}, map[string]string) {
) (map[string]struct{}, map[string]string, string) {
var (
c graph.Container
err error
cID string
cID = parentContainerID
)

for _, cn := range containerNames {
pcid := parentContainerID

if len(cID) != 0 {
pcid = cID
}

c, err = gikacbn.GetContainerByName(ctx, userID, pcid, cn)
c, err = cip.GetContainerByName(ctx, userID, cID, cn)
require.NoError(t, err, clues.ToCore(err))

cID = ptr.Val(c.GetId())
}

itemIDs, err := gikacbn.GetItemIDsInContainer(ctx, userID, cID)
itemIDs, err := cip.GetItemIDsInContainer(ctx, userID, cID)
require.NoError(t, err, clues.ToCore(err))

collisionKeys, err := gikacbn.GetItemsInContainerByCollisionKey(ctx, userID, cID)
collisionKeys, err := cip.GetItemsInContainerByCollisionKey(ctx, userID, cID)
require.NoError(t, err, clues.ToCore(err))

return itemIDs, collisionKeys
return itemIDs, collisionKeys, cID
}

@ -102,7 +102,7 @@ func prepNewTestBackupOp(

k := kopia.NewConn(bod.st)

err := k.Initialize(ctx, repository.Options{})
err := k.Initialize(ctx, repository.Options{}, repository.Retention{})
require.NoError(t, err, clues.ToCore(err))

defer func() {

@ -44,7 +44,8 @@ func (suite *StreamStoreIntgSuite) SetupSubTest() {
st := storeTD.NewPrefixedS3Storage(t)

k := kopia.NewConn(st)
require.NoError(t, k.Initialize(ctx, repository.Options{}))
err := k.Initialize(ctx, repository.Options{}, repository.Retention{})
require.NoError(t, err, clues.ToCore(err))

suite.kcloser = func() { k.Close(ctx) }

@ -16,7 +16,7 @@ import (
)

const (
defaultRestoreLocation = "Corso_Restore_"
DefaultRestoreLocation = "Corso_Restore_"
)

// CollisionPolicy describes how the datalayer behaves in case of a collision.
@ -70,12 +70,12 @@ type RestoreConfig struct {
func DefaultRestoreConfig(timeFormat dttm.TimeFormat) RestoreConfig {
return RestoreConfig{
OnCollision: Skip,
Location: defaultRestoreLocation + dttm.FormatNow(timeFormat),
Location: DefaultRestoreLocation + dttm.FormatNow(timeFormat),
}
}

func DefaultRestoreContainerName(timeFormat dttm.TimeFormat) string {
return defaultRestoreLocation + dttm.FormatNow(timeFormat)
return DefaultRestoreLocation + dttm.FormatNow(timeFormat)
}

// EnsureRestoreConfigDefaults sets all non-supported values in the config
@ -107,10 +107,6 @@ var (
// interface compliance required for handling PII
_ clues.Concealer = &RestoreConfig{}
_ fmt.Stringer = &RestoreConfig{}

// interface compliance for the observe package to display
// values without concealing PII.
_ clues.PlainStringer = &RestoreConfig{}
)

func (rc RestoreConfig) marshal() string {

79
src/pkg/export/consume.go
Normal file
@ -0,0 +1,79 @@
package export

import (
"context"
"io"
"os"
"path/filepath"

"github.com/alcionai/clues"

"github.com/alcionai/corso/src/internal/observe"
"github.com/alcionai/corso/src/pkg/fault"
)

func ConsumeExportCollections(
ctx context.Context,
exportLocation string,
expColl []Collection,
errs *fault.Bus,
) error {
el := errs.Local()

for _, col := range expColl {
if el.Failure() != nil {
break
}

folder := filepath.Join(exportLocation, col.BasePath())
ictx := clues.Add(ctx, "dir_name", folder)

for item := range col.Items(ctx) {
if item.Error != nil {
el.AddRecoverable(ictx, clues.Wrap(item.Error, "getting item").WithClues(ctx))
}

if err := writeItem(ictx, item, folder); err != nil {
el.AddRecoverable(
ictx,
clues.Wrap(err, "writing item").With("file_name", item.Data.Name).WithClues(ctx))
}
}
}

return el.Failure()
}

// writeItem writes an ExportItem to disk in the specified folder.
func writeItem(ctx context.Context, item Item, folder string) error {
name := item.Data.Name
fpath := filepath.Join(folder, name)

progReader, pclose := observe.ItemSpinner(
ctx,
item.Data.Body,
observe.ItemExportMsg,
clues.Hide(name))

defer item.Data.Body.Close()
defer pclose()

err := os.MkdirAll(folder, os.ModePerm)
if err != nil {
return clues.Wrap(err, "creating directory")
}

// In case the user tries to restore to a non-clean
// directory, we might run into collisions and fail.
f, err := os.Create(fpath)
if err != nil {
return clues.Wrap(err, "creating file")
}

_, err = io.Copy(f, progReader)
if err != nil {
return clues.Wrap(err, "writing data")
}

return nil
}
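A hedged sketch of how an SDK consumer might hand collections to ConsumeExportCollections; writeExportToDisk, colls, and outDir are placeholders, and the fail-fast fault bus mirrors the test below:

// Sketch: write the collections produced by an export operation to disk.
func writeExportToDisk(ctx context.Context, colls []export.Collection, outDir string) error {
    // fault.New(true) creates a fail-fast error bus.
    errs := fault.New(true)

    return export.ConsumeExportCollections(ctx, outDir, colls, errs)
}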
175
src/pkg/export/consume_test.go
Normal file
@ -0,0 +1,175 @@
package export

import (
"bytes"
"context"
"io"
"os"
"path/filepath"
"testing"

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"

"github.com/alcionai/corso/src/internal/tester"
"github.com/alcionai/corso/src/pkg/fault"
)

type ExportE2ESuite struct {
tester.Suite
called bool
}

func TestExportE2ESuite(t *testing.T) {
suite.Run(t, &ExportE2ESuite{Suite: tester.NewE2ESuite(t, nil)})
}

func (suite *ExportE2ESuite) SetupSuite() {
suite.called = true
}

type mockExportCollection struct {
path string
items []Item
}

func (mec mockExportCollection) BasePath() string { return mec.path }
func (mec mockExportCollection) Items(context.Context) <-chan Item {
ch := make(chan Item)

go func() {
defer close(ch)

for _, item := range mec.items {
ch <- item
}
}()

return ch
}

func (suite *ExportE2ESuite) TestConsumeExportCollection() {
type ei struct {
name string
body string
}

type i struct {
path string
items []ei
}

table := []struct {
name string
cols []i
}{
{
name: "single root collection single item",
cols: []i{
{
path: "",
items: []ei{
{
name: "name1",
body: "body1",
},
},
},
},
},
{
name: "single root collection multiple items",
cols: []i{
{
path: "",
items: []ei{
{
name: "name1",
body: "body1",
},
{
name: "name2",
body: "body2",
},
},
},
},
},
{
name: "multiple collections multiple items",
cols: []i{
{
path: "",
items: []ei{
{
name: "name1",
body: "body1",
},
{
name: "name2",
body: "body2",
},
},
},
{
path: "folder",
items: []ei{
{
name: "name3",
body: "body3",
},
},
},
},
},
}

for _, test := range table {
suite.Run(test.name, func() {
t := suite.T()

ctx, flush := tester.NewContext(t)
defer flush()

ecs := []Collection{}
for _, col := range test.cols {
items := []Item{}
for _, item := range col.items {
items = append(items, Item{
Data: ItemData{
Name: item.name,
Body: io.NopCloser((bytes.NewBufferString(item.body))),
},
})
}

ecs = append(ecs, mockExportCollection{
path: col.path,
items: items,
})
}

dir, err := os.MkdirTemp("", "export-test")
require.NoError(t, err)
defer os.RemoveAll(dir)

err = ConsumeExportCollections(ctx, dir, ecs, fault.New(true))
require.NoError(t, err, "writing data")

for _, col := range test.cols {
for _, item := range col.items {
f, err := os.Open(filepath.Join(dir, col.path, item.name))
require.NoError(t, err, "opening file")

buf := new(bytes.Buffer)

_, err = buf.ReadFrom(f)
require.NoError(t, err, "reading file")

assert.Equal(t, item.body, buf.String(), "file contents")
}
}
})
}
}
@ -7,7 +7,9 @@ import (

// Collection is the interface that is returned to the SDK consumer
type Collection interface {
// BasePath gets the base path of the collection
// BasePath gets the base path of the collection. This is derived
// from FullPath, but trims out things like the drive ID or any other part
// that is not needed to show the path to the collection.
BasePath() string

// Items gets the items within the collection(folder)

@ -501,7 +501,7 @@ func suffixed(target, input string) bool {
// Printers and PII control
// ----------------------------------------------------------------------------------------------------

var _ clues.PlainConcealer = &Filter{}
var _ clues.Concealer = &Filter{}

var safeFilterValues = map[string]struct{}{"*": {}}

@ -2,7 +2,6 @@ package path

import (
"fmt"
"strings"

"github.com/alcionai/clues"

@ -54,10 +53,6 @@ var (
// interface compliance required for handling PII
_ clues.Concealer = &Elements{}
_ fmt.Stringer = &Elements{}

// interface compliance for the observe package to display
// values without concealing PII.
_ clues.PlainStringer = &Elements{}
)

// Elements are a PII Concealer-compliant slice of elements within a path.
@ -123,16 +118,8 @@ func (el Elements) Last() string {
// LoggableDir takes in a path reference (of any structure) and conceals any
// non-standard elements (ids, filenames, foldernames, etc).
func LoggableDir(ref string) string {
r := ref
n := strings.TrimSuffix(r, string(PathSeparator))

for n != r {
r = n
n = strings.TrimSuffix(r, string(PathSeparator))
}

elems := Split(r)
elems = pii.ConcealElements(elems, piiSafePathElems)

return join(elems)
// Can't directly use Builder since that could return an error. Instead split
// into elements and use that.
split := Split(TrimTrailingSlash(ref))
return Elements(split).Conceal()
}

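For context, a hedged example of the kind of call site LoggableDir serves; the directory value and wrapper function are placeholders, and the concealed output depends on the configured PII rules:

// Sketch: conceal a raw directory reference before logging it.
func logDir(ctx context.Context, ref string) {
    // LoggableDir trims trailing separators, splits the reference into
    // elements, and conceals any non-safe elements before rendering.
    logger.Ctx(ctx).Infow("processing directory", "dir", path.LoggableDir(ref))
}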
@ -120,10 +120,6 @@ type Path interface {
// is appropriately hidden from logging, errors, and other outputs.
clues.Concealer
fmt.Stringer

// In the rare case that the path needs to get printed as a plain string,
// without obscuring values for PII.
clues.PlainStringer
}

// interface compliance required for handling PII

@ -21,6 +21,7 @@ import (
"github.com/alcionai/corso/src/pkg/backup"
"github.com/alcionai/corso/src/pkg/backup/details"
"github.com/alcionai/corso/src/pkg/control"
ctrlRepo "github.com/alcionai/corso/src/pkg/control/repository"
ctrlTD "github.com/alcionai/corso/src/pkg/control/testdata"
"github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/path"
@ -102,7 +103,7 @@ func initM365Repo(t *testing.T) (
FailureHandling: control.FailFast,
}

repo, err := repository.Initialize(ctx, ac, st, opts)
repo, err := repository.Initialize(ctx, ac, st, opts, ctrlRepo.Retention{})
require.NoError(t, err, clues.ToCore(err))

return ctx, repo, ac, st

@ -25,7 +25,7 @@ import (
"github.com/alcionai/corso/src/pkg/backup"
"github.com/alcionai/corso/src/pkg/backup/details"
"github.com/alcionai/corso/src/pkg/control"
rep "github.com/alcionai/corso/src/pkg/control/repository"
ctrlRepo "github.com/alcionai/corso/src/pkg/control/repository"
"github.com/alcionai/corso/src/pkg/count"
"github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/logger"
@ -82,8 +82,12 @@ type Repository interface {
) (operations.ExportOperation, error)
NewMaintenance(
ctx context.Context,
mOpts rep.Maintenance,
mOpts ctrlRepo.Maintenance,
) (operations.MaintenanceOperation, error)
NewRetentionConfig(
ctx context.Context,
rcOpts ctrlRepo.Retention,
) (operations.RetentionConfigOperation, error)
DeleteBackup(ctx context.Context, id string) error
BackupGetter
// ConnectToM365 establishes graph api connections
@ -117,7 +121,8 @@ func (r repository) GetID() string {
// - validate the m365 account & secrets
// - connect to the m365 account to ensure communication capability
// - validate the provider config & secrets
// - initialize the kopia repo with the provider
// - initialize the kopia repo with the provider and retention parameters
// - update maintenance retention parameters as needed
// - store the configuration details
// - connect to the provider
// - return the connected repository
@ -126,6 +131,7 @@ func Initialize(
acct account.Account,
s storage.Storage,
opts control.Options,
retentionOpts ctrlRepo.Retention,
) (repo Repository, err error) {
ctx = clues.Add(
ctx,
@ -140,7 +146,7 @@ func Initialize(
}()

kopiaRef := kopia.NewConn(s)
if err := kopiaRef.Initialize(ctx, opts.Repo); err != nil {
if err := kopiaRef.Initialize(ctx, opts.Repo, retentionOpts); err != nil {
// replace common internal errors so that sdk users can check results with errors.Is()
if errors.Is(err, kopia.ErrorRepoAlreadyExists) {
return nil, clues.Stack(ErrorRepoAlreadyExists, err).WithClues(ctx)
@ -408,7 +414,7 @@ func (r repository) NewRestore(

func (r repository) NewMaintenance(
ctx context.Context,
mOpts rep.Maintenance,
mOpts ctrlRepo.Maintenance,
) (operations.MaintenanceOperation, error) {
return operations.NewMaintenanceOperation(
ctx,
@ -418,6 +424,18 @@ func (r repository) NewMaintenance(
r.Bus)
}

func (r repository) NewRetentionConfig(
ctx context.Context,
rcOpts ctrlRepo.Retention,
) (operations.RetentionConfigOperation, error) {
return operations.NewRetentionConfigOperation(
ctx,
r.Opts,
r.dataLayer,
rcOpts,
r.Bus)
}

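A hedged sketch of the SDK-level flow these changes enable: initialize the repository with retention options, then adjust retention later through the new operation. configureRetention is a placeholder, the package aliases follow the imports above, and the Extend option mirrors the integration test:

// Sketch: configure retention at init time, then extend object locks later.
func configureRetention(ctx context.Context, acct account.Account, st storage.Storage) error {
    r, err := repository.Initialize(ctx, acct, st, control.DefaultOptions(), ctrlRepo.Retention{})
    if err != nil {
        return err
    }
    defer r.Close(ctx)

    rco, err := r.NewRetentionConfig(ctx, ctrlRepo.Retention{Extend: ptr.To(true)})
    if err != nil {
        return err
    }

    return rco.Run(ctx)
}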
// Backup retrieves a backup by id.
func (r repository) Backup(ctx context.Context, id string) (*backup.Backup, error) {
return getBackup(ctx, id, store.NewKopiaStore(r.modelStore))

@ -60,7 +60,12 @@ func (suite *RepositoryUnitSuite) TestInitialize() {
st, err := test.storage()
assert.NoError(t, err, clues.ToCore(err))

_, err = Initialize(ctx, test.account, st, control.DefaultOptions())
_, err = Initialize(
ctx,
test.account,
st,
control.DefaultOptions(),
ctrlRepo.Retention{})
test.errCheck(t, err, clues.ToCore(err))
})
}
@ -137,7 +142,12 @@ func (suite *RepositoryIntegrationSuite) TestInitialize() {
defer flush()

st := test.storage(t)
r, err := Initialize(ctx, test.account, st, control.DefaultOptions())
r, err := Initialize(
ctx,
test.account,
st,
control.DefaultOptions(),
ctrlRepo.Retention{})
if err == nil {
defer func() {
err := r.Close(ctx)
@ -169,7 +179,7 @@ func (suite *RepositoryIntegrationSuite) TestInitializeWithRole() {
st.SessionName = "corso-repository-test"
st.SessionDuration = roleDuration.String()

r, err := Initialize(ctx, account.Account{}, st, control.Options{})
r, err := Initialize(ctx, account.Account{}, st, control.Options{}, ctrlRepo.Retention{})
require.NoError(suite.T(), err)

defer func() {
@ -186,7 +196,12 @@ func (suite *RepositoryIntegrationSuite) TestConnect() {
// need to initialize the repository before we can test connecting to it.
st := storeTD.NewPrefixedS3Storage(t)

repo, err := Initialize(ctx, account.Account{}, st, control.DefaultOptions())
repo, err := Initialize(
ctx,
account.Account{},
st,
control.DefaultOptions(),
ctrlRepo.Retention{})
require.NoError(t, err, clues.ToCore(err))

// now re-connect
@ -203,7 +218,12 @@ func (suite *RepositoryIntegrationSuite) TestConnect_sameID() {
// need to initialize the repository before we can test connecting to it.
st := storeTD.NewPrefixedS3Storage(t)

r, err := Initialize(ctx, account.Account{}, st, control.DefaultOptions())
r, err := Initialize(
ctx,
account.Account{},
st,
control.DefaultOptions(),
ctrlRepo.Retention{})
require.NoError(t, err, clues.ToCore(err))

oldID := r.GetID()
@ -228,7 +248,12 @@ func (suite *RepositoryIntegrationSuite) TestNewBackup() {
// need to initialize the repository before we can test connecting to it.
st := storeTD.NewPrefixedS3Storage(t)

r, err := Initialize(ctx, acct, st, control.DefaultOptions())
r, err := Initialize(
ctx,
acct,
st,
control.DefaultOptions(),
ctrlRepo.Retention{})
require.NoError(t, err, clues.ToCore(err))

userID := tconfig.M365UserID(t)
@ -250,7 +275,12 @@ func (suite *RepositoryIntegrationSuite) TestNewRestore() {
// need to initialize the repository before we can test connecting to it.
st := storeTD.NewPrefixedS3Storage(t)

r, err := Initialize(ctx, acct, st, control.DefaultOptions())
r, err := Initialize(
ctx,
acct,
st,
control.DefaultOptions(),
ctrlRepo.Retention{})
require.NoError(t, err, clues.ToCore(err))

ro, err := r.NewRestore(ctx, "backup-id", selectors.Selector{DiscreteOwner: "test"}, restoreCfg)
@ -269,7 +299,12 @@ func (suite *RepositoryIntegrationSuite) TestNewMaintenance() {
// need to initialize the repository before we can test connecting to it.
st := storeTD.NewPrefixedS3Storage(t)

r, err := Initialize(ctx, acct, st, control.DefaultOptions())
r, err := Initialize(
ctx,
acct,
st,
control.DefaultOptions(),
ctrlRepo.Retention{})
require.NoError(t, err, clues.ToCore(err))

mo, err := r.NewMaintenance(ctx, ctrlRepo.Maintenance{})
@ -286,7 +321,12 @@ func (suite *RepositoryIntegrationSuite) TestConnect_DisableMetrics() {
// need to initialize the repository before we can test connecting to it.
st := storeTD.NewPrefixedS3Storage(t)

repo, err := Initialize(ctx, account.Account{}, st, control.DefaultOptions())
repo, err := Initialize(
ctx,
account.Account{},
st,
control.DefaultOptions(),
ctrlRepo.Retention{})
require.NoError(t, err)

// now re-connect
@ -350,7 +390,7 @@ func (suite *RepositoryIntegrationSuite) Test_Options() {
ctx, flush := tester.NewContext(t)
defer flush()

repo, err := Initialize(ctx, acct, st, test.opts())
repo, err := Initialize(ctx, acct, st, test.opts(), ctrlRepo.Retention{})
require.NoError(t, err)

r := repo.(*repository)

@ -240,7 +240,7 @@ func (suite *RepositoryModelIntgSuite) SetupSuite() {

require.NotNil(t, k)

err = k.Initialize(ctx, rep.Options{})
err = k.Initialize(ctx, rep.Options{}, rep.Retention{})
require.NoError(t, err, clues.ToCore(err))

err = k.Connect(ctx, rep.Options{})
@ -291,8 +291,11 @@ func (suite *RepositoryModelIntgSuite) TestGetRepositoryModel() {
k = kopia.NewConn(s)
)

require.NoError(t, k.Initialize(ctx, rep.Options{}))
require.NoError(t, k.Connect(ctx, rep.Options{}))
err := k.Initialize(ctx, rep.Options{}, rep.Retention{})
require.NoError(t, err, "initializing repo: %v", clues.ToCore(err))

err = k.Connect(ctx, rep.Options{})
require.NoError(t, err, "connecting to repo: %v", clues.ToCore(err))

defer k.Close(ctx)

@ -153,9 +153,6 @@ type (
// Primarily to ensure that root- or mid-tier scopes (such as folders)
// cascade 'Any' matching to more granular categories.
setDefaults()

// Scopes need to comply with PII printing controls.
clues.PlainConcealer
}
// scopeT is the generic type interface of a scoper.
scopeT interface {

@ -341,7 +341,7 @@ func selectorAsIface[T any](s Selector) (T, error) {
// Stringers and Concealers
// ---------------------------------------------------------------------------

var _ clues.PlainConcealer = &Selector{}
var _ clues.Concealer = &Selector{}

type loggableSelector struct {
Service service `json:"service,omitempty"`