Merge branch 'main' into teamsDiscovery

commit 7d642d6b6b

3 changes: .github/actions/slack-message/action.yml (vendored)
@@ -31,6 +31,7 @@ runs:
       - name: use url or blank val
         shell: bash
         run: |
+          echo "STEP=${{ github.action || '' }}" >> $GITHUB_ENV
           echo "JOB=${{ github.job || '' }}" >> $GITHUB_ENV
           echo "LOGS=${{ github.run_id && env.logurl || '-' }}" >> $GITHUB_ENV
           echo "COMMIT=${{ github.sha && env.commiturl || '-' }}" >> $GITHUB_ENV
@@ -50,7 +51,7 @@ runs:
               "type": "section",
               "text": {
                 "type": "mrkdwn",
-                "text": "${{ inputs.msg }} :: ${{ env.JOB }}\n${{ env.LOGS }} ${{ env.COMMIT }} ${{ env.REF }}"
+                "text": "${{ inputs.msg }} :: ${{ env.JOB }} - ${{ env.STEP }}\n${{ env.LOGS }} ${{ env.COMMIT }} ${{ env.REF }}"
               }
             }
           ]
@@ -7,8 +7,12 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

 ## [Unreleased] (beta)

+### Added
+
+- Restore commands now accept an optional resource override with the `--to-resource` flag. This allows restores to recreate backup data within different mailboxes, sites, and users.
+
 ### Fixed

 - SharePoint document libraries deleted after the last backup can now be restored.
 - Restore requires the protected resource to have access to the service being restored.

 ## [v0.11.1] (beta) - 2023-07-20
@@ -47,7 +47,7 @@ func prepM365Test(
 	vpr, cfgFP := tconfig.MakeTempTestConfigClone(t, force)
 	ctx = config.SetViper(ctx, vpr)

-	repo, err := repository.Initialize(ctx, acct, st, control.Defaults())
+	repo, err := repository.Initialize(ctx, acct, st, control.DefaultOptions())
 	require.NoError(t, err, clues.ToCore(err))

 	return acct, st, repo, vpr, recorder, cfgFP
@@ -9,11 +9,13 @@ import (
 const (
 	CollisionsFN  = "collisions"
 	DestinationFN = "destination"
+	ToResourceFN  = "to-resource"
 )

 var (
 	CollisionsFV  string
 	DestinationFV string
+	ToResourceFV  string
 )

 // AddRestoreConfigFlags adds the restore config flag set.
@@ -25,5 +27,8 @@ func AddRestoreConfigFlags(cmd *cobra.Command) {
 		"Sets the behavior for existing item collisions: "+string(control.Skip)+", "+string(control.Copy)+", or "+string(control.Replace))
 	fs.StringVar(
 		&DestinationFV, DestinationFN, "",
-		"Overrides the destination where items get restored; '/' places items into their original location")
+		"Overrides the folder where items get restored; '/' places items into their original location")
+	fs.StringVar(
+		&ToResourceFV, ToResourceFN, "",
+		"Overrides the protected resource (mailbox, site, user, etc) where data gets restored")
 }
@@ -200,7 +200,7 @@ func (suite *S3E2ESuite) TestConnectS3Cmd() {
 	ctx = config.SetViper(ctx, vpr)

 	// init the repo first
-	_, err = repository.Initialize(ctx, account.Account{}, st, control.Defaults())
+	_, err = repository.Initialize(ctx, account.Account{}, st, control.DefaultOptions())
 	require.NoError(t, err, clues.ToCore(err))

 	// then test it
@@ -84,6 +84,7 @@ func (suite *ExchangeUnitSuite) TestAddExchangeCommands() {
 			"--" + flags.CollisionsFN, testdata.Collisions,
 			"--" + flags.DestinationFN, testdata.Destination,
+			"--" + flags.ToResourceFN, testdata.ToResource,

 			"--" + flags.AWSAccessKeyFN, testdata.AWSAccessKeyID,
 			"--" + flags.AWSSecretAccessKeyFN, testdata.AWSSecretAccessKey,
@@ -125,6 +126,7 @@ func (suite *ExchangeUnitSuite) TestAddExchangeCommands() {
 			assert.Equal(t, testdata.Collisions, opts.RestoreCfg.Collisions)
 			assert.Equal(t, testdata.Destination, opts.RestoreCfg.Destination)
+			assert.Equal(t, testdata.ToResource, opts.RestoreCfg.ProtectedResource)

 			assert.Equal(t, testdata.AWSAccessKeyID, flags.AWSAccessKeyFV)
 			assert.Equal(t, testdata.AWSSecretAccessKey, flags.AWSSecretAccessKeyFV)
@@ -70,6 +70,7 @@ func (suite *OneDriveUnitSuite) TestAddOneDriveCommands() {
 			"--" + flags.CollisionsFN, testdata.Collisions,
 			"--" + flags.DestinationFN, testdata.Destination,
+			"--" + flags.ToResourceFN, testdata.ToResource,

 			"--" + flags.AWSAccessKeyFN, testdata.AWSAccessKeyID,
 			"--" + flags.AWSSecretAccessKeyFN, testdata.AWSSecretAccessKey,
@@ -80,6 +81,9 @@ func (suite *OneDriveUnitSuite) TestAddOneDriveCommands() {
 			"--" + flags.AzureClientSecretFN, testdata.AzureClientSecret,

 			"--" + flags.CorsoPassphraseFN, testdata.CorsoPassphrase,
+
+			// bool flags
+			"--" + flags.RestorePermissionsFN,
 		})

 		cmd.SetOut(new(bytes.Buffer)) // drop output
@@ -99,6 +103,7 @@ func (suite *OneDriveUnitSuite) TestAddOneDriveCommands() {
 			assert.Equal(t, testdata.Collisions, opts.RestoreCfg.Collisions)
 			assert.Equal(t, testdata.Destination, opts.RestoreCfg.Destination)
+			assert.Equal(t, testdata.ToResource, opts.RestoreCfg.ProtectedResource)

 			assert.Equal(t, testdata.AWSAccessKeyID, flags.AWSAccessKeyFV)
 			assert.Equal(t, testdata.AWSSecretAccessKey, flags.AWSSecretAccessKeyFV)
@@ -109,6 +114,7 @@ func (suite *OneDriveUnitSuite) TestAddOneDriveCommands() {
 			assert.Equal(t, testdata.AzureClientSecret, flags.AzureClientSecretFV)

 			assert.Equal(t, testdata.CorsoPassphrase, flags.CorsoPassphraseFV)
+			assert.True(t, flags.RestorePermissionsFV)
 		})
 	}
 }
@@ -75,6 +75,7 @@ func (suite *SharePointUnitSuite) TestAddSharePointCommands() {
 			"--" + flags.CollisionsFN, testdata.Collisions,
 			"--" + flags.DestinationFN, testdata.Destination,
+			"--" + flags.ToResourceFN, testdata.ToResource,

 			"--" + flags.AWSAccessKeyFN, testdata.AWSAccessKeyID,
 			"--" + flags.AWSSecretAccessKeyFN, testdata.AWSSecretAccessKey,
@@ -85,6 +86,9 @@ func (suite *SharePointUnitSuite) TestAddSharePointCommands() {
 			"--" + flags.AzureClientSecretFN, testdata.AzureClientSecret,

 			"--" + flags.CorsoPassphraseFN, testdata.CorsoPassphrase,
+
+			// bool flags
+			"--" + flags.RestorePermissionsFN,
 		})

 		cmd.SetOut(new(bytes.Buffer)) // drop output
@@ -111,6 +115,7 @@ func (suite *SharePointUnitSuite) TestAddSharePointCommands() {
 			assert.Equal(t, testdata.Collisions, opts.RestoreCfg.Collisions)
 			assert.Equal(t, testdata.Destination, opts.RestoreCfg.Destination)
+			assert.Equal(t, testdata.ToResource, opts.RestoreCfg.ProtectedResource)

 			assert.Equal(t, testdata.AWSAccessKeyID, flags.AWSAccessKeyFV)
 			assert.Equal(t, testdata.AWSSecretAccessKey, flags.AWSSecretAccessKeyFV)
@@ -121,6 +126,9 @@ func (suite *SharePointUnitSuite) TestAddSharePointCommands() {
 			assert.Equal(t, testdata.AzureClientSecret, flags.AzureClientSecretFV)

 			assert.Equal(t, testdata.CorsoPassphrase, flags.CorsoPassphraseFV)
+
+			// bool flags
+			assert.True(t, flags.RestorePermissionsFV)
 		})
 	}
 }
@@ -8,7 +8,7 @@ import (

 // Control produces the control options based on the user's flags.
 func Control() control.Options {
-	opt := control.Defaults()
+	opt := control.DefaultOptions()

 	if flags.FailFastFV {
 		opt.FailureHandling = control.FailFast
@@ -21,7 +21,6 @@ func Control() control.Options {
 	opt.DeltaPageSize = dps
 	opt.DisableMetrics = flags.NoStatsFV
-	opt.RestorePermissions = flags.RestorePermissionsFV
 	opt.SkipReduce = flags.SkipReduceFV
 	opt.ToggleFeatures.DisableIncrementals = flags.DisableIncrementalsFV
 	opt.ToggleFeatures.DisableDelta = flags.DisableDeltaFV
@@ -18,16 +18,20 @@ type RestoreCfgOpts struct {
 	// DTTMFormat is the timestamp format appended
 	// to the default folder name. Defaults to
 	// dttm.HumanReadable.
-	DTTMFormat dttm.TimeFormat
+	DTTMFormat         dttm.TimeFormat
+	ProtectedResource  string
+	RestorePermissions bool

 	Populated flags.PopulatedFlags
 }

 func makeRestoreCfgOpts(cmd *cobra.Command) RestoreCfgOpts {
 	return RestoreCfgOpts{
-		Collisions:  flags.CollisionsFV,
-		Destination: flags.DestinationFV,
-		DTTMFormat:  dttm.HumanReadable,
+		Collisions:         flags.CollisionsFV,
+		Destination:        flags.DestinationFV,
+		DTTMFormat:         dttm.HumanReadable,
+		ProtectedResource:  flags.ToResourceFV,
+		RestorePermissions: flags.RestorePermissionsFV,

 		// populated contains the list of flags that appear in the
 		// command, according to pflags. Use this to differentiate
@@ -67,6 +71,9 @@ func MakeRestoreConfig(
 		restoreCfg.Location = opts.Destination
 	}

+	restoreCfg.ProtectedResource = opts.ProtectedResource
+	restoreCfg.IncludePermissions = opts.RestorePermissions
+
 	Infof(ctx, "Restoring to folder %s", restoreCfg.Location)

 	return restoreCfg
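A note on the Populated field used above: MakeRestoreConfig only honors Collisions and Destination when their flag names appear in the populated set, which pflag derives from the flags actually passed on the command line. A minimal, self-contained sketch of that mechanism; the helper name populatedFlags is illustrative and not part of this change:

package main

import (
	"fmt"

	"github.com/spf13/cobra"
	"github.com/spf13/pflag"
)

// populatedFlags is a hypothetical helper mirroring how a "populated" set can
// be built: pflag's Visit walks only flags explicitly set on the command line.
func populatedFlags(cmd *cobra.Command) map[string]struct{} {
	pop := map[string]struct{}{}

	cmd.Flags().Visit(func(f *pflag.Flag) {
		pop[f.Name] = struct{}{}
	})

	return pop
}

func main() {
	var collisions string

	cmd := &cobra.Command{Use: "restore", Run: func(*cobra.Command, []string) {}}
	cmd.Flags().StringVar(&collisions, "collisions", "skip", "collision policy")

	cmd.SetArgs([]string{"--collisions", "copy"})
	_ = cmd.Execute()

	_, set := populatedFlags(cmd)["collisions"]
	fmt.Println(set) // true: the flag was explicitly provided, not defaulted
}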
@@ -68,18 +68,18 @@ func (suite *RestoreCfgUnitSuite) TestValidateRestoreConfigFlags() {
 }

 func (suite *RestoreCfgUnitSuite) TestMakeRestoreConfig() {
-	rco := &RestoreCfgOpts{
-		Collisions:  "collisions",
-		Destination: "destination",
-	}
-
 	table := []struct {
 		name      string
+		rco       *RestoreCfgOpts
 		populated flags.PopulatedFlags
 		expect    control.RestoreConfig
 	}{
 		{
-			name:      "not populated",
+			name: "not populated",
+			rco: &RestoreCfgOpts{
+				Collisions:  "collisions",
+				Destination: "destination",
+			},
 			populated: flags.PopulatedFlags{},
 			expect: control.RestoreConfig{
 				OnCollision: control.Skip,
@@ -88,6 +88,10 @@
 		},
 		{
 			name: "collision populated",
+			rco: &RestoreCfgOpts{
+				Collisions:  "collisions",
+				Destination: "destination",
+			},
 			populated: flags.PopulatedFlags{
 				flags.CollisionsFN: {},
 			},
@@ -98,6 +102,10 @@
 		},
 		{
 			name: "destination populated",
+			rco: &RestoreCfgOpts{
+				Collisions:  "collisions",
+				Destination: "destination",
+			},
 			populated: flags.PopulatedFlags{
 				flags.DestinationFN: {},
 			},
@@ -108,6 +116,10 @@
 		},
 		{
 			name: "both populated",
+			rco: &RestoreCfgOpts{
+				Collisions:  "collisions",
+				Destination: "destination",
+			},
 			populated: flags.PopulatedFlags{
 				flags.CollisionsFN:  {},
 				flags.DestinationFN: {},
@@ -117,6 +129,23 @@
 				Location:    "destination",
 			},
 		},
+		{
+			name: "with restore permissions",
+			rco: &RestoreCfgOpts{
+				Collisions:         "collisions",
+				Destination:        "destination",
+				RestorePermissions: true,
+			},
+			populated: flags.PopulatedFlags{
+				flags.CollisionsFN:  {},
+				flags.DestinationFN: {},
+			},
+			expect: control.RestoreConfig{
+				OnCollision:        control.CollisionPolicy("collisions"),
+				Location:           "destination",
+				IncludePermissions: true,
+			},
+		},
 	}
 	for _, test := range table {
 		suite.Run(test.name, func() {
@@ -125,12 +154,13 @@
 			ctx, flush := tester.NewContext(t)
 			defer flush()

-			opts := *rco
+			opts := *test.rco
 			opts.Populated = test.populated

 			result := MakeRestoreConfig(ctx, opts)
 			assert.Equal(t, test.expect.OnCollision, result.OnCollision)
 			assert.Contains(t, result.Location, test.expect.Location)
+			assert.Equal(t, test.expect.IncludePermissions, result.IncludePermissions)
 		})
 	}
 }
1 change: src/cli/utils/testdata/flags.go (vendored)

@@ -46,6 +46,7 @@ var (
 	Collisions         = "collisions"
 	Destination        = "destination"
+	ToResource         = "toResource"
 	RestorePermissions = true

 	DeltaPageSize = "deltaPageSize"
@@ -21,12 +21,12 @@ import (
 	odStub "github.com/alcionai/corso/src/internal/m365/onedrive/stub"
 	"github.com/alcionai/corso/src/internal/m365/resource"
 	m365Stub "github.com/alcionai/corso/src/internal/m365/stub"
+	"github.com/alcionai/corso/src/internal/operations/inject"
 	"github.com/alcionai/corso/src/internal/tester"
 	"github.com/alcionai/corso/src/internal/version"
 	"github.com/alcionai/corso/src/pkg/account"
 	"github.com/alcionai/corso/src/pkg/backup/details"
 	"github.com/alcionai/corso/src/pkg/control"
 	"github.com/alcionai/corso/src/pkg/control/testdata"
 	"github.com/alcionai/corso/src/pkg/count"
 	"github.com/alcionai/corso/src/pkg/credentials"
 	"github.com/alcionai/corso/src/pkg/fault"
@@ -104,7 +104,15 @@ func generateAndRestoreItems(
 	print.Infof(ctx, "Generating %d %s items in %s\n", howMany, cat, Destination)

-	return ctrl.ConsumeRestoreCollections(ctx, version.Backup, sel, restoreCfg, opts, dataColls, errs, ctr)
+	rcc := inject.RestoreConsumerConfig{
+		BackupVersion:     version.Backup,
+		Options:           opts,
+		ProtectedResource: sel,
+		RestoreConfig:     restoreCfg,
+		Selector:          sel,
+	}
+
+	return ctrl.ConsumeRestoreCollections(ctx, rcc, dataColls, errs, ctr)
 }

 // ------------------------------------------------------------------------------------------
@@ -144,7 +152,7 @@ func getControllerAndVerifyResourceOwner(
 		return nil, account.Account{}, nil, clues.Wrap(err, "connecting to graph api")
 	}

-	id, _, err := ctrl.PopulateOwnerIDAndNamesFrom(ctx, resourceOwner, nil)
+	id, _, err := ctrl.PopulateProtectedResourceIDAndName(ctx, resourceOwner, nil)
 	if err != nil {
 		return nil, account.Account{}, nil, clues.Wrap(err, "verifying user")
 	}
@@ -216,7 +224,8 @@ var (

 func generateAndRestoreDriveItems(
 	ctrl *m365.Controller,
-	resourceOwner, secondaryUserID, secondaryUserName string,
+	protectedResource idname.Provider,
+	secondaryUserID, secondaryUserName string,
 	acct account.Account,
 	service path.ServiceType,
 	cat path.CategoryType,
@@ -240,14 +249,23 @@ func generateAndRestoreDriveItems(
 	switch service {
 	case path.SharePointService:
-		d, err := ctrl.AC.Stable.Client().Sites().BySiteId(resourceOwner).Drive().Get(ctx, nil)
+		d, err := ctrl.AC.Stable.
+			Client().
+			Sites().
+			BySiteId(protectedResource.ID()).
+			Drive().
+			Get(ctx, nil)
 		if err != nil {
 			return nil, clues.Wrap(err, "getting site's default drive")
 		}

 		driveID = ptr.Val(d.GetId())
 	default:
-		d, err := ctrl.AC.Stable.Client().Users().ByUserId(resourceOwner).Drive().Get(ctx, nil)
+		d, err := ctrl.AC.Stable.Client().
+			Users().
+			ByUserId(protectedResource.ID()).
+			Drive().
+			Get(ctx, nil)
 		if err != nil {
 			return nil, clues.Wrap(err, "getting user's default drive")
 		}
@@ -407,18 +425,16 @@ func generateAndRestoreDriveItems(
 	// input,
 	// version.Backup)

-	opts := control.Options{
-		RestorePermissions: true,
-		ToggleFeatures:     control.Toggles{},
-	}
+	opts := control.DefaultOptions()
+	restoreCfg.IncludePermissions = true

 	config := m365Stub.ConfigInfo{
 		Opts:           opts,
 		Resource:       resource.Users,
 		Service:        service,
 		Tenant:         tenantID,
-		ResourceOwners: []string{resourceOwner},
-		RestoreCfg:     testdata.DefaultRestoreConfig(""),
+		ResourceOwners: []string{protectedResource.ID()},
+		RestoreCfg:     restoreCfg,
 	}

 	_, _, collections, _, err := m365Stub.GetCollectionsAndExpected(
@@ -429,5 +445,13 @@ func generateAndRestoreDriveItems(
 		return nil, err
 	}

-	return ctrl.ConsumeRestoreCollections(ctx, version.Backup, sel, restoreCfg, opts, collections, errs, ctr)
+	rcc := inject.RestoreConsumerConfig{
+		BackupVersion:     version.Backup,
+		Options:           opts,
+		ProtectedResource: protectedResource,
+		RestoreConfig:     restoreCfg,
+		Selector:          sel,
+	}
+
+	return ctrl.ConsumeRestoreCollections(ctx, rcc, collections, errs, ctr)
 }
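The recurring change above replaces ConsumeRestoreCollections' positional arguments with a single inject.RestoreConsumerConfig. A minimal sketch of why a parameter object helps call sites like these; the types below are trimmed stand-ins for illustration, not the real definitions:

package main

import "fmt"

// Trimmed stand-in for the real config; the actual struct lives in
// internal/operations/inject and carries selectors, options, and restore config.
type restoreConsumerConfig struct {
	backupVersion     int
	protectedResource string
	location          string
}

// Before: each new argument (e.g. the protected-resource override) forces a
// signature change at every caller. After: callers fill one config struct,
// and unrelated call sites keep compiling when a field is added.
func consumeRestoreCollections(rcc restoreConsumerConfig) {
	fmt.Printf("restoring v%d into %q for %s\n",
		rcc.backupVersion, rcc.location, rcc.protectedResource)
}

func main() {
	consumeRestoreCollections(restoreConsumerConfig{
		backupVersion:     5,
		protectedResource: "user-id",
		location:          "Corso_Restore",
	})
}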
@@ -72,7 +72,7 @@ func handleExchangeEmailFactory(cmd *cobra.Command, args []string) error {
 			subject, body, body,
 			now, now, now, now)
 		},
-		control.Defaults(),
+		control.DefaultOptions(),
 		errs,
 		count.New())
 	if err != nil {
@@ -121,7 +121,7 @@ func handleExchangeCalendarEventFactory(cmd *cobra.Command, args []string) error
 			exchMock.NoAttachments, exchMock.NoCancelledOccurrences,
 			exchMock.NoExceptionOccurrences)
 		},
-		control.Defaults(),
+		control.DefaultOptions(),
 		errs,
 		count.New())
 	if err != nil {
@@ -172,7 +172,7 @@ func handleExchangeContactFactory(cmd *cobra.Command, args []string) error {
 			"123-456-7890",
 			)
 		},
-		control.Defaults(),
+		control.DefaultOptions(),
 		errs,
 		count.New())
 	if err != nil {
@@ -47,7 +47,7 @@ func handleOneDriveFileFactory(cmd *cobra.Command, args []string) error {

 	deets, err := generateAndRestoreDriveItems(
 		ctrl,
-		inp.ID(),
+		inp,
 		SecondaryUser,
 		strings.ToLower(SecondaryUser),
 		acct,
@@ -47,7 +47,7 @@ func handleSharePointLibraryFileFactory(cmd *cobra.Command, args []string) error

 	deets, err := generateAndRestoreDriveItems(
 		ctrl,
-		inp.ID(),
+		inp,
 		SecondaryUser,
 		strings.ToLower(SecondaryUser),
 		acct,
@@ -8,7 +8,7 @@ require (
 	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0
 	github.com/alcionai/clues v0.0.0-20230630194723-e24d7940e07a
 	github.com/armon/go-metrics v0.4.1
-	github.com/aws/aws-sdk-go v1.44.307
+	github.com/aws/aws-sdk-go v1.44.308
 	github.com/aws/aws-xray-sdk-go v1.8.1
 	github.com/cenkalti/backoff/v4 v4.2.1
 	github.com/google/uuid v1.3.0
@@ -66,8 +66,8 @@ github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY
 github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
 github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
 github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
-github.com/aws/aws-sdk-go v1.44.307 h1:2R0/EPgpZcFSUwZhYImq/srjaOrOfLv5MNRzrFyAM38=
-github.com/aws/aws-sdk-go v1.44.307/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
+github.com/aws/aws-sdk-go v1.44.308 h1:XKu+76UHsD5LaiU2Zb1q42uWakw80Az7x39jJXXahos=
+github.com/aws/aws-sdk-go v1.44.308/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
 github.com/aws/aws-xray-sdk-go v1.8.1 h1:O4pXV+hnCskaamGsZnFpzHyAmgPGusBMN6i7nnsy0Fo=
 github.com/aws/aws-xray-sdk-go v1.8.1/go.mod h1:wMmVYzej3sykAttNBkXQHK/+clAPWTOrPiajEk7Cp3A=
 github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
99 additions: src/internal/archive/zip.go (new file)

@@ -0,0 +1,99 @@
package archive

import (
	"archive/zip"
	"context"
	"io"
	"path"

	"github.com/alcionai/clues"

	"github.com/alcionai/corso/src/internal/common/dttm"
	"github.com/alcionai/corso/src/pkg/export"
)

const (
	// ZipCopyBufferSize is the size of the copy buffer for zip
	// write operations
	// TODO(meain): tweak this value
	ZipCopyBufferSize = 5 * 1024 * 1024
)

type zipCollection struct {
	reader io.ReadCloser
}

func (z zipCollection) BasePath() string {
	return ""
}

func (z zipCollection) Items(ctx context.Context) <-chan export.Item {
	rc := make(chan export.Item, 1)
	defer close(rc)

	rc <- export.Item{
		Data: export.ItemData{
			Name: "Corso_Export_" + dttm.FormatNow(dttm.HumanReadable) + ".zip",
			Body: z.reader,
		},
	}

	return rc
}

// ZipExportCollection takes a list of export collections and zips
// them into a single collection.
func ZipExportCollection(
	ctx context.Context,
	expCollections []export.Collection,
) (export.Collection, error) {
	if len(expCollections) == 0 {
		return nil, clues.New("no export collections provided")
	}

	reader, writer := io.Pipe()
	wr := zip.NewWriter(writer)

	go func() {
		defer writer.Close()
		defer wr.Close()

		buf := make([]byte, ZipCopyBufferSize)

		for _, ec := range expCollections {
			folder := ec.BasePath()
			items := ec.Items(ctx)

			for item := range items {
				err := item.Error
				if err != nil {
					writer.CloseWithError(clues.Wrap(err, "getting export item").With("id", item.ID))
					return
				}

				name := item.Data.Name

				// We assume folder and name to not contain any path separators.
				// Also, this should always use `/` as this is
				// created within a zip file and not written to disk.
				// TODO(meain): Exchange paths might contain a path
				// separator and will have to have special handling.

				//nolint:forbidigo
				f, err := wr.Create(path.Join(folder, name))
				if err != nil {
					writer.CloseWithError(clues.Wrap(err, "creating zip entry").With("name", name).With("id", item.ID))
					return
				}

				_, err = io.CopyBuffer(f, item.Data.Body, buf)
				if err != nil {
					writer.CloseWithError(clues.Wrap(err, "writing zip entry").With("name", name).With("id", item.ID))
					return
				}
			}
		}
	}()

	return zipCollection{reader}, nil
}
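The io.Pipe pairing above is what makes the zip streamable: the goroutine writes entries into the pipe while the consumer drains the single item's Body, so no archive is buffered in memory or on disk. A rough usage sketch, assuming export.Collection is the two-method interface zipCollection satisfies (BasePath and Items); memCollection is a made-up stand-in:

package archive_test

import (
	"context"
	"io"
	"os"
	"strings"

	"github.com/alcionai/corso/src/internal/archive"
	"github.com/alcionai/corso/src/pkg/export"
)

// memCollection is a hypothetical in-memory collection for illustration only;
// real collections are produced by the export pipeline.
type memCollection struct{ folder, name, body string }

func (c memCollection) BasePath() string { return c.folder }

func (c memCollection) Items(ctx context.Context) <-chan export.Item {
	ch := make(chan export.Item, 1)
	defer close(ch)

	ch <- export.Item{
		Data: export.ItemData{
			Name: c.name,
			Body: io.NopCloser(strings.NewReader(c.body)),
		},
	}

	return ch
}

func writeZip(ctx context.Context, dest string) error {
	zc, err := archive.ZipExportCollection(
		ctx,
		[]export.Collection{memCollection{"docs", "readme.txt", "hello"}})
	if err != nil {
		return err
	}

	// The collection yields exactly one item; its Body streams the zip bytes
	// as the background goroutine produces them.
	for item := range zc.Items(ctx) {
		f, err := os.Create(dest)
		if err != nil {
			return err
		}

		if _, err := io.Copy(f, item.Data.Body); err != nil {
			return err
		}

		return f.Close()
	}

	return nil
}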
@@ -28,6 +28,10 @@ type is struct {
 	name string
 }

+func NewProvider(id, name string) *is {
+	return &is{id, name}
+}
+
 func (is is) ID() string   { return is.id }
 func (is is) Name() string { return is.name }
60 additions: src/internal/common/idname/idname_test.go (new file)

@@ -0,0 +1,60 @@
package idname

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/suite"

	"github.com/alcionai/corso/src/internal/tester"
)

type IDNameUnitSuite struct {
	tester.Suite
}

func TestIDNameUnitSuite(t *testing.T) {
	suite.Run(t, &IDNameUnitSuite{Suite: tester.NewUnitSuite(t)})
}

func (suite *IDNameUnitSuite) TestAdd() {
	table := []struct {
		name       string
		inID       string
		inName     string
		searchID   string
		searchName string
	}{
		{
			name:       "basic",
			inID:       "foo",
			inName:     "bar",
			searchID:   "foo",
			searchName: "bar",
		},
		{
			name:       "change casing",
			inID:       "FNORDS",
			inName:     "SMARF",
			searchID:   "fnords",
			searchName: "smarf",
		},
	}
	for _, test := range table {
		suite.Run(test.name, func() {
			t := suite.T()

			cache := NewCache(nil)

			cache.Add(test.inID, test.inName)

			id, found := cache.IDOf(test.searchName)
			assert.True(t, found)
			assert.Equal(t, test.inID, id)

			name, found := cache.NameOf(test.searchID)
			assert.True(t, found)
			assert.Equal(t, test.inName, name)
		})
	}
}
@@ -91,6 +91,11 @@ func (c NoFetchRestoreCollection) FetchItemByName(context.Context, string) (Stre
 	return nil, ErrNotFound
 }

+type FetchRestoreCollection struct {
+	Collection
+	FetchItemByNamer
+}
+
 // Stream represents a single item within a Collection
 // that can be consumed as a stream (it embeds io.Reader)
 type Stream interface {
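FetchRestoreCollection is plain interface composition: embedding both interfaces produces a type that satisfies callers wanting either capability without writing forwarding methods. A generic illustration of the pattern; all names below are invented, not corso's:

package main

import "fmt"

type collection interface{ BasePath() string }

type fetchItemByNamer interface{ FetchItemByName(name string) string }

// The composite embeds both interfaces, so any pair of implementations
// can be bundled and passed where either capability is required.
type fetchRestoreCollection struct {
	collection
	fetchItemByNamer
}

type col struct{}

func (col) BasePath() string { return "/inbox" }

type fetcher struct{}

func (fetcher) FetchItemByName(name string) string { return "item:" + name }

func main() {
	frc := fetchRestoreCollection{col{}, fetcher{}}
	fmt.Println(frc.BasePath(), frc.FetchItemByName("a"))
}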
@@ -35,6 +35,8 @@ const (
 	BackupEnd        = "Backup End"
 	RestoreStart     = "Restore Start"
 	RestoreEnd       = "Restore End"
+	ExportStart      = "Export Start"
+	ExportEnd        = "Export End"
 	MaintenanceStart = "Maintenance Start"
 	MaintenanceEnd   = "Maintenance End"
@@ -49,6 +51,7 @@ const (
 	ItemsWritten = "items_written"
 	Resources    = "resources"
 	RestoreID    = "restore_id"
+	ExportID     = "export_id"
 	Service      = "service"
 	StartTime    = "start_time"
 	Status       = "status"
@@ -52,7 +52,7 @@ func (suite *EventsIntegrationSuite) TestNewBus() {
 	)
 	require.NoError(t, err, clues.ToCore(err))

-	b, err := events.NewBus(ctx, s, a.ID(), control.Defaults())
+	b, err := events.NewBus(ctx, s, a.ID(), control.DefaultOptions())
 	require.NotEmpty(t, b)
 	require.NoError(t, err, clues.ToCore(err))
@@ -2,7 +2,6 @@ package m365

 import (
 	"context"
-	"strings"

 	"github.com/alcionai/clues"

@@ -44,7 +43,7 @@ func (ctrl *Controller) ProduceBackupCollections(
 	ctx, end := diagnostics.Span(
 		ctx,
 		"m365:produceBackupCollections",
-		diagnostics.Index("service", sels.Service.String()))
+		diagnostics.Index("service", sels.PathService().String()))
 	defer end()

 	ctx = graph.BindRateLimiterConfig(ctx, graph.LimiterCfg{Service: sels.PathService()})
@@ -61,8 +60,8 @@ func (ctrl *Controller) ProduceBackupCollections(
 	serviceEnabled, canMakeDeltaQueries, err := checkServiceEnabled(
 		ctx,
 		ctrl.AC.Users(),
-		path.ServiceType(sels.Service),
-		sels.DiscreteOwner)
+		sels.PathService(),
+		owner.ID())
 	if err != nil {
 		return nil, nil, false, err
 	}
@@ -194,10 +193,8 @@ func verifyBackupInputs(sels selectors.Selector, siteIDs []string) error {
 		ids = siteIDs
 	}

-	resourceOwner := strings.ToLower(sels.DiscreteOwner)
-
-	if !filters.Equal(ids).Compare(resourceOwner) {
-		return clues.Stack(graph.ErrResourceOwnerNotFound).With("missing_resource_owner", sels.DiscreteOwner)
+	if !filters.Contains(ids).Compare(sels.ID()) {
+		return clues.Stack(graph.ErrResourceOwnerNotFound).With("missing_protected_resource", sels.DiscreteOwner)
 	}

 	return nil
@@ -57,7 +57,7 @@ func (suite *DataCollectionIntgSuite) SetupSuite() {
 	suite.tenantID = creds.AzureTenantID

-	suite.ac, err = api.NewClient(creds, control.Defaults())
+	suite.ac, err = api.NewClient(creds, control.DefaultOptions())
 	require.NoError(t, err, clues.ToCore(err))
 }

@@ -120,7 +120,7 @@ func (suite *DataCollectionIntgSuite) TestExchangeDataCollection() {
 	sel := test.getSelector(t)
 	uidn := inMock.NewProvider(sel.ID(), sel.Name())

-	ctrlOpts := control.Defaults()
+	ctrlOpts := control.DefaultOptions()
 	ctrlOpts.ToggleFeatures.DisableDelta = !canMakeDeltaQueries

 	collections, excludes, canUsePreviousBackup, err := exchange.ProduceBackupCollections(
@@ -239,7 +239,7 @@ func (suite *DataCollectionIntgSuite) TestDataCollections_invalidResourceOwner()
 		test.getSelector(t),
 		nil,
 		version.NoBackup,
-		control.Defaults(),
+		control.DefaultOptions(),
 		fault.New(true))
 	assert.Error(t, err, clues.ToCore(err))
 	assert.False(t, canUsePreviousBackup, "can use previous backup")
@@ -296,7 +296,7 @@ func (suite *DataCollectionIntgSuite) TestSharePointDataCollection() {
 		nil,
 		ctrl.credentials,
 		ctrl,
-		control.Defaults(),
+		control.DefaultOptions(),
 		fault.New(true))
 	require.NoError(t, err, clues.ToCore(err))
 	assert.True(t, canUsePreviousBackup, "can use previous backup")
@@ -367,7 +367,7 @@ func (suite *SPCollectionIntgSuite) TestCreateSharePointCollection_Libraries() {
 		siteIDs = []string{siteID}
 	)

-	id, name, err := ctrl.PopulateOwnerIDAndNamesFrom(ctx, siteID, nil)
+	id, name, err := ctrl.PopulateProtectedResourceIDAndName(ctx, siteID, nil)
 	require.NoError(t, err, clues.ToCore(err))

 	sel := selectors.NewSharePointBackup(siteIDs)
@@ -381,7 +381,7 @@ func (suite *SPCollectionIntgSuite) TestCreateSharePointCollection_Libraries() {
 		sel.Selector,
 		nil,
 		version.NoBackup,
-		control.Defaults(),
+		control.DefaultOptions(),
 		fault.New(true))
 	require.NoError(t, err, clues.ToCore(err))
 	assert.True(t, canUsePreviousBackup, "can use previous backup")
@@ -414,7 +414,7 @@ func (suite *SPCollectionIntgSuite) TestCreateSharePointCollection_Lists() {
 		siteIDs = []string{siteID}
 	)

-	id, name, err := ctrl.PopulateOwnerIDAndNamesFrom(ctx, siteID, nil)
+	id, name, err := ctrl.PopulateProtectedResourceIDAndName(ctx, siteID, nil)
 	require.NoError(t, err, clues.ToCore(err))

 	sel := selectors.NewSharePointBackup(siteIDs)
@@ -428,7 +428,7 @@ func (suite *SPCollectionIntgSuite) TestCreateSharePointCollection_Lists() {
 		sel.Selector,
 		nil,
 		version.NoBackup,
-		control.Defaults(),
+		control.DefaultOptions(),
 		fault.New(true))
 	require.NoError(t, err, clues.ToCore(err))
 	assert.True(t, canUsePreviousBackup, "can use previous backup")
@@ -24,6 +24,7 @@ import (
 var (
 	_ inject.BackupProducer  = &Controller{}
 	_ inject.RestoreConsumer = &Controller{}
+	_ inject.ExportConsumer  = &Controller{}
 )

 // Controller is a struct used to wrap the GraphServiceClient and
@@ -83,10 +84,11 @@ func NewController(
 		AC:           ac,
 		IDNameLookup: idname.NewCache(nil),

-		credentials: creds,
-		ownerLookup: rCli,
-		tenant:      acct.ID(),
-		wg:          &sync.WaitGroup{},
+		credentials:        creds,
+		ownerLookup:        rCli,
+		tenant:             acct.ID(),
+		wg:                 &sync.WaitGroup{},
+		backupDriveIDNames: idname.NewCache(nil),
 	}

 	return &ctrl, nil
@@ -149,10 +151,6 @@ func (ctrl *Controller) incrementAwaitingMessages() {
 }

 func (ctrl *Controller) CacheItemInfo(dii details.ItemInfo) {
-	if ctrl.backupDriveIDNames == nil {
-		ctrl.backupDriveIDNames = idname.NewCache(map[string]string{})
-	}
-
 	if dii.SharePoint != nil {
 		ctrl.backupDriveIDNames.Add(dii.SharePoint.DriveID, dii.SharePoint.DriveName)
 	}
@@ -248,15 +246,15 @@ func (r resourceClient) getOwnerIDAndNameFrom(
 	return id, name, nil
 }

-// PopulateOwnerIDAndNamesFrom takes the provided owner identifier and produces
+// PopulateProtectedResourceIDAndName takes the provided owner identifier and produces
 // the owner's name and ID from that value. Returns an error if the owner is
 // not recognized by the current tenant.
 //
-// The id-name swapper is optional. Some processes will look up all owners in
+// The id-name cacher is optional. Some processes will look up all owners in
 // the tenant before reaching this step. In that case, the data gets handed
 // down for this func to consume instead of performing further queries. The
 // data gets stored inside the controller instance for later re-use.
-func (ctrl *Controller) PopulateOwnerIDAndNamesFrom(
+func (ctrl *Controller) PopulateProtectedResourceIDAndName(
 	ctx context.Context,
 	owner string, // input value, can be either id or name
 	ins idname.Cacher,
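The rename from PopulateOwnerIDAndNamesFrom makes the intended call pattern clearer. Condensed from the test updates later in this commit (TestBackup_CreatesPrefixCollections), the backup path resolves the user-supplied identifier once, pins both forms on the selector, and wraps the pair in the provider type added to idname; this fragment reuses only calls that appear elsewhere in this diff:

// Resolve whichever form the user supplied (ID or display name) against
// the tenant; errors out if the resource isn't recognized.
id, name, err := backupCtrl.PopulateProtectedResourceIDAndName(ctx, backupSel.DiscreteOwner, nil)
if err != nil {
	return err
}

// Record both forms on the selector so later stages skip the lookup,
// then hand a single id/name pair to the backup producer.
backupSel.SetDiscreteOwnerIDName(id, name)
owner := idname.NewProvider(id, name)

dcs, excludes, canUsePrevious, err := backupCtrl.ProduceBackupCollections(
	ctx, owner, backupSel, nil, version.NoBackup, control.DefaultOptions(), fault.New(true))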
@@ -12,15 +12,18 @@ import (
 	"github.com/stretchr/testify/require"
 	"github.com/stretchr/testify/suite"

+	"github.com/alcionai/corso/src/internal/common/dttm"
+	"github.com/alcionai/corso/src/internal/common/idname"
 	inMock "github.com/alcionai/corso/src/internal/common/idname/mock"
 	"github.com/alcionai/corso/src/internal/data"
 	dataMock "github.com/alcionai/corso/src/internal/data/mock"
 	exchMock "github.com/alcionai/corso/src/internal/m365/exchange/mock"
 	"github.com/alcionai/corso/src/internal/m365/graph"
 	"github.com/alcionai/corso/src/internal/m365/mock"
 	"github.com/alcionai/corso/src/internal/m365/resource"
 	"github.com/alcionai/corso/src/internal/m365/stub"
 	"github.com/alcionai/corso/src/internal/m365/support"
+	"github.com/alcionai/corso/src/internal/operations/inject"
 	"github.com/alcionai/corso/src/internal/tester"
 	"github.com/alcionai/corso/src/internal/tester/tconfig"
 	"github.com/alcionai/corso/src/internal/version"
@@ -223,7 +226,7 @@ func (suite *ControllerUnitSuite) TestPopulateOwnerIDAndNamesFrom() {

 			ctrl := &Controller{ownerLookup: test.rc}

-			rID, rName, err := ctrl.PopulateOwnerIDAndNamesFrom(ctx, test.owner, test.ins)
+			rID, rName, err := ctrl.PopulateProtectedResourceIDAndName(ctx, test.owner, test.ins)
 			test.expectErr(t, err, clues.ToCore(err))
 			assert.Equal(t, test.expectID, rID, "id")
 			assert.Equal(t, test.expectName, rName, "name")
@@ -385,20 +388,24 @@ func (suite *ControllerIntegrationSuite) TestRestoreFailsBadService() {
 		}
 	)

+	restoreCfg.IncludePermissions = true
+
+	rcc := inject.RestoreConsumerConfig{
+		BackupVersion:     version.Backup,
+		Options:           control.DefaultOptions(),
+		ProtectedResource: sel,
+		RestoreConfig:     restoreCfg,
+		Selector:          sel,
+	}
+
 	deets, err := suite.ctrl.ConsumeRestoreCollections(
 		ctx,
-		version.Backup,
-		sel,
-		restoreCfg,
-		control.Options{
-			RestorePermissions: true,
-			ToggleFeatures:     control.Toggles{},
-		},
+		rcc,
 		[]data.RestoreCollection{&dataMock.Collection{}},
 		fault.New(true),
 		count.New())
-	assert.Error(t, err, clues.ToCore(err))
-	assert.NotNil(t, deets)
+	assert.Error(t, err, graph.ErrServiceNotEnabled, clues.ToCore(err))
+	assert.Nil(t, deets)

 	status := suite.ctrl.Wait()
 	assert.Equal(t, 0, status.Objects)
@@ -408,6 +415,8 @@

 func (suite *ControllerIntegrationSuite) TestEmptyCollections() {
 	restoreCfg := testdata.DefaultRestoreConfig("")
+	restoreCfg.IncludePermissions = true
+
 	table := []struct {
 		name string
 		col  []data.RestoreCollection
@@ -464,15 +473,17 @@
 			ctx, flush := tester.NewContext(t)
 			defer flush()

+			rcc := inject.RestoreConsumerConfig{
+				BackupVersion:     version.Backup,
+				Options:           control.DefaultOptions(),
+				ProtectedResource: test.sel,
+				RestoreConfig:     restoreCfg,
+				Selector:          test.sel,
+			}
+
 			deets, err := suite.ctrl.ConsumeRestoreCollections(
 				ctx,
-				version.Backup,
-				test.sel,
-				restoreCfg,
-				control.Options{
-					RestorePermissions: true,
-					ToggleFeatures:     control.Toggles{},
-				},
+				rcc,
 				test.col,
 				fault.New(true),
 				count.New())
@@ -503,12 +514,18 @@ func runRestore(
 	restoreCtrl := newController(ctx, t, sci.Resource, path.ExchangeService)
 	restoreSel := getSelectorWith(t, sci.Service, sci.ResourceOwners, true)

+	rcc := inject.RestoreConsumerConfig{
+		BackupVersion:     backupVersion,
+		Options:           control.DefaultOptions(),
+		ProtectedResource: restoreSel,
+		RestoreConfig:     sci.RestoreCfg,
+		Selector:          restoreSel,
+	}
+
 	deets, err := restoreCtrl.ConsumeRestoreCollections(
 		ctx,
-		backupVersion,
-		restoreSel,
-		sci.RestoreCfg,
-		sci.Opts,
+		rcc,
 		collections,
 		fault.New(true),
 		count.New())
@@ -610,6 +627,7 @@ func runRestoreBackupTest(
 	tenant string,
 	resourceOwners []string,
 	opts control.Options,
+	restoreCfg control.RestoreConfig,
 ) {
 	ctx, flush := tester.NewContext(t)
 	defer flush()
@@ -620,7 +638,7 @@
 		Service:        test.service,
 		Tenant:         tenant,
 		ResourceOwners: resourceOwners,
-		RestoreCfg:     testdata.DefaultRestoreConfig(""),
+		RestoreCfg:     restoreCfg,
 	}

 	totalItems, totalKopiaItems, collections, expectedData, err := stub.GetCollectionsAndExpected(
@@ -655,6 +673,7 @@ func runRestoreTestWithVersion(
 	tenant string,
 	resourceOwners []string,
 	opts control.Options,
+	restoreCfg control.RestoreConfig,
 ) {
 	ctx, flush := tester.NewContext(t)
 	defer flush()
@@ -665,7 +684,7 @@
 		Service:        test.service,
 		Tenant:         tenant,
 		ResourceOwners: resourceOwners,
-		RestoreCfg:     testdata.DefaultRestoreConfig(""),
+		RestoreCfg:     restoreCfg,
 	}

 	totalItems, _, collections, _, err := stub.GetCollectionsAndExpected(
@@ -692,7 +711,7 @@ func runRestoreBackupTestVersions(
 	tenant string,
 	resourceOwners []string,
 	opts control.Options,
-	crc control.RestoreConfig,
+	restoreCfg control.RestoreConfig,
 ) {
 	ctx, flush := tester.NewContext(t)
 	defer flush()
@@ -703,7 +722,7 @@
 		Service:        test.service,
 		Tenant:         tenant,
 		ResourceOwners: resourceOwners,
-		RestoreCfg:     crc,
+		RestoreCfg:     restoreCfg,
 	}

 	totalItems, _, collections, _, err := stub.GetCollectionsAndExpected(
@@ -737,7 +756,7 @@
 		test.collectionsLatest)
 }

-func (suite *ControllerIntegrationSuite) TestRestoreAndBackup() {
+func (suite *ControllerIntegrationSuite) TestRestoreAndBackup_core() {
 	bodyText := "This email has some text. However, all the text is on the same line."
 	subjectText := "Test message for restore"

@@ -996,10 +1015,8 @@
 				test,
 				suite.ctrl.tenant,
 				[]string{suite.user},
-				control.Options{
-					RestorePermissions: true,
-					ToggleFeatures:     control.Toggles{},
-				})
+				control.DefaultOptions(),
+				control.DefaultRestoreConfig(dttm.HumanReadableDriveItem))
 		})
 	}
 }
@@ -1080,6 +1097,8 @@ func (suite *ControllerIntegrationSuite) TestMultiFolderBackupDifferentNames() {
 	for i, collection := range test.collections {
 		// Get a restoreCfg per collection so they're independent.
 		restoreCfg := testdata.DefaultRestoreConfig("")
+		restoreCfg.IncludePermissions = true
+
 		expectedDests = append(expectedDests, destAndCats{
 			resourceOwner: suite.user,
 			dest:          restoreCfg.Location,
@@ -1112,15 +1131,18 @@
 		)

 		restoreCtrl := newController(ctx, t, test.resourceCat, path.ExchangeService)

+		rcc := inject.RestoreConsumerConfig{
+			BackupVersion:     version.Backup,
+			Options:           control.DefaultOptions(),
+			ProtectedResource: restoreSel,
+			RestoreConfig:     restoreCfg,
+			Selector:          restoreSel,
+		}
+
 		deets, err := restoreCtrl.ConsumeRestoreCollections(
 			ctx,
-			version.Backup,
-			restoreSel,
-			restoreCfg,
-			control.Options{
-				RestorePermissions: true,
-				ToggleFeatures:     control.Toggles{},
-			},
+			rcc,
 			collections,
 			fault.New(true),
 			count.New())
@@ -1152,10 +1174,7 @@
 		backupSel,
 		nil,
 		version.NoBackup,
-		control.Options{
-			RestorePermissions: true,
-			ToggleFeatures:     control.Toggles{},
-		},
+		control.DefaultOptions(),
 		fault.New(true))
 	require.NoError(t, err, clues.ToCore(err))
 	assert.True(t, canUsePreviousBackup, "can use previous backup")
@@ -1164,10 +1183,13 @@

 	t.Log("Backup enumeration complete")

+	restoreCfg := control.DefaultRestoreConfig(dttm.HumanReadableDriveItem)
+	restoreCfg.IncludePermissions = true
+
 	ci := stub.ConfigInfo{
-		Opts: control.Options{RestorePermissions: true},
+		Opts: control.DefaultOptions(),
 		// Alright to be empty, needed for OneDrive.
-		RestoreCfg: control.RestoreConfig{},
+		RestoreCfg: restoreCfg,
 	}

 	// Pull the data prior to waiting for the status as otherwise it will
@@ -1205,16 +1227,16 @@ func (suite *ControllerIntegrationSuite) TestRestoreAndBackup_largeMailAttachmen
 		},
 	}

+	restoreCfg := control.DefaultRestoreConfig(dttm.HumanReadableDriveItem)
+	restoreCfg.IncludePermissions = true
+
 	runRestoreBackupTest(
 		suite.T(),
 		test,
 		suite.ctrl.tenant,
 		[]string{suite.user},
-		control.Options{
-			RestorePermissions: true,
-			ToggleFeatures:     control.Toggles{},
-		},
-	)
+		control.DefaultOptions(),
+		restoreCfg)
 }

 func (suite *ControllerIntegrationSuite) TestBackup_CreatesPrefixCollections() {
@@ -1233,8 +1255,7 @@
 			sel.Include(
 				sel.ContactFolders([]string{selectors.NoneTgt}),
 				sel.EventCalendars([]string{selectors.NoneTgt}),
-				sel.MailFolders([]string{selectors.NoneTgt}),
-			)
+				sel.MailFolders([]string{selectors.NoneTgt}))

 			return sel.Selector
 		},
@@ -1297,23 +1318,20 @@
 		start = time.Now()
 	)

-	id, name, err := backupCtrl.PopulateOwnerIDAndNamesFrom(ctx, backupSel.DiscreteOwner, nil)
+	id, name, err := backupCtrl.PopulateProtectedResourceIDAndName(ctx, backupSel.DiscreteOwner, nil)
 	require.NoError(t, err, clues.ToCore(err))

 	backupSel.SetDiscreteOwnerIDName(id, name)

 	dcs, excludes, canUsePreviousBackup, err := backupCtrl.ProduceBackupCollections(
 		ctx,
-		inMock.NewProvider(id, name),
+		idname.NewProvider(id, name),
 		backupSel,
 		nil,
 		version.NoBackup,
-		control.Options{
-			RestorePermissions: false,
-			ToggleFeatures:     control.Toggles{},
-		},
+		control.DefaultOptions(),
 		fault.New(true))
-	require.NoError(t, err)
+	require.NoError(t, err, clues.ToCore(err))
 	assert.True(t, canUsePreviousBackup, "can use previous backup")
 	// No excludes yet because this isn't an incremental backup.
 	assert.True(t, excludes.Empty())
@@ -414,7 +414,7 @@ func (suite *BackupIntgSuite) SetupSuite() {
 	creds, err := acct.M365Config()
 	require.NoError(t, err, clues.ToCore(err))

-	suite.ac, err = api.NewClient(creds, control.Defaults())
+	suite.ac, err = api.NewClient(creds, control.DefaultOptions())
 	require.NoError(t, err, clues.ToCore(err))

 	suite.tenantID = creds.AzureTenantID
@@ -466,7 +466,7 @@ func (suite *BackupIntgSuite) TestMailFetch() {
 	ctx, flush := tester.NewContext(t)
 	defer flush()

-	ctrlOpts := control.Defaults()
+	ctrlOpts := control.DefaultOptions()
 	ctrlOpts.ToggleFeatures.DisableDelta = !test.canMakeDeltaQueries

 	collections, err := createCollections(
@@ -554,7 +554,7 @@ func (suite *BackupIntgSuite) TestDelta() {
 		inMock.NewProvider(userID, userID),
 		test.scope,
 		DeltaPaths{},
-		control.Defaults(),
+		control.DefaultOptions(),
 		func(status *support.ControllerOperationStatus) {},
 		fault.New(true))
 	require.NoError(t, err, clues.ToCore(err))
@@ -587,7 +587,7 @@ func (suite *BackupIntgSuite) TestDelta() {
 		inMock.NewProvider(userID, userID),
 		test.scope,
 		dps,
-		control.Defaults(),
+		control.DefaultOptions(),
 		func(status *support.ControllerOperationStatus) {},
 		fault.New(true))
 	require.NoError(t, err, clues.ToCore(err))
@@ -633,7 +633,7 @@ func (suite *BackupIntgSuite) TestMailSerializationRegression() {
 		inMock.NewProvider(suite.user, suite.user),
 		sel.Scopes()[0],
 		DeltaPaths{},
-		control.Defaults(),
+		control.DefaultOptions(),
 		newStatusUpdater(t, &wg),
 		fault.New(true))
 	require.NoError(t, err, clues.ToCore(err))
@@ -709,7 +709,7 @@ func (suite *BackupIntgSuite) TestContactSerializationRegression() {
 		inMock.NewProvider(suite.user, suite.user),
 		test.scope,
 		DeltaPaths{},
-		control.Defaults(),
+		control.DefaultOptions(),
 		newStatusUpdater(t, &wg),
 		fault.New(true))
 	require.NoError(t, err, clues.ToCore(err))
@@ -834,7 +834,7 @@ func (suite *BackupIntgSuite) TestEventsSerializationRegression() {
 		inMock.NewProvider(suite.user, suite.user),
 		test.scope,
 		DeltaPaths{},
-		control.Defaults(),
+		control.DefaultOptions(),
 		newStatusUpdater(t, &wg),
 		fault.New(true))
 	require.NoError(t, err, clues.ToCore(err))
@@ -1995,7 +1995,7 @@ func (suite *CollectionPopulationSuite) TestFilterContainersAndFillCollections_i
 	ctx, flush := tester.NewContext(t)
 	defer flush()

-	ctrlOpts := control.Defaults()
+	ctrlOpts := control.DefaultOptions()
 	ctrlOpts.ToggleFeatures.DisableDelta = !deltaAfter

 	getter := test.getter

@@ -178,7 +178,7 @@ func (suite *CollectionSuite) TestNewCollection_state() {
 		test.curr, test.prev, test.loc,
 		0,
 		&mockItemer{}, nil,
-		control.Defaults(),
+		control.DefaultOptions(),
 		false)
 	assert.Equal(t, test.expect, c.State(), "collection state")
 	assert.Equal(t, test.curr, c.fullPath, "full path")

@@ -699,7 +699,7 @@ func (suite *ContainerResolverSuite) SetupSuite() {
 }

 func (suite *ContainerResolverSuite) TestPopulate() {
-	ac, err := api.NewClient(suite.credentials, control.Defaults())
+	ac, err := api.NewClient(suite.credentials, control.DefaultOptions())
 	require.NoError(suite.T(), err, clues.ToCore(err))

 	eventFunc := func(t *testing.T) graph.ContainerResolver {
@@ -31,7 +31,7 @@ func newIntegrationTesterSetup(t *testing.T) intgTesterSetup {

 	its.creds = creds

-	its.ac, err = api.NewClient(creds, control.Defaults())
+	its.ac, err = api.NewClient(creds, control.DefaultOptions())
 	require.NoError(t, err, clues.ToCore(err))

 	its.userID = tconfig.GetM365UserID(ctx)

@@ -84,7 +84,7 @@ func (suite *MailFolderCacheIntegrationSuite) TestDeltaFetch() {
 	ctx, flush := tester.NewContext(t)
 	defer flush()

-	ac, err := api.NewClient(suite.credentials, control.Defaults())
+	ac, err := api.NewClient(suite.credentials, control.DefaultOptions())
 	require.NoError(t, err, clues.ToCore(err))

 	acm := ac.Mail()
@@ -14,6 +14,7 @@ import (
 	"github.com/alcionai/corso/src/internal/m365/graph"
 	"github.com/alcionai/corso/src/internal/m365/support"
 	"github.com/alcionai/corso/src/internal/observe"
+	"github.com/alcionai/corso/src/internal/operations/inject"
 	"github.com/alcionai/corso/src/pkg/backup/details"
 	"github.com/alcionai/corso/src/pkg/control"
 	"github.com/alcionai/corso/src/pkg/count"
@@ -28,7 +29,7 @@
 func ConsumeRestoreCollections(
 	ctx context.Context,
 	ac api.Client,
-	restoreCfg control.RestoreConfig,
+	rcc inject.RestoreConsumerConfig,
 	dcs []data.RestoreCollection,
 	deets *details.Builder,
 	errs *fault.Bus,
@@ -39,16 +40,13 @@
 	}

 	var (
-		userID         = dcs[0].FullPath().ResourceOwner()
+		resourceID     = rcc.ProtectedResource.ID()
 		directoryCache = make(map[path.CategoryType]graph.ContainerResolver)
 		handlers       = restoreHandlers(ac)
 		metrics        support.CollectionMetrics
 		el             = errs.Local()
 	)

-	// FIXME: should be user name
-	ctx = clues.Add(ctx, "resource_owner", clues.Hide(userID))
-
 	for _, dc := range dcs {
 		if el.Failure() != nil {
 			break
@@ -69,7 +67,7 @@
 		}

 		if directoryCache[category] == nil {
-			gcr := handler.newContainerCache(userID)
+			gcr := handler.newContainerCache(resourceID)
 			if err := gcr.Populate(ctx, errs, handler.defaultRootContainer()); err != nil {
 				return nil, clues.Wrap(err, "populating container cache")
 			}
@@ -80,8 +78,8 @@
 		containerID, gcc, err := createDestination(
 			ictx,
 			handler,
-			handler.formatRestoreDestination(restoreCfg.Location, dc.FullPath()),
-			userID,
+			handler.formatRestoreDestination(rcc.RestoreConfig.Location, dc.FullPath()),
+			resourceID,
 			directoryCache[category],
 			errs)
 		if err != nil {
@@ -92,7 +90,7 @@
 		directoryCache[category] = gcc
 		ictx = clues.Add(ictx, "restore_destination_id", containerID)

-		collisionKeyToItemID, err := handler.getItemsInContainerByCollisionKey(ctx, userID, containerID)
+		collisionKeyToItemID, err := handler.getItemsInContainerByCollisionKey(ctx, resourceID, containerID)
 		if err != nil {
 			el.AddRecoverable(ctx, clues.Wrap(err, "building item collision cache"))
 			continue
@@ -102,10 +100,10 @@
 			ictx,
 			handler,
 			dc,
-			userID,
+			resourceID,
 			containerID,
 			collisionKeyToItemID,
-			restoreCfg.OnCollision,
+			rcc.RestoreConfig.OnCollision,
 			deets,
 			errs,
 			ctr)
@@ -126,7 +124,7 @@
 		support.Restore,
 		len(dcs),
 		metrics,
-		restoreCfg.Location)
+		rcc.RestoreConfig.Location)

 	return status, el.Failure()
 }
@@ -136,7 +134,7 @@ func restoreCollection(
 	ctx context.Context,
 	ir itemRestorer,
 	dc data.RestoreCollection,
-	userID, destinationID string,
+	resourceID, destinationID string,
 	collisionKeyToItemID map[string]string,
 	collisionPolicy control.CollisionPolicy,
 	deets *details.Builder,
@@ -187,7 +185,7 @@
 	info, err := ir.restore(
 		ictx,
 		body,
-		userID,
+		resourceID,
 		destinationID,
 		collisionKeyToItemID,
 		collisionPolicy,
@@ -240,7 +238,7 @@ func createDestination(
 	ctx context.Context,
 	ca containerAPI,
 	destination *path.Builder,
-	userID string,
+	resourceID string,
 	gcr graph.ContainerResolver,
 	errs *fault.Bus,
 ) (string, graph.ContainerResolver, error) {
@@ -264,7 +262,7 @@
 		ca,
 		cache,
 		restoreLoc,
-		userID,
+		resourceID,
 		containerParentID,
 		container,
 		errs)
@@ -285,7 +283,7 @@ func getOrPopulateContainer(
 	ca containerAPI,
 	gcr graph.ContainerResolver,
 	restoreLoc *path.Builder,
-	userID, containerParentID, containerName string,
+	resourceID, containerParentID, containerName string,
 	errs *fault.Bus,
 ) (string, error) {
 	cached, ok := gcr.LocationInCache(restoreLoc.String())
@@ -293,7 +291,7 @@
 		return cached, nil
 	}

-	c, err := ca.CreateContainer(ctx, userID, containerParentID, containerName)
+	c, err := ca.CreateContainer(ctx, resourceID, containerParentID, containerName)

 	// 409 handling case:
 	// attempt to fetch the container by name and add that result to the cache.
@@ -301,7 +299,7 @@
 	// sometimes the backend will create the folder despite the 5xx response,
 	// leaving our local containerResolver with inconsistent state.
 	if graph.IsErrFolderExists(err) {
-		cc, e := ca.GetContainerByName(ctx, userID, containerParentID, containerName)
+		cc, e := ca.GetContainerByName(ctx, resourceID, containerParentID, containerName)
 		if e != nil {
 			err = clues.Stack(err, e)
 		} else {
@@ -327,7 +325,7 @@ func uploadAttachments(
 	ctx context.Context,
 	ap attachmentPoster,
 	as []models.Attachmentable,
-	userID, destinationID, itemID string,
+	resourceID, destinationID, itemID string,
 	errs *fault.Bus,
 ) error {
 	el := errs.Local()
@@ -340,7 +338,7 @@
 	err := uploadAttachment(
 		ctx,
 		ap,
-		userID,
+		resourceID,
 		destinationID,
 		itemID,
 		a)

@@ -44,7 +44,7 @@ func (suite *RestoreIntgSuite) SetupSuite() {
 	require.NoError(t, err, clues.ToCore(err))

 	suite.credentials = m365
-	suite.ac, err = api.NewClient(m365, control.Defaults())
+	suite.ac, err = api.NewClient(m365, control.DefaultOptions())
 	require.NoError(t, err, clues.ToCore(err))
 }
61
src/internal/m365/export.go
Normal file
61
src/internal/m365/export.go
Normal file
@ -0,0 +1,61 @@
package m365

import (
	"context"

	"github.com/alcionai/clues"

	"github.com/alcionai/corso/src/internal/data"
	"github.com/alcionai/corso/src/internal/diagnostics"
	"github.com/alcionai/corso/src/internal/m365/graph"
	"github.com/alcionai/corso/src/internal/m365/onedrive"
	"github.com/alcionai/corso/src/internal/m365/support"
	"github.com/alcionai/corso/src/pkg/backup/details"
	"github.com/alcionai/corso/src/pkg/control"
	"github.com/alcionai/corso/src/pkg/export"
	"github.com/alcionai/corso/src/pkg/fault"
	"github.com/alcionai/corso/src/pkg/selectors"
)

// ProduceExportCollections exports data from the specified collections
func (ctrl *Controller) ProduceExportCollections(
	ctx context.Context,
	backupVersion int,
	sels selectors.Selector,
	exportCfg control.ExportConfig,
	opts control.Options,
	dcs []data.RestoreCollection,
	errs *fault.Bus,
) ([]export.Collection, error) {
	ctx, end := diagnostics.Span(ctx, "m365:export")
	defer end()

	ctx = graph.BindRateLimiterConfig(ctx, graph.LimiterCfg{Service: sels.PathService()})
	ctx = clues.Add(ctx, "export_config", exportCfg) // TODO(meain): needs PII control

	var (
		expCollections []export.Collection
		status         *support.ControllerOperationStatus
		deets          = &details.Builder{}
		err            error
	)

	switch sels.Service {
	case selectors.ServiceOneDrive:
		expCollections, err = onedrive.ProduceExportCollections(
			ctx,
			backupVersion,
			exportCfg,
			opts,
			dcs,
			deets,
			errs)
	default:
		err = clues.Wrap(clues.New(sels.Service.String()), "service not supported")
	}

	ctrl.incrementAwaitingMessages()
	ctrl.UpdateStatus(status)

	return expCollections, err
}
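
A minimal consumer sketch (an assumed caller, not part of this diff) of the channel-based API above: each collection's Items channel must be drained fully so the producing goroutine can exit, and per-item failures arrive on the item itself rather than aborting the stream.

	ecs, err := ctrl.ProduceExportCollections(
		ctx, backupVersion, sels, exportCfg, opts, dcs, errs)
	if err != nil {
		return err
	}

	for _, ec := range ecs {
		for item := range ec.Items(ctx) {
			if item.Error != nil {
				continue // hypothetical handling; record and move on
			}
			// item.Data.Name and item.Data.Body carry the exported payload
		}
	}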
@ -796,8 +796,8 @@ func compareDriveItem(
		assert.Equal(t, expectedMeta.FileName, itemMeta.FileName)
	}

	if !mci.Opts.RestorePermissions {
		assert.Equal(t, 0, len(itemMeta.Permissions))
	if !mci.RestoreCfg.IncludePermissions {
		assert.Empty(t, itemMeta.Permissions, "no permissions should be included in restore")
		return true
	}

@ -10,6 +10,7 @@ import (
	"github.com/alcionai/corso/src/pkg/backup/details"
	"github.com/alcionai/corso/src/pkg/control"
	"github.com/alcionai/corso/src/pkg/count"
	"github.com/alcionai/corso/src/pkg/export"
	"github.com/alcionai/corso/src/pkg/fault"
	"github.com/alcionai/corso/src/pkg/path"
	"github.com/alcionai/corso/src/pkg/selectors"
@ -26,6 +27,10 @@ type Controller struct {
	Err error

	Stats data.CollectionStats

	ProtectedResourceID   string
	ProtectedResourceName string
	ProtectedResourceErr  error
}

func (ctrl Controller) ProduceBackupCollections(
@ -59,10 +64,7 @@ func (ctrl Controller) Wait() *data.CollectionStats {

func (ctrl Controller) ConsumeRestoreCollections(
	_ context.Context,
	_ int,
	_ selectors.Selector,
	_ control.RestoreConfig,
	_ control.Options,
	_ inject.RestoreConsumerConfig,
	_ []data.RestoreCollection,
	_ *fault.Bus,
	_ *count.Bus,
@ -71,3 +73,25 @@ func (ctrl Controller) ConsumeRestoreCollections(
}

func (ctrl Controller) CacheItemInfo(dii details.ItemInfo) {}

func (ctrl Controller) ProduceExportCollections(
	_ context.Context,
	_ int,
	_ selectors.Selector,
	_ control.ExportConfig,
	_ control.Options,
	_ []data.RestoreCollection,
	_ *fault.Bus,
) ([]export.Collection, error) {
	return nil, ctrl.Err
}

func (ctrl Controller) PopulateProtectedResourceIDAndName(
	ctx context.Context,
	protectedResource string, // input value, can be either id or name
	ins idname.Cacher,
) (string, string, error) {
	return ctrl.ProtectedResourceID,
		ctrl.ProtectedResourceName,
		ctrl.ProtectedResourceErr
}

@ -945,7 +945,7 @@ func (suite *CollectionUnitTestSuite) TestItemExtensions() {
			nil,
		}

		opts := control.Defaults()
		opts := control.DefaultOptions()
		opts.ItemExtensionFactory = append(
			opts.ItemExtensionFactory,
			test.factories...)

166 src/internal/m365/onedrive/export.go (new file)
@ -0,0 +1,166 @@
package onedrive

import (
	"context"
	"strings"

	"github.com/alcionai/clues"

	"github.com/alcionai/corso/src/internal/data"
	"github.com/alcionai/corso/src/internal/m365/onedrive/metadata"
	"github.com/alcionai/corso/src/internal/version"
	"github.com/alcionai/corso/src/pkg/backup/details"
	"github.com/alcionai/corso/src/pkg/control"
	"github.com/alcionai/corso/src/pkg/export"
	"github.com/alcionai/corso/src/pkg/fault"
	"github.com/alcionai/corso/src/pkg/path"
)

var _ export.Collection = &exportCollection{}

// exportCollection is the implementation of export.Collection for OneDrive.
type exportCollection struct {
	// baseDir contains the path of the collection
	baseDir string

	// backingCollection is the restore collection from which we
	// create the export collection.
	backingCollection data.RestoreCollection

	// backupVersion is the version of the backup this collection was
	// part of. This is required to figure out how to get the name of
	// the item.
	backupVersion int
}

func (ec exportCollection) BasePath() string {
	return ec.baseDir
}

func (ec exportCollection) Items(ctx context.Context) <-chan export.Item {
	ch := make(chan export.Item)
	go items(ctx, ec, ch)

	return ch
}

// items converts items in the backing collection to export items.
func items(ctx context.Context, ec exportCollection, ch chan<- export.Item) {
	defer close(ch)

	errs := fault.New(false)

	// There will only be a single item in the backingCollection
	// for OneDrive.
	for item := range ec.backingCollection.Items(ctx, errs) {
		itemUUID := item.UUID()
		if isMetadataFile(itemUUID, ec.backupVersion) {
			continue
		}

		name, err := getItemName(ctx, itemUUID, ec.backupVersion, ec.backingCollection)

		ch <- export.Item{
			ID: itemUUID,
			Data: export.ItemData{
				Name: name,
				Body: item.ToReader(),
			},
			Error: err,
		}
	}

	eitems, erecoverable := errs.ItemsAndRecovered()

	// Return all the items that we failed to get from kopia at the end.
	for _, err := range eitems {
		ch <- export.Item{
			ID:    err.ID,
			Error: &err,
		}
	}

	for _, ec := range erecoverable {
		ch <- export.Item{
			Error: ec,
		}
	}
}

// isMetadataFile is used to determine if a path corresponds to a
// metadata file. This is OneDrive-specific logic and depends on the
// version of the backup, unlike metadata.IsMetadataFile, which only
// has to be concerned with the current version.
func isMetadataFile(id string, backupVersion int) bool {
	if backupVersion < version.OneDrive1DataAndMetaFiles {
		return false
	}

	return strings.HasSuffix(id, metadata.MetaFileSuffix) ||
		strings.HasSuffix(id, metadata.DirMetaFileSuffix)
}

// getItemName is used to get the name of the item.
// How we get the name depends on the version of the backup.
func getItemName(
	ctx context.Context,
	id string,
	backupVersion int,
	fin data.FetchItemByNamer,
) (string, error) {
	if backupVersion < version.OneDrive1DataAndMetaFiles {
		return id, nil
	}

	if backupVersion < version.OneDrive5DirMetaNoName {
		return strings.TrimSuffix(id, metadata.DataFileSuffix), nil
	}

	if strings.HasSuffix(id, metadata.DataFileSuffix) {
		trimmedName := strings.TrimSuffix(id, metadata.DataFileSuffix)
		metaName := trimmedName + metadata.MetaFileSuffix

		meta, err := fetchAndReadMetadata(ctx, fin, metaName)
		if err != nil {
			return "", clues.Wrap(err, "getting metadata").WithClues(ctx)
		}

		return meta.FileName, nil
	}

	return "", clues.New("invalid item id").WithClues(ctx)
}
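
// A hypothetical walkthrough of the regimes above (ids are illustrative,
// not drawn from a real backup):
//   backupVersion < OneDrive1DataAndMetaFiles: "report.docx"      -> "report.docx"
//   backupVersion < OneDrive5DirMetaNoName:    "report.docx.data" -> "report.docx"
//   otherwise:                                 "<itemID>.data"    -> FileName read from "<itemID>.meta"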

// ProduceExportCollections will create the export collections for the
// given restore collections.
func ProduceExportCollections(
	ctx context.Context,
	backupVersion int,
	exportCfg control.ExportConfig,
	opts control.Options,
	dcs []data.RestoreCollection,
	deets *details.Builder,
	errs *fault.Bus,
) ([]export.Collection, error) {
	var (
		el = errs.Local()
		ec = make([]export.Collection, 0, len(dcs))
	)

	for _, dc := range dcs {
		drivePath, err := path.ToDrivePath(dc.FullPath())
		if err != nil {
			return nil, clues.Wrap(err, "transforming path to drive path").WithClues(ctx)
		}

		baseDir := path.Builder{}.Append(drivePath.Folders...)

		ec = append(ec, exportCollection{
			baseDir:           baseDir.String(),
			backingCollection: dc,
			backupVersion:     backupVersion,
		})
	}

	return ec, el.Failure()
}
463 src/internal/m365/onedrive/export_test.go (new file)
@ -0,0 +1,463 @@
package onedrive

import (
	"bytes"
	"context"
	"io"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/suite"

	"github.com/alcionai/corso/src/internal/data"
	odConsts "github.com/alcionai/corso/src/internal/m365/onedrive/consts"
	"github.com/alcionai/corso/src/internal/m365/onedrive/metadata"
	"github.com/alcionai/corso/src/internal/tester"
	"github.com/alcionai/corso/src/internal/version"
	"github.com/alcionai/corso/src/pkg/control"
	"github.com/alcionai/corso/src/pkg/export"
	"github.com/alcionai/corso/src/pkg/fault"
	"github.com/alcionai/corso/src/pkg/path"
)

type ExportUnitSuite struct {
	tester.Suite
}

func TestExportUnitSuite(t *testing.T) {
	suite.Run(t, &ExportUnitSuite{Suite: tester.NewUnitSuite(t)})
}

func (suite *ExportUnitSuite) TestIsMetadataFile() {
	table := []struct {
		name          string
		id            string
		backupVersion int
		isMeta        bool
	}{
		{
			name:          "legacy",
			backupVersion: version.OneDrive1DataAndMetaFiles,
			isMeta:        false,
		},
		{
			name:          "metadata file",
			backupVersion: version.OneDrive3IsMetaMarker,
			id:            "name" + metadata.MetaFileSuffix,
			isMeta:        true,
		},
		{
			name:          "dir metadata file",
			backupVersion: version.OneDrive3IsMetaMarker,
			id:            "name" + metadata.DirMetaFileSuffix,
			isMeta:        true,
		},
		{
			name:          "non metadata file",
			backupVersion: version.OneDrive3IsMetaMarker,
			id:            "name" + metadata.DataFileSuffix,
			isMeta:        false,
		},
	}

	for _, test := range table {
		suite.Run(test.name, func() {
			assert.Equal(suite.T(), test.isMeta, isMetadataFile(test.id, test.backupVersion), "is metadata")
		})
	}
}

type metadataStream struct {
	id   string
	name string
}

func (ms metadataStream) ToReader() io.ReadCloser {
	return io.NopCloser(bytes.NewBufferString(`{"filename": "` + ms.name + `"}`))
}
func (ms metadataStream) UUID() string  { return ms.id }
func (ms metadataStream) Deleted() bool { return false }

type finD struct {
	id   string
	name string
	err  error
}

func (fd finD) FetchItemByName(ctx context.Context, name string) (data.Stream, error) {
	if fd.err != nil {
		return nil, fd.err
	}

	if name == fd.id {
		return metadataStream{id: fd.id, name: fd.name}, nil
	}

	return nil, assert.AnError
}

func (suite *ExportUnitSuite) TestGetItemName() {
	table := []struct {
		tname         string
		id            string
		backupVersion int
		name          string
		fin           data.FetchItemByNamer
		errFunc       assert.ErrorAssertionFunc
	}{
		{
			tname:         "legacy",
			id:            "name",
			backupVersion: version.OneDrive1DataAndMetaFiles,
			name:          "name",
			errFunc:       assert.NoError,
		},
		{
			tname:         "name in filename",
			id:            "name.data",
			backupVersion: version.OneDrive4DirIncludesPermissions,
			name:          "name",
			errFunc:       assert.NoError,
		},
		{
			tname:         "name in metadata",
			id:            "id.data",
			backupVersion: version.Backup,
			name:          "name",
			fin:           finD{id: "id.meta", name: "name"},
			errFunc:       assert.NoError,
		},
		{
			tname:         "name in metadata but error",
			id:            "id.data",
			backupVersion: version.Backup,
			name:          "",
			fin:           finD{err: assert.AnError},
			errFunc:       assert.Error,
		},
	}

	for _, test := range table {
		suite.Run(test.tname, func() {
			t := suite.T()

			ctx, flush := tester.NewContext(t)
			defer flush()

			name, err := getItemName(
				ctx,
				test.id,
				test.backupVersion,
				test.fin,
			)
			test.errFunc(t, err)

			assert.Equal(t, test.name, name, "name")
		})
	}
}

type mockRestoreCollection struct {
	path  path.Path
	items []mockDataStream
}

func (rc mockRestoreCollection) Items(ctx context.Context, errs *fault.Bus) <-chan data.Stream {
	ch := make(chan data.Stream)

	go func() {
		defer close(ch)

		el := errs.Local()

		for _, item := range rc.items {
			if item.err != nil {
				el.AddRecoverable(ctx, item.err)
				continue
			}

			ch <- item
		}
	}()

	return ch
}

func (rc mockRestoreCollection) FullPath() path.Path {
	return rc.path
}

type mockDataStream struct {
	id   string
	data string
	err  error
}

func (ms mockDataStream) ToReader() io.ReadCloser {
	if ms.data != "" {
		return io.NopCloser(bytes.NewBufferString(ms.data))
	}

	return nil
}
func (ms mockDataStream) UUID() string  { return ms.id }
func (ms mockDataStream) Deleted() bool { return false }

func (suite *ExportUnitSuite) TestGetItems() {
	table := []struct {
		name              string
		version           int
		backingCollection data.RestoreCollection
		expectedItems     []export.Item
	}{
		{
			name:    "single item",
			version: 1,
			backingCollection: data.NoFetchRestoreCollection{
				Collection: mockRestoreCollection{
					items: []mockDataStream{
						{id: "name1", data: "body1"},
					},
				},
			},
			expectedItems: []export.Item{
				{
					ID: "name1",
					Data: export.ItemData{
						Name: "name1",
						Body: io.NopCloser(bytes.NewBufferString("body1")),
					},
				},
			},
		},
		{
			name:    "multiple items",
			version: 1,
			backingCollection: data.NoFetchRestoreCollection{
				Collection: mockRestoreCollection{
					items: []mockDataStream{
						{id: "name1", data: "body1"},
						{id: "name2", data: "body2"},
					},
				},
			},
			expectedItems: []export.Item{
				{
					ID: "name1",
					Data: export.ItemData{
						Name: "name1",
						Body: io.NopCloser(bytes.NewBufferString("body1")),
					},
				},
				{
					ID: "name2",
					Data: export.ItemData{
						Name: "name2",
						Body: io.NopCloser(bytes.NewBufferString("body2")),
					},
				},
			},
		},
		{
			name:    "single item with data suffix",
			version: 2,
			backingCollection: data.NoFetchRestoreCollection{
				Collection: mockRestoreCollection{
					items: []mockDataStream{
						{id: "name1.data", data: "body1"},
					},
				},
			},
			expectedItems: []export.Item{
				{
					ID: "name1.data",
					Data: export.ItemData{
						Name: "name1",
						Body: io.NopCloser(bytes.NewBufferString("body1")),
					},
				},
			},
		},
		{
			name:    "single item name from metadata",
			version: version.Backup,
			backingCollection: data.FetchRestoreCollection{
				Collection: mockRestoreCollection{
					items: []mockDataStream{
						{id: "id1.data", data: "body1"},
					},
				},
				FetchItemByNamer: finD{id: "id1.meta", name: "name1"},
			},
			expectedItems: []export.Item{
				{
					ID: "id1.data",
					Data: export.ItemData{
						Name: "name1",
						Body: io.NopCloser(bytes.NewBufferString("body1")),
					},
				},
			},
		},
		{
			name:    "single item name from metadata with error",
			version: version.Backup,
			backingCollection: data.FetchRestoreCollection{
				Collection: mockRestoreCollection{
					items: []mockDataStream{
						{id: "id1.data"},
					},
				},
				FetchItemByNamer: finD{err: assert.AnError},
			},
			expectedItems: []export.Item{
				{
					ID:    "id1.data",
					Error: assert.AnError,
				},
			},
		},
		{
			name:    "items with success and metadata read error",
			version: version.Backup,
			backingCollection: data.FetchRestoreCollection{
				Collection: mockRestoreCollection{
					items: []mockDataStream{
						{id: "missing.data"},
						{id: "id1.data", data: "body1"},
					},
				},
				FetchItemByNamer: finD{id: "id1.meta", name: "name1"},
			},
			expectedItems: []export.Item{
				{
					ID:    "missing.data",
					Error: assert.AnError,
				},
				{
					ID: "id1.data",
					Data: export.ItemData{
						Name: "name1",
						Body: io.NopCloser(bytes.NewBufferString("body1")),
					},
				},
			},
		},
		{
			name:    "items with success and fetch error",
			version: version.OneDrive1DataAndMetaFiles,
			backingCollection: data.FetchRestoreCollection{
				Collection: mockRestoreCollection{
					items: []mockDataStream{
						{id: "name0", data: "body0"},
						{id: "name1", err: assert.AnError},
						{id: "name2", data: "body2"},
					},
				},
			},
			expectedItems: []export.Item{
				{
					ID: "name0",
					Data: export.ItemData{
						Name: "name0",
						Body: io.NopCloser(bytes.NewBufferString("body0")),
					},
				},
				{
					ID: "name2",
					Data: export.ItemData{
						Name: "name2",
						Body: io.NopCloser(bytes.NewBufferString("body2")),
					},
				},
				{
					ID:    "",
					Error: assert.AnError,
				},
			},
		},
	}

	for _, test := range table {
		suite.Run(test.name, func() {
			t := suite.T()

			ctx, flush := tester.NewContext(t)
			defer flush()

			ec := exportCollection{
				baseDir:           "",
				backingCollection: test.backingCollection,
				backupVersion:     test.version,
			}

			items := ec.Items(ctx)

			fitems := []export.Item{}
			for item := range items {
				fitems = append(fitems, item)
			}

			assert.Len(t, fitems, len(test.expectedItems), "num of items")

			// We do not have any guarantees about the ordering of the
			// items from the SDK, but leaving the test this way for now
			// to simplify testing.
			for i, item := range fitems {
				assert.Equal(t, test.expectedItems[i].ID, item.ID, "id")
				assert.Equal(t, test.expectedItems[i].Data.Name, item.Data.Name, "name")
				assert.Equal(t, test.expectedItems[i].Data.Body, item.Data.Body, "body")
				assert.ErrorIs(t, item.Error, test.expectedItems[i].Error)
			}
		})
	}
}

func (suite *ExportUnitSuite) TestExportRestoreCollections() {
	t := suite.T()

	ctx, flush := tester.NewContext(t)
	defer flush()

	dpb := odConsts.DriveFolderPrefixBuilder("driveID1")

	p, err := dpb.ToDataLayerOneDrivePath("t", "u", false)
	assert.NoError(t, err, "build path")

	dcs := []data.RestoreCollection{
		data.FetchRestoreCollection{
			Collection: mockRestoreCollection{
				path: p,
				items: []mockDataStream{
					{id: "id1.data", data: "body1"},
				},
			},
			FetchItemByNamer: finD{id: "id1.meta", name: "name1"},
		},
	}

	expectedItems := []export.Item{
		{
			ID: "id1.data",
			Data: export.ItemData{
				Name: "name1",
				Body: io.NopCloser(bytes.NewBufferString("body1")),
			},
		},
	}

	exportCfg := control.ExportConfig{}
	ecs, err := ProduceExportCollections(ctx, int(version.Backup), exportCfg, control.Options{}, dcs, nil, fault.New(true))
	assert.NoError(t, err, "export collections error")

	assert.Len(t, ecs, 1, "num of collections")

	items := ecs[0].Items(ctx)

	fitems := []export.Item{}
	for item := range items {
		fitems = append(fitems, item)
	}

	assert.Equal(t, expectedItems, fitems, "items")
}
@ -313,7 +313,7 @@ func (suite *OneDriveIntgSuite) SetupSuite() {

	suite.creds = creds

	suite.ac, err = api.NewClient(creds, control.Defaults())
	suite.ac, err = api.NewClient(creds, control.DefaultOptions())
	require.NoError(t, err, clues.ToCore(err))
}

@ -23,6 +23,7 @@ import (
	"github.com/alcionai/corso/src/internal/m365/onedrive/metadata"
	"github.com/alcionai/corso/src/internal/m365/support"
	"github.com/alcionai/corso/src/internal/observe"
	"github.com/alcionai/corso/src/internal/operations/inject"
	"github.com/alcionai/corso/src/internal/version"
	"github.com/alcionai/corso/src/pkg/backup/details"
	"github.com/alcionai/corso/src/pkg/control"
@ -38,114 +39,11 @@ const (
	maxUploadRetries = 3
)

type driveInfo struct {
	id           string
	name         string
	rootFolderID string
}

type restoreCaches struct {
	BackupDriveIDName     idname.Cacher
	collisionKeyToItemID  map[string]api.DriveItemIDType
	DriveIDToDriveInfo    map[string]driveInfo
	DriveNameToDriveInfo  map[string]driveInfo
	Folders               *folderCache
	OldLinkShareIDToNewID map[string]string
	OldPermIDToNewID      map[string]string
	ParentDirToMeta       map[string]metadata.Metadata

	pool sync.Pool
}

func (rc *restoreCaches) AddDrive(
	ctx context.Context,
	md models.Driveable,
	grf GetRootFolderer,
) error {
	di := driveInfo{
		id:   ptr.Val(md.GetId()),
		name: ptr.Val(md.GetName()),
	}

	ctx = clues.Add(ctx, "drive_info", di)

	root, err := grf.GetRootFolder(ctx, di.id)
	if err != nil {
		return clues.Wrap(err, "getting drive root id")
	}

	di.rootFolderID = ptr.Val(root.GetId())

	rc.DriveIDToDriveInfo[di.id] = di
	rc.DriveNameToDriveInfo[di.name] = di

	return nil
}

// Populate looks up drive items available to the protectedResource
// and adds their info to the caches.
func (rc *restoreCaches) Populate(
	ctx context.Context,
	gdparf GetDrivePagerAndRootFolderer,
	protectedResourceID string,
) error {
	drives, err := api.GetAllDrives(
		ctx,
		gdparf.NewDrivePager(protectedResourceID, nil),
		true,
		maxDrivesRetries)
	if err != nil {
		return clues.Wrap(err, "getting drives")
	}

	for _, md := range drives {
		if err := rc.AddDrive(ctx, md, gdparf); err != nil {
			return clues.Wrap(err, "caching drive")
		}
	}

	return nil
}

type GetDrivePagerAndRootFolderer interface {
	GetRootFolderer
	NewDrivePagerer
}

func NewRestoreCaches(
	backupDriveIDNames idname.Cacher,
) *restoreCaches {
	// avoid nil panics
	if backupDriveIDNames == nil {
		backupDriveIDNames = idname.NewCache(nil)
	}

	return &restoreCaches{
		BackupDriveIDName:     backupDriveIDNames,
		collisionKeyToItemID:  map[string]api.DriveItemIDType{},
		DriveIDToDriveInfo:    map[string]driveInfo{},
		DriveNameToDriveInfo:  map[string]driveInfo{},
		Folders:               NewFolderCache(),
		OldLinkShareIDToNewID: map[string]string{},
		OldPermIDToNewID:      map[string]string{},
		ParentDirToMeta:       map[string]metadata.Metadata{},
		// Buffer pool for uploads
		pool: sync.Pool{
			New: func() any {
				b := make([]byte, graph.CopyBufferSize)
				return &b
			},
		},
	}
}

// ConsumeRestoreCollections will restore the specified data collections into OneDrive
func ConsumeRestoreCollections(
	ctx context.Context,
	rh RestoreHandler,
	backupVersion int,
	restoreCfg control.RestoreConfig,
	opts control.Options,
	rcc inject.RestoreConsumerConfig,
	backupDriveIDNames idname.Cacher,
	dcs []data.RestoreCollection,
	deets *details.Builder,
@ -153,16 +51,15 @@ func ConsumeRestoreCollections(
	ctr *count.Bus,
) (*support.ControllerOperationStatus, error) {
	var (
		restoreMetrics      support.CollectionMetrics
		el                  = errs.Local()
		caches              = NewRestoreCaches(backupDriveIDNames)
		protectedResourceID = dcs[0].FullPath().ResourceOwner()
		fallbackDriveName   = restoreCfg.Location
		restoreMetrics    support.CollectionMetrics
		el                = errs.Local()
		caches            = NewRestoreCaches(backupDriveIDNames)
		fallbackDriveName = rcc.RestoreConfig.Location
	)

	ctx = clues.Add(ctx, "backup_version", backupVersion)
	ctx = clues.Add(ctx, "backup_version", rcc.BackupVersion)

	err := caches.Populate(ctx, rh, protectedResourceID)
	err := caches.Populate(ctx, rh, rcc.ProtectedResource.ID())
	if err != nil {
		return nil, clues.Wrap(err, "initializing restore caches")
	}
@ -183,19 +80,16 @@ func ConsumeRestoreCollections(
		ictx = clues.Add(
			ctx,
			"category", dc.FullPath().Category(),
			"resource_owner", clues.Hide(protectedResourceID),
			"full_path", dc.FullPath())
	)

	metrics, err = RestoreCollection(
		ictx,
		rh,
		restoreCfg,
		backupVersion,
		rcc,
		dc,
		caches,
		deets,
		opts.RestorePermissions,
		fallbackDriveName,
		errs,
		ctr.Local())
@ -215,7 +109,7 @@ func ConsumeRestoreCollections(
		support.Restore,
		len(dcs),
		restoreMetrics,
		restoreCfg.Location)
		rcc.RestoreConfig.Location)

	return status, el.Failure()
}
@ -228,26 +122,23 @@
func RestoreCollection(
	ctx context.Context,
	rh RestoreHandler,
	restoreCfg control.RestoreConfig,
	backupVersion int,
	rcc inject.RestoreConsumerConfig,
	dc data.RestoreCollection,
	caches *restoreCaches,
	deets *details.Builder,
	restorePerms bool, // TODO: move into restoreConfig
	fallbackDriveName string,
	errs *fault.Bus,
	ctr *count.Bus,
) (support.CollectionMetrics, error) {
	var (
		metrics             = support.CollectionMetrics{}
		directory           = dc.FullPath()
		protectedResourceID = directory.ResourceOwner()
		el                  = errs.Local()
		metricsObjects      int64
		metricsBytes        int64
		metricsSuccess      int64
		wg                  sync.WaitGroup
		complete            bool
		metrics        = support.CollectionMetrics{}
		directory      = dc.FullPath()
		el             = errs.Local()
		metricsObjects int64
		metricsBytes   int64
		metricsSuccess int64
		wg             sync.WaitGroup
		complete       bool
	)

	ctx, end := diagnostics.Span(ctx, "gc:drive:restoreCollection", diagnostics.Label("path", directory))
@ -263,7 +154,7 @@ func RestoreCollection(
		rh,
		caches,
		drivePath,
		protectedResourceID,
		rcc.ProtectedResource.ID(),
		fallbackDriveName)
	if err != nil {
		return metrics, clues.Wrap(err, "ensuring drive exists")
@ -281,8 +172,8 @@ func RestoreCollection(
	// the drive into which this folder gets restored is tracked separately in drivePath.
	restoreDir := &path.Builder{}

	if len(restoreCfg.Location) > 0 {
		restoreDir = restoreDir.Append(restoreCfg.Location)
	if len(rcc.RestoreConfig.Location) > 0 {
		restoreDir = restoreDir.Append(rcc.RestoreConfig.Location)
	}

	restoreDir = restoreDir.Append(drivePath.Folders...)
@ -301,8 +192,8 @@ func RestoreCollection(
		drivePath,
		dc,
		caches,
		backupVersion,
		restorePerms)
		rcc.BackupVersion,
		rcc.RestoreConfig.IncludePermissions)
	if err != nil {
		return metrics, clues.Wrap(err, "getting permissions").WithClues(ctx)
	}
@ -316,7 +207,7 @@ func RestoreCollection(
		dc.FullPath(),
		colMeta,
		caches,
		restorePerms)
		rcc.RestoreConfig.IncludePermissions)
	if err != nil {
		return metrics, clues.Wrap(err, "creating folders for restore")
	}
@ -390,14 +281,12 @@ func RestoreCollection(
	itemInfo, skipped, err := restoreItem(
		ictx,
		rh,
		restoreCfg,
		rcc,
		dc,
		backupVersion,
		drivePath,
		restoreFolderID,
		copyBuffer,
		caches,
		restorePerms,
		itemData,
		itemPath,
		ctr)
@ -440,14 +329,12 @@ func RestoreCollection(
func restoreItem(
	ctx context.Context,
	rh RestoreHandler,
	restoreCfg control.RestoreConfig,
	rcc inject.RestoreConsumerConfig,
	fibn data.FetchItemByNamer,
	backupVersion int,
	drivePath *path.DrivePath,
	restoreFolderID string,
	copyBuffer []byte,
	caches *restoreCaches,
	restorePerms bool,
	itemData data.Stream,
	itemPath path.Path,
	ctr *count.Bus,
@ -455,11 +342,11 @@ func restoreItem(
	itemUUID := itemData.UUID()
	ctx = clues.Add(ctx, "item_id", itemUUID)

	if backupVersion < version.OneDrive1DataAndMetaFiles {
	if rcc.BackupVersion < version.OneDrive1DataAndMetaFiles {
		itemInfo, err := restoreV0File(
			ctx,
			rh,
			restoreCfg,
			rcc.RestoreConfig,
			drivePath,
			fibn,
			restoreFolderID,
@ -468,7 +355,7 @@ func restoreItem(
			itemData,
			ctr)
		if err != nil {
			if errors.Is(err, graph.ErrItemAlreadyExistsConflict) && restoreCfg.OnCollision == control.Skip {
			if errors.Is(err, graph.ErrItemAlreadyExistsConflict) && rcc.RestoreConfig.OnCollision == control.Skip {
				return details.ItemInfo{}, true, nil
			}

@ -491,7 +378,7 @@ func restoreItem(
	// Only the version.OneDrive1DataAndMetaFiles needed to deserialize the
	// permission for child folders here. Later versions can request
	// permissions inline when processing the collection.
	if !restorePerms || backupVersion >= version.OneDrive4DirIncludesPermissions {
	if !rcc.RestoreConfig.IncludePermissions || rcc.BackupVersion >= version.OneDrive4DirIncludesPermissions {
		return details.ItemInfo{}, true, nil
	}

@ -511,22 +398,21 @@ func restoreItem(

	// only items with DataFileSuffix from this point on

	if backupVersion < version.OneDrive6NameInMeta {
	if rcc.BackupVersion < version.OneDrive6NameInMeta {
		itemInfo, err := restoreV1File(
			ctx,
			rh,
			restoreCfg,
			rcc,
			drivePath,
			fibn,
			restoreFolderID,
			copyBuffer,
			restorePerms,
			caches,
			itemPath,
			itemData,
			ctr)
		if err != nil {
			if errors.Is(err, graph.ErrItemAlreadyExistsConflict) && restoreCfg.OnCollision == control.Skip {
			if errors.Is(err, graph.ErrItemAlreadyExistsConflict) && rcc.RestoreConfig.OnCollision == control.Skip {
				return details.ItemInfo{}, true, nil
			}

@ -541,18 +427,17 @@ func restoreItem(
	itemInfo, err := restoreV6File(
		ctx,
		rh,
		restoreCfg,
		rcc,
		drivePath,
		fibn,
		restoreFolderID,
		copyBuffer,
		restorePerms,
		caches,
		itemPath,
		itemData,
		ctr)
	if err != nil {
		if errors.Is(err, graph.ErrItemAlreadyExistsConflict) && restoreCfg.OnCollision == control.Skip {
		if errors.Is(err, graph.ErrItemAlreadyExistsConflict) && rcc.RestoreConfig.OnCollision == control.Skip {
			return details.ItemInfo{}, true, nil
		}

@ -596,12 +481,11 @@ func restoreV0File(
func restoreV1File(
	ctx context.Context,
	rh RestoreHandler,
	restoreCfg control.RestoreConfig,
	rcc inject.RestoreConsumerConfig,
	drivePath *path.DrivePath,
	fibn data.FetchItemByNamer,
	restoreFolderID string,
	copyBuffer []byte,
	restorePerms bool,
	caches *restoreCaches,
	itemPath path.Path,
	itemData data.Stream,
@ -611,7 +495,7 @@ func restoreV1File(

	itemID, itemInfo, err := restoreFile(
		ctx,
		restoreCfg,
		rcc.RestoreConfig,
		rh,
		fibn,
		trimmedName,
@ -627,7 +511,7 @@ func restoreV1File(

	// Mark it as success without processing the .meta
	// file if we are not restoring permissions.
	if !restorePerms {
	if !rcc.RestoreConfig.IncludePermissions {
		return itemInfo, nil
	}

@ -657,12 +541,11 @@ func restoreV1File(
func restoreV6File(
	ctx context.Context,
	rh RestoreHandler,
	restoreCfg control.RestoreConfig,
	rcc inject.RestoreConsumerConfig,
	drivePath *path.DrivePath,
	fibn data.FetchItemByNamer,
	restoreFolderID string,
	copyBuffer []byte,
	restorePerms bool,
	caches *restoreCaches,
	itemPath path.Path,
	itemData data.Stream,
@ -696,7 +579,7 @@ func restoreV6File(

	itemID, itemInfo, err := restoreFile(
		ctx,
		restoreCfg,
		rcc.RestoreConfig,
		rh,
		fibn,
		meta.FileName,
@ -712,10 +595,12 @@ func restoreV6File(

	// Mark it as success without processing the .meta
	// file if we are not restoring permissions.
	if !restorePerms {
	if !rcc.RestoreConfig.IncludePermissions {
		return itemInfo, nil
	}

	err = RestorePermissions(
		ctx,
		rh,
@ -765,6 +650,8 @@ func CreateRestoreFolders(
		return id, nil
	}

	err = RestorePermissions(
		ctx,
		rh,

116 src/internal/m365/onedrive/restore_caches.go (new file)
@ -0,0 +1,116 @@
package onedrive

import (
	"context"
	"sync"

	"github.com/alcionai/clues"
	"github.com/microsoftgraph/msgraph-sdk-go/models"

	"github.com/alcionai/corso/src/internal/common/idname"
	"github.com/alcionai/corso/src/internal/common/ptr"
	"github.com/alcionai/corso/src/internal/m365/graph"
	"github.com/alcionai/corso/src/internal/m365/onedrive/metadata"
	"github.com/alcionai/corso/src/pkg/services/m365/api"
)

type driveInfo struct {
	id           string
	name         string
	rootFolderID string
}

type restoreCaches struct {
	BackupDriveIDName     idname.Cacher
	collisionKeyToItemID  map[string]api.DriveItemIDType
	DriveIDToDriveInfo    map[string]driveInfo
	DriveNameToDriveInfo  map[string]driveInfo
	Folders               *folderCache
	OldLinkShareIDToNewID map[string]string
	OldPermIDToNewID      map[string]string
	ParentDirToMeta       map[string]metadata.Metadata

	pool sync.Pool
}

func (rc *restoreCaches) AddDrive(
	ctx context.Context,
	md models.Driveable,
	grf GetRootFolderer,
) error {
	di := driveInfo{
		id:   ptr.Val(md.GetId()),
		name: ptr.Val(md.GetName()),
	}

	ctx = clues.Add(ctx, "drive_info", di)

	root, err := grf.GetRootFolder(ctx, di.id)
	if err != nil {
		return clues.Wrap(err, "getting drive root id")
	}

	di.rootFolderID = ptr.Val(root.GetId())

	rc.DriveIDToDriveInfo[di.id] = di
	rc.DriveNameToDriveInfo[di.name] = di

	return nil
}

// Populate looks up drive items available to the protectedResource
// and adds their info to the caches.
func (rc *restoreCaches) Populate(
	ctx context.Context,
	gdparf GetDrivePagerAndRootFolderer,
	protectedResourceID string,
) error {
	drives, err := api.GetAllDrives(
		ctx,
		gdparf.NewDrivePager(protectedResourceID, nil),
		true,
		maxDrivesRetries)
	if err != nil {
		return clues.Wrap(err, "getting drives")
	}

	for _, md := range drives {
		if err := rc.AddDrive(ctx, md, gdparf); err != nil {
			return clues.Wrap(err, "caching drive")
		}
	}

	return nil
}

type GetDrivePagerAndRootFolderer interface {
	GetRootFolderer
	NewDrivePagerer
}

func NewRestoreCaches(
	backupDriveIDNames idname.Cacher,
) *restoreCaches {
	// avoid nil panics
	if backupDriveIDNames == nil {
		backupDriveIDNames = idname.NewCache(nil)
	}

	return &restoreCaches{
		BackupDriveIDName:     backupDriveIDNames,
		collisionKeyToItemID:  map[string]api.DriveItemIDType{},
		DriveIDToDriveInfo:    map[string]driveInfo{},
		DriveNameToDriveInfo:  map[string]driveInfo{},
		Folders:               NewFolderCache(),
		OldLinkShareIDToNewID: map[string]string{},
		OldPermIDToNewID:      map[string]string{},
		ParentDirToMeta:       map[string]metadata.Metadata{},
		// Buffer pool for uploads
		pool: sync.Pool{
			New: func() any {
				b := make([]byte, graph.CopyBufferSize)
				return &b
			},
		},
	}
}
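
A minimal usage sketch for the buffer pool above (the helper is assumed, not part of this diff): Get yields the *[]byte produced by New, and Put returns it for reuse, so concurrent item uploads avoid re-allocating large copy buffers.

	func withPooledBuffer(rc *restoreCaches, copyFn func(buf []byte) error) error {
		pb := rc.pool.Get().(*[]byte) // matches the *[]byte stored by New above
		defer rc.pool.Put(pb)

		return copyFn(*pb)
	}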
@ -16,6 +16,7 @@ import (
	"github.com/alcionai/corso/src/internal/m365/graph"
	odConsts "github.com/alcionai/corso/src/internal/m365/onedrive/consts"
	"github.com/alcionai/corso/src/internal/m365/onedrive/mock"
	"github.com/alcionai/corso/src/internal/operations/inject"
	"github.com/alcionai/corso/src/internal/tester"
	"github.com/alcionai/corso/src/internal/version"
	"github.com/alcionai/corso/src/pkg/control"
@ -512,21 +513,25 @@ func (suite *RestoreUnitSuite) TestRestoreItem_collisionHandling() {

			ctr := count.New()

			rcc := inject.RestoreConsumerConfig{
				BackupVersion: version.Backup,
				Options:       control.DefaultOptions(),
				RestoreConfig: restoreCfg,
			}

			_, skip, err := restoreItem(
				ctx,
				rh,
				restoreCfg,
				rcc,
				mock.FetchItemByName{
					Item: &mock.Data{
						Reader: mock.FileRespReadCloser(mock.DriveFileMetaData),
					},
				},
				version.Backup,
				dp,
				"",
				make([]byte, graph.CopyBufferSize),
				caches,
				false,
				&mock.Data{
					ID:     uuid.NewString(),
					Reader: mock.FileRespReadCloser(mock.DriveFilePayloadData),

@ -21,7 +21,7 @@ type oneDriveService struct {
}

func NewOneDriveService(credentials account.M365Config) (*oneDriveService, error) {
	ac, err := api.NewClient(credentials, control.Defaults())
	ac, err := api.NewClient(credentials, control.DefaultOptions())
	if err != nil {
		return nil, err
	}

@ -53,7 +53,7 @@ func (suite *URLCacheIntegrationSuite) SetupSuite() {
	creds, err := acct.M365Config()
	require.NoError(t, err, clues.ToCore(err))

	suite.ac, err = api.NewClient(creds, control.Defaults())
	suite.ac, err = api.NewClient(creds, control.DefaultOptions())
	require.NoError(t, err, clues.ToCore(err))

	drive, err := suite.ac.Users().GetDefaultDrive(ctx, suite.user)

@ -12,6 +12,7 @@ import (
	"github.com/stretchr/testify/require"
	"github.com/stretchr/testify/suite"

	"github.com/alcionai/corso/src/internal/common/dttm"
	"github.com/alcionai/corso/src/internal/common/ptr"
	"github.com/alcionai/corso/src/internal/m365/graph"
	odConsts "github.com/alcionai/corso/src/internal/m365/onedrive/consts"
@ -223,9 +224,9 @@ func (suite *SharePointIntegrationSuite) TestPermissionsRestoreAndBackup() {
	testPermissionsRestoreAndBackup(suite, version.Backup)
}

func (suite *SharePointIntegrationSuite) TestPermissionsBackupAndNoRestore() {
func (suite *SharePointIntegrationSuite) TestRestoreNoPermissionsAndBackup() {
	suite.T().Skip("Temporarily disabled due to CI issues")
	testPermissionsBackupAndNoRestore(suite, version.Backup)
	testRestoreNoPermissionsAndBackup(suite, version.Backup)
}

func (suite *SharePointIntegrationSuite) TestPermissionsInheritanceRestoreAndBackup() {
@ -290,8 +291,8 @@ func (suite *OneDriveIntegrationSuite) TestPermissionsRestoreAndBackup() {
	testPermissionsRestoreAndBackup(suite, version.Backup)
}

func (suite *OneDriveIntegrationSuite) TestPermissionsBackupAndNoRestore() {
	testPermissionsBackupAndNoRestore(suite, version.Backup)
func (suite *OneDriveIntegrationSuite) TestRestoreNoPermissionsAndBackup() {
	testRestoreNoPermissionsAndBackup(suite, version.Backup)
}

func (suite *OneDriveIntegrationSuite) TestPermissionsInheritanceRestoreAndBackup() {
@ -354,8 +355,8 @@ func (suite *OneDriveNightlySuite) TestPermissionsRestoreAndBackup() {
	testPermissionsRestoreAndBackup(suite, version.OneDrive1DataAndMetaFiles)
}

func (suite *OneDriveNightlySuite) TestPermissionsBackupAndNoRestore() {
	testPermissionsBackupAndNoRestore(suite, version.OneDrive1DataAndMetaFiles)
func (suite *OneDriveNightlySuite) TestRestoreNoPermissionsAndBackup() {
	testRestoreNoPermissionsAndBackup(suite, version.OneDrive1DataAndMetaFiles)
}

func (suite *OneDriveNightlySuite) TestPermissionsInheritanceRestoreAndBackup() {
@ -517,19 +518,17 @@ func testRestoreAndBackupMultipleFilesAndFoldersNoPermissions(
				collectionsLatest: expected,
			}

			rc := testdata.DefaultRestoreConfig("od_restore_and_backup_multi")
			rc.OnCollision = control.Replace
			restoreCfg := testdata.DefaultRestoreConfig("od_restore_and_backup_multi")
			restoreCfg.OnCollision = control.Replace
			restoreCfg.IncludePermissions = true

			runRestoreBackupTestVersions(
				t,
				testData,
				suite.Tenant(),
				[]string{suite.ResourceOwner()},
				control.Options{
					RestorePermissions: true,
					ToggleFeatures:     control.Toggles{},
				},
				rc)
				control.DefaultOptions(),
				restoreCfg)
		})
	}
}
@ -768,24 +767,22 @@ func testPermissionsRestoreAndBackup(suite oneDriveSuite, startVersion int) {
				collectionsLatest: expected,
			}

			rc := testdata.DefaultRestoreConfig("perms_restore_and_backup")
			rc.OnCollision = control.Replace
			restoreCfg := testdata.DefaultRestoreConfig("perms_restore_and_backup")
			restoreCfg.OnCollision = control.Replace
			restoreCfg.IncludePermissions = true

			runRestoreBackupTestVersions(
				t,
				testData,
				suite.Tenant(),
				[]string{suite.ResourceOwner()},
				control.Options{
					RestorePermissions: true,
					ToggleFeatures:     control.Toggles{},
				},
				rc)
				control.DefaultOptions(),
				restoreCfg)
		})
	}
}

func testPermissionsBackupAndNoRestore(suite oneDriveSuite, startVersion int) {
func testRestoreNoPermissionsAndBackup(suite oneDriveSuite, startVersion int) {
	t := suite.T()

	ctx, flush := tester.NewContext(t)
@ -860,19 +857,19 @@ func testPermissionsBackupAndNoRestore(suite oneDriveSuite, startVersion int) {
				collectionsLatest: expected,
			}

			rc := testdata.DefaultRestoreConfig("perms_backup_no_restore")
			rc.OnCollision = control.Replace
			restoreCfg := testdata.DefaultRestoreConfig("perms_backup_no_restore")
			restoreCfg.OnCollision = control.Replace
			restoreCfg.IncludePermissions = false

			runRestoreBackupTestVersions(
				t,
				testData,
				suite.Tenant(),
				[]string{suite.ResourceOwner()},
				control.Options{
					RestorePermissions: false,
					ToggleFeatures:     control.Toggles{},
				},
				rc)
				control.DefaultOptions(),
				restoreCfg)
		})
	}
}
@ -1067,19 +1064,17 @@ func testPermissionsInheritanceRestoreAndBackup(suite oneDriveSuite, startVersio
				collectionsLatest: expected,
			}

			rc := testdata.DefaultRestoreConfig("perms_inherit_restore_and_backup")
			rc.OnCollision = control.Replace
			restoreCfg := testdata.DefaultRestoreConfig("perms_inherit_restore_and_backup")
			restoreCfg.OnCollision = control.Replace
			restoreCfg.IncludePermissions = true

			runRestoreBackupTestVersions(
				t,
				testData,
				suite.Tenant(),
				[]string{suite.ResourceOwner()},
				control.Options{
					RestorePermissions: true,
					ToggleFeatures:     control.Toggles{},
				},
				rc)
				control.DefaultOptions(),
				restoreCfg)
		})
	}
}
@ -1264,19 +1259,17 @@ func testLinkSharesInheritanceRestoreAndBackup(suite oneDriveSuite, startVersion
				collectionsLatest: expected,
			}

			rc := testdata.DefaultRestoreConfig("linkshares_inherit_restore_and_backup")
			rc.OnCollision = control.Replace
			restoreCfg := testdata.DefaultRestoreConfig("linkshares_inherit_restore_and_backup")
			restoreCfg.OnCollision = control.Replace
			restoreCfg.IncludePermissions = true

			runRestoreBackupTestVersions(
				t,
				testData,
				suite.Tenant(),
				[]string{suite.ResourceOwner()},
				control.Options{
					RestorePermissions: true,
					ToggleFeatures:     control.Toggles{},
				},
				rc)
				control.DefaultOptions(),
				restoreCfg)
		})
	}
}
@ -1383,16 +1376,16 @@ func testRestoreFolderNamedFolderRegression(
				collectionsLatest: expected,
			}

			restoreCfg := control.DefaultRestoreConfig(dttm.HumanReadableDriveItem)
			restoreCfg.IncludePermissions = true

			runRestoreTestWithVersion(
				t,
				testData,
				suite.Tenant(),
				[]string{suite.ResourceOwner()},
				control.Options{
					RestorePermissions: true,
					ToggleFeatures:     control.Toggles{},
				},
			)
				control.DefaultOptions(),
				restoreCfg)
		})
	}
}

@ -12,11 +12,11 @@ import (
	"github.com/alcionai/corso/src/internal/m365/onedrive"
	"github.com/alcionai/corso/src/internal/m365/sharepoint"
	"github.com/alcionai/corso/src/internal/m365/support"
	"github.com/alcionai/corso/src/internal/operations/inject"
	"github.com/alcionai/corso/src/pkg/backup/details"
	"github.com/alcionai/corso/src/pkg/control"
	"github.com/alcionai/corso/src/pkg/count"
	"github.com/alcionai/corso/src/pkg/fault"
	"github.com/alcionai/corso/src/pkg/selectors"
	"github.com/alcionai/corso/src/pkg/path"
)

// ConsumeRestoreCollections restores data from the specified collections
@ -24,10 +24,7 @@ import (
// SideEffect: status is updated at the completion of operation
func (ctrl *Controller) ConsumeRestoreCollections(
	ctx context.Context,
	backupVersion int,
	sels selectors.Selector,
	restoreCfg control.RestoreConfig,
	opts control.Options,
	rcc inject.RestoreConsumerConfig,
	dcs []data.RestoreCollection,
	errs *fault.Bus,
	ctr *count.Bus,
@ -35,48 +32,64 @@ func (ctrl *Controller) ConsumeRestoreCollections(
	ctx, end := diagnostics.Span(ctx, "m365:restore")
	defer end()

	ctx = graph.BindRateLimiterConfig(ctx, graph.LimiterCfg{Service: sels.PathService()})
	ctx = clues.Add(ctx, "restore_config", restoreCfg)
	ctx = graph.BindRateLimiterConfig(ctx, graph.LimiterCfg{Service: rcc.Selector.PathService()})
	ctx = clues.Add(ctx, "restore_config", rcc.RestoreConfig)

	if len(dcs) == 0 {
		return nil, clues.New("no data collections to restore")
	}

	serviceEnabled, _, err := checkServiceEnabled(
		ctx,
		ctrl.AC.Users(),
		rcc.Selector.PathService(),
		rcc.ProtectedResource.ID())
	if err != nil {
		return nil, err
	}

	if !serviceEnabled {
		return nil, clues.Stack(graph.ErrServiceNotEnabled).WithClues(ctx)
	}

	var (
		status *support.ControllerOperationStatus
		deets  = &details.Builder{}
		err    error
		service = rcc.Selector.PathService()
		status  *support.ControllerOperationStatus
		deets   = &details.Builder{}
	)

	switch sels.Service {
	case selectors.ServiceExchange:
		status, err = exchange.ConsumeRestoreCollections(ctx, ctrl.AC, restoreCfg, dcs, deets, errs, ctr)
	case selectors.ServiceOneDrive:
	switch service {
	case path.ExchangeService:
		status, err = exchange.ConsumeRestoreCollections(
			ctx,
			ctrl.AC,
			rcc,
			dcs,
			deets,
			errs,
			ctr)
	case path.OneDriveService:
		status, err = onedrive.ConsumeRestoreCollections(
			ctx,
			onedrive.NewRestoreHandler(ctrl.AC),
			backupVersion,
			restoreCfg,
			opts,
			rcc,
			ctrl.backupDriveIDNames,
			dcs,
			deets,
			errs,
			ctr)
	case selectors.ServiceSharePoint:
	case path.SharePointService:
		status, err = sharepoint.ConsumeRestoreCollections(
			ctx,
			backupVersion,
			rcc,
			ctrl.AC,
			restoreCfg,
			opts,
			ctrl.backupDriveIDNames,
			dcs,
			deets,
			errs,
			ctr)
	default:
		err = clues.Wrap(clues.New(sels.Service.String()), "service not supported")
		err = clues.Wrap(clues.New(service.String()), "service not supported")
	}

	ctrl.incrementAwaitingMessages()

@ -107,7 +107,7 @@ func (suite *LibrariesBackupUnitSuite) TestUpdateCollections() {
			tenantID,
			site,
			nil,
			control.Defaults())
			control.DefaultOptions())

		c.CollectionMap = collMap

@ -201,7 +201,7 @@ func (suite *SharePointPagesSuite) TestCollectPages() {
	creds, err := a.M365Config()
	require.NoError(t, err, clues.ToCore(err))

	ac, err := api.NewClient(creds, control.Defaults())
	ac, err := api.NewClient(creds, control.DefaultOptions())
	require.NoError(t, err, clues.ToCore(err))

	col, err := collectPages(
@ -210,7 +210,7 @@ func (suite *SharePointPagesSuite) TestCollectPages() {
		ac,
		mock.NewProvider(siteID, siteID),
		&MockGraphService{},
		control.Defaults(),
		control.DefaultOptions(),
		fault.New(true))
	assert.NoError(t, err, clues.ToCore(err))
	assert.NotEmpty(t, col)

@ -43,7 +43,7 @@ func (suite *SharePointCollectionSuite) SetupSuite() {

	suite.creds = m365

	ac, err := api.NewClient(m365, control.Defaults())
	ac, err := api.NewClient(m365, control.DefaultOptions())
	require.NoError(t, err, clues.ToCore(err))

	suite.ac = ac
@ -168,7 +168,7 @@ func (suite *SharePointCollectionSuite) TestCollection_Items() {
		suite.ac,
		test.category,
		nil,
		control.Defaults())
		control.DefaultOptions())
	col.data <- test.getItem(t, test.itemName)

	readItems := []data.Stream{}

@ -19,6 +19,7 @@ import (
	"github.com/alcionai/corso/src/internal/m365/onedrive"
	betaAPI "github.com/alcionai/corso/src/internal/m365/sharepoint/api"
	"github.com/alcionai/corso/src/internal/m365/support"
	"github.com/alcionai/corso/src/internal/operations/inject"
	"github.com/alcionai/corso/src/pkg/backup/details"
	"github.com/alcionai/corso/src/pkg/control"
	"github.com/alcionai/corso/src/pkg/count"
@ -31,10 +32,8 @@ import (
// ConsumeRestoreCollections will restore the specified data collections into SharePoint
func ConsumeRestoreCollections(
	ctx context.Context,
	backupVersion int,
	rcc inject.RestoreConsumerConfig,
	ac api.Client,
	restoreCfg control.RestoreConfig,
	opts control.Options,
	backupDriveIDNames idname.Cacher,
	dcs []data.RestoreCollection,
	deets *details.Builder,
@ -42,14 +41,13 @@ func ConsumeRestoreCollections(
	ctr *count.Bus,
) (*support.ControllerOperationStatus, error) {
	var (
		lrh                 = libraryRestoreHandler{ac}
		protectedResourceID = dcs[0].FullPath().ResourceOwner()
		restoreMetrics      support.CollectionMetrics
		caches              = onedrive.NewRestoreCaches(backupDriveIDNames)
		el                  = errs.Local()
		lrh            = libraryRestoreHandler{ac}
		restoreMetrics support.CollectionMetrics
		caches         = onedrive.NewRestoreCaches(backupDriveIDNames)
		el             = errs.Local()
	)

	err := caches.Populate(ctx, lrh, protectedResourceID)
	err := caches.Populate(ctx, lrh, rcc.ProtectedResource.ID())
	if err != nil {
		return nil, clues.Wrap(err, "initializing restore caches")
	}
@ -70,7 +68,7 @@ func ConsumeRestoreCollections(
		metrics support.CollectionMetrics
		ictx    = clues.Add(ctx,
			"category", category,
			"restore_location", restoreCfg.Location,
			"restore_location", clues.Hide(rcc.RestoreConfig.Location),
			"resource_owner", clues.Hide(dc.FullPath().ResourceOwner()),
			"full_path", dc.FullPath())
	)
@ -80,12 +78,10 @@ func ConsumeRestoreCollections(
		metrics, err = onedrive.RestoreCollection(
			ictx,
			lrh,
			restoreCfg,
			backupVersion,
			rcc,
			dc,
			caches,
			deets,
			opts.RestorePermissions,
			control.DefaultRestoreContainerName(dttm.HumanReadableDriveItem),
			errs,
			ctr)
@ -95,7 +91,7 @@ func ConsumeRestoreCollections(
			ictx,
			ac.Stable,
			dc,
			restoreCfg.Location,
			rcc.RestoreConfig.Location,
			deets,
			errs)

@ -104,7 +100,7 @@ func ConsumeRestoreCollections(
			ictx,
			ac.Stable,
			dc,
			restoreCfg.Location,
			rcc.RestoreConfig.Location,
			deets,
			errs)

@ -128,7 +124,7 @@ func ConsumeRestoreCollections(
		support.Restore,
		len(dcs),
		restoreMetrics,
		restoreCfg.Location)
		rcc.RestoreConfig.Location)

	return status, el.Failure()
}

@ -68,8 +68,7 @@ func GetCollectionsAndExpected(
		owner,
		config.RestoreCfg,
		testCollections,
		backupVersion,
	)
		backupVersion)
	if err != nil {
		return totalItems, totalKopiaItems, collections, expectedData, err
	}

@ -40,6 +40,7 @@ const (
	OpUnknown Operation = iota
	Backup
	Restore
	Export
)

// Constructor for ConnectorOperationStatus. If the counts do not agree, an error is returned.

@ -362,7 +362,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_PersistResults() {

	op, err := NewBackupOperation(
		ctx,
		control.Defaults(),
		control.DefaultOptions(),
		kw,
		sw,
		ctrl,
@ -1137,7 +1137,7 @@ func (suite *BackupOpIntegrationSuite) SetupSuite() {
	creds, err := a.M365Config()
	require.NoError(t, err, clues.ToCore(err))

	suite.ac, err = api.NewClient(creds, control.Defaults())
	suite.ac, err = api.NewClient(creds, control.DefaultOptions())
	require.NoError(t, err, clues.ToCore(err))
}

@ -1147,7 +1147,7 @@ func (suite *BackupOpIntegrationSuite) TestNewBackupOperation() {
		sw   = &store.Wrapper{}
		ctrl = &mock.Controller{}
		acct = tconfig.NewM365Account(suite.T())
		opts = control.Defaults()
		opts = control.DefaultOptions()
	)

	table := []struct {

359
src/internal/operations/export.go
Normal file
359
src/internal/operations/export.go
Normal file
@ -0,0 +1,359 @@
|
||||
package operations
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/alcionai/clues"
|
||||
"github.com/google/uuid"
|
||||
|
||||
"github.com/alcionai/corso/src/internal/archive"
|
||||
"github.com/alcionai/corso/src/internal/common/crash"
|
||||
"github.com/alcionai/corso/src/internal/common/dttm"
|
||||
"github.com/alcionai/corso/src/internal/data"
|
||||
"github.com/alcionai/corso/src/internal/diagnostics"
|
||||
"github.com/alcionai/corso/src/internal/events"
|
||||
"github.com/alcionai/corso/src/internal/kopia"
|
||||
"github.com/alcionai/corso/src/internal/model"
|
||||
"github.com/alcionai/corso/src/internal/observe"
|
||||
"github.com/alcionai/corso/src/internal/operations/inject"
|
||||
"github.com/alcionai/corso/src/internal/stats"
|
||||
"github.com/alcionai/corso/src/internal/streamstore"
|
||||
"github.com/alcionai/corso/src/pkg/account"
|
||||
"github.com/alcionai/corso/src/pkg/control"
|
||||
"github.com/alcionai/corso/src/pkg/count"
|
||||
"github.com/alcionai/corso/src/pkg/export"
|
||||
"github.com/alcionai/corso/src/pkg/fault"
|
||||
"github.com/alcionai/corso/src/pkg/logger"
|
||||
"github.com/alcionai/corso/src/pkg/selectors"
|
||||
"github.com/alcionai/corso/src/pkg/store"
|
||||
)
|
||||
|
||||
const (
|
||||
// CopyBufferSize is the size of the copy buffer for disk
|
||||
// write operations
|
||||
// TODO(meain): tweak this value
|
||||
CopyBufferSize = 5 * 1024 * 1024
|
||||
)
|
||||
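// Illustrative sketch (not part of this diff): a buffer of CopyBufferSize
// is the sort of value that would feed io.CopyBuffer when flushing export
// items to disk. The dst and src names below are hypothetical placeholders.
//
//	buf := make([]byte, CopyBufferSize)
//	if _, err := io.CopyBuffer(dst, src, buf); err != nil {
//		return clues.Wrap(err, "writing export item to disk")
//	}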

// ExportOperation wraps an operation with export-specific props.
type ExportOperation struct {
	operation

	BackupID  model.StableID
	Results   RestoreResults
	Selectors selectors.Selector
	ExportCfg control.ExportConfig
	Version   string

	acct account.Account
	ec   inject.ExportConsumer
}

// NewExportOperation constructs and validates an export operation.
func NewExportOperation(
	ctx context.Context,
	opts control.Options,
	kw *kopia.Wrapper,
	sw *store.Wrapper,
	ec inject.ExportConsumer,
	acct account.Account,
	backupID model.StableID,
	sel selectors.Selector,
	exportCfg control.ExportConfig,
	bus events.Eventer,
) (ExportOperation, error) {
	op := ExportOperation{
		operation: newOperation(opts, bus, count.New(), kw, sw),
		acct:      acct,
		BackupID:  backupID,
		ExportCfg: exportCfg,
		Selectors: sel,
		Version:   "v0",
		ec:        ec,
	}
	if err := op.validate(); err != nil {
		return ExportOperation{}, err
	}

	return op, nil
}

func (op ExportOperation) validate() error {
	if op.ec == nil {
		return clues.New("missing export consumer")
	}

	return op.operation.validate()
}

// exportStats aggregates stats from export.Run().
// primarily used so that the defer can take in a
// pointer wrapping the values, while those values
// get populated asynchronously.
type exportStats struct {
	cs            []data.RestoreCollection
	ctrl          *data.CollectionStats
	bytesRead     *stats.ByteCounter
	resourceCount int

	// a transient value only used to pair up start-end events.
	exportID string
}
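// A sketch of the pattern this enables (illustrative; emitEvent is a
// hypothetical stand-in): Run registers the deferred emission up front,
// then do() fills the struct through the shared pointer before it fires.
//
//	opStats := exportStats{bytesRead: &stats.ByteCounter{}}
//	defer func() { emitEvent(opStats.resourceCount) }() // reads the final values
//	opStats.resourceCount = 1                           // populated later in do()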
// Run begins a synchronous export operation.
func (op *ExportOperation) Run(ctx context.Context) (
	expColl []export.Collection,
	err error,
) {
	defer func() {
		if crErr := crash.Recovery(ctx, recover(), "export"); crErr != nil {
			err = crErr
		}
	}()

	var (
		opStats = exportStats{
			bytesRead: &stats.ByteCounter{},
			exportID:  uuid.NewString(),
		}
		start  = time.Now()
		sstore = streamstore.NewStreamer(op.kopia, op.acct.ID(), op.Selectors.PathService())
	)

	// -----
	// Setup
	// -----

	ctx, end := diagnostics.Span(ctx, "operations:export:run")
	defer func() {
		end()
		// wait for the progress display to clean up
		observe.Complete()
	}()

	ctx, flushMetrics := events.NewMetrics(ctx, logger.Writer{Ctx: ctx})
	defer flushMetrics()

	ctx = clues.Add(
		ctx,
		"tenant_id", clues.Hide(op.acct.ID()),
		"backup_id", op.BackupID,
		"service", op.Selectors.Service)

	defer func() {
		op.bus.Event(
			ctx,
			events.ExportEnd,
			map[string]any{
				events.BackupID:      op.BackupID,
				events.DataRetrieved: op.Results.BytesRead,
				events.Duration:      op.Results.CompletedAt.Sub(op.Results.StartedAt),
				events.EndTime:       dttm.Format(op.Results.CompletedAt),
				events.ItemsRead:     op.Results.ItemsRead,
				events.ItemsWritten:  op.Results.ItemsWritten,
				events.Resources:     op.Results.ResourceOwners,
				events.ExportID:      opStats.exportID,
				events.Service:       op.Selectors.Service.String(),
				events.StartTime:     dttm.Format(op.Results.StartedAt),
				events.Status:        op.Status.String(),
			})
	}()

	// -----
	// Execution
	// -----

	expCollections, err := op.do(ctx, &opStats, sstore, start)
	if err != nil {
		// No return here! We continue down to finalizeMetrics, even in case of failure.
		logger.CtxErr(ctx, err).Error("running export")

		if errors.Is(err, kopia.ErrNoRestorePath) {
			op.Errors.Fail(clues.New("empty backup or unknown path provided"))
		}

		op.Errors.Fail(clues.Wrap(err, "running export"))
	}

	finalizeErrorHandling(ctx, op.Options, op.Errors, "running export")
	LogFaultErrors(ctx, op.Errors.Errors(), "running export")

	// -----
	// Persistence
	// -----

	err = op.finalizeMetrics(ctx, start, &opStats)
	if err != nil {
		op.Errors.Fail(clues.Wrap(err, "finalizing export metrics"))
		return nil, op.Errors.Failure()
	}

	logger.Ctx(ctx).Infow("completed export", "results", op.Results)

	return expCollections, nil
}
func (op *ExportOperation) do(
	ctx context.Context,
	opStats *exportStats,
	detailsStore streamstore.Reader,
	start time.Time,
) ([]export.Collection, error) {
	logger.Ctx(ctx).
		With("control_options", op.Options, "selectors", op.Selectors).
		Info("exporting selection")

	bup, deets, err := getBackupAndDetailsFromID(
		ctx,
		op.BackupID,
		op.store,
		detailsStore,
		op.Errors)
	if err != nil {
		return nil, clues.Wrap(err, "getting backup and details")
	}

	observe.Message(ctx, "Exporting", observe.Bullet, clues.Hide(bup.Selector.DiscreteOwner))

	paths, err := formatDetailsForRestoration(ctx, bup.Version, op.Selectors, deets, op.ec, op.Errors)
	if err != nil {
		return nil, clues.Wrap(err, "formatting paths from details")
	}

	ctx = clues.Add(
		ctx,
		"resource_owner_id", bup.Selector.ID(),
		"resource_owner_name", clues.Hide(bup.Selector.Name()),
		"details_entries", len(deets.Entries),
		"details_paths", len(paths),
		"backup_snapshot_id", bup.SnapshotID,
		"backup_version", bup.Version)

	op.bus.Event(
		ctx,
		events.ExportStart,
		map[string]any{
			events.StartTime:        start,
			events.BackupID:         op.BackupID,
			events.BackupCreateTime: bup.CreationTime,
			events.ExportID:         opStats.exportID,
		})

	observe.Message(ctx, fmt.Sprintf("Discovered %d items in backup %s to export", len(paths), op.BackupID))

	kopiaComplete := observe.MessageWithCompletion(ctx, "Enumerating items in repository")
	defer close(kopiaComplete)

	dcs, err := op.kopia.ProduceRestoreCollections(ctx, bup.SnapshotID, paths, opStats.bytesRead, op.Errors)
	if err != nil {
		return nil, clues.Wrap(err, "producing collections to export")
	}

	kopiaComplete <- struct{}{}

	ctx = clues.Add(ctx, "coll_count", len(dcs))

	// should always be 1, since backups are 1:1 with resourceOwners.
	opStats.resourceCount = 1
	opStats.cs = dcs

	expCollections, err := exportRestoreCollections(
		ctx,
		op.ec,
		bup.Version,
		op.Selectors,
		op.ExportCfg,
		op.Options,
		dcs,
		op.Errors)
	if err != nil {
		return nil, clues.Wrap(err, "exporting collections")
	}

	opStats.ctrl = op.ec.Wait()

	logger.Ctx(ctx).Debug(opStats.ctrl)

	if op.ExportCfg.Archive {
		zc, err := archive.ZipExportCollection(ctx, expCollections)
		if err != nil {
			return nil, clues.Wrap(err, "zipping export collections")
		}

		return []export.Collection{zc}, nil
	}

	return expCollections, nil
}

// finalizeMetrics persists details and statistics about the export operation.
func (op *ExportOperation) finalizeMetrics(
	ctx context.Context,
	started time.Time,
	opStats *exportStats,
) error {
	op.Results.StartedAt = started
	op.Results.CompletedAt = time.Now()

	op.Status = Completed

	if op.Errors.Failure() != nil {
		op.Status = Failed
	}

	op.Results.BytesRead = opStats.bytesRead.NumBytes
	op.Results.ItemsRead = len(opStats.cs) // TODO: file count, not collection count
	op.Results.ResourceOwners = opStats.resourceCount

	if opStats.ctrl == nil {
		op.Status = Failed
		return clues.New("restoration never completed")
	}

	if op.Status != Failed && opStats.ctrl.IsZero() {
		op.Status = NoData
	}

	// We don't have data on which items were written.
	// op.Results.ItemsWritten = opStats.ctrl.Successes

	return op.Errors.Failure()
}

// ---------------------------------------------------------------------------
// Exporter funcs
// ---------------------------------------------------------------------------

func exportRestoreCollections(
	ctx context.Context,
	ec inject.ExportConsumer,
	backupVersion int,
	sel selectors.Selector,
	exportCfg control.ExportConfig,
	opts control.Options,
	dcs []data.RestoreCollection,
	errs *fault.Bus,
) ([]export.Collection, error) {
	complete := observe.MessageWithCompletion(ctx, "Preparing export")
	defer func() {
		complete <- struct{}{}
		close(complete)
	}()

	expCollections, err := ec.ProduceExportCollections(
		ctx,
		backupVersion,
		sel,
		exportCfg,
		opts,
		dcs,
		errs)
	if err != nil {
		return nil, clues.Wrap(err, "exporting collections")
	}

	return expCollections, nil
}

321
src/internal/operations/export_test.go
Normal file
@ -0,0 +1,321 @@
package operations

import (
	"archive/zip"
	"bytes"
	"context"
	"io"
	"strings"
	"testing"
	"time"

	"github.com/alcionai/clues"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/stretchr/testify/suite"

	"github.com/alcionai/corso/src/internal/archive"
	"github.com/alcionai/corso/src/internal/data"
	evmock "github.com/alcionai/corso/src/internal/events/mock"
	"github.com/alcionai/corso/src/internal/kopia"
	exchMock "github.com/alcionai/corso/src/internal/m365/exchange/mock"
	"github.com/alcionai/corso/src/internal/m365/mock"
	"github.com/alcionai/corso/src/internal/stats"
	"github.com/alcionai/corso/src/internal/tester"
	"github.com/alcionai/corso/src/pkg/account"
	"github.com/alcionai/corso/src/pkg/control"
	"github.com/alcionai/corso/src/pkg/export"
	"github.com/alcionai/corso/src/pkg/selectors"
	"github.com/alcionai/corso/src/pkg/store"
)

type ExportOpSuite struct {
	tester.Suite
}

func TestExportOpSuite(t *testing.T) {
	suite.Run(t, &ExportOpSuite{Suite: tester.NewUnitSuite(t)})
}

func (suite *ExportOpSuite) TestExportOperation_PersistResults() {
	var (
		kw        = &kopia.Wrapper{}
		sw        = &store.Wrapper{}
		ctrl      = &mock.Controller{}
		now       = time.Now()
		exportCfg = control.DefaultExportConfig()
	)

	table := []struct {
		expectStatus OpStatus
		expectErr    assert.ErrorAssertionFunc
		stats        exportStats
		fail         error
	}{
		{
			expectStatus: Completed,
			expectErr:    assert.NoError,
			stats: exportStats{
				resourceCount: 1,
				bytesRead: &stats.ByteCounter{
					NumBytes: 42,
				},
				cs: []data.RestoreCollection{
					data.NoFetchRestoreCollection{
						Collection: &exchMock.DataCollection{},
					},
				},
				ctrl: &data.CollectionStats{
					Objects:   1,
					Successes: 1,
				},
			},
		},
		{
			expectStatus: Failed,
			expectErr:    assert.Error,
			fail:         assert.AnError,
			stats: exportStats{
				bytesRead: &stats.ByteCounter{},
				ctrl:      &data.CollectionStats{},
			},
		},
		{
			expectStatus: NoData,
			expectErr:    assert.NoError,
			stats: exportStats{
				bytesRead: &stats.ByteCounter{},
				cs:        []data.RestoreCollection{},
				ctrl:      &data.CollectionStats{},
			},
		},
	}
	for _, test := range table {
		suite.Run(test.expectStatus.String(), func() {
			t := suite.T()

			ctx, flush := tester.NewContext(t)
			defer flush()

			op, err := NewExportOperation(
				ctx,
				control.DefaultOptions(),
				kw,
				sw,
				ctrl,
				account.Account{},
				"foo",
				selectors.Selector{DiscreteOwner: "test"},
				exportCfg,
				evmock.NewBus())
			require.NoError(t, err, clues.ToCore(err))

			op.Errors.Fail(test.fail)

			err = op.finalizeMetrics(ctx, now, &test.stats)
			test.expectErr(t, err, clues.ToCore(err))

			assert.Equal(t, test.expectStatus.String(), op.Status.String(), "status")
			assert.Equal(t, len(test.stats.cs), op.Results.ItemsRead, "items read")
			assert.Equal(t, test.stats.bytesRead.NumBytes, op.Results.BytesRead, "bytes read")
			assert.Equal(t, test.stats.resourceCount, op.Results.ResourceOwners, "resource owners")
			assert.Equal(t, now, op.Results.StartedAt, "started at")
			assert.Less(t, now, op.Results.CompletedAt, "completed at")
		})
	}
}

type expCol struct {
	base  string
	items []export.Item
}

func (ec expCol) BasePath() string { return ec.base }

func (ec expCol) Items(ctx context.Context) <-chan export.Item {
	ch := make(chan export.Item)

	go func() {
		defer close(ch)

		for _, item := range ec.items {
			ch <- item
		}
	}()

	return ch
}

// ReadSeekCloser implements io.ReadSeekCloser.
type ReadSeekCloser struct {
	*bytes.Reader
}

// NewReadSeekCloser creates a new ReadSeekCloser from a byte slice.
func NewReadSeekCloser(byts []byte) *ReadSeekCloser {
	return &ReadSeekCloser{
		Reader: bytes.NewReader(byts),
	}
}

// Close implements the io.Closer interface.
func (r *ReadSeekCloser) Close() error {
	// Nothing to close for a byte slice.
	return nil
}
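// Usage sketch (illustrative, not part of this diff): ReadSeekCloser lets a
// plain byte slice stand in for an export.Item body, which needs to be
// readable, seekable, and closable in the tests below.
//
//	body := NewReadSeekCloser([]byte("content"))
//	defer body.Close()
//
//	data, _ := io.ReadAll(body)        // read the full payload
//	_, _ = body.Seek(0, io.SeekStart)  // rewind for another pass over data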
func (suite *ExportOpSuite) TestZipExports() {
	table := []struct {
		name       string
		collection []export.Collection
		shouldErr  bool
		readErr    bool
	}{
		{
			name:       "nothing",
			collection: []export.Collection{},
			shouldErr:  true,
		},
		{
			name: "empty",
			collection: []export.Collection{
				expCol{
					base:  "",
					items: []export.Item{},
				},
			},
		},
		{
			name: "one item",
			collection: []export.Collection{
				expCol{
					base: "",
					items: []export.Item{
						{
							ID: "id1",
							Data: export.ItemData{
								Name: "test",
								Body: NewReadSeekCloser([]byte("test")),
							},
						},
					},
				},
			},
		},
		{
			name: "multiple items",
			collection: []export.Collection{
				expCol{
					base: "",
					items: []export.Item{
						{
							ID: "id1",
							Data: export.ItemData{
								Name: "test",
								Body: NewReadSeekCloser([]byte("test")),
							},
						},
					},
				},
				expCol{
					base: "/fold",
					items: []export.Item{
						{
							ID: "id2",
							Data: export.ItemData{
								Name: "test2",
								Body: NewReadSeekCloser([]byte("test2")),
							},
						},
					},
				},
			},
		},
		{
			name: "one item with err",
			collection: []export.Collection{
				expCol{
					base: "",
					items: []export.Item{
						{
							ID:    "id3",
							Error: assert.AnError,
						},
					},
				},
			},
			readErr: true,
		},
	}

	for _, test := range table {
		suite.Run(test.name, func() {
			t := suite.T()

			ctx, flush := tester.NewContext(t)
			defer flush()

			zc, err := archive.ZipExportCollection(ctx, test.collection)

			if test.shouldErr {
				assert.Error(t, err, "error")
				return
			}

			require.NoError(t, err, "error")
			assert.Empty(t, zc.BasePath(), "base path")

			zippedItems := []export.ItemData{}

			count := 0
			for item := range zc.Items(ctx) {
				assert.True(t, strings.HasPrefix(item.Data.Name, "Corso_Export_"), "name prefix")
				assert.True(t, strings.HasSuffix(item.Data.Name, ".zip"), "name suffix")

				data, err := io.ReadAll(item.Data.Body)
				if test.readErr {
					assert.Error(t, err, "read error")
					return
				}

				size := int64(len(data))

				item.Data.Body.Close()

				reader, err := zip.NewReader(bytes.NewReader(data), size)
				require.NoError(t, err, "zip reader")

				for _, f := range reader.File {
					rc, err := f.Open()
					assert.NoError(t, err, "open file in zip")

					data, err := io.ReadAll(rc)
					require.NoError(t, err, "read zip file content")

					rc.Close()

					zippedItems = append(zippedItems, export.ItemData{
						Name: f.Name,
						Body: NewReadSeekCloser([]byte(data)),
					})
				}

				count++
			}

			assert.Equal(t, 1, count, "single item")

			expectedZippedItems := []export.ItemData{}
			for _, col := range test.collection {
				for item := range col.Items(ctx) {
					if col.BasePath() != "" {
						item.Data.Name = strings.Join([]string{col.BasePath(), item.Data.Name}, "/")
					}
					_, err := item.Data.Body.(io.ReadSeeker).Seek(0, io.SeekStart)
					require.NoError(t, err, "seek")
					expectedZippedItems = append(expectedZippedItems, item.Data)
				}
			}
			assert.Equal(t, expectedZippedItems, zippedItems, "items")
		})
	}
}
@ -27,7 +27,7 @@ func ControllerWithSelector(
	ins idname.Cacher,
	onFail func(),
) (*m365.Controller, selectors.Selector) {
	ctrl, err := m365.NewController(ctx, acct, cr, sel.PathService(), control.Defaults())
	ctrl, err := m365.NewController(ctx, acct, cr, sel.PathService(), control.DefaultOptions())
	if !assert.NoError(t, err, clues.ToCore(err)) {
		if onFail != nil {
			onFail()
@ -36,7 +36,7 @@ func ControllerWithSelector(
		t.FailNow()
	}

	id, name, err := ctrl.PopulateOwnerIDAndNamesFrom(ctx, sel.DiscreteOwner, ins)
	id, name, err := ctrl.PopulateProtectedResourceIDAndName(ctx, sel.DiscreteOwner, ins)
	if !assert.NoError(t, err, clues.ToCore(err)) {
		if onFail != nil {
			onFail()

18
src/internal/operations/inject/containers.go
Normal file
@ -0,0 +1,18 @@
package inject

import (
	"github.com/alcionai/corso/src/internal/common/idname"
	"github.com/alcionai/corso/src/pkg/control"
	"github.com/alcionai/corso/src/pkg/selectors"
)

// RestoreConsumerConfig is a container-of-things for holding options and
// configurations from various packages, which are widely used by all
// restore consumers independent of service or data category.
type RestoreConsumerConfig struct {
	BackupVersion     int
	Options           control.Options
	ProtectedResource idname.Provider
	RestoreConfig     control.RestoreConfig
	Selector          selectors.Selector
}
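// Construction sketch (mirrors the wiring added in operations/restore.go;
// the variable names are illustrative):
//
//	rcc := inject.RestoreConsumerConfig{
//		BackupVersion:     backupVersion,
//		Options:           opts,
//		ProtectedResource: toProtectedResource,
//		RestoreConfig:     restoreCfg,
//		Selector:          sel,
//	}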
@ -12,6 +12,7 @@ import (
	"github.com/alcionai/corso/src/pkg/control"
	"github.com/alcionai/corso/src/pkg/control/repository"
	"github.com/alcionai/corso/src/pkg/count"
	"github.com/alcionai/corso/src/pkg/export"
	"github.com/alcionai/corso/src/pkg/fault"
	"github.com/alcionai/corso/src/pkg/path"
	"github.com/alcionai/corso/src/pkg/selectors"
@ -36,10 +37,7 @@ type (
	RestoreConsumer interface {
		ConsumeRestoreCollections(
			ctx context.Context,
			backupVersion int,
			selector selectors.Selector,
			restoreCfg control.RestoreConfig,
			opts control.Options,
			rcc RestoreConsumerConfig,
			dcs []data.RestoreCollection,
			errs *fault.Bus,
			ctr *count.Bus,
@ -48,6 +46,7 @@ type (
		Wait() *data.CollectionStats

		CacheItemInfoer
		PopulateProtectedResourceIDAndNamer
	}

	CacheItemInfoer interface {
@ -59,6 +58,41 @@ type (
		CacheItemInfo(v details.ItemInfo)
	}

	ExportConsumer interface {
		ProduceExportCollections(
			ctx context.Context,
			backupVersion int,
			selector selectors.Selector,
			exportCfg control.ExportConfig,
			opts control.Options,
			dcs []data.RestoreCollection,
			errs *fault.Bus,
		) ([]export.Collection, error)

		Wait() *data.CollectionStats

		CacheItemInfoer
	}

	PopulateProtectedResourceIDAndNamer interface {
		// PopulateProtectedResourceIDAndName takes the provided owner identifier and produces
		// the owner's name and ID from that value. Returns an error if the owner is
		// not recognized by the current tenant.
		//
		// The id-name cacher should be optional. Some processes will look up all owners in
		// the tenant before reaching this step. In that case, the data gets handed
		// down for this func to consume instead of performing further queries. The
		// data gets stored inside the controller instance for later re-use.
		PopulateProtectedResourceIDAndName(
			ctx context.Context,
			owner string, // input value, can be either id or name
			ins idname.Cacher,
		) (
			id, name string,
			err error,
		)
	}

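// Call sketch (hedged illustration; ctrl is any implementation, such as the
// m365 controller): a nil cacher forces a direct tenant lookup.
//
//	id, name, err := ctrl.PopulateProtectedResourceIDAndName(ctx, "user@example.com", nil)
//	if err != nil {
//		return clues.Wrap(err, "resolving protected resource")
//	}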
	RepoMaintenancer interface {
		RepoMaintenance(ctx context.Context, opts repository.Maintenance) error
	}

@ -54,7 +54,7 @@ func (suite *MaintenanceOpIntegrationSuite) TestRepoMaintenance() {

	mo, err := NewMaintenanceOperation(
		ctx,
		control.Defaults(),
		control.DefaultOptions(),
		kw,
		repository.Maintenance{
			Type: repository.MetadataMaintenance,

@ -26,7 +26,7 @@ func TestOperationSuite(t *testing.T) {

func (suite *OperationSuite) TestNewOperation() {
	t := suite.T()
	op := newOperation(control.Defaults(), events.Bus{}, &count.Bus{}, nil, nil)
	op := newOperation(control.DefaultOptions(), events.Bus{}, &count.Bus{}, nil, nil)
	assert.Greater(t, op.CreatedAt, time.Time{})
}

@ -46,7 +46,7 @@ func (suite *OperationSuite) TestOperation_Validate() {
	}
	for _, test := range table {
		suite.Run(test.name, func() {
			err := newOperation(control.Defaults(), events.Bus{}, &count.Bus{}, test.kw, test.sw).validate()
			err := newOperation(control.DefaultOptions(), events.Bus{}, &count.Bus{}, test.kw, test.sw).validate()
			test.errCheck(suite.T(), err, clues.ToCore(err))
		})
	}

@ -11,6 +11,7 @@ import (

	"github.com/alcionai/corso/src/internal/common/crash"
	"github.com/alcionai/corso/src/internal/common/dttm"
	"github.com/alcionai/corso/src/internal/common/idname"
	"github.com/alcionai/corso/src/internal/data"
	"github.com/alcionai/corso/src/internal/diagnostics"
	"github.com/alcionai/corso/src/internal/events"
@ -172,7 +173,7 @@ func (op *RestoreOperation) Run(ctx context.Context) (restoreDetails *details.De
		logger.CtxErr(ctx, err).Error("running restore")

		if errors.Is(err, kopia.ErrNoRestorePath) {
			op.Errors.Fail(clues.New("empty backup or unknown path provided"))
			op.Errors.Fail(clues.Wrap(err, "empty backup or unknown path provided"))
		}

		op.Errors.Fail(clues.Wrap(err, "running restore"))
@ -217,7 +218,19 @@ func (op *RestoreOperation) do(
		return nil, clues.Wrap(err, "getting backup and details")
	}

	observe.Message(ctx, "Restoring", observe.Bullet, clues.Hide(bup.Selector.DiscreteOwner))
	restoreToProtectedResource, err := chooseRestoreResource(ctx, op.rc, op.RestoreCfg, bup.Selector)
	if err != nil {
		return nil, clues.Wrap(err, "getting destination protected resource")
	}

	ctx = clues.Add(
		ctx,
		"backup_protected_resource_id", bup.Selector.ID(),
		"backup_protected_resource_name", clues.Hide(bup.Selector.Name()),
		"restore_protected_resource_id", restoreToProtectedResource.ID(),
		"restore_protected_resource_name", clues.Hide(restoreToProtectedResource.Name()))

	observe.Message(ctx, "Restoring", observe.Bullet, clues.Hide(restoreToProtectedResource.Name()))

	paths, err := formatDetailsForRestoration(
		ctx,
@ -232,8 +245,6 @@ func (op *RestoreOperation) do(

	ctx = clues.Add(
		ctx,
		"resource_owner_id", bup.Selector.ID(),
		"resource_owner_name", clues.Hide(bup.Selector.Name()),
		"details_entries", len(deets.Entries),
		"details_paths", len(paths),
		"backup_snapshot_id", bup.SnapshotID,
@ -254,7 +265,12 @@ func (op *RestoreOperation) do(
	kopiaComplete := observe.MessageWithCompletion(ctx, "Enumerating items in repository")
	defer close(kopiaComplete)

	dcs, err := op.kopia.ProduceRestoreCollections(ctx, bup.SnapshotID, paths, opStats.bytesRead, op.Errors)
	dcs, err := op.kopia.ProduceRestoreCollections(
		ctx,
		bup.SnapshotID,
		paths,
		opStats.bytesRead,
		op.Errors)
	if err != nil {
		return nil, clues.Wrap(err, "producing collections to restore")
	}
@ -271,6 +287,7 @@ func (op *RestoreOperation) do(
		ctx,
		op.rc,
		bup.Version,
		restoreToProtectedResource,
		op.Selectors,
		op.RestoreCfg,
		op.Options,
@ -321,6 +338,24 @@ func (op *RestoreOperation) persistResults(
	return op.Errors.Failure()
}

func chooseRestoreResource(
	ctx context.Context,
	pprian inject.PopulateProtectedResourceIDAndNamer,
	restoreCfg control.RestoreConfig,
	orig idname.Provider,
) (idname.Provider, error) {
	if len(restoreCfg.ProtectedResource) == 0 {
		return orig, nil
	}

	id, name, err := pprian.PopulateProtectedResourceIDAndName(
		ctx,
		restoreCfg.ProtectedResource,
		nil)

	return idname.NewProvider(id, name), clues.Stack(err).OrNil()
}
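// Behavior sketch (illustrative): with an empty restoreCfg.ProtectedResource
// the backup's original owner is reused; a populated value triggers an
// id/name lookup against the tenant instead.
//
//	dest, err := chooseRestoreResource(ctx, op.rc, op.RestoreCfg, bup.Selector)
//	// dest.ID() == bup.Selector.ID() when no override was provided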

// ---------------------------------------------------------------------------
// Restorer funcs
// ---------------------------------------------------------------------------
@ -329,6 +364,7 @@ func consumeRestoreCollections(
	ctx context.Context,
	rc inject.RestoreConsumer,
	backupVersion int,
	toProtectedResource idname.Provider,
	sel selectors.Selector,
	restoreCfg control.RestoreConfig,
	opts control.Options,
@ -342,15 +378,15 @@ func consumeRestoreCollections(
		close(complete)
	}()

	deets, err := rc.ConsumeRestoreCollections(
		ctx,
		backupVersion,
		sel,
		restoreCfg,
		opts,
		dcs,
		errs,
		ctr)
	rcc := inject.RestoreConsumerConfig{
		BackupVersion:     backupVersion,
		Options:           opts,
		ProtectedResource: toProtectedResource,
		RestoreConfig:     restoreCfg,
		Selector:          sel,
	}

	deets, err := rc.ConsumeRestoreCollections(ctx, rcc, dcs, errs, ctr)
	if err != nil {
		return nil, clues.Wrap(err, "restoring collections")
	}

@ -10,6 +10,8 @@ import (
	"github.com/stretchr/testify/require"
	"github.com/stretchr/testify/suite"

	"github.com/alcionai/corso/src/internal/common/dttm"
	"github.com/alcionai/corso/src/internal/common/idname"
	"github.com/alcionai/corso/src/internal/data"
	"github.com/alcionai/corso/src/internal/events"
	evmock "github.com/alcionai/corso/src/internal/events/mock"
@ -37,15 +39,15 @@ import (
// unit
// ---------------------------------------------------------------------------

type RestoreOpSuite struct {
type RestoreOpUnitSuite struct {
	tester.Suite
}

func TestRestoreOpSuite(t *testing.T) {
	suite.Run(t, &RestoreOpSuite{Suite: tester.NewUnitSuite(t)})
func TestRestoreOpUnitSuite(t *testing.T) {
	suite.Run(t, &RestoreOpUnitSuite{Suite: tester.NewUnitSuite(t)})
}

func (suite *RestoreOpSuite) TestRestoreOperation_PersistResults() {
func (suite *RestoreOpUnitSuite) TestRestoreOperation_PersistResults() {
	var (
		kw = &kopia.Wrapper{}
		sw = &store.Wrapper{}
@ -107,7 +109,7 @@ func (suite *RestoreOpSuite) TestRestoreOperation_PersistResults() {

	op, err := NewRestoreOperation(
		ctx,
		control.Defaults(),
		control.DefaultOptions(),
		kw,
		sw,
		ctrl,
@ -135,6 +137,75 @@ func (suite *RestoreOpSuite) TestRestoreOperation_PersistResults() {
	}
}

func (suite *RestoreOpUnitSuite) TestChooseRestoreResource() {
	var (
		id        = "id"
		name      = "name"
		cfgWithPR = control.DefaultRestoreConfig(dttm.HumanReadable)
	)

	cfgWithPR.ProtectedResource = "cfgid"

	table := []struct {
		name           string
		cfg            control.RestoreConfig
		ctrl           *mock.Controller
		orig           idname.Provider
		expectErr      assert.ErrorAssertionFunc
		expectProvider assert.ValueAssertionFunc
		expectID       string
		expectName     string
	}{
		{
			name: "use original",
			cfg:  control.DefaultRestoreConfig(dttm.HumanReadable),
			ctrl: &mock.Controller{
				ProtectedResourceID:   id,
				ProtectedResourceName: name,
			},
			orig:       idname.NewProvider("oid", "oname"),
			expectErr:  assert.NoError,
			expectID:   "oid",
			expectName: "oname",
		},
		{
			name: "look up resource with iface",
			cfg:  cfgWithPR,
			ctrl: &mock.Controller{
				ProtectedResourceID:   id,
				ProtectedResourceName: name,
			},
			orig:       idname.NewProvider("oid", "oname"),
			expectErr:  assert.NoError,
			expectID:   id,
			expectName: name,
		},
		{
			name: "error looking up protected resource",
			cfg:  cfgWithPR,
			ctrl: &mock.Controller{
				ProtectedResourceErr: assert.AnError,
			},
			orig:      idname.NewProvider("oid", "oname"),
			expectErr: assert.Error,
		},
	}
	for _, test := range table {
		suite.Run(test.name, func() {
			t := suite.T()

			ctx, flush := tester.NewContext(t)
			defer flush()

			result, err := chooseRestoreResource(ctx, test.ctrl, test.cfg, test.orig)
			test.expectErr(t, err, clues.ToCore(err))
			require.NotNil(t, result)
			assert.Equal(t, test.expectID, result.ID())
			assert.Equal(t, test.expectName, result.Name())
		})
	}
}

// ---------------------------------------------------------------------------
// integration
// ---------------------------------------------------------------------------
@ -216,7 +287,7 @@ func (suite *RestoreOpIntegrationSuite) TestNewRestoreOperation() {
		sw         = &store.Wrapper{}
		ctrl       = &mock.Controller{}
		restoreCfg = testdata.DefaultRestoreConfig("")
		opts       = control.Defaults()
		opts       = control.DefaultOptions()
	)

	table := []struct {
@ -275,12 +346,12 @@ func (suite *RestoreOpIntegrationSuite) TestRestore_Run_errorNoBackup() {
		suite.acct,
		resource.Users,
		rsel.PathService(),
		control.Defaults())
		control.DefaultOptions())
	require.NoError(t, err, clues.ToCore(err))

	ro, err := NewRestoreOperation(
		ctx,
		control.Defaults(),
		control.DefaultOptions(),
		suite.kw,
		suite.sw,
		ctrl,
@ -67,9 +67,9 @@ func (suite *ExchangeBackupIntgSuite) TestBackup_Run_exchange() {
		{
			name: "Mail",
			selector: func() *selectors.ExchangeBackup {
				sel := selectors.NewExchangeBackup([]string{suite.its.userID})
				sel := selectors.NewExchangeBackup([]string{suite.its.user.ID})
				sel.Include(sel.MailFolders([]string{api.MailInbox}, selectors.PrefixMatch()))
				sel.DiscreteOwner = suite.its.userID
				sel.DiscreteOwner = suite.its.user.ID

				return sel
			},
@ -79,7 +79,7 @@ func (suite *ExchangeBackupIntgSuite) TestBackup_Run_exchange() {
		{
			name: "Contacts",
			selector: func() *selectors.ExchangeBackup {
				sel := selectors.NewExchangeBackup([]string{suite.its.userID})
				sel := selectors.NewExchangeBackup([]string{suite.its.user.ID})
				sel.Include(sel.ContactFolders([]string{api.DefaultContacts}, selectors.PrefixMatch()))
				return sel
			},
@ -89,7 +89,7 @@ func (suite *ExchangeBackupIntgSuite) TestBackup_Run_exchange() {
		{
			name: "Calendar Events",
			selector: func() *selectors.ExchangeBackup {
				sel := selectors.NewExchangeBackup([]string{suite.its.userID})
				sel := selectors.NewExchangeBackup([]string{suite.its.user.ID})
				sel.Include(sel.EventCalendars([]string{api.DefaultCalendar}, selectors.PrefixMatch()))
				return sel
			},
@ -107,7 +107,7 @@ func (suite *ExchangeBackupIntgSuite) TestBackup_Run_exchange() {
			var (
				mb      = evmock.NewBus()
				sel     = test.selector().Selector
				opts    = control.Defaults()
				opts    = control.DefaultOptions()
				whatSet = deeTD.CategoryFromRepoRef
			)

@ -258,9 +258,9 @@ func testExchangeContinuousBackups(suite *ExchangeBackupIntgSuite, toggles contr
		// later on during the tests. Putting their identifiers into the selector
		// at this point is harmless.
		containers = []string{container1, container2, container3, containerRename}
		sel        = selectors.NewExchangeBackup([]string{suite.its.userID})
		sel        = selectors.NewExchangeBackup([]string{suite.its.user.ID})
		whatSet    = deeTD.CategoryFromRepoRef
		opts       = control.Defaults()
		opts       = control.DefaultOptions()
	)

	opts.ToggleFeatures = toggles
@ -278,7 +278,7 @@ func testExchangeContinuousBackups(suite *ExchangeBackupIntgSuite, toggles contr
	creds, err := acct.M365Config()
	require.NoError(t, err, clues.ToCore(err))

	ac, err := api.NewClient(creds, control.Defaults())
	ac, err := api.NewClient(creds, control.DefaultOptions())
	require.NoError(t, err, clues.ToCore(err))

	// generate 3 new folders with two items each.
@ -295,7 +295,7 @@ func testExchangeContinuousBackups(suite *ExchangeBackupIntgSuite, toggles contr

	mailDBF := func(id, timeStamp, subject, body string) []byte {
		return exchMock.MessageWith(
			suite.its.userID, suite.its.userID, suite.its.userID,
			suite.its.user.ID, suite.its.user.ID, suite.its.user.ID,
			subject, body, body,
			now, now, now, now)
	}
@ -312,7 +312,7 @@ func testExchangeContinuousBackups(suite *ExchangeBackupIntgSuite, toggles contr

	eventDBF := func(id, timeStamp, subject, body string) []byte {
		return exchMock.EventWith(
			suite.its.userID, subject, body, body,
			suite.its.user.ID, subject, body, body,
			exchMock.NoOriginalStartDate, now, now,
			exchMock.NoRecurrence, exchMock.NoAttendees,
			exchMock.NoAttachments, exchMock.NoCancelledOccurrences,
@ -578,7 +578,7 @@ func testExchangeContinuousBackups(suite *ExchangeBackupIntgSuite, toggles contr
		service,
		category,
		selectors.NewExchangeRestore([]string{uidn.ID()}).Selector,
		creds.AzureTenantID, suite.its.userID, "", container3,
		creds.AzureTenantID, suite.its.user.ID, "", container3,
		2,
		version.Backup,
		gen.dbf)
@ -897,7 +897,7 @@ func (suite *ExchangeRestoreIntgSuite) TestRestore_Run_exchangeWithAdvancedOptio

	// a backup is required to run restores

	baseSel := selectors.NewExchangeBackup([]string{suite.its.userID})
	baseSel := selectors.NewExchangeBackup([]string{suite.its.user.ID})
	baseSel.Include(
		// events cannot be run, for the same reason as incremental backups: the user needs
		// to have their account recycled.
@ -905,11 +905,11 @@ func (suite *ExchangeRestoreIntgSuite) TestRestore_Run_exchangeWithAdvancedOptio
		baseSel.ContactFolders([]string{api.DefaultContacts}, selectors.PrefixMatch()),
		baseSel.MailFolders([]string{api.MailInbox}, selectors.PrefixMatch()))

	baseSel.DiscreteOwner = suite.its.userID
	baseSel.DiscreteOwner = suite.its.user.ID

	var (
		mb   = evmock.NewBus()
		opts = control.Defaults()
		opts = control.DefaultOptions()
	)

	bo, bod := prepNewTestBackupOp(t, ctx, mb, baseSel.Selector, opts, version.Backup)
@ -1272,3 +1272,216 @@ func (suite *ExchangeRestoreIntgSuite) TestRestore_Run_exchangeWithAdvancedOptio
		assert.Len(t, result, 0, "no items should have been added as copies")
	})
}

func (suite *ExchangeRestoreIntgSuite) TestRestore_Run_exchangeAlternateProtectedResource() {
	t := suite.T()

	ctx, flush := tester.NewContext(t)
	defer flush()

	// a backup is required to run restores

	baseSel := selectors.NewExchangeBackup([]string{suite.its.user.ID})
	baseSel.Include(
		// events cannot be run, for the same reason as incremental backups: the user needs
		// to have their account recycled.
		// baseSel.EventCalendars([]string{api.DefaultCalendar}, selectors.PrefixMatch()),
		baseSel.ContactFolders([]string{api.DefaultContacts}, selectors.PrefixMatch()),
		baseSel.MailFolders([]string{api.MailInbox}, selectors.PrefixMatch()))

	baseSel.DiscreteOwner = suite.its.user.ID

	var (
		mb   = evmock.NewBus()
		opts = control.DefaultOptions()
	)

	bo, bod := prepNewTestBackupOp(t, ctx, mb, baseSel.Selector, opts, version.Backup)
	defer bod.close(t, ctx)

	runAndCheckBackup(t, ctx, &bo, mb, false)

	rsel, err := baseSel.ToExchangeRestore()
	require.NoError(t, err, clues.ToCore(err))

	var (
		restoreCfg      = ctrlTD.DefaultRestoreConfig("exchange_restore_to_user")
		sel             = rsel.Selector
		userID          = suite.its.user.ID
		secondaryUserID = suite.its.secondaryUser.ID
		uid             = userID
		acCont          = suite.its.ac.Contacts()
		acMail          = suite.its.ac.Mail()
		// acEvts = suite.its.ac.Events()
		firstCtr = count.New()
	)

	restoreCfg.OnCollision = control.Copy
	mb = evmock.NewBus()

	// first restore to the current user

	ro1, _ := prepNewTestRestoreOp(
		t,
		ctx,
		bod.st,
		bo.Results.BackupID,
		mb,
		firstCtr,
		sel,
		opts,
		restoreCfg)

	runAndCheckRestore(t, ctx, &ro1, mb, false)

	// get all files in folder, use these as the base
	// set of files to compare against.

	var (
		userItemIDs       = map[path.CategoryType]map[string]struct{}{}
		userCollisionKeys = map[path.CategoryType]map[string]string{}
	)

	// --- contacts
	cat := path.ContactsCategory
	userItemIDs[cat], userCollisionKeys[cat] = getCollKeysAndItemIDs(
		t,
		ctx,
		acCont,
		uid,
		"",
		restoreCfg.Location)

	// --- events
	// cat = path.EventsCategory
	// userItemIDs[cat], userCollisionKeys[cat] = getCollKeysAndItemIDs(
	// 	t,
	// 	ctx,
	// 	acEvts,
	// 	uid,
	// 	"",
	// 	restoreCfg.Location)

	// --- mail
	cat = path.EmailCategory
	userItemIDs[cat], userCollisionKeys[cat] = getCollKeysAndItemIDs(
		t,
		ctx,
		acMail,
		uid,
		"",
		restoreCfg.Location,
		api.MailInbox)

	// then restore to the secondary user

	uid = secondaryUserID
	mb = evmock.NewBus()
	secondCtr := count.New()
	restoreCfg.ProtectedResource = uid

	ro2, _ := prepNewTestRestoreOp(
		t,
		ctx,
		bod.st,
		bo.Results.BackupID,
		mb,
		secondCtr,
		sel,
		opts,
		restoreCfg)

	runAndCheckRestore(t, ctx, &ro2, mb, false)

	var (
		secondaryItemIDs       = map[path.CategoryType]map[string]struct{}{}
		secondaryCollisionKeys = map[path.CategoryType]map[string]string{}
	)

	// --- contacts
	cat = path.ContactsCategory
	secondaryItemIDs[cat], secondaryCollisionKeys[cat] = getCollKeysAndItemIDs(
		t,
		ctx,
		acCont,
		uid,
		"",
		restoreCfg.Location)

	// --- events
	// cat = path.EventsCategory
	// secondaryItemIDs[cat], secondaryCollisionKeys[cat] = getCollKeysAndItemIDs(
	// 	t,
	// 	ctx,
	// 	acEvts,
	// 	uid,
	// 	"",
	// 	restoreCfg.Location)

	// --- mail
	cat = path.EmailCategory
	secondaryItemIDs[cat], secondaryCollisionKeys[cat] = getCollKeysAndItemIDs(
		t,
		ctx,
		acMail,
		uid,
		"",
		restoreCfg.Location,
		api.MailInbox)

	// compare restore results
	for _, cat := range []path.CategoryType{path.ContactsCategory, path.EmailCategory, path.EventsCategory} {
		assert.Equal(t, len(userItemIDs[cat]), len(secondaryItemIDs[cat]))
		assert.ElementsMatch(t, maps.Keys(userCollisionKeys[cat]), maps.Keys(secondaryCollisionKeys[cat]))
	}
}

type GetItemsKeysAndContainerByNameer interface {
	GetItemIDsInContainer(
		ctx context.Context,
		userID, containerID string,
	) (map[string]struct{}, error)
	GetContainerByName(
		ctx context.Context,
		userID, parentContainerID, containerName string,
	) (graph.Container, error)
	GetItemsInContainerByCollisionKey(
		ctx context.Context,
		userID, containerID string,
	) (map[string]string, error)
}

func getCollKeysAndItemIDs(
	t *testing.T,
	ctx context.Context, //revive:disable-line:context-as-argument
	gikacbn GetItemsKeysAndContainerByNameer,
	userID, parentContainerID string,
	containerNames ...string,
) (map[string]struct{}, map[string]string) {
	var (
		c   graph.Container
		err error
		cID string
	)

	for _, cn := range containerNames {
		pcid := parentContainerID

		if len(cID) != 0 {
			pcid = cID
		}

		c, err = gikacbn.GetContainerByName(ctx, userID, pcid, cn)
		require.NoError(t, err, clues.ToCore(err))

		cID = ptr.Val(c.GetId())
	}

	itemIDs, err := gikacbn.GetItemIDsInContainer(ctx, userID, cID)
	require.NoError(t, err, clues.ToCore(err))

	collisionKeys, err := gikacbn.GetItemsInContainerByCollisionKey(ctx, userID, cID)
	require.NoError(t, err, clues.ToCore(err))

	return itemIDs, collisionKeys
}

@ -25,6 +25,7 @@ import (
	"github.com/alcionai/corso/src/internal/m365/resource"
	"github.com/alcionai/corso/src/internal/model"
	"github.com/alcionai/corso/src/internal/operations"
	"github.com/alcionai/corso/src/internal/operations/inject"
	"github.com/alcionai/corso/src/internal/streamstore"
	"github.com/alcionai/corso/src/internal/tester"
	"github.com/alcionai/corso/src/internal/tester/tconfig"
@ -400,6 +401,7 @@ func generateContainerOfItems(

	restoreCfg := control.DefaultRestoreConfig(dttm.SafeForTesting)
	restoreCfg.Location = destFldr
	restoreCfg.IncludePermissions = true

	dataColls := buildCollections(
		t,
@ -408,15 +410,19 @@ func generateContainerOfItems(
		restoreCfg,
		collections)

	opts := control.Defaults()
	opts.RestorePermissions = true
	opts := control.DefaultOptions()

	rcc := inject.RestoreConsumerConfig{
		BackupVersion:     backupVersion,
		Options:           opts,
		ProtectedResource: sel,
		RestoreConfig:     restoreCfg,
		Selector:          sel,
	}

	deets, err := ctrl.ConsumeRestoreCollections(
		ctx,
		backupVersion,
		sel,
		restoreCfg,
		opts,
		rcc,
		dataColls,
		fault.New(true),
		count.New())
@ -535,7 +541,7 @@ func ControllerWithSelector(
	ins idname.Cacher,
	onFail func(*testing.T, context.Context),
) (*m365.Controller, selectors.Selector) {
	ctrl, err := m365.NewController(ctx, acct, cr, sel.PathService(), control.Defaults())
	ctrl, err := m365.NewController(ctx, acct, cr, sel.PathService(), control.DefaultOptions())
	if !assert.NoError(t, err, clues.ToCore(err)) {
		if onFail != nil {
			onFail(t, ctx)
@ -544,7 +550,7 @@ func ControllerWithSelector(
		t.FailNow()
	}

	id, name, err := ctrl.PopulateOwnerIDAndNamesFrom(ctx, sel.DiscreteOwner, ins)
	id, name, err := ctrl.PopulateProtectedResourceIDAndName(ctx, sel.DiscreteOwner, ins)
	if !assert.NoError(t, err, clues.ToCore(err)) {
		if onFail != nil {
			onFail(t, ctx)
@ -562,15 +568,19 @@ func ControllerWithSelector(
// Suite Setup
// ---------------------------------------------------------------------------

type ids struct {
	ID                string
	DriveID           string
	DriveRootFolderID string
}

type intgTesterSetup struct {
	ac                    api.Client
	gockAC                api.Client
	userID                string
	userDriveID           string
	userDriveRootFolderID string
	siteID                string
	siteDriveID           string
	siteDriveRootFolderID string
	ac            api.Client
	gockAC        api.Client
	user          ids
	secondaryUser ids
	site          ids
	secondarySite ids
}

func newIntegrationTesterSetup(t *testing.T) intgTesterSetup {
@ -585,43 +595,58 @@ func newIntegrationTesterSetup(t *testing.T) intgTesterSetup {
	creds, err := a.M365Config()
	require.NoError(t, err, clues.ToCore(err))

	its.ac, err = api.NewClient(creds, control.Defaults())
	its.ac, err = api.NewClient(creds, control.DefaultOptions())
	require.NoError(t, err, clues.ToCore(err))

	its.gockAC, err = mock.NewClient(creds)
	require.NoError(t, err, clues.ToCore(err))

	// user drive

	its.userID = tconfig.M365UserID(t)

	userDrive, err := its.ac.Users().GetDefaultDrive(ctx, its.userID)
	require.NoError(t, err, clues.ToCore(err))

	its.userDriveID = ptr.Val(userDrive.GetId())

	userDriveRootFolder, err := its.ac.Drives().GetRootFolder(ctx, its.userDriveID)
	require.NoError(t, err, clues.ToCore(err))

	its.userDriveRootFolderID = ptr.Val(userDriveRootFolder.GetId())

	its.siteID = tconfig.M365SiteID(t)

	// site

	siteDrive, err := its.ac.Sites().GetDefaultDrive(ctx, its.siteID)
	require.NoError(t, err, clues.ToCore(err))

	its.siteDriveID = ptr.Val(siteDrive.GetId())

	siteDriveRootFolder, err := its.ac.Drives().GetRootFolder(ctx, its.siteDriveID)
	require.NoError(t, err, clues.ToCore(err))

	its.siteDriveRootFolderID = ptr.Val(siteDriveRootFolder.GetId())
	its.user = userIDs(t, tconfig.M365UserID(t), its.ac)
	its.secondaryUser = userIDs(t, tconfig.SecondaryM365UserID(t), its.ac)
	its.site = siteIDs(t, tconfig.M365SiteID(t), its.ac)
	its.secondarySite = siteIDs(t, tconfig.SecondaryM365SiteID(t), its.ac)

	return its
}

func userIDs(t *testing.T, id string, ac api.Client) ids {
	ctx, flush := tester.NewContext(t)
	defer flush()

	r := ids{ID: id}

	drive, err := ac.Users().GetDefaultDrive(ctx, id)
	require.NoError(t, err, clues.ToCore(err))

	r.DriveID = ptr.Val(drive.GetId())

	driveRootFolder, err := ac.Drives().GetRootFolder(ctx, r.DriveID)
	require.NoError(t, err, clues.ToCore(err))

	r.DriveRootFolderID = ptr.Val(driveRootFolder.GetId())

	return r
}

func siteIDs(t *testing.T, id string, ac api.Client) ids {
	ctx, flush := tester.NewContext(t)
	defer flush()

	r := ids{ID: id}

	drive, err := ac.Sites().GetDefaultDrive(ctx, id)
	require.NoError(t, err, clues.ToCore(err))

	r.DriveID = ptr.Val(drive.GetId())

	driveRootFolder, err := ac.Drives().GetRootFolder(ctx, r.DriveID)
	require.NoError(t, err, clues.ToCore(err))

	r.DriveRootFolderID = ptr.Val(driveRootFolder.GetId())

	return r
}

func getTestExtensionFactories() []extensions.CreateItemExtensioner {
	return []extensions.CreateItemExtensioner{
		&extensions.MockItemExtensionFactory{},

@ -72,7 +72,7 @@ func (suite *OneDriveBackupIntgSuite) TestBackup_Run_oneDrive() {
		osel = selectors.NewOneDriveBackup([]string{userID})
		ws   = deeTD.DriveIDFromRepoRef
		svc  = path.OneDriveService
		opts = control.Defaults()
		opts = control.DefaultOptions()
	)

	osel.Include(selTD.OneDriveBackupFolderScope(osel))
@ -106,7 +106,7 @@ func (suite *OneDriveBackupIntgSuite) TestBackup_Run_oneDrive() {
}

func (suite *OneDriveBackupIntgSuite) TestBackup_Run_incrementalOneDrive() {
	sel := selectors.NewOneDriveRestore([]string{suite.its.userID})
	sel := selectors.NewOneDriveRestore([]string{suite.its.user.ID})

	ic := func(cs []string) selectors.Selector {
		sel.Include(sel.Folders(cs, selectors.PrefixMatch()))
@ -117,10 +117,10 @@ func (suite *OneDriveBackupIntgSuite) TestBackup_Run_incrementalOneDrive() {
		t *testing.T,
		ctx context.Context,
	) string {
		d, err := suite.its.ac.Users().GetDefaultDrive(ctx, suite.its.userID)
		d, err := suite.its.ac.Users().GetDefaultDrive(ctx, suite.its.user.ID)
		if err != nil {
			err = graph.Wrap(ctx, err, "retrieving default user drive").
				With("user", suite.its.userID)
				With("user", suite.its.user.ID)
		}

		require.NoError(t, err, clues.ToCore(err))
@ -137,8 +137,8 @@ func (suite *OneDriveBackupIntgSuite) TestBackup_Run_incrementalOneDrive() {

	runDriveIncrementalTest(
		suite,
		suite.its.userID,
		suite.its.userID,
		suite.its.user.ID,
		suite.its.user.ID,
		resource.Users,
		path.OneDriveService,
		path.FilesCategory,
@ -166,7 +166,7 @@ func runDriveIncrementalTest(

	var (
		acct = tconfig.NewM365Account(t)
		opts = control.Defaults()
		opts = control.DefaultOptions()
		mb   = evmock.NewBus()
		ws   = deeTD.DriveIDFromRepoRef

@ -683,7 +683,7 @@ func runDriveIncrementalTest(
	}
	for _, test := range table {
		suite.Run(test.name, func() {
			cleanCtrl, err := m365.NewController(ctx, acct, rc, sel.PathService(), control.Defaults())
			cleanCtrl, err := m365.NewController(ctx, acct, rc, sel.PathService(), control.DefaultOptions())
			require.NoError(t, err, clues.ToCore(err))

			bod.ctrl = cleanCtrl
@ -785,7 +785,7 @@ func (suite *OneDriveBackupIntgSuite) TestBackup_Run_oneDriveOwnerMigration() {

	var (
		acct = tconfig.NewM365Account(t)
		opts = control.Defaults()
		opts = control.DefaultOptions()
		mb   = evmock.NewBus()

		categories = map[path.CategoryType][]string{
@ -801,10 +801,10 @@ func (suite *OneDriveBackupIntgSuite) TestBackup_Run_oneDriveOwnerMigration() {
		acct,
		resource.Users,
		path.OneDriveService,
		control.Defaults())
		control.DefaultOptions())
	require.NoError(t, err, clues.ToCore(err))

	userable, err := ctrl.AC.Users().GetByID(ctx, suite.its.userID)
	userable, err := ctrl.AC.Users().GetByID(ctx, suite.its.user.ID)
	require.NoError(t, err, clues.ToCore(err))

	uid := ptr.Val(userable.GetId())
@ -922,7 +922,7 @@ func (suite *OneDriveBackupIntgSuite) TestBackup_Run_oneDriveExtensions() {
		osel = selectors.NewOneDriveBackup([]string{userID})
		ws   = deeTD.DriveIDFromRepoRef
		svc  = path.OneDriveService
		opts = control.Defaults()
		opts = control.DefaultOptions()
	)

	opts.ItemExtensionFactory = getTestExtensionFactories()
@ -982,17 +982,17 @@ func (suite *OneDriveRestoreIntgSuite) SetupSuite() {
}

func (suite *OneDriveRestoreIntgSuite) TestRestore_Run_onedriveWithAdvancedOptions() {
	sel := selectors.NewOneDriveBackup([]string{suite.its.userID})
	sel := selectors.NewOneDriveBackup([]string{suite.its.user.ID})
	sel.Include(selTD.OneDriveBackupFolderScope(sel))
	sel.DiscreteOwner = suite.its.userID
	sel.DiscreteOwner = suite.its.user.ID

	runDriveRestoreWithAdvancedOptions(
		suite.T(),
		suite,
		suite.its.ac,
		sel.Selector,
		suite.its.userDriveID,
		suite.its.userDriveRootFolderID)
		suite.its.user.DriveID,
		suite.its.user.DriveRootFolderID)
}

func runDriveRestoreWithAdvancedOptions(
@ -1009,7 +1009,7 @@ func runDriveRestoreWithAdvancedOptions(

	var (
		mb   = evmock.NewBus()
		opts = control.Defaults()
		opts = control.DefaultOptions()
	)

	bo, bod := prepNewTestBackupOp(t, ctx, mb, sel, opts, version.Backup)
@ -1250,3 +1250,173 @@ func runDriveRestoreWithAdvancedOptions(
		assert.Subset(t, maps.Keys(currentFileIDs), maps.Keys(fileIDs), "original item should exist after copy")
	})
}

func (suite *OneDriveRestoreIntgSuite) TestRestore_Run_onedriveAlternateProtectedResource() {
|
||||
sel := selectors.NewOneDriveBackup([]string{suite.its.user.ID})
|
||||
sel.Include(selTD.OneDriveBackupFolderScope(sel))
|
||||
sel.DiscreteOwner = suite.its.user.ID
|
||||
|
||||
runDriveRestoreToAlternateProtectedResource(
|
||||
suite.T(),
|
||||
suite,
|
||||
suite.its.ac,
|
||||
sel.Selector,
|
||||
suite.its.user,
|
||||
suite.its.secondaryUser)
|
||||
}
|
||||
|
||||
func runDriveRestoreToAlternateProtectedResource(
    t *testing.T,
    suite tester.Suite,
    ac api.Client,
    sel selectors.Selector, // the owner should match 'from'; both Restore and Backup selector types work.
    from, to ids,
) {
    ctx, flush := tester.NewContext(t)
    defer flush()

    // a backup is required to run restores

    var (
        mb   = evmock.NewBus()
        opts = control.DefaultOptions()
    )

    bo, bod := prepNewTestBackupOp(t, ctx, mb, sel, opts, version.Backup)
    defer bod.close(t, ctx)

    runAndCheckBackup(t, ctx, &bo, mb, false)

    var (
        restoreCfg        = ctrlTD.DefaultRestoreConfig("drive_restore_to_resource")
        fromCollisionKeys map[string]api.DriveItemIDType
        fromItemIDs       map[string]api.DriveItemIDType
        acd               = ac.Drives()
    )

    // first restore to the 'from' resource

    suite.Run("restore original resource", func() {
        mb = evmock.NewBus()
        fromCtr := count.New()
        driveID := from.DriveID
        rootFolderID := from.DriveRootFolderID
        restoreCfg.OnCollision = control.Copy

        ro, _ := prepNewTestRestoreOp(
            t,
            ctx,
            bod.st,
            bo.Results.BackupID,
            mb,
            fromCtr,
            sel,
            opts,
            restoreCfg)

        runAndCheckRestore(t, ctx, &ro, mb, false)

        // get all files in folder, use these as the base
        // set of files to compare against.
        fromItemIDs, fromCollisionKeys = getDriveCollKeysAndItemIDs(
            t,
            ctx,
            acd,
            driveID,
            rootFolderID,
            restoreCfg.Location,
            selTD.TestFolderName)
    })

    // then restore to the 'to' resource
    var (
        toCollisionKeys map[string]api.DriveItemIDType
        toItemIDs       map[string]api.DriveItemIDType
    )

    suite.Run("restore to alternate resource", func() {
        mb = evmock.NewBus()
        toCtr := count.New()
        driveID := to.DriveID
        rootFolderID := to.DriveRootFolderID
        restoreCfg.ProtectedResource = to.ID

        ro, _ := prepNewTestRestoreOp(
            t,
            ctx,
            bod.st,
            bo.Results.BackupID,
            mb,
            toCtr,
            sel,
            opts,
            restoreCfg)

        runAndCheckRestore(t, ctx, &ro, mb, false)

        // get all files in folder, use these as the base
        // set of files to compare against.
        toItemIDs, toCollisionKeys = getDriveCollKeysAndItemIDs(
            t,
            ctx,
            acd,
            driveID,
            rootFolderID,
            restoreCfg.Location,
            selTD.TestFolderName)
    })

    // compare restore results
    assert.Equal(t, len(fromItemIDs), len(toItemIDs))
    assert.ElementsMatch(t, maps.Keys(fromCollisionKeys), maps.Keys(toCollisionKeys))
}

type GetItemsKeysAndFolderByNameer interface {
    GetItemIDsInContainer(
        ctx context.Context,
        driveID, containerID string,
    ) (map[string]api.DriveItemIDType, error)
    GetFolderByName(
        ctx context.Context,
        driveID, parentFolderID, folderName string,
    ) (models.DriveItemable, error)
    GetItemsInContainerByCollisionKey(
        ctx context.Context,
        driveID, containerID string,
    ) (map[string]api.DriveItemIDType, error)
}

func getDriveCollKeysAndItemIDs(
    t *testing.T,
    ctx context.Context, //revive:disable-line:context-as-argument
    gikafbn GetItemsKeysAndFolderByNameer,
    driveID, parentContainerID string,
    containerNames ...string,
) (map[string]api.DriveItemIDType, map[string]api.DriveItemIDType) {
    var (
        c   models.DriveItemable
        err error
        cID string
    )

    for _, cn := range containerNames {
        pcid := parentContainerID

        if len(cID) != 0 {
            pcid = cID
        }

        c, err = gikafbn.GetFolderByName(ctx, driveID, pcid, cn)
        require.NoError(t, err, clues.ToCore(err))

        cID = ptr.Val(c.GetId())
    }

    itemIDs, err := gikafbn.GetItemIDsInContainer(ctx, driveID, cID)
    require.NoError(t, err, clues.ToCore(err))

    collisionKeys, err := gikafbn.GetItemsInContainerByCollisionKey(ctx, driveID, cID)
    require.NoError(t, err, clues.ToCore(err))

    return itemIDs, collisionKeys
}

@ -49,7 +49,7 @@ func (suite *SharePointBackupIntgSuite) SetupSuite() {
}

func (suite *SharePointBackupIntgSuite) TestBackup_Run_incrementalSharePoint() {
    sel := selectors.NewSharePointRestore([]string{suite.its.siteID})
    sel := selectors.NewSharePointRestore([]string{suite.its.site.ID})

    ic := func(cs []string) selectors.Selector {
        sel.Include(sel.LibraryFolders(cs, selectors.PrefixMatch()))
@ -60,10 +60,10 @@ func (suite *SharePointBackupIntgSuite) TestBackup_Run_incrementalSharePoint() {
        t *testing.T,
        ctx context.Context,
    ) string {
        d, err := suite.its.ac.Sites().GetDefaultDrive(ctx, suite.its.siteID)
        d, err := suite.its.ac.Sites().GetDefaultDrive(ctx, suite.its.site.ID)
        if err != nil {
            err = graph.Wrap(ctx, err, "retrieving default site drive").
                With("site", suite.its.siteID)
                With("site", suite.its.site.ID)
        }

        require.NoError(t, err, clues.ToCore(err))
@ -80,8 +80,8 @@ func (suite *SharePointBackupIntgSuite) TestBackup_Run_incrementalSharePoint() {

    runDriveIncrementalTest(
        suite,
        suite.its.siteID,
        suite.its.userID,
        suite.its.site.ID,
        suite.its.user.ID,
        resource.Sites,
        path.SharePointService,
        path.LibrariesCategory,
@ -91,7 +91,7 @@ func (suite *SharePointBackupIntgSuite) TestBackup_Run_incrementalSharePoint() {
        true)
}

func (suite *SharePointBackupIntgSuite) TestBackup_Run_sharePoint() {
func (suite *SharePointBackupIntgSuite) TestBackup_Run_sharePointBasic() {
    t := suite.T()

    ctx, flush := tester.NewContext(t)
@ -99,8 +99,8 @@ func (suite *SharePointBackupIntgSuite) TestBackup_Run_sharePoint() {

    var (
        mb   = evmock.NewBus()
        sel  = selectors.NewSharePointBackup([]string{suite.its.siteID})
        opts = control.Defaults()
        sel  = selectors.NewSharePointBackup([]string{suite.its.site.ID})
        opts = control.DefaultOptions()
    )

    sel.Include(selTD.SharePointBackupFolderScope(sel))
@ -116,7 +116,7 @@ func (suite *SharePointBackupIntgSuite) TestBackup_Run_sharePoint() {
        bod.sw,
        &bo,
        bod.sel,
        suite.its.siteID,
        bod.sel.ID(),
        path.LibrariesCategory)
}

@ -128,8 +128,8 @@ func (suite *SharePointBackupIntgSuite) TestBackup_Run_sharePointExtensions() {

    var (
        mb    = evmock.NewBus()
        sel   = selectors.NewSharePointBackup([]string{suite.its.siteID})
        opts  = control.Defaults()
        sel   = selectors.NewSharePointBackup([]string{suite.its.site.ID})
        opts  = control.DefaultOptions()
        tenID = tconfig.M365TenantID(t)
        svc   = path.SharePointService
        ws    = deeTD.DriveIDFromRepoRef
@ -150,7 +150,7 @@ func (suite *SharePointBackupIntgSuite) TestBackup_Run_sharePointExtensions() {
        bod.sw,
        &bo,
        bod.sel,
        suite.its.siteID,
        bod.sel.ID(),
        path.LibrariesCategory)

    bID := bo.Results.BackupID
@ -201,18 +201,33 @@ func (suite *SharePointRestoreIntgSuite) SetupSuite() {
}

func (suite *SharePointRestoreIntgSuite) TestRestore_Run_sharepointWithAdvancedOptions() {
    sel := selectors.NewSharePointBackup([]string{suite.its.siteID})
    sel := selectors.NewSharePointBackup([]string{suite.its.site.ID})
    sel.Include(selTD.SharePointBackupFolderScope(sel))
    sel.Filter(sel.Library("documents"))
    sel.DiscreteOwner = suite.its.siteID
    sel.DiscreteOwner = suite.its.site.ID

    runDriveRestoreWithAdvancedOptions(
        suite.T(),
        suite,
        suite.its.ac,
        sel.Selector,
        suite.its.siteDriveID,
        suite.its.siteDriveRootFolderID)
        suite.its.site.DriveID,
        suite.its.site.DriveRootFolderID)
}

func (suite *SharePointRestoreIntgSuite) TestRestore_Run_sharepointAlternateProtectedResource() {
    sel := selectors.NewSharePointBackup([]string{suite.its.site.ID})
    sel.Include(selTD.SharePointBackupFolderScope(sel))
    sel.Filter(sel.Library("documents"))
    sel.DiscreteOwner = suite.its.site.ID

    runDriveRestoreToAlternateProtectedResource(
        suite.T(),
        suite,
        suite.its.ac,
        sel.Selector,
        suite.its.site,
        suite.its.secondarySite)
}

func (suite *SharePointRestoreIntgSuite) TestRestore_Run_sharepointDeletedDrives() {
@ -229,7 +244,7 @@ func (suite *SharePointRestoreIntgSuite) TestRestore_Run_sharepointDeletedDrives
    rc.OnCollision = control.Copy

    // create a new drive
    md, err := suite.its.ac.Lists().PostDrive(ctx, suite.its.siteID, rc.Location)
    md, err := suite.its.ac.Lists().PostDrive(ctx, suite.its.site.ID, rc.Location)
    require.NoError(t, err, clues.ToCore(err))

    driveID := ptr.Val(md.GetId())
@ -260,14 +275,14 @@ func (suite *SharePointRestoreIntgSuite) TestRestore_Run_sharepointDeletedDrives
    // run a backup
    var (
        mb          = evmock.NewBus()
        opts        = control.Defaults()
        opts        = control.DefaultOptions()
        graphClient = suite.its.ac.Stable.Client()
    )

    bsel := selectors.NewSharePointBackup([]string{suite.its.siteID})
    bsel := selectors.NewSharePointBackup([]string{suite.its.site.ID})
    bsel.Include(selTD.SharePointBackupFolderScope(bsel))
    bsel.Filter(bsel.Library(rc.Location))
    bsel.DiscreteOwner = suite.its.siteID
    bsel.DiscreteOwner = suite.its.site.ID

    bo, bod := prepNewTestBackupOp(t, ctx, mb, bsel.Selector, opts, version.Backup)
    defer bod.close(t, ctx)
@ -367,7 +382,7 @@ func (suite *SharePointRestoreIntgSuite) TestRestore_Run_sharepointDeletedDrives

    pgr := suite.its.ac.
        Drives().
        NewSiteDrivePager(suite.its.siteID, []string{"id", "name"})
        NewSiteDrivePager(suite.its.site.ID, []string{"id", "name"})

    drives, err := api.GetAllDrives(ctx, pgr, false, -1)
    require.NoError(t, err, clues.ToCore(err))

@ -23,6 +23,7 @@ const (

    // M365 config
    TestCfgAzureTenantID   = "azure_tenantid"
    TestCfgSecondarySiteID = "secondarym365siteid"
    TestCfgSiteID          = "m365siteid"
    TestCfgSiteURL         = "m365siteurl"
    TestCfgTeamID          = "m365teamid"
@ -38,15 +39,16 @@ const (

// test specific env vars
const (
    EnvCorsoM365LoadTestUserID   = "CORSO_M365_LOAD_TEST_USER_ID"
    EnvCorsoM365LoadTestOrgUsers = "CORSO_M365_LOAD_TEST_ORG_USERS"
    EnvCorsoM365TestSiteID           = "CORSO_M365_TEST_SITE_ID"
    EnvCorsoM365TestSiteURL          = "CORSO_M365_TEST_SITE_URL"
    EnvCorsoM365TestTeamID           = "CORSO_M365_TEST_TEAM_ID"
    EnvCorsoM365TestGroupID          = "CORSO_M365_TEST_GROUP_ID"
    EnvCorsoM365TestUserID           = "CORSO_M365_TEST_USER_ID"
    EnvCorsoSecondaryM365TestSiteID  = "CORSO_SECONDARY_M365_TEST_SITE_ID"
    EnvCorsoSecondaryM365TestUserID  = "CORSO_SECONDARY_M365_TEST_USER_ID"
    EnvCorsoTertiaryM365TestUserID   = "CORSO_TERTIARY_M365_TEST_USER_ID"
    EnvCorsoM365LoadTestUserID       = "CORSO_M365_LOAD_TEST_USER_ID"
    EnvCorsoM365LoadTestOrgUsers     = "CORSO_M365_LOAD_TEST_ORG_USERS"
    EnvCorsoTestConfigFilePath       = "CORSO_TEST_CONFIG_FILE"
    EnvCorsoUnlicensedM365TestUserID = "CORSO_M365_TEST_UNLICENSED_USER"
)
@ -151,7 +153,7 @@ func ReadTestConfig() (map[string]string, error) {
        TestCfgSiteID,
        os.Getenv(EnvCorsoM365TestSiteID),
        vpr.GetString(TestCfgSiteID),
        "10rqc2.sharepoint.com,4892edf5-2ebf-46be-a6e5-a40b2cbf1c1a,38ab6d06-fc82-4417-af93-22d8733c22be")
        "4892edf5-2ebf-46be-a6e5-a40b2cbf1c1a,38ab6d06-fc82-4417-af93-22d8733c22be")
    fallbackTo(
        testEnv,
        TestCfgTeamID,
@ -170,6 +172,12 @@ func ReadTestConfig() (map[string]string, error) {
        os.Getenv(EnvCorsoM365TestSiteURL),
        vpr.GetString(TestCfgSiteURL),
        "https://10rqc2.sharepoint.com/sites/CorsoCI")
    fallbackTo(
        testEnv,
        TestCfgSecondarySiteID,
        os.Getenv(EnvCorsoSecondaryM365TestSiteID),
        vpr.GetString(TestCfgSecondarySiteID),
        "053684d8-ca6c-4376-a03e-2567816bb091,9b3e9abe-6a5e-4084-8b44-ea5a356fe02c")
    fallbackTo(
        testEnv,
        TestCfgUnlicensedUserID,

@ -198,6 +198,17 @@ func GetM365SiteID(ctx context.Context) string {
    return strings.ToLower(cfg[TestCfgSiteID])
}

// SecondaryM365SiteID returns a siteID string representing the secondary m365 site ID
// described by either the env var CORSO_SECONDARY_M365_TEST_SITE_ID, the corso_test.toml
// config file, or the default value (in that order of priority). The default is a
// last-attempt fallback that will only work on alcion's testing org.
func SecondaryM365SiteID(t *testing.T) string {
    cfg, err := ReadTestConfig()
    require.NoError(t, err, "retrieving secondary m365 site id from test configuration: %+v", clues.ToCore(err))

    return strings.ToLower(cfg[TestCfgSecondarySiteID])
}

// UnlicensedM365UserID returns a userID string representing the m365UserID
// described by either the env var CORSO_M365_TEST_UNLICENSED_USER, the
// corso_test.toml config file, or the default value (in that order of priority).

22
src/pkg/control/export.go
Normal file
@ -0,0 +1,22 @@
package control

// ExportConfig contains config for exports.
type ExportConfig struct {
    // Archive decides if we should create an archive from the data
    // instead of just returning all the files. If Archive is set to
    // true, we return a single collection with a single file, which is
    // the archive.
    Archive bool

    // DataFormat decides the format in which we return the data. This is
    // only useful for outlook exports; for example, emails can be
    // exported in eml or pst format.
    // TODO: Enable once we support outlook exports
    // DataFormat string
}

func DefaultExportConfig() ExportConfig {
    return ExportConfig{
        Archive: false,
    }
}
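The defaults-then-override pattern mirrors `control.DefaultOptions()` elsewhere in this change. A minimal sketch of consuming the new config; the import path is inferred from the file's location above and is an assumption:

```go
package main

import (
	"fmt"

	// assumed import path, derived from src/pkg/control above
	"github.com/alcionai/corso/src/pkg/control"
)

func main() {
	// start from the defaults (Archive: false), then opt in to a
	// single-archive export instead of individual files.
	cfg := control.DefaultExportConfig()
	cfg.Archive = true

	fmt.Printf("export config: %+v\n", cfg)
}
```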
@ -15,7 +15,6 @@ type Options struct {
    ItemExtensionFactory []extensions.CreateItemExtensioner `json:"-"`
    Parallelism          Parallelism                        `json:"parallelism"`
    Repo                 repository.Options                 `json:"repo"`
    RestorePermissions   bool                               `json:"restorePermissions"`
    SkipReduce           bool                               `json:"skipReduce"`
    ToggleFeatures       Toggles                            `json:"toggleFeatures"`
}
@ -38,8 +37,8 @@ const (
    BestEffort FailurePolicy = "best-effort"
)

// Defaults provides an Options with the default values set.
func Defaults() Options {
// DefaultOptions provides an Options with the default values set.
func DefaultOptions() Options {
    return Options{
        FailureHandling: FailAfterRecovery,
        DeltaPageSize:   500,

@ -61,6 +61,10 @@ type RestoreConfig struct {
    // up.
    // Defaults to empty.
    Drive string `json:"drive"`

    // IncludePermissions toggles whether the restore will include the original
    // folder- and item-level permissions.
    IncludePermissions bool `json:"includePermissions"`
}

func DefaultRestoreConfig(timeFormat dttm.TimeFormat) RestoreConfig {
@ -120,10 +124,11 @@ func (rc RestoreConfig) marshal() string {

func (rc RestoreConfig) concealed() RestoreConfig {
    return RestoreConfig{
        OnCollision:       rc.OnCollision,
        ProtectedResource: clues.Hide(rc.ProtectedResource).Conceal(),
        Location:          path.LoggableDir(rc.Location),
        Drive:             clues.Hide(rc.Drive).Conceal(),
        OnCollision:        rc.OnCollision,
        ProtectedResource:  clues.Hide(rc.ProtectedResource).Conceal(),
        Location:           path.LoggableDir(rc.Location),
        Drive:              clues.Hide(rc.Drive).Conceal(),
        IncludePermissions: rc.IncludePermissions,
    }
}

45
src/pkg/export/export.go
Normal file
@ -0,0 +1,45 @@
package export

import (
    "context"
    "io"
)

// Collection is the interface that is returned to the SDK consumer.
type Collection interface {
    // BasePath gets the base path of the collection.
    BasePath() string

    // Items gets the items within the collection (folder).
    Items(context.Context) <-chan Item
}

// ItemData is the data for an individual item.
type ItemData struct {
    // Name is the name of the item. This is the name that the item
    // would have had in the service.
    Name string

    // Body is the body of the item. This is an io.ReadCloser and the
    // SDK consumer is responsible for closing it.
    Body io.ReadCloser
}

// Item is the item that is returned to the SDK consumer.
type Item struct {
    // ID will be a unique id for the item. This is the same as the id
    // that is used to store the data. This is not the name and is
    // mostly used just for tracking.
    ID string

    // Data contains the actual data of the item. It will have both
    // the name of the item and an io.ReadCloser which contains the
    // body of the item.
    Data ItemData

    // Error will contain any error that happened while trying to get
    // the item/items, such as when trying to resolve the name of the
    // item. If the error is bound to a particular item, the item's id
    // is also returned.
    Error error
}
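The channel-based shape implies a simple drain loop on the consumer side. A sketch under the assumption that the package lives at the corso module path implied by the file header; error handling is deliberately minimal:

```go
package main

import (
	"context"
	"io"

	// assumed import path, derived from src/pkg/export above
	"github.com/alcionai/corso/src/pkg/export"
)

// drain reads every item in every collection and discards the bytes.
// The consumer owns each item's Body and must close it.
func drain(ctx context.Context, colls []export.Collection) error {
	for _, coll := range colls {
		for item := range coll.Items(ctx) {
			if item.Error != nil {
				return item.Error
			}

			_, err := io.Copy(io.Discard, item.Data.Body)
			item.Data.Body.Close()

			if err != nil {
				return err
			}
		}
	}

	return nil
}

func main() {
	_ = drain(context.Background(), nil)
}
```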
@ -208,6 +208,32 @@ func (e *Bus) Errors() *Errors {
    }
}

// ItemsAndRecovered returns the items that failed along with other
// recoverable errors.
func (e *Bus) ItemsAndRecovered() ([]Item, []error) {
    var (
        is  = map[string]Item{}
        non = []error{}
    )

    for _, err := range e.recoverable {
        var ie *Item
        if !errors.As(err, &ie) {
            non = append(non, err)
            continue
        }

        is[ie.dedupeID()] = *ie
    }

    var ie *Item
    if errors.As(e.failure, &ie) {
        is[ie.dedupeID()] = *ie
    }

    return maps.Values(is), non
}

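A call site would split the per-item failures from the rest. A fragment-style sketch; the `fault` package name is an assumption, only the method itself appears in this diff:

```go
// reportFailures logs how the recoverable errors break down. errs stands in
// for the *Bus collected during an operation (hypothetical caller context).
func reportFailures(errs *fault.Bus) {
	// items holds deduped per-item failures; others holds recoverable
	// errors that weren't bound to a specific item.
	items, others := errs.ItemsAndRecovered()

	fmt.Printf("%d failed items, %d other recoverable errors\n", len(items), len(others))
}
```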
// ---------------------------------------------------------------------------
// Errors Data
// ---------------------------------------------------------------------------

@ -74,6 +74,12 @@ type Repository interface {
        sel selectors.Selector,
        restoreCfg control.RestoreConfig,
    ) (operations.RestoreOperation, error)
    NewExport(
        ctx context.Context,
        backupID string,
        sel selectors.Selector,
        exportCfg control.ExportConfig,
    ) (operations.ExportOperation, error)
    NewMaintenance(
        ctx context.Context,
        mOpts rep.Maintenance,
@ -329,7 +335,7 @@ func (r repository) NewBackupWithLookup(
        return operations.BackupOperation{}, clues.Wrap(err, "connecting to m365")
    }

    ownerID, ownerName, err := ctrl.PopulateOwnerIDAndNamesFrom(ctx, sel.DiscreteOwner, ins)
    ownerID, ownerName, err := ctrl.PopulateProtectedResourceIDAndName(ctx, sel.DiscreteOwner, ins)
    if err != nil {
        return operations.BackupOperation{}, clues.Wrap(err, "resolving resource owner details")
    }
@ -349,6 +355,31 @@ func (r repository) NewBackupWithLookup(
        r.Bus)
}

// NewExport generates an exportOperation runner.
func (r repository) NewExport(
    ctx context.Context,
    backupID string,
    sel selectors.Selector,
    exportCfg control.ExportConfig,
) (operations.ExportOperation, error) {
    ctrl, err := connectToM365(ctx, sel.PathService(), r.Account, r.Opts)
    if err != nil {
        return operations.ExportOperation{}, clues.Wrap(err, "connecting to m365")
    }

    return operations.NewExportOperation(
        ctx,
        r.Opts,
        r.dataLayer,
        store.NewKopiaStore(r.modelStore),
        ctrl,
        r.Account,
        model.StableID(backupID),
        sel,
        exportCfg,
        r.Bus)
}

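From the SDK side, the new method slots in beside NewBackup and NewRestore. A wiring sketch; the import paths, surrounding setup, and the Archive choice are assumptions, only the NewExport signature comes from this diff:

```go
// newArchiveExport builds an export operation for a backup, requesting a
// single archive file rather than individual items. The caller would then
// run the returned operation, as with backups and restores.
func newArchiveExport(
	ctx context.Context,
	r repository.Repository,
	backupID string,
	sel selectors.Selector,
) (operations.ExportOperation, error) {
	cfg := control.DefaultExportConfig()
	cfg.Archive = true // hypothetical choice; defaults to false

	return r.NewExport(ctx, backupID, sel, cfg)
}
```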
// NewRestore generates a restoreOperation runner.
func (r repository) NewRestore(
    ctx context.Context,
@ -585,8 +616,13 @@ func deleteBackup(
        }
    }

    if len(b.DetailsID) > 0 {
        if err := kw.DeleteSnapshot(ctx, b.DetailsID); err != nil {
    ssid := b.StreamStoreID
    if len(ssid) == 0 {
        ssid = b.DetailsID
    }

    if len(ssid) > 0 {
        if err := kw.DeleteSnapshot(ctx, ssid); err != nil {
            return err
        }
    }

@ -60,7 +60,7 @@ func (suite *RepositoryUnitSuite) TestInitialize() {
            st, err := test.storage()
            assert.NoError(t, err, clues.ToCore(err))

            _, err = Initialize(ctx, test.account, st, control.Defaults())
            _, err = Initialize(ctx, test.account, st, control.DefaultOptions())
            test.errCheck(t, err, clues.ToCore(err))
        })
    }
@ -94,7 +94,7 @@ func (suite *RepositoryUnitSuite) TestConnect() {
            st, err := test.storage()
            assert.NoError(t, err, clues.ToCore(err))

            _, err = Connect(ctx, test.account, st, "not_found", control.Defaults())
            _, err = Connect(ctx, test.account, st, "not_found", control.DefaultOptions())
            test.errCheck(t, err, clues.ToCore(err))
        })
    }
@ -137,7 +137,7 @@ func (suite *RepositoryIntegrationSuite) TestInitialize() {
            defer flush()

            st := test.storage(t)
            r, err := Initialize(ctx, test.account, st, control.Defaults())
            r, err := Initialize(ctx, test.account, st, control.DefaultOptions())
            if err == nil {
                defer func() {
                    err := r.Close(ctx)
@ -186,11 +186,11 @@ func (suite *RepositoryIntegrationSuite) TestConnect() {
    // need to initialize the repository before we can test connecting to it.
    st := storeTD.NewPrefixedS3Storage(t)

    repo, err := Initialize(ctx, account.Account{}, st, control.Defaults())
    repo, err := Initialize(ctx, account.Account{}, st, control.DefaultOptions())
    require.NoError(t, err, clues.ToCore(err))

    // now re-connect
    _, err = Connect(ctx, account.Account{}, st, repo.GetID(), control.Defaults())
    _, err = Connect(ctx, account.Account{}, st, repo.GetID(), control.DefaultOptions())
    assert.NoError(t, err, clues.ToCore(err))
}

@ -203,7 +203,7 @@ func (suite *RepositoryIntegrationSuite) TestConnect_sameID() {
    // need to initialize the repository before we can test connecting to it.
    st := storeTD.NewPrefixedS3Storage(t)

    r, err := Initialize(ctx, account.Account{}, st, control.Defaults())
    r, err := Initialize(ctx, account.Account{}, st, control.DefaultOptions())
    require.NoError(t, err, clues.ToCore(err))

    oldID := r.GetID()
@ -212,7 +212,7 @@ func (suite *RepositoryIntegrationSuite) TestConnect_sameID() {
    require.NoError(t, err, clues.ToCore(err))

    // now re-connect
    r, err = Connect(ctx, account.Account{}, st, oldID, control.Defaults())
    r, err = Connect(ctx, account.Account{}, st, oldID, control.DefaultOptions())
    require.NoError(t, err, clues.ToCore(err))
    assert.Equal(t, oldID, r.GetID())
}
@ -228,7 +228,7 @@ func (suite *RepositoryIntegrationSuite) TestNewBackup() {
    // need to initialize the repository before we can test connecting to it.
    st := storeTD.NewPrefixedS3Storage(t)

    r, err := Initialize(ctx, acct, st, control.Defaults())
    r, err := Initialize(ctx, acct, st, control.DefaultOptions())
    require.NoError(t, err, clues.ToCore(err))

    userID := tconfig.M365UserID(t)
@ -250,7 +250,7 @@ func (suite *RepositoryIntegrationSuite) TestNewRestore() {
    // need to initialize the repository before we can test connecting to it.
    st := storeTD.NewPrefixedS3Storage(t)

    r, err := Initialize(ctx, acct, st, control.Defaults())
    r, err := Initialize(ctx, acct, st, control.DefaultOptions())
    require.NoError(t, err, clues.ToCore(err))

    ro, err := r.NewRestore(ctx, "backup-id", selectors.Selector{DiscreteOwner: "test"}, restoreCfg)
@ -269,7 +269,7 @@ func (suite *RepositoryIntegrationSuite) TestNewMaintenance() {
    // need to initialize the repository before we can test connecting to it.
    st := storeTD.NewPrefixedS3Storage(t)

    r, err := Initialize(ctx, acct, st, control.Defaults())
    r, err := Initialize(ctx, acct, st, control.DefaultOptions())
    require.NoError(t, err, clues.ToCore(err))

    mo, err := r.NewMaintenance(ctx, ctrlRepo.Maintenance{})
@ -286,7 +286,7 @@ func (suite *RepositoryIntegrationSuite) TestConnect_DisableMetrics() {
    // need to initialize the repository before we can test connecting to it.
    st := storeTD.NewPrefixedS3Storage(t)

    repo, err := Initialize(ctx, account.Account{}, st, control.Defaults())
    repo, err := Initialize(ctx, account.Account{}, st, control.DefaultOptions())
    require.NoError(t, err)

    // now re-connect
@ -308,14 +308,14 @@ func (suite *RepositoryIntegrationSuite) Test_Options() {
        {
            name: "default options",
            opts: func() control.Options {
                return control.Defaults()
                return control.DefaultOptions()
            },
            expectedLen: 0,
        },
        {
            name: "options with an extension factory",
            opts: func() control.Options {
                o := control.Defaults()
                o := control.DefaultOptions()
                o.ItemExtensionFactory = append(
                    o.ItemExtensionFactory,
                    &extensions.MockItemExtensionFactory{})
@ -327,7 +327,7 @@ func (suite *RepositoryIntegrationSuite) Test_Options() {
        {
            name: "options with multiple extension factories",
            opts: func() control.Options {
                o := control.Defaults()
                o := control.DefaultOptions()
                f := []extensions.CreateItemExtensioner{
                    &extensions.MockItemExtensionFactory{},
                    &extensions.MockItemExtensionFactory{},

@ -98,7 +98,7 @@ func newIntegrationTesterSetup(t *testing.T) intgTesterSetup {
    creds, err := a.M365Config()
    require.NoError(t, err, clues.ToCore(err))

    its.ac, err = api.NewClient(creds, control.Defaults())
    its.ac, err = api.NewClient(creds, control.DefaultOptions())
    require.NoError(t, err, clues.ToCore(err))

    its.gockAC, err = mock.NewClient(creds)

@ -143,12 +143,16 @@ func (suite *SitesIntgSuite) TestSites_GetByID() {
    var (
        t               = suite.T()
        siteID          = tconfig.M365SiteID(t)
        host            = strings.Split(siteID, ",")[0]
        shortID         = strings.TrimPrefix(siteID, host+",")
        parts           = strings.Split(siteID, ",")
        uuids           = siteID
        siteURL         = tconfig.M365SiteURL(t)
        modifiedSiteURL = siteURL + "foo"
    )

    if len(parts) == 3 {
        uuids = strings.Join(parts[1:], ",")
    }

    sitesAPI := suite.its.ac.Sites()

    table := []struct {
@ -165,7 +169,7 @@ func (suite *SitesIntgSuite) TestSites_GetByID() {
        },
        {
            name: "2 part id",
            id:   shortID,
            id:   uuids,
            expectErr: func(t *testing.T, err error) {
                assert.NoError(t, err, clues.ToCore(err))
            },
@ -191,13 +195,6 @@ func (suite *SitesIntgSuite) TestSites_GetByID() {
                assert.NoError(t, err, clues.ToCore(err))
            },
        },
        {
            name: "host only",
            id:   host,
            expectErr: func(t *testing.T, err error) {
                assert.NoError(t, err, clues.ToCore(err))
            },
        },
        {
            name: "malformed url",
            id:   "barunihlda",

@ -329,7 +329,7 @@ func makeAC(
        return api.Client{}, clues.Wrap(err, "getting m365 account creds")
    }

    cli, err := api.NewClient(creds, control.Defaults())
    cli, err := api.NewClient(creds, control.DefaultOptions())
    if err != nil {
        return api.Client{}, clues.Wrap(err, "constructing api client")
    }

@ -10,7 +10,7 @@ manner to a new folder. When you need more control over the results you can
use the advanced configuration options to change where and how your data
gets restored.

## Destination
## Restore to target folder

The `--destination` flag lets you select the top-level folder where Corso will
write all of the restored data.
@ -18,7 +18,7 @@ write all of the restored data.

### The default destination

<CodeBlock language="bash">{
`corso restore onedrive --backup abcd`
`corso restore onedrive --backup a422895c-c20c-4b06-883d-b866db9f86ef`
}</CodeBlock>

If the flag isn't provided, Corso will create a new folder with a standard name:
@ -29,7 +29,7 @@ data integrity then this is always the safest option.

### An alternate destination

<CodeBlock language="bash">{
`corso restore onedrive --backup abcd --destination /my-latest-restore`
`corso restore onedrive --destination /my-latest-restore --backup a422895c-c20c-4b06-883d-b866db9f86ef`
}</CodeBlock>

When a destination is manually specified, all restored items will appear in that top-level
@ -41,14 +41,14 @@ folder multiple times.

### The original location

<CodeBlock language="bash">{
`corso restore onedrive --backup abcd --destination /`
`corso restore onedrive --destination / --backup a422895c-c20c-4b06-883d-b866db9f86ef`
}</CodeBlock>

You can restore items back to their original location by setting the destination
to `/`. This skips the creation of a top-level folder, and all restored items will
appear back in their location at the time of backup.

### Limitations
### Destination Limitations

* Destination won't create N-depth folder structures. `--destination a/b/c`
doesn't create three folders; it creates a single, top-level folder named `a/b/c`.
@ -79,19 +79,19 @@ it still collides.
Collisions can be handled with three different configurations: `Skip`, `Copy`,
and `Replace`.

## Skip (default)
### Skip (default)

<CodeBlock language="bash">{
`corso restore onedrive --backup abcd --collisions skip --destination /`
`corso restore onedrive --collisions skip --destination / --backup a422895c-c20c-4b06-883d-b866db9f86ef`
}</CodeBlock>

When a collision is identified, the item is skipped and
no restore is attempted.

## Copy
### Copy

<CodeBlock language="bash">{
`corso restore onedrive --backup abcd --collisions copy --destination /my-latest-restore`
`corso restore onedrive --collisions copy --destination /my-latest-restore --backup a422895c-c20c-4b06-883d-b866db9f86ef`
}</CodeBlock>

Item collisions create a copy of the item in the backup. The copy holds the backup
@ -99,12 +99,31 @@ version of the item, leaving the current version unchanged. If necessary, change
item properties (such as filenames) to avoid additional collisions, e.g.
the copy of `reports.txt` is named `reports 1.txt`.

## Replace
### Replace

<CodeBlock language="bash">{
`corso restore onedrive --backup abcd --collisions replace --destination /`
`corso restore onedrive --collisions replace --destination / --backup a422895c-c20c-4b06-883d-b866db9f86ef`
}</CodeBlock>

Collisions will entirely replace the current version of the item with the backup
version. If multiple existing items collide with the backup item, only one of the
existing items is replaced.

## Restore to target resource

The `--to-resource` flag lets you select which resource will receive the restored data.
A resource can be a mailbox, user, SharePoint site, or other owner of data.

When restoring to a target resource, all other restore configuration behaves normally.
Data is restored into the default folder, `Corso_Restore_<current-date-time>`, unless a
`--destination` flag is added. When restoring in-place, collision policies are followed.

<CodeBlock language="bash">{
`corso restore onedrive --to-resource adelev@alcion.ai --backup a422895c-c20c-4b06-883d-b866db9f86ef`
}</CodeBlock>

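The flag composes with the rest of the restore configuration described above. For example,
a hypothetical run that restores into a named folder on the target resource and copies on
collision:

<CodeBlock language="bash">{
`corso restore onedrive --to-resource adelev@alcion.ai --destination /my-latest-restore --collisions copy --backup a422895c-c20c-4b06-883d-b866db9f86ef`
}</CodeBlock>
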
### Resource Limitations

* The resource must exist. Corso won't create new mailboxes, users, or sites.
* The resource must have access to the service being restored. No restore will be
performed for an unlicensed resource.

144
website/package-lock.json
generated
@ -20,7 +20,7 @@
        "feather-icons": "^4.29.0",
        "jarallax": "^2.1.3",
        "mdx-mermaid": "^1.3.2",
        "mermaid": "^10.2.4",
        "mermaid": "^10.3.0",
        "prism-react-renderer": "^1.3.5",
        "react": "^17.0.2",
        "react-dom": "^17.0.2",
@ -3377,6 +3377,24 @@
        "@types/node": "*"
      }
    },
    "node_modules/@types/d3-scale": {
      "version": "4.0.3",
      "resolved": "https://registry.npmjs.org/@types/d3-scale/-/d3-scale-4.0.3.tgz",
      "integrity": "sha512-PATBiMCpvHJSMtZAMEhc2WyL+hnzarKzI6wAHYjhsonjWJYGq5BXTzQjv4l8m2jO183/4wZ90rKvSeT7o72xNQ==",
      "dependencies": {
        "@types/d3-time": "*"
      }
    },
    "node_modules/@types/d3-scale-chromatic": {
      "version": "3.0.0",
      "resolved": "https://registry.npmjs.org/@types/d3-scale-chromatic/-/d3-scale-chromatic-3.0.0.tgz",
      "integrity": "sha512-dsoJGEIShosKVRBZB0Vo3C8nqSDqVGujJU6tPznsBJxNJNwMF8utmS83nvCBKQYPpjCzaaHcrf66iTRpZosLPw=="
    },
    "node_modules/@types/d3-time": {
      "version": "3.0.0",
      "resolved": "https://registry.npmjs.org/@types/d3-time/-/d3-time-3.0.0.tgz",
      "integrity": "sha512-sZLCdHvBUcNby1cB6Fd3ZBrABbjz3v1Vm90nysCQ6Vt7vd6e/h9Lt7SiJUoEX0l4Dzc7P5llKyhqSi1ycSf1Hg=="
    },
    "node_modules/@types/debug": {
      "version": "4.1.8",
      "resolved": "https://registry.npmjs.org/@types/debug/-/debug-4.1.8.tgz",
@ -6079,6 +6097,41 @@
        "node": ">=12"
      }
    },
    "node_modules/d3-sankey": {
      "version": "0.12.3",
      "resolved": "https://registry.npmjs.org/d3-sankey/-/d3-sankey-0.12.3.tgz",
      "integrity": "sha512-nQhsBRmM19Ax5xEIPLMY9ZmJ/cDvd1BG3UVvt5h3WRxKg5zGRbvnteTyWAbzeSvlh3tW7ZEmq4VwR5mB3tutmQ==",
      "dependencies": {
        "d3-array": "1 - 2",
        "d3-shape": "^1.2.0"
      }
    },
    "node_modules/d3-sankey/node_modules/d3-array": {
      "version": "2.12.1",
      "resolved": "https://registry.npmjs.org/d3-array/-/d3-array-2.12.1.tgz",
      "integrity": "sha512-B0ErZK/66mHtEsR1TkPEEkwdy+WDesimkM5gpZr5Dsg54BiTA5RXtYW5qTLIAcekaS9xfZrzBLF/OAkB3Qn1YQ==",
      "dependencies": {
        "internmap": "^1.0.0"
      }
    },
    "node_modules/d3-sankey/node_modules/d3-path": {
      "version": "1.0.9",
      "resolved": "https://registry.npmjs.org/d3-path/-/d3-path-1.0.9.tgz",
      "integrity": "sha512-VLaYcn81dtHVTjEHd8B+pbe9yHWpXKZUC87PzoFmsFrJqgFwDe/qxfp5MlfsfM1V5E/iVt0MmEbWQ7FVIXh/bg=="
    },
    "node_modules/d3-sankey/node_modules/d3-shape": {
      "version": "1.3.7",
      "resolved": "https://registry.npmjs.org/d3-shape/-/d3-shape-1.3.7.tgz",
      "integrity": "sha512-EUkvKjqPFUAZyOlhY5gzCxCeI0Aep04LwIRpsZ/mLFelJiUfnK56jo5JMDSE7yyP2kLSb6LtF+S5chMk7uqPqw==",
      "dependencies": {
        "d3-path": "1"
      }
    },
    "node_modules/d3-sankey/node_modules/internmap": {
      "version": "1.0.1",
      "resolved": "https://registry.npmjs.org/internmap/-/internmap-1.0.1.tgz",
      "integrity": "sha512-lDB5YccMydFBtasVtxnZ3MRBHuaoE8GKsppq+EchKL2U4nK/DmEpPHNH8MZe5HkMtpSiTSOZwfN0tzYjO/lJEw=="
    },
    "node_modules/d3-scale": {
      "version": "4.0.2",
      "resolved": "https://registry.npmjs.org/d3-scale/-/d3-scale-4.0.2.tgz",
@ -6612,9 +6665,9 @@
      }
    },
    "node_modules/dompurify": {
      "version": "3.0.3",
      "resolved": "https://registry.npmjs.org/dompurify/-/dompurify-3.0.3.tgz",
      "integrity": "sha512-axQ9zieHLnAnHh0sfAamKYiqXMJAVwu+LM/alQ7WDagoWessyWvMSFyW65CqF3owufNu8HBcE4cM2Vflu7YWcQ=="
      "version": "3.0.5",
      "resolved": "https://registry.npmjs.org/dompurify/-/dompurify-3.0.5.tgz",
      "integrity": "sha512-F9e6wPGtY+8KNMRAVfxeCOHU0/NPWMSENNq4pQctuXRqqdEPW7q3CrLbR5Nse044WwacyjHGOMlvNsBe1y6z9A=="
    },
    "node_modules/domutils": {
      "version": "2.8.0",
@ -9279,18 +9332,21 @@
      }
    },
    "node_modules/mermaid": {
      "version": "10.2.4",
      "resolved": "https://registry.npmjs.org/mermaid/-/mermaid-10.2.4.tgz",
      "integrity": "sha512-zHGjEI7lBvWZX+PQYmlhSA2p40OzW6QbGodTCSzDeVpqaTnyAC+2sRGqrpXO+uQk3CnoeClHQPraQUMStdqy2g==",
      "version": "10.3.0",
      "resolved": "https://registry.npmjs.org/mermaid/-/mermaid-10.3.0.tgz",
      "integrity": "sha512-H5quxuQjwXC8M1WuuzhAp2TdqGg74t5skfDBrNKJ7dt3z8Wprl5S6h9VJsRhoBUTSs1TMtHEdplLhCqXleZZLw==",
      "dependencies": {
        "@braintree/sanitize-url": "^6.0.2",
        "@types/d3-scale": "^4.0.3",
        "@types/d3-scale-chromatic": "^3.0.0",
        "cytoscape": "^3.23.0",
        "cytoscape-cose-bilkent": "^4.1.0",
        "cytoscape-fcose": "^2.1.0",
        "d3": "^7.4.0",
        "d3-sankey": "^0.12.3",
        "dagre-d3-es": "7.0.10",
        "dayjs": "^1.11.7",
        "dompurify": "3.0.3",
        "dompurify": "3.0.5",
        "elkjs": "^0.8.2",
        "khroma": "^2.0.0",
        "lodash-es": "^4.17.21",
@ -17620,6 +17676,24 @@
      "@types/node": "*"
    }
  },
    "@types/d3-scale": {
      "version": "4.0.3",
      "resolved": "https://registry.npmjs.org/@types/d3-scale/-/d3-scale-4.0.3.tgz",
      "integrity": "sha512-PATBiMCpvHJSMtZAMEhc2WyL+hnzarKzI6wAHYjhsonjWJYGq5BXTzQjv4l8m2jO183/4wZ90rKvSeT7o72xNQ==",
      "requires": {
        "@types/d3-time": "*"
      }
    },
    "@types/d3-scale-chromatic": {
      "version": "3.0.0",
      "resolved": "https://registry.npmjs.org/@types/d3-scale-chromatic/-/d3-scale-chromatic-3.0.0.tgz",
      "integrity": "sha512-dsoJGEIShosKVRBZB0Vo3C8nqSDqVGujJU6tPznsBJxNJNwMF8utmS83nvCBKQYPpjCzaaHcrf66iTRpZosLPw=="
    },
    "@types/d3-time": {
      "version": "3.0.0",
      "resolved": "https://registry.npmjs.org/@types/d3-time/-/d3-time-3.0.0.tgz",
      "integrity": "sha512-sZLCdHvBUcNby1cB6Fd3ZBrABbjz3v1Vm90nysCQ6Vt7vd6e/h9Lt7SiJUoEX0l4Dzc7P5llKyhqSi1ycSf1Hg=="
    },
    "@types/debug": {
      "version": "4.1.8",
      "resolved": "https://registry.npmjs.org/@types/debug/-/debug-4.1.8.tgz",
@ -19542,6 +19616,43 @@
      "resolved": "https://registry.npmjs.org/d3-random/-/d3-random-3.0.1.tgz",
      "integrity": "sha512-FXMe9GfxTxqd5D6jFsQ+DJ8BJS4E/fT5mqqdjovykEB2oFbTMDVdg1MGFxfQW+FBOGoB++k8swBrgwSHT1cUXQ=="
    },
    "d3-sankey": {
      "version": "0.12.3",
      "resolved": "https://registry.npmjs.org/d3-sankey/-/d3-sankey-0.12.3.tgz",
      "integrity": "sha512-nQhsBRmM19Ax5xEIPLMY9ZmJ/cDvd1BG3UVvt5h3WRxKg5zGRbvnteTyWAbzeSvlh3tW7ZEmq4VwR5mB3tutmQ==",
      "requires": {
        "d3-array": "1 - 2",
        "d3-shape": "^1.2.0"
      },
      "dependencies": {
        "d3-array": {
          "version": "2.12.1",
          "resolved": "https://registry.npmjs.org/d3-array/-/d3-array-2.12.1.tgz",
          "integrity": "sha512-B0ErZK/66mHtEsR1TkPEEkwdy+WDesimkM5gpZr5Dsg54BiTA5RXtYW5qTLIAcekaS9xfZrzBLF/OAkB3Qn1YQ==",
          "requires": {
            "internmap": "^1.0.0"
          }
        },
        "d3-path": {
          "version": "1.0.9",
          "resolved": "https://registry.npmjs.org/d3-path/-/d3-path-1.0.9.tgz",
          "integrity": "sha512-VLaYcn81dtHVTjEHd8B+pbe9yHWpXKZUC87PzoFmsFrJqgFwDe/qxfp5MlfsfM1V5E/iVt0MmEbWQ7FVIXh/bg=="
        },
        "d3-shape": {
          "version": "1.3.7",
          "resolved": "https://registry.npmjs.org/d3-shape/-/d3-shape-1.3.7.tgz",
          "integrity": "sha512-EUkvKjqPFUAZyOlhY5gzCxCeI0Aep04LwIRpsZ/mLFelJiUfnK56jo5JMDSE7yyP2kLSb6LtF+S5chMk7uqPqw==",
          "requires": {
            "d3-path": "1"
          }
        },
        "internmap": {
          "version": "1.0.1",
          "resolved": "https://registry.npmjs.org/internmap/-/internmap-1.0.1.tgz",
          "integrity": "sha512-lDB5YccMydFBtasVtxnZ3MRBHuaoE8GKsppq+EchKL2U4nK/DmEpPHNH8MZe5HkMtpSiTSOZwfN0tzYjO/lJEw=="
        }
      }
    },
    "d3-scale": {
      "version": "4.0.2",
      "resolved": "https://registry.npmjs.org/d3-scale/-/d3-scale-4.0.2.tgz",
@ -19913,9 +20024,9 @@
      }
    },
    "dompurify": {
      "version": "3.0.3",
      "resolved": "https://registry.npmjs.org/dompurify/-/dompurify-3.0.3.tgz",
      "integrity": "sha512-axQ9zieHLnAnHh0sfAamKYiqXMJAVwu+LM/alQ7WDagoWessyWvMSFyW65CqF3owufNu8HBcE4cM2Vflu7YWcQ=="
      "version": "3.0.5",
      "resolved": "https://registry.npmjs.org/dompurify/-/dompurify-3.0.5.tgz",
      "integrity": "sha512-F9e6wPGtY+8KNMRAVfxeCOHU0/NPWMSENNq4pQctuXRqqdEPW7q3CrLbR5Nse044WwacyjHGOMlvNsBe1y6z9A=="
    },
    "domutils": {
      "version": "2.8.0",
@ -21729,18 +21840,21 @@
      "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg=="
    },
    "mermaid": {
      "version": "10.2.4",
      "resolved": "https://registry.npmjs.org/mermaid/-/mermaid-10.2.4.tgz",
      "integrity": "sha512-zHGjEI7lBvWZX+PQYmlhSA2p40OzW6QbGodTCSzDeVpqaTnyAC+2sRGqrpXO+uQk3CnoeClHQPraQUMStdqy2g==",
      "version": "10.3.0",
      "resolved": "https://registry.npmjs.org/mermaid/-/mermaid-10.3.0.tgz",
      "integrity": "sha512-H5quxuQjwXC8M1WuuzhAp2TdqGg74t5skfDBrNKJ7dt3z8Wprl5S6h9VJsRhoBUTSs1TMtHEdplLhCqXleZZLw==",
      "requires": {
        "@braintree/sanitize-url": "^6.0.2",
        "@types/d3-scale": "^4.0.3",
        "@types/d3-scale-chromatic": "^3.0.0",
        "cytoscape": "^3.23.0",
        "cytoscape-cose-bilkent": "^4.1.0",
        "cytoscape-fcose": "^2.1.0",
        "d3": "^7.4.0",
        "d3-sankey": "^0.12.3",
        "dagre-d3-es": "7.0.10",
        "dayjs": "^1.11.7",
        "dompurify": "3.0.3",
        "dompurify": "3.0.5",
        "elkjs": "^0.8.2",
        "khroma": "^2.0.0",
        "lodash-es": "^4.17.21",

@ -26,7 +26,7 @@
    "feather-icons": "^4.29.0",
    "jarallax": "^2.1.3",
    "mdx-mermaid": "^1.3.2",
    "mermaid": "^10.2.4",
    "mermaid": "^10.3.0",
    "prism-react-renderer": "^1.3.5",
    "react": "^17.0.2",
    "react-dom": "^17.0.2",