merge main

Commit ed72e63e03

.github/workflows/weekly_cleanup.yml (vendored, 26 deletions)
@@ -1,26 +0,0 @@
-name: Weekly S3 Test Bucket Cleanup
-
-on:
-  schedule:
-    # every saturday at 23:59 (11:59pm)
-    - cron: "59 23 * * 6"
-
-permissions:
-  # required to retrieve AWS credentials
-  id-token: write
-
-jobs:
-  S3-Test-Cleanup:
-    runs-on: ubuntu-latest
-    environment: Testing
-
-    steps:
-      - name: Configure AWS credentials from Test account
-        uses: aws-actions/configure-aws-credentials@v2
-        with:
-          role-to-assume: ${{ secrets.AWS_IAM_ROLE }}
-          role-session-name: integration-testing
-          aws-region: us-east-1
-
-      - name: Delete all files in the test bucket
-        run: |
-          aws s3 rm s3://${{ secrets.CI_TESTS_S3_BUCKET }} --recursive --include "*" --exclude "longevity/*"
@@ -91,6 +91,7 @@ func addExchangeCommands(cmd *cobra.Command) *cobra.Command {
 			flags.AddFetchParallelismFlag(c)
 			flags.AddFailFastFlag(c)
 			flags.AddDisableIncrementalsFlag(c)
+			flags.AddForceItemDataDownloadFlag(c)
 			flags.AddDisableDeltaFlag(c)
 			flags.AddEnableImmutableIDFlag(c)
 			flags.AddDisableConcurrencyLimiterFlag(c)
@@ -78,6 +78,7 @@ func addOneDriveCommands(cmd *cobra.Command) *cobra.Command {
 
 			flags.AddFailFastFlag(c)
 			flags.AddDisableIncrementalsFlag(c)
+			flags.AddForceItemDataDownloadFlag(c)
 
 		case listCommand:
 			c, fs = utils.AddCommand(cmd, oneDriveListCmd())
@@ -93,6 +93,7 @@ func addSharePointCommands(cmd *cobra.Command) *cobra.Command {
 			flags.AddDataFlag(c, []string{dataLibraries}, true)
 			flags.AddFailFastFlag(c)
 			flags.AddDisableIncrementalsFlag(c)
+			flags.AddForceItemDataDownloadFlag(c)
 
 		case listCommand:
 			c, fs = utils.AddCommand(cmd, sharePointListCmd())
@@ -163,12 +163,11 @@ func (suite *SharePointUnitSuite) TestSharePointBackupCreateSelectors() {
 	)
 
 	table := []struct {
 		name   string
 		site   []string
 		weburl []string
 		data   []string
 		expect []string
-		expectScopesLen int
 	}{
 		{
 			name: "no sites or urls",
@@ -181,63 +180,54 @@ func (suite *SharePointUnitSuite) TestSharePointBackupCreateSelectors() {
 			expect: selectors.None(),
 		},
 		{
 			name:   "site wildcard",
 			site:   []string{flags.Wildcard},
 			expect: bothIDs,
-			expectScopesLen: 2,
 		},
 		{
 			name:   "url wildcard",
 			weburl: []string{flags.Wildcard},
 			expect: bothIDs,
-			expectScopesLen: 2,
 		},
 		{
 			name:   "sites",
 			site:   []string{id1, id2},
 			expect: []string{id1, id2},
-			expectScopesLen: 2,
 		},
 		{
 			name:   "urls",
 			weburl: []string{url1, url2},
 			expect: []string{url1, url2},
-			expectScopesLen: 2,
 		},
 		{
 			name:   "mix sites and urls",
 			site:   []string{id1},
 			weburl: []string{url2},
 			expect: []string{id1, url2},
-			expectScopesLen: 2,
 		},
 		{
 			name:   "duplicate sites and urls",
 			site:   []string{id1, id2},
 			weburl: []string{url1, url2},
 			expect: []string{id1, id2, url1, url2},
-			expectScopesLen: 2,
 		},
 		{
 			name:   "unnecessary site wildcard",
 			site:   []string{id1, flags.Wildcard},
 			weburl: []string{url1, url2},
 			expect: bothIDs,
-			expectScopesLen: 2,
 		},
 		{
 			name:   "unnecessary url wildcard",
 			site:   []string{id1},
 			weburl: []string{url1, flags.Wildcard},
 			expect: bothIDs,
-			expectScopesLen: 2,
 		},
 		{
 			name:   "Pages",
 			site:   bothIDs,
 			data:   []string{dataPages},
 			expect: bothIDs,
-			expectScopesLen: 1,
 		},
 	}
 	for _, test := range table {
@@ -249,7 +239,7 @@ func (suite *SharePointUnitSuite) TestSharePointBackupCreateSelectors() {
 
 			sel, err := sharePointBackupCreateSelectors(ctx, ins, test.site, test.weburl, test.data)
 			require.NoError(t, err, clues.ToCore(err))
-			assert.ElementsMatch(t, test.expect, sel.DiscreteResourceOwners())
+			assert.ElementsMatch(t, test.expect, sel.ResourceOwners.Targets)
 		})
 	}
 }
@@ -9,6 +9,7 @@ const (
 	DisableConcurrencyLimiterFN = "disable-concurrency-limiter"
 	DisableDeltaFN = "disable-delta"
 	DisableIncrementalsFN = "disable-incrementals"
+	ForceItemDataDownloadFN = "force-item-data-download"
 	EnableImmutableIDFN = "enable-immutable-id"
 	FailFastFN = "fail-fast"
 	FailedItemsFN = "failed-items"
@@ -26,6 +27,7 @@ var (
 	DisableConcurrencyLimiterFV bool
 	DisableDeltaFV bool
 	DisableIncrementalsFV bool
+	ForceItemDataDownloadFV bool
 	EnableImmutableIDFV bool
 	FailFastFV bool
 	FetchParallelismFV int
@@ -110,6 +112,19 @@ func AddDisableIncrementalsFlag(cmd *cobra.Command) {
 	cobra.CheckErr(fs.MarkHidden(DisableIncrementalsFN))
 }
 
+// Adds the hidden '--force-item-data-download' cli flag which, when set,
+// disables kopia-assisted incremental backups.
+func AddForceItemDataDownloadFlag(cmd *cobra.Command) {
+	fs := cmd.Flags()
+	fs.BoolVar(
+		&ForceItemDataDownloadFV,
+		ForceItemDataDownloadFN,
+		false,
+		"Disable cached data checks in backups to force item redownloads for "+
+			"items changed since the last successful backup.")
+	cobra.CheckErr(fs.MarkHidden(ForceItemDataDownloadFN))
+}
+
 // Adds the hidden '--disable-delta' cli flag which, when set, disables
 // delta based backups.
 func AddDisableDeltaFlag(cmd *cobra.Command) {
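Reviewer note: a minimal, hedged sketch of the hidden-flag pattern used by AddForceItemDataDownloadFlag above. The `backup` command name here is hypothetical; only the flag name comes from this change. A flag hidden via MarkHidden still parses normally, it is just omitted from help output.

package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

var forceItemDataDownload bool

func main() {
	// Hypothetical command used only for illustration.
	cmd := &cobra.Command{
		Use: "backup",
		Run: func(cmd *cobra.Command, args []string) {
			fmt.Println("force-item-data-download:", forceItemDataDownload)
		},
	}

	fs := cmd.Flags()
	fs.BoolVar(
		&forceItemDataDownload,
		"force-item-data-download",
		false,
		"Disable cached data checks to force item redownloads.")

	// Hidden flags parse normally but don't appear in `backup --help`.
	cobra.CheckErr(fs.MarkHidden("force-item-data-download"))

	cmd.SetArgs([]string{"--force-item-data-download"})
	cobra.CheckErr(cmd.Execute())
}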
@@ -23,6 +23,7 @@ func Control() control.Options {
 	opt.DisableMetrics = flags.NoStatsFV
 	opt.SkipReduce = flags.SkipReduceFV
 	opt.ToggleFeatures.DisableIncrementals = flags.DisableIncrementalsFV
+	opt.ToggleFeatures.ForceItemDataDownload = flags.ForceItemDataDownloadFV
 	opt.ToggleFeatures.DisableDelta = flags.DisableDeltaFV
 	opt.ToggleFeatures.ExchangeImmutableIDs = flags.EnableImmutableIDFV
 	opt.ToggleFeatures.DisableConcurrencyLimiter = flags.DisableConcurrencyLimiterFV
@@ -29,6 +29,7 @@ func (suite *OptionsUnitSuite) TestAddExchangeCommands() {
 		Run: func(cmd *cobra.Command, args []string) {
 			assert.True(t, flags.FailFastFV, flags.FailFastFN)
 			assert.True(t, flags.DisableIncrementalsFV, flags.DisableIncrementalsFN)
+			assert.True(t, flags.ForceItemDataDownloadFV, flags.ForceItemDataDownloadFN)
 			assert.True(t, flags.DisableDeltaFV, flags.DisableDeltaFN)
 			assert.True(t, flags.NoStatsFV, flags.NoStatsFN)
 			assert.True(t, flags.RestorePermissionsFV, flags.RestorePermissionsFN)
@@ -44,6 +45,7 @@ func (suite *OptionsUnitSuite) TestAddExchangeCommands() {
 
 	flags.AddFailFastFlag(cmd)
 	flags.AddDisableIncrementalsFlag(cmd)
+	flags.AddForceItemDataDownloadFlag(cmd)
 	flags.AddDisableDeltaFlag(cmd)
 	flags.AddRestorePermissionsFlag(cmd)
 	flags.AddSkipReduceFlag(cmd)
@@ -56,6 +58,7 @@ func (suite *OptionsUnitSuite) TestAddExchangeCommands() {
 		"test",
 		"--" + flags.FailFastFN,
 		"--" + flags.DisableIncrementalsFN,
+		"--" + flags.ForceItemDataDownloadFN,
 		"--" + flags.DisableDeltaFN,
 		"--" + flags.NoStatsFN,
 		"--" + flags.RestorePermissionsFN,
@@ -17,9 +17,9 @@ import (
 	"github.com/alcionai/corso/src/internal/common/str"
 	"github.com/alcionai/corso/src/internal/data"
 	"github.com/alcionai/corso/src/internal/m365"
-	exchMock "github.com/alcionai/corso/src/internal/m365/exchange/mock"
-	odStub "github.com/alcionai/corso/src/internal/m365/onedrive/stub"
 	"github.com/alcionai/corso/src/internal/m365/resource"
+	exchMock "github.com/alcionai/corso/src/internal/m365/service/exchange/mock"
+	odStub "github.com/alcionai/corso/src/internal/m365/service/onedrive/stub"
 	m365Stub "github.com/alcionai/corso/src/internal/m365/stub"
 	"github.com/alcionai/corso/src/internal/operations/inject"
 	"github.com/alcionai/corso/src/internal/tester"
@@ -5,8 +5,8 @@ import (
 
 	. "github.com/alcionai/corso/src/cli/print"
 	"github.com/alcionai/corso/src/cli/utils"
-	exchMock "github.com/alcionai/corso/src/internal/m365/exchange/mock"
 	"github.com/alcionai/corso/src/internal/m365/resource"
+	exchMock "github.com/alcionai/corso/src/internal/m365/service/exchange/mock"
 	"github.com/alcionai/corso/src/pkg/control"
 	"github.com/alcionai/corso/src/pkg/count"
 	"github.com/alcionai/corso/src/pkg/fault"
src/go.mod (12 changes)
@@ -8,7 +8,7 @@ require (
 	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0
 	github.com/alcionai/clues v0.0.0-20230728164842-7dc4795a43e4
 	github.com/armon/go-metrics v0.4.1
-	github.com/aws/aws-sdk-go v1.44.316
+	github.com/aws/aws-sdk-go v1.44.320
 	github.com/aws/aws-xray-sdk-go v1.8.1
 	github.com/cenkalti/backoff/v4 v4.2.1
 	github.com/google/uuid v1.3.0
@@ -35,7 +35,7 @@ require (
 	go.uber.org/zap v1.25.0
 	golang.org/x/exp v0.0.0-20230801115018-d63ba01acd4b
 	golang.org/x/time v0.3.0
-	golang.org/x/tools v0.11.1
+	golang.org/x/tools v0.12.0
 )
 
 require (
@@ -116,12 +116,12 @@ require (
 	go.opentelemetry.io/otel v1.16.0 // indirect
 	go.opentelemetry.io/otel/trace v1.16.0 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
-	golang.org/x/crypto v0.11.0 // indirect
+	golang.org/x/crypto v0.12.0 // indirect
 	golang.org/x/mod v0.12.0 // indirect
-	golang.org/x/net v0.13.0
+	golang.org/x/net v0.14.0
 	golang.org/x/sync v0.3.0 // indirect
-	golang.org/x/sys v0.10.0 // indirect
+	golang.org/x/sys v0.11.0 // indirect
-	golang.org/x/text v0.11.0 // indirect
+	golang.org/x/text v0.12.0 // indirect
 	google.golang.org/grpc v1.57.0 // indirect
 	google.golang.org/protobuf v1.31.0 // indirect
 	gopkg.in/ini.v1 v1.67.0 // indirect
src/go.sum (24 changes)
@@ -66,8 +66,8 @@ github.com/andybalholm/brotli v1.0.5 h1:8uQZIdzKmjc/iuPu7O2ioW48L81FgatrcpfFmiq/
 github.com/andybalholm/brotli v1.0.5/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
 github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
 github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
-github.com/aws/aws-sdk-go v1.44.316 h1:UC3alCEyzj2XU13ZFGIOHW3yjCNLGTIGVauyetl9fwE=
-github.com/aws/aws-sdk-go v1.44.316/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
+github.com/aws/aws-sdk-go v1.44.320 h1:o2cno15HVUYj+IAgZHJ5No6ifAxwa2HcluzahMEPfOw=
+github.com/aws/aws-sdk-go v1.44.320/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
 github.com/aws/aws-xray-sdk-go v1.8.1 h1:O4pXV+hnCskaamGsZnFpzHyAmgPGusBMN6i7nnsy0Fo=
 github.com/aws/aws-xray-sdk-go v1.8.1/go.mod h1:wMmVYzej3sykAttNBkXQHK/+clAPWTOrPiajEk7Cp3A=
 github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A=
@@ -448,8 +448,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
 golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
 golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
 golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA=
-golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
+golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk=
+golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -524,8 +524,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
 golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
 golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
-golang.org/x/net v0.13.0 h1:Nvo8UFsZ8X3BhAC9699Z1j7XQ3rsZnUUm7jfBEk1ueY=
-golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
+golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14=
+golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -597,8 +597,8 @@ golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA=
-golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
+golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -611,8 +611,8 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
 golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4=
-golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc=
+golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
 golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -666,8 +666,8 @@ golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4f
 golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
 golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
-golang.org/x/tools v0.11.1 h1:ojD5zOW8+7dOGzdnNgersm8aPfcDjhMp12UfG93NIMc=
-golang.org/x/tools v0.11.1/go.mod h1:anzJrxPjNtfgiYQYirP2CPGzGLxrH2u2QBhn6Bf3qY8=
+golang.org/x/tools v0.12.0 h1:YW6HUoUmYBpwSgyaGaZq1fHjrBjX1rlpZ54T6mu2kss=
+golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
src/internal/common/readers/retry_handler.go (new file, 228 lines)
@@ -0,0 +1,228 @@
+package readers
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"syscall"
+	"time"
+
+	"github.com/alcionai/clues"
+
+	"github.com/alcionai/corso/src/pkg/logger"
+)
+
+var _ io.ReadCloser = &resetRetryHandler{}
+
+const (
+	minSleepTime   = 3
+	numMaxRetries  = 3
+	rangeHeaderKey = "Range"
+	// One-sided range like this is defined as starting at the given byte and
+	// extending to the end of the item.
+	rangeHeaderOneSidedValueTmpl = "bytes=%d-"
+)
+
+// Could make this per wrapper instance if we need additional flexibility
+// between callers.
+var retryErrs = []error{
+	syscall.ECONNRESET,
+}
+
+type Getter interface {
+	// SupportsRange returns true if this Getter supports adding Range headers to
+	// the Get call. Otherwise returns false.
+	SupportsRange() bool
+	// Get attempts to get another reader for the data this reader is returning.
+	// headers denotes any additional headers that should be added to the request,
+	// like a Range header.
+	//
+	// Don't allow passing a URL to Get so that we can hide the fact that some
+	// components may need to dynamically refresh the fetch URL (i.e. OneDrive)
+	// from this wrapper.
+	//
+	// Get should encapsulate all error handling and status code checking required
+	// for the component. This function is called both during NewResetRetryHandler
+	// and Read so it's possible to discover errors with the item prior to
+	// informing other components about it if desired.
+	Get(ctx context.Context, headers map[string]string) (io.ReadCloser, error)
+}
+
+// NewResetRetryHandler returns an io.ReadCloser with the reader initialized to
+// the result of getter. The reader is eagerly initialized during this call so
+// if callers of this function want to delay initialization they should wrap
+// this reader in a lazy initializer.
+//
+// Selected errors that the reader hits during Read calls (e.g.
+// syscall.ECONNRESET) will be automatically retried by the returned reader.
+func NewResetRetryHandler(
+	ctx context.Context,
+	getter Getter,
+) (*resetRetryHandler, error) {
+	rrh := &resetRetryHandler{
+		ctx:    ctx,
+		getter: getter,
+	}
+
+	// Retry logic encapsulated in reconnect so no need for it here.
+	_, err := rrh.reconnect(numMaxRetries)
+
+	return rrh, clues.Wrap(err, "initializing reader").OrNil()
+}
+
+//nolint:unused
+type resetRetryHandler struct {
+	ctx         context.Context
+	getter      Getter
+	innerReader io.ReadCloser
+	offset      int64
+}
+
+func isRetriable(err error) bool {
+	if err == nil {
+		return false
+	}
+
+	for _, e := range retryErrs {
+		if errors.Is(err, e) {
+			return true
+		}
+	}
+
+	return false
+}
+
+func (rrh *resetRetryHandler) Read(p []byte) (int, error) {
+	if rrh.innerReader == nil {
+		return 0, clues.New("not initialized")
+	}
+
+	var (
+		// Use separate error variable just to make other assignments in the loop a
+		// bit cleaner.
+		finalErr   error
+		read       int
+		numRetries int
+	)
+
+	// Still need to check retry count in loop header so we don't go through one
+	// last time after failing to reconnect due to exhausting retries.
+	for numRetries < numMaxRetries {
+		n, err := rrh.innerReader.Read(p[read:])
+		rrh.offset = rrh.offset + int64(n)
+		read = read + n
+
+		// Catch short reads with no error and errors we don't know how to retry.
+		if !isRetriable(err) {
+			// Not everything knows how to handle a wrapped version of EOF (including
+			// io.ReadAll) so return the error itself here.
+			if errors.Is(err, io.EOF) {
+				// Log info about the error, but only if it's not directly an EOF.
+				// Otherwise this can be rather chatty and annoying to filter out.
+				if err != io.EOF {
+					logger.CtxErr(rrh.ctx, err).Debug("dropping wrapped io.EOF")
+				}
+
+				return read, io.EOF
+			}
+
+			return read, clues.Stack(err).WithClues(rrh.ctx).OrNil()
+		}
+
+		logger.Ctx(rrh.ctx).Infow(
+			"restarting reader",
+			"supports_range", rrh.getter.SupportsRange(),
+			"restart_at_offset", rrh.offset,
+			"retries_remaining", numMaxRetries-numRetries,
+			"retriable_error", err)
+
+		attempts, err := rrh.reconnect(numMaxRetries - numRetries)
+		numRetries = numRetries + attempts
+		finalErr = err
+	}
+
+	// We couldn't read anything through all the retries but never had an error
+	// getting another reader. Report this as an error so we don't get stuck in an
+	// infinite loop.
+	if read == 0 && finalErr == nil && numRetries >= numMaxRetries {
+		finalErr = clues.Wrap(io.ErrNoProgress, "unable to read data")
+	}
+
+	return read, clues.Stack(finalErr).OrNil()
+}
+
+// reconnect attempts to get another instance of the underlying reader and set
+// the reader to pick up where the previous reader left off.
+//
+// Since this function can be called by functions that also implement retries on
+// read errors, pass an int in to denote how many times to attempt to reconnect.
+// This avoids multiplicative retries when called from other functions.
+func (rrh *resetRetryHandler) reconnect(maxRetries int) (int, error) {
+	var (
+		attempts int
+		skip     = rrh.offset
+		headers  = map[string]string{}
+		// This is annoying but we want the equivalent of a do-while loop.
+		err = retryErrs[0]
+	)
+
+	// Only set the range header if we've already read data. Otherwise we could
+	// get 416 (range not satisfiable) if the file is empty.
+	if rrh.getter.SupportsRange() && rrh.offset > 0 {
+		headers[rangeHeaderKey] = fmt.Sprintf(
+			rangeHeaderOneSidedValueTmpl,
+			rrh.offset)
+		skip = 0
+	}
+
+	ctx := clues.Add(
+		rrh.ctx,
+		"supports_range", rrh.getter.SupportsRange(),
+		"restart_at_offset", rrh.offset)
+
+	for attempts < maxRetries && isRetriable(err) {
+		// Attempts will be 0 the first time through so it won't sleep then.
+		time.Sleep(time.Duration(attempts*minSleepTime) * time.Second)
+
+		attempts++
+
+		var r io.ReadCloser
+
+		r, err = rrh.getter.Get(ctx, headers)
+		if err != nil {
+			err = clues.Wrap(err, "retrying connection").
+				WithClues(ctx).
+				With("attempt_num", attempts)
+
+			continue
+		}
+
+		if rrh.innerReader != nil {
+			rrh.innerReader.Close()
+		}
+
+		rrh.innerReader = r
+
+		// If we can't request a specific range of content then read as many bytes
+		// as we've already processed into the equivalent of /dev/null so that the
+		// next read will get content we haven't seen before.
+		if skip > 0 {
+			_, err = io.CopyN(io.Discard, rrh.innerReader, skip)
+			if err != nil {
+				err = clues.Wrap(err, "seeking to correct offset").
+					WithClues(ctx).
+					With("attempt_num", attempts)
+			}
+		}
+	}
+
+	return attempts, err
+}
+
+func (rrh *resetRetryHandler) Close() error {
+	err := rrh.innerReader.Close()
+	rrh.innerReader = nil
+
+	return clues.Stack(err).OrNil()
+}
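Reviewer note: for context on how the new reader is meant to be consumed, here is a hedged usage sketch. The httpGetter type and the URL are hypothetical illustrations; only readers.NewResetRetryHandler and the Getter interface come from the file above. Get re-issues the fetch and forwards whatever headers (such as Range) the handler supplies.

package main

import (
	"context"
	"io"
	"net/http"

	"github.com/alcionai/corso/src/internal/common/readers"
)

// httpGetter is a hypothetical Getter that re-fetches a fixed URL and
// forwards the headers (e.g. Range) requested by the retry handler.
type httpGetter struct {
	url string
}

func (g httpGetter) SupportsRange() bool { return true }

func (g httpGetter) Get(
	ctx context.Context,
	headers map[string]string,
) (io.ReadCloser, error) {
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, g.url, nil)
	if err != nil {
		return nil, err
	}

	for k, v := range headers {
		req.Header.Set(k, v)
	}

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}

	// A real implementation would also check resp.StatusCode here; Get is
	// expected to encapsulate all status handling for the component.
	return resp.Body, nil
}

func main() {
	ctx := context.Background()

	rc, err := readers.NewResetRetryHandler(ctx, httpGetter{url: "https://example.com/item"})
	if err != nil {
		return
	}
	defer rc.Close()

	// Reads transparently retry ECONNRESET, resuming via Range headers.
	_, _ = io.ReadAll(rc)
}

Note that the handler sleeps attempts*minSleepTime seconds between reconnect attempts (0s, then 3s, then 6s across the three retries), so a flaky source backs off rather than spinning.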
src/internal/common/readers/retry_handler_test.go (new file, 477 lines)
@@ -0,0 +1,477 @@
+package readers_test
+
+import (
+	"bytes"
+	"context"
+	"io"
+	"syscall"
+	"testing"
+
+	"github.com/alcionai/clues"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"github.com/stretchr/testify/suite"
+
+	"github.com/alcionai/corso/src/internal/common/readers"
+	"github.com/alcionai/corso/src/internal/tester"
+)
+
+type readResp struct {
+	read int
+	// sticky denotes whether the error should continue to be returned until reset
+	// is called.
+	sticky bool
+	err    error
+}
+
+type mockReader struct {
+	r    io.Reader
+	data []byte
+	// Associate return values for Read with calls. Allows partial reads as well.
+	// If a value for a particular read call is not in the map that means
+	// completing the request completely with no errors (i.e. all bytes requested
+	// are returned or as many as possible and EOF).
+	resps     map[int]readResp
+	callCount int
+	stickyErr error
+}
+
+func (mr *mockReader) Read(p []byte) (int, error) {
+	defer func() {
+		mr.callCount++
+	}()
+
+	if mr.r == nil {
+		mr.reset(0)
+	}
+
+	if mr.stickyErr != nil {
+		return 0, clues.Wrap(mr.stickyErr, "sticky error")
+	}
+
+	resp, ok := mr.resps[mr.callCount]
+	if !ok {
+		n, err := mr.r.Read(p)
+		return n, clues.Stack(err).OrNil()
+	}
+
+	n, err := mr.r.Read(p[:resp.read])
+
+	if resp.err != nil {
+		if resp.sticky {
+			mr.stickyErr = resp.err
+		}
+
+		return n, clues.Stack(resp.err)
+	}
+
+	return n, clues.Stack(err).OrNil()
+}
+
+func (mr *mockReader) reset(n int) {
+	mr.r = bytes.NewBuffer(mr.data[n:])
+	mr.stickyErr = nil
+}
+
+type getterResp struct {
+	offset int
+	err    error
+}
+
+type mockGetter struct {
+	t             *testing.T
+	supportsRange bool
+	reader        *mockReader
+	resps         map[int]getterResp
+	expectHeaders map[int]map[string]string
+	callCount     int
+}
+
+func (mg *mockGetter) SupportsRange() bool {
+	return mg.supportsRange
+}
+
+func (mg *mockGetter) Get(
+	ctx context.Context,
+	headers map[string]string,
+) (io.ReadCloser, error) {
+	defer func() {
+		mg.callCount++
+	}()
+
+	expectHeaders := mg.expectHeaders[mg.callCount]
+	if expectHeaders == nil {
+		expectHeaders = map[string]string{}
+	}
+
+	assert.Equal(mg.t, expectHeaders, headers)
+
+	resp := mg.resps[mg.callCount]
+
+	if resp.offset >= 0 {
+		mg.reader.reset(resp.offset)
+	}
+
+	return io.NopCloser(mg.reader), clues.Stack(resp.err).OrNil()
+}
+
+type ResetRetryHandlerUnitSuite struct {
+	tester.Suite
+}
+
+func TestResetRetryHandlerUnitSuite(t *testing.T) {
+	suite.Run(t, &ResetRetryHandlerUnitSuite{Suite: tester.NewUnitSuite(t)})
+}
+
+func (suite *ResetRetryHandlerUnitSuite) TestResetRetryHandler() {
+	data := []byte("abcdefghijklmnopqrstuvwxyz")
+	// Pick a smaller read size so we can see how things will act if we have a
+	// "chunked" set of data.
+	readSize := 4
+
+	table := []struct {
+		name          string
+		supportsRange bool
+		// 0th entry is the return data when trying to initialize the wrapper.
+		getterResps map[int]getterResp
+		// 0th entry is the return data when trying to initialize the wrapper.
+		getterExpectHeaders map[int]map[string]string
+		readerResps         map[int]readResp
+		expectData          []byte
+		expectErr           error
+	}{
+		{
+			name: "OnlyFirstGetErrors NoRangeSupport",
+			getterResps: map[int]getterResp{
+				0: {
+					err: syscall.ECONNRESET,
+				},
+			},
+			expectData: data,
+		},
+		{
+			name:          "OnlyFirstReadErrors RangeSupport",
+			supportsRange: true,
+			getterResps: map[int]getterResp{
+				0: {
+					err: syscall.ECONNRESET,
+				},
+			},
+			expectData: data,
+		},
+		{
+			name: "ErrorInMiddle NoRangeSupport",
+			readerResps: map[int]readResp{
+				3: {
+					read: 0,
+					err:  syscall.ECONNRESET,
+				},
+			},
+			expectData: data,
+		},
+		{
+			name:          "ErrorInMiddle RangeSupport",
+			supportsRange: true,
+			getterResps: map[int]getterResp{
+				1: {offset: 12},
+			},
+			getterExpectHeaders: map[int]map[string]string{
+				1: {"Range": "bytes=12-"},
+			},
+			readerResps: map[int]readResp{
+				3: {
+					read: 0,
+					err:  syscall.ECONNRESET,
+				},
+			},
+			expectData: data,
+		},
+		{
+			name: "MultipleErrorsInMiddle NoRangeSupport",
+			readerResps: map[int]readResp{
+				3: {
+					read: 0,
+					err:  syscall.ECONNRESET,
+				},
+				7: {
+					read: 0,
+					err:  syscall.ECONNRESET,
+				},
+			},
+			expectData: data,
+		},
+		{
+			name:          "MultipleErrorsInMiddle RangeSupport",
+			supportsRange: true,
+			getterResps: map[int]getterResp{
+				1: {offset: 12},
+				2: {offset: 20},
+			},
+			getterExpectHeaders: map[int]map[string]string{
+				1: {"Range": "bytes=12-"},
+				2: {"Range": "bytes=20-"},
+			},
+			readerResps: map[int]readResp{
+				3: {
+					read: 0,
+					err:  syscall.ECONNRESET,
+				},
+				6: {
+					read: 0,
+					err:  syscall.ECONNRESET,
+				},
+			},
+			expectData: data,
+		},
+		{
+			name: "ShortReadWithError NoRangeSupport",
+			readerResps: map[int]readResp{
+				3: {
+					read: readSize / 2,
+					err:  syscall.ECONNRESET,
+				},
+			},
+			expectData: data,
+		},
+		{
+			name:          "ShortReadWithError RangeSupport",
+			supportsRange: true,
+			getterResps: map[int]getterResp{
+				1: {offset: 14},
+			},
+			getterExpectHeaders: map[int]map[string]string{
+				1: {"Range": "bytes=14-"},
+			},
+			readerResps: map[int]readResp{
+				3: {
+					read: readSize / 2,
+					err:  syscall.ECONNRESET,
+				},
+			},
+			expectData: data,
+		},
+		{
+			name: "ErrorAtEndOfRead NoRangeSupport",
+			readerResps: map[int]readResp{
+				3: {
+					read:   readSize,
+					sticky: true,
+					err:    syscall.ECONNRESET,
+				},
+			},
+			expectData: data,
+		},
+		{
+			name:          "ErrorAtEndOfRead RangeSupport",
+			supportsRange: true,
+			getterResps: map[int]getterResp{
+				1: {offset: 16},
+			},
+			getterExpectHeaders: map[int]map[string]string{
+				1: {"Range": "bytes=16-"},
+			},
+			readerResps: map[int]readResp{
+				3: {
+					read:   readSize,
+					sticky: true,
+					err:    syscall.ECONNRESET,
+				},
+			},
+			expectData: data,
+		},
+		{
+			name: "UnexpectedError NoRangeSupport",
+			readerResps: map[int]readResp{
+				3: {
+					read: 0,
+					err:  assert.AnError,
+				},
+			},
+			expectData: data[:12],
+			expectErr:  assert.AnError,
+		},
+		{
+			name:          "UnexpectedError RangeSupport",
+			supportsRange: true,
+			getterResps: map[int]getterResp{
+				1: {offset: 12},
+			},
+			getterExpectHeaders: map[int]map[string]string{
+				1: {"Range": "bytes=12-"},
+			},
+			readerResps: map[int]readResp{
+				3: {
+					read: 0,
+					err:  assert.AnError,
+				},
+			},
+			expectData: data[:12],
+			expectErr:  assert.AnError,
+		},
+		{
+			name: "ErrorWhileSeeking NoRangeSupport",
+			readerResps: map[int]readResp{
+				3: {
+					read: 0,
+					err:  syscall.ECONNRESET,
+				},
+				4: {
+					read: 0,
+					err:  syscall.ECONNRESET,
+				},
+			},
+			expectData: data,
+		},
+		{
+			name: "ShortReadNoError NoRangeSupport",
+			readerResps: map[int]readResp{
+				3: {
+					read: readSize / 2,
+				},
+			},
+			expectData: data,
+		},
+		{
+			name:          "ShortReadNoError RangeSupport",
+			supportsRange: true,
+			getterResps: map[int]getterResp{
+				1: {offset: 14},
+			},
+			getterExpectHeaders: map[int]map[string]string{
+				1: {"Range": "bytes=14-"},
+			},
+			readerResps: map[int]readResp{
+				3: {
+					read: readSize / 2,
+				},
+			},
+			expectData: data,
+		},
+		{
+			name: "TooManyRetriesDuringRead NoRangeSupport",
+			// Fail the final reconnect attempt so we run out of retries. Otherwise we
+			// exit with a short read and successful reconnect.
+			getterResps: map[int]getterResp{
+				3: {err: syscall.ECONNRESET},
+			},
+			// Even numbered read requests are seeks to the proper offset.
+			readerResps: map[int]readResp{
+				3: {
+					read: 0,
+					err:  syscall.ECONNRESET,
+				},
+				5: {
+					read: 1,
+					err:  syscall.ECONNRESET,
+				},
+				7: {
+					read: 1,
+					err:  syscall.ECONNRESET,
+				},
+			},
+			expectData: data[:14],
+			expectErr:  syscall.ECONNRESET,
+		},
+		{
+			name:          "TooManyRetriesDuringRead RangeSupport",
+			supportsRange: true,
+			getterResps: map[int]getterResp{
+				1: {offset: 12},
+				2: {offset: 12},
+				3: {err: syscall.ECONNRESET},
+			},
+			getterExpectHeaders: map[int]map[string]string{
+				1: {"Range": "bytes=12-"},
+				2: {"Range": "bytes=13-"},
+				3: {"Range": "bytes=14-"},
+			},
+			readerResps: map[int]readResp{
+				3: {
+					read: 0,
+					err:  syscall.ECONNRESET,
+				},
+				4: {
+					read: 1,
+					err:  syscall.ECONNRESET,
+				},
+				5: {
+					read: 1,
+					err:  syscall.ECONNRESET,
+				},
+			},
+			expectData: data[:14],
+			expectErr:  syscall.ECONNRESET,
+		},
+		{
+			name:          "TooManyRetriesDuringRead AlwaysReturnError RangeSupport",
+			supportsRange: true,
+			getterResps: map[int]getterResp{
+				1: {offset: -1},
+				2: {offset: -1},
+				3: {offset: -1},
+				4: {offset: -1},
+				5: {offset: -1},
+			},
+			readerResps: map[int]readResp{
+				0: {
+					sticky: true,
+					err:    syscall.ECONNRESET,
+				},
+			},
+			expectData: []byte{},
+			expectErr:  io.ErrNoProgress,
+		},
+	}
+
+	for _, test := range table {
+		suite.Run(test.name, func() {
+			t := suite.T()
+
+			ctx, flush := tester.NewContext(t)
+			defer flush()
+
+			reader := &mockReader{
+				data:  data,
+				resps: test.readerResps,
+			}
+
+			getter := &mockGetter{
+				t:             t,
+				supportsRange: test.supportsRange,
+				reader:        reader,
+				resps:         test.getterResps,
+				expectHeaders: test.getterExpectHeaders,
+			}
+
+			var (
+				err     error
+				n       int
+				offset  int
+				resData = make([]byte, len(data))
+			)
+
+			rrh, err := readers.NewResetRetryHandler(ctx, getter)
+			require.NoError(t, err, "making reader wrapper: %v", clues.ToCore(err))
+
+			for err == nil && offset < len(data) {
+				end := offset + readSize
+				if end > len(data) {
+					end = len(data)
+				}
+
+				n, err = rrh.Read(resData[offset:end])
+
+				offset = offset + n
+			}
+
+			assert.Equal(t, test.expectData, data[:offset])
+
+			if test.expectErr == nil {
+				assert.NoError(t, err, clues.ToCore(err))
+				return
+			}
+
+			assert.ErrorIs(t, err, test.expectErr, clues.ToCore(err))
+		})
+	}
+}
@@ -138,6 +138,9 @@ type StreamSize interface {
 }
 
 // StreamModTime is used to provide the modified time of the stream's data.
+//
+// If an item implements StreamModTime and StreamInfo it should return the same
+// value here as in item.Info().Modified().
 type StreamModTime interface {
 	ModTime() time.Time
 }
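Reviewer note: a small, hedged sketch of the contract the added comment describes. The fileItem type and its fields are hypothetical; only the StreamModTime interface comes from the hunk above.

package data

import (
	"io"
	"time"
)

// fileItem is a hypothetical item. If it also implemented StreamInfo, its
// Info().Modified() would have to report the same value ModTime returns.
type fileItem struct {
	modified time.Time
	reader   io.ReadCloser
}

// ModTime satisfies StreamModTime.
func (f fileItem) ModTime() time.Time {
	return f.modified
}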
@@ -8,6 +8,7 @@ import (
 	"golang.org/x/exp/slices"
 
 	"github.com/alcionai/corso/src/internal/version"
+	"github.com/alcionai/corso/src/pkg/backup/identity"
 	"github.com/alcionai/corso/src/pkg/logger"
 )
 
@@ -16,6 +17,7 @@ import (
 type BackupBases interface {
 	RemoveMergeBaseByManifestID(manifestID manifest.ID)
 	Backups() []BackupEntry
+	AssistBackups() []BackupEntry
 	MinBackupVersion() int
 	MergeBases() []ManifestEntry
 	ClearMergeBases()
@@ -24,16 +26,17 @@ type BackupBases interface {
 	MergeBackupBases(
 		ctx context.Context,
 		other BackupBases,
-		reasonToKey func(Reasoner) string,
+		reasonToKey func(identity.Reasoner) string,
 	) BackupBases
 }
 
 type backupBases struct {
 	// backups and mergeBases should be modified together as they relate similar
 	// data.
 	backups    []BackupEntry
 	mergeBases []ManifestEntry
-	assistBases []ManifestEntry
+	assistBackups []BackupEntry
+	assistBases   []ManifestEntry
 }
 
 func (bb *backupBases) RemoveMergeBaseByManifestID(manifestID manifest.ID) {
@@ -71,6 +74,10 @@ func (bb backupBases) Backups() []BackupEntry {
 	return slices.Clone(bb.backups)
 }
 
+func (bb backupBases) AssistBackups() []BackupEntry {
+	return slices.Clone(bb.assistBackups)
+}
+
 func (bb *backupBases) MinBackupVersion() int {
 	min := version.NoBackup
 
@@ -116,16 +123,16 @@ func (bb *backupBases) ClearAssistBases() {
 //
 // Selection priority, for each reason key generated by reasonsToKey, follows
 // these rules:
-// 1. If the called BackupBases has an entry for a given resaon, ignore the
+// 1. If the called BackupBases has an entry for a given reason, ignore the
 // other BackupBases matching that reason.
-// 2. If the the receiver BackupBases has only AssistBases, look for a matching
-// MergeBase manifest in the passed in BackupBases.
+// 2. If the called BackupBases has only AssistBases, look for a matching
+// MergeBase manifest in the other BackupBases.
-// 3. If the called BackupBases has no entry for a reason, look for both
-// AssistBases and MergeBases in the passed in BackupBases.
+// 3. If the called BackupBases has no entry for a reason, look for a matching
+// MergeBase in the other BackupBases.
 func (bb *backupBases) MergeBackupBases(
 	ctx context.Context,
 	other BackupBases,
-	reasonToKey func(reason Reasoner) string,
+	reasonToKey func(reason identity.Reasoner) string,
 ) BackupBases {
 	if other == nil || (len(other.MergeBases()) == 0 && len(other.AssistBases()) == 0) {
 		return bb
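Reviewer note: the signature change above only swaps Reasoner for identity.Reasoner, so a hedged sketch of what a reasonToKey function might look like may help review. The Service and Category accessor names below are assumptions for illustration, not confirmed identity.Reasoner API.

// serviceCategoryKey is a hypothetical reason key: two Reasoners collide
// when they cover the same service and data category. Service() and
// Category() are assumed accessors, not confirmed API.
func serviceCategoryKey(r identity.Reasoner) string {
	return fmt.Sprintf("%s-%s", r.Service(), r.Category())
}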
@@ -159,7 +166,7 @@ func (bb *backupBases) MergeBackupBases(
 
 	// Calculate the set of mergeBases to pull from other into this one.
 	for _, m := range other.MergeBases() {
-		useReasons := []Reasoner{}
+		useReasons := []identity.Reasoner{}
 
 		for _, r := range m.Reasons {
 			k := reasonToKey(r)
@@ -183,6 +190,10 @@ func (bb *backupBases) MergeBackupBases(
 		backups:     bb.Backups(),
 		mergeBases:  bb.MergeBases(),
 		assistBases: bb.AssistBases(),
+		// Note that assistBackups are a new feature and don't exist
+		// in prior versions where we were using UPN based reasons i.e.
+		// other won't have any assistBackups.
+		assistBackups: bb.AssistBackups(),
 	}
 
 	// Add new mergeBases and backups.
@@ -205,39 +216,11 @@ func (bb *backupBases) MergeBackupBases(
 
 		res.backups = append(res.backups, bup)
 		res.mergeBases = append(res.mergeBases, man)
+		// TODO(pandeyabs): Remove this once we remove overlap between
+		// merge and assist bases as part of #3943.
 		res.assistBases = append(res.assistBases, man)
 	}
 
-	// Add assistBases from other to this one as needed.
-	for _, m := range other.AssistBases() {
-		useReasons := []Reasoner{}
-
-		// Assume that all complete manifests in assist overlap with MergeBases.
-		if len(m.IncompleteReason) == 0 {
-			continue
-		}
-
-		for _, r := range m.Reasons {
-			k := reasonToKey(r)
-			if _, ok := assist[k]; ok {
-				// This reason is already covered by either:
-				// * complete manifest in bb
-				// * incomplete manifest in bb
-				//
-				// If it was already in the assist set then it must be the case that
-				// it's newer than any complete manifests in other for the same reason.
-				continue
-			}
-
-			useReasons = append(useReasons, r)
-		}
-
-		if len(useReasons) > 0 {
-			m.Reasons = useReasons
-			res.assistBases = append(res.assistBases, m)
-		}
-	}
-
 	return res
 }
@@ -326,12 +309,16 @@ func getBackupByID(backups []BackupEntry, bID string) (BackupEntry, bool) {
 // pull. On the other hand, *not* dropping them is unsafe as it will muck up
 // merging when we add stuff to kopia (possibly multiple entries for the same
 // item etc).
+//
+// TODO(pandeyabs): Refactor common code into a helper as part of #3943.
 func (bb *backupBases) fixupAndVerify(ctx context.Context) {
 	toDrop := findNonUniqueManifests(ctx, bb.mergeBases)
 
 	var (
 		backupsToKeep []BackupEntry
-		mergeToKeep   []ManifestEntry
+		assistBackupsToKeep []BackupEntry
+		mergeToKeep         []ManifestEntry
+		assistToKeep        []ManifestEntry
 	)
 
 	for _, man := range bb.mergeBases {
@ -346,7 +333,7 @@ func (bb *backupBases) fixupAndVerify(ctx context.Context) {
|
|||||||
toDrop[man.ID] = struct{}{}
|
toDrop[man.ID] = struct{}{}
|
||||||
|
|
||||||
logger.Ctx(ctx).Info(
|
logger.Ctx(ctx).Info(
|
||||||
"dropping manifest due to missing backup",
|
"dropping merge base due to missing backup",
|
||||||
"manifest_id", man.ID)
|
"manifest_id", man.ID)
|
||||||
|
|
||||||
continue
|
continue
|
||||||
@ -361,7 +348,7 @@ func (bb *backupBases) fixupAndVerify(ctx context.Context) {
|
|||||||
toDrop[man.ID] = struct{}{}
|
toDrop[man.ID] = struct{}{}
|
||||||
|
|
||||||
logger.Ctx(ctx).Info(
|
logger.Ctx(ctx).Info(
|
||||||
"dropping manifest due to invalid backup",
|
"dropping merge base due to invalid backup",
|
||||||
"manifest_id", man.ID)
|
"manifest_id", man.ID)
|
||||||
|
|
||||||
continue
|
continue
|
||||||
@ -371,9 +358,9 @@ func (bb *backupBases) fixupAndVerify(ctx context.Context) {
|
|||||||
mergeToKeep = append(mergeToKeep, man)
|
mergeToKeep = append(mergeToKeep, man)
|
||||||
}
|
}
|
||||||
|
|
||||||
var assistToKeep []ManifestEntry
|
// Every merge base is also a kopia assist base.
|
||||||
|
// TODO(pandeyabs): This should be removed as part of #3943.
|
||||||
for _, man := range bb.assistBases {
|
for _, man := range bb.mergeBases {
|
||||||
if _, ok := toDrop[man.ID]; ok {
|
if _, ok := toDrop[man.ID]; ok {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
@ -381,7 +368,48 @@ func (bb *backupBases) fixupAndVerify(ctx context.Context) {
|
|||||||
assistToKeep = append(assistToKeep, man)
|
assistToKeep = append(assistToKeep, man)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Drop assist snapshots with overlapping reasons.
|
||||||
|
toDropAssists := findNonUniqueManifests(ctx, bb.assistBases)
|
||||||
|
|
||||||
|
for _, man := range bb.assistBases {
|
||||||
|
if _, ok := toDropAssists[man.ID]; ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
bID, _ := man.GetTag(TagBackupID)
|
||||||
|
|
||||||
|
bup, ok := getBackupByID(bb.assistBackups, bID)
|
||||||
|
if !ok {
|
||||||
|
toDrop[man.ID] = struct{}{}
|
||||||
|
|
||||||
|
logger.Ctx(ctx).Info(
|
||||||
|
"dropping assist base due to missing backup",
|
||||||
|
"manifest_id", man.ID)
|
||||||
|
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
deetsID := bup.StreamStoreID
|
||||||
|
if len(deetsID) == 0 {
|
||||||
|
deetsID = bup.DetailsID
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(bup.SnapshotID) == 0 || len(deetsID) == 0 {
|
||||||
|
toDrop[man.ID] = struct{}{}
|
||||||
|
|
||||||
|
logger.Ctx(ctx).Info(
|
||||||
|
"dropping assist base due to invalid backup",
|
||||||
|
"manifest_id", man.ID)
|
||||||
|
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
assistBackupsToKeep = append(assistBackupsToKeep, bup)
|
||||||
|
assistToKeep = append(assistToKeep, man)
|
||||||
|
}
|
||||||
|
|
||||||
bb.backups = backupsToKeep
|
bb.backups = backupsToKeep
|
||||||
bb.mergeBases = mergeToKeep
|
bb.mergeBases = mergeToKeep
|
||||||
bb.assistBases = assistToKeep
|
bb.assistBases = assistToKeep
|
||||||
|
bb.assistBackups = assistBackupsToKeep
|
||||||
}
|
}
|
||||||
|
|||||||
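The validity rule fixupAndVerify applies to both merge and assist backups above reduces to: a usable backup must carry an item snapshot ID and either a stream store ID or a legacy details ID. A minimal sketch of that predicate follows, with a simplified struct standing in for the real Backup model.

package main

import "fmt"

// backupRecord is a simplified stand-in for the Backup model; only the
// three IDs that the validity check inspects are modeled.
type backupRecord struct {
    SnapshotID    string
    StreamStoreID string
    DetailsID     string // legacy location of the details ID
}

// usable mirrors the check in fixupAndVerify: prefer StreamStoreID and
// fall back to the older DetailsID field before declaring the backup valid.
func usable(b backupRecord) bool {
    deetsID := b.StreamStoreID
    if len(deetsID) == 0 {
        deetsID = b.DetailsID
    }

    return len(b.SnapshotID) > 0 && len(deetsID) > 0
}

func main() {
    fmt.Println(usable(backupRecord{SnapshotID: "snap", StreamStoreID: "ss"})) // true
    fmt.Println(usable(backupRecord{SnapshotID: "snap", DetailsID: "deets"}))  // true (legacy)
    fmt.Println(usable(backupRecord{StreamStoreID: "ss"}))                     // false, no snapshot
}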
@ -13,10 +13,11 @@ import (
     "github.com/alcionai/corso/src/internal/tester"
     "github.com/alcionai/corso/src/internal/version"
     "github.com/alcionai/corso/src/pkg/backup"
+    "github.com/alcionai/corso/src/pkg/backup/identity"
     "github.com/alcionai/corso/src/pkg/path"
 )

-func makeManifest(id, incmpl, bID string, reasons ...Reasoner) ManifestEntry {
+func makeManifest(id, incmpl, bID string, reasons ...identity.Reasoner) ManifestEntry {
     bIDKey, _ := makeTagKV(TagBackupID)

     return ManifestEntry{

@ -206,36 +207,25 @@ func (suite *BackupBasesUnitSuite) TestMergeBackupBases() {
     ro := "resource_owner"

     type testInput struct {
         id         int
-        incomplete bool
-        cat        []path.CategoryType
+        cat []path.CategoryType
     }

     // Make a function so tests can modify things without messing with each other.
-    makeBackupBases := func(ti []testInput) *backupBases {
+    makeBackupBases := func(mergeInputs []testInput, assistInputs []testInput) *backupBases {
         res := &backupBases{}

-        for _, i := range ti {
+        for _, i := range mergeInputs {
             baseID := fmt.Sprintf("id%d", i.id)
-            ir := ""
-
-            if i.incomplete {
-                ir = "checkpoint"
-            }
-
-            reasons := make([]Reasoner, 0, len(i.cat))
+            reasons := make([]identity.Reasoner, 0, len(i.cat))

             for _, c := range i.cat {
                 reasons = append(reasons, NewReason("", ro, path.ExchangeService, c))
             }

-            m := makeManifest(baseID, ir, "b"+baseID, reasons...)
+            m := makeManifest(baseID, "", "b"+baseID, reasons...)
             res.assistBases = append(res.assistBases, m)

-            if i.incomplete {
-                continue
-            }
-
             b := BackupEntry{
                 Backup: &backup.Backup{
                     BaseModel: model.BaseModel{ID: model.StableID("b" + baseID)},

@ -249,192 +239,217 @@ func (suite *BackupBasesUnitSuite) TestMergeBackupBases() {
             res.mergeBases = append(res.mergeBases, m)
         }

+        for _, i := range assistInputs {
+            baseID := fmt.Sprintf("id%d", i.id)
+
+            reasons := make([]identity.Reasoner, 0, len(i.cat))
+
+            for _, c := range i.cat {
+                reasons = append(reasons, NewReason("", ro, path.ExchangeService, c))
+            }
+
+            m := makeManifest(baseID, "", "a"+baseID, reasons...)
+
+            b := BackupEntry{
+                Backup: &backup.Backup{
+                    BaseModel: model.BaseModel{
+                        ID:   model.StableID("a" + baseID),
+                        Tags: map[string]string{model.BackupTypeTag: model.AssistBackup},
+                    },
+                    SnapshotID:    baseID,
+                    StreamStoreID: "ss" + baseID,
+                },
+                Reasons: reasons,
+            }
+
+            res.assistBackups = append(res.assistBackups, b)
+            res.assistBases = append(res.assistBases, m)
+        }
+
         return res
     }

     table := []struct {
         name   string
-        bb     []testInput
-        other  []testInput
-        expect []testInput
+        merge       []testInput
+        assist      []testInput
+        otherMerge  []testInput
+        otherAssist []testInput
+        expect      func() *backupBases
     }{
         {
             name: "Other Empty",
-            bb: []testInput{
+            merge: []testInput{
                 {cat: []path.CategoryType{path.EmailCategory}},
             },
-            expect: []testInput{
+            assist: []testInput{
                 {cat: []path.CategoryType{path.EmailCategory}},
             },
+            expect: func() *backupBases {
+                bs := makeBackupBases([]testInput{
+                    {cat: []path.CategoryType{path.EmailCategory}},
+                }, []testInput{
+                    {cat: []path.CategoryType{path.EmailCategory}},
+                })
+
+                return bs
+            },
         },
         {
-            name: "BB Empty",
-            other: []testInput{
+            name: "current Empty",
+            otherMerge: []testInput{
                 {cat: []path.CategoryType{path.EmailCategory}},
             },
-            expect: []testInput{
+            otherAssist: []testInput{
                 {cat: []path.CategoryType{path.EmailCategory}},
             },
+            expect: func() *backupBases {
+                bs := makeBackupBases([]testInput{
+                    {cat: []path.CategoryType{path.EmailCategory}},
+                }, []testInput{
+                    {cat: []path.CategoryType{path.EmailCategory}},
+                })
+
+                return bs
+            },
         },
         {
-            name: "Other overlaps Complete And Incomplete",
-            bb: []testInput{
-                {cat: []path.CategoryType{path.EmailCategory}},
+            name: "Other overlaps merge and assist",
+            merge: []testInput{
                 {
                     id:  1,
                     cat: []path.CategoryType{path.EmailCategory},
-                    incomplete: true,
                 },
             },
-            other: []testInput{
+            assist: []testInput{
+                {
+                    id:  4,
+                    cat: []path.CategoryType{path.EmailCategory},
+                },
+            },
+            otherMerge: []testInput{
                 {
                     id:  2,
                     cat: []path.CategoryType{path.EmailCategory},
                 },
                 {
                     id:  3,
                     cat: []path.CategoryType{path.EmailCategory},
-                    incomplete: true,
                 },
             },
-            expect: []testInput{
-                {cat: []path.CategoryType{path.EmailCategory}},
+            otherAssist: []testInput{
                 {
-                    id:  1,
+                    id:  5,
                     cat: []path.CategoryType{path.EmailCategory},
-                    incomplete: true,
                 },
             },
+            expect: func() *backupBases {
+                bs := makeBackupBases([]testInput{
+                    {
+                        id:  1,
+                        cat: []path.CategoryType{path.EmailCategory},
+                    },
+                }, []testInput{
+                    {
+                        id:  4,
+                        cat: []path.CategoryType{path.EmailCategory},
+                    },
+                })
+
+                return bs
+            },
         },
         {
-            name: "Other Overlaps Complete",
-            bb: []testInput{
-                {cat: []path.CategoryType{path.EmailCategory}},
+            name: "Other overlaps merge",
+            merge: []testInput{
+                {
+                    id:  1,
+                    cat: []path.CategoryType{path.EmailCategory},
+                },
             },
-            other: []testInput{
+            otherMerge: []testInput{
                 {
                     id:  2,
                     cat: []path.CategoryType{path.EmailCategory},
                 },
             },
-            expect: []testInput{
-                {cat: []path.CategoryType{path.EmailCategory}},
+            expect: func() *backupBases {
+                bs := makeBackupBases([]testInput{
+                    {
+                        id:  1,
+                        cat: []path.CategoryType{path.EmailCategory},
+                    },
+                }, nil)
+
+                return bs
             },
         },
         {
-            name: "Other Overlaps Incomplete",
-            bb: []testInput{
+            name: "Current assist overlaps with Other merge",
+            assist: []testInput{
                 {
-                    id:  1,
+                    id:  3,
                     cat: []path.CategoryType{path.EmailCategory},
-                    incomplete: true,
                 },
             },
-            other: []testInput{
+            otherMerge: []testInput{
+                {
+                    id:  1,
+                    cat: []path.CategoryType{path.EmailCategory},
+                },
+            },
+            otherAssist: []testInput{
                 {
                     id:  2,
                     cat: []path.CategoryType{path.EmailCategory},
                 },
-                {
-                    id:  3,
-                    cat: []path.CategoryType{path.EmailCategory},
-                    incomplete: true,
-                },
             },
-            expect: []testInput{
-                {
-                    id:  1,
-                    cat: []path.CategoryType{path.EmailCategory},
-                    incomplete: true,
-                },
-                {
-                    id:  2,
-                    cat: []path.CategoryType{path.EmailCategory},
-                },
+            expect: func() *backupBases {
+                bs := makeBackupBases([]testInput{
+                    {
+                        id:  1,
+                        cat: []path.CategoryType{path.EmailCategory},
+                    },
+                }, []testInput{
+                    {
+                        id:  3,
+                        cat: []path.CategoryType{path.EmailCategory},
+                    },
+                })
+
+                return bs
             },
         },
         {
             name: "Other Disjoint",
-            bb: []testInput{
+            merge: []testInput{
                 {cat: []path.CategoryType{path.EmailCategory}},
                 {
                     id:  1,
                     cat: []path.CategoryType{path.EmailCategory},
-                    incomplete: true,
                 },
             },
-            other: []testInput{
+            otherMerge: []testInput{
                 {
                     id:  2,
                     cat: []path.CategoryType{path.ContactsCategory},
                 },
-                {
-                    id:  3,
-                    cat: []path.CategoryType{path.ContactsCategory},
-                    incomplete: true,
-                },
             },
-            expect: []testInput{
-                {cat: []path.CategoryType{path.EmailCategory}},
-                {
-                    id:  1,
-                    cat: []path.CategoryType{path.EmailCategory},
-                    incomplete: true,
-                },
-                {
-                    id:  2,
-                    cat: []path.CategoryType{path.ContactsCategory},
-                },
-                {
-                    id:  3,
-                    cat: []path.CategoryType{path.ContactsCategory},
-                    incomplete: true,
-                },
-            },
-        },
-        {
-            name: "Other Reduced Reasons",
-            bb: []testInput{
-                {cat: []path.CategoryType{path.EmailCategory}},
-                {
-                    id:  1,
-                    cat: []path.CategoryType{path.EmailCategory},
-                    incomplete: true,
-                },
-            },
-            other: []testInput{
-                {
-                    id:  2,
-                    cat: []path.CategoryType{
-                        path.EmailCategory,
-                        path.ContactsCategory,
-                    },
-                },
-                {
-                    id:  3,
-                    cat: []path.CategoryType{
-                        path.EmailCategory,
-                        path.ContactsCategory,
-                    },
-                    incomplete: true,
-                },
-            },
-            expect: []testInput{
-                {cat: []path.CategoryType{path.EmailCategory}},
-                {
-                    id:  1,
-                    cat: []path.CategoryType{path.EmailCategory},
-                    incomplete: true,
-                },
-                {
-                    id:  2,
-                    cat: []path.CategoryType{path.ContactsCategory},
-                },
-                {
-                    id:  3,
-                    cat: []path.CategoryType{path.ContactsCategory},
-                    incomplete: true,
-                },
+            expect: func() *backupBases {
+                bs := makeBackupBases([]testInput{
+                    {cat: []path.CategoryType{path.EmailCategory}},
+                    {
+                        id:  1,
+                        cat: []path.CategoryType{path.EmailCategory},
+                    },
+                    {
+                        id:  2,
+                        cat: []path.CategoryType{path.ContactsCategory},
+                    },
+                }, nil)
+
+                return bs
             },
         },
     }

@ -443,9 +458,9 @@ func (suite *BackupBasesUnitSuite) TestMergeBackupBases() {
     suite.Run(test.name, func() {
         t := suite.T()

-        bb := makeBackupBases(test.bb)
-        other := makeBackupBases(test.other)
-        expect := makeBackupBases(test.expect)
+        bb := makeBackupBases(test.merge, test.assist)
+        other := makeBackupBases(test.otherMerge, test.otherAssist)
+        expected := test.expect()

         ctx, flush := tester.NewContext(t)
         defer flush()

@ -453,10 +468,10 @@ func (suite *BackupBasesUnitSuite) TestMergeBackupBases() {
         got := bb.MergeBackupBases(
             ctx,
             other,
-            func(r Reasoner) string {
+            func(r identity.Reasoner) string {
                 return r.Service().String() + r.Category().String()
             })

-        AssertBackupBasesEqual(t, expect, got)
+        AssertBackupBasesEqual(t, expected, got)
     })
   }
 }

@ -486,8 +501,20 @@ func (suite *BackupBasesUnitSuite) TestFixupAndVerify() {
         mergeBases: []ManifestEntry{
             makeMan(path.EmailCategory, "id1", "", "bid1"),
         },
+        assistBackups: []BackupEntry{
+            {
+                Backup: &backup.Backup{
+                    BaseModel: model.BaseModel{
+                        ID:   "bid2",
+                        Tags: map[string]string{model.BackupTypeTag: model.AssistBackup},
+                    },
+                    SnapshotID:    "id2",
+                    StreamStoreID: "ssid2",
+                },
+            },
+        },
         assistBases: []ManifestEntry{
-            makeMan(path.EmailCategory, "id1", "", "bid1"),
+            makeMan(path.EmailCategory, "id2", "", "bid2"),
         },
     }
 }

@ -507,24 +534,77 @@ func (suite *BackupBasesUnitSuite) TestFixupAndVerify() {
             res := validMail1()
             res.backups = nil

+            return res
+        }(),
+        expect: func() *backupBases {
+            res := validMail1()
+            res.mergeBases = nil
+            res.backups = nil
+
             return res
         }(),
     },
     {
-        name: "Backup Missing Snapshot ID",
+        name: "Merge Backup Missing Snapshot ID",
         bb: func() *backupBases {
             res := validMail1()
             res.backups[0].SnapshotID = ""

+            return res
+        }(),
+        expect: func() *backupBases {
+            res := validMail1()
+            res.mergeBases = nil
+            res.backups = nil
+
             return res
         }(),
     },
     {
-        name: "Backup Missing Deets ID",
+        name: "Assist backup missing snapshot ID",
+        bb: func() *backupBases {
+            res := validMail1()
+            res.assistBackups[0].SnapshotID = ""
+
+            return res
+        }(),
+        expect: func() *backupBases {
+            res := validMail1()
+            res.assistBases = res.mergeBases
+            res.assistBackups = nil
+
+            return res
+        }(),
+    },
+    {
+        name: "Merge backup missing deets ID",
         bb: func() *backupBases {
             res := validMail1()
             res.backups[0].StreamStoreID = ""

+            return res
+        }(),
+        expect: func() *backupBases {
+            res := validMail1()
+            res.mergeBases = nil
+            res.backups = nil
+
+            return res
+        }(),
+    },
+    {
+        name: "Assist backup missing deets ID",
+        bb: func() *backupBases {
+            res := validMail1()
+            res.assistBackups[0].StreamStoreID = ""
+
+            return res
+        }(),
+        expect: func() *backupBases {
+            res := validMail1()
+            res.assistBases = res.mergeBases
+            res.assistBackups = nil
+
             return res
         }(),
     },

@ -545,15 +625,22 @@ func (suite *BackupBasesUnitSuite) TestFixupAndVerify() {
             res.mergeBases[0].Reasons = append(
                 res.mergeBases[0].Reasons,
                 res.mergeBases[0].Reasons[0])
-            res.assistBases = res.mergeBases

+            res.assistBases[0].Reasons = append(
+                res.assistBases[0].Reasons,
+                res.assistBases[0].Reasons[0])
             return res
         }(),
     },
     {
         name: "Single Valid Entry",
         bb:   validMail1(),
-        expect: validMail1(),
+        expect: func() *backupBases {
+            res := validMail1()
+            res.assistBases = append(res.mergeBases, res.assistBases...)
+
+            return res
+        }(),
     },
     {
         name: "Single Valid Entry With Incomplete Assist With Same Reason",

@ -561,16 +648,14 @@ func (suite *BackupBasesUnitSuite) TestFixupAndVerify() {
             res := validMail1()
             res.assistBases = append(
                 res.assistBases,
-                makeMan(path.EmailCategory, "id2", "checkpoint", "bid2"))
+                makeMan(path.EmailCategory, "id3", "checkpoint", "bid3"))

             return res
         }(),
         expect: func() *backupBases {
             res := validMail1()
-            res.assistBases = append(
-                res.assistBases,
-                makeMan(path.EmailCategory, "id2", "checkpoint", "bid2"))
-
+            res.assistBases = append(res.mergeBases, res.assistBases...)
             return res
         }(),
     },

@ -581,6 +666,9 @@ func (suite *BackupBasesUnitSuite) TestFixupAndVerify() {
             res.backups[0].DetailsID = res.backups[0].StreamStoreID
             res.backups[0].StreamStoreID = ""

+            res.assistBackups[0].DetailsID = res.assistBackups[0].StreamStoreID
+            res.assistBackups[0].StreamStoreID = ""
+
             return res
         }(),
         expect: func() *backupBases {

@ -588,6 +676,11 @@ func (suite *BackupBasesUnitSuite) TestFixupAndVerify() {
             res.backups[0].DetailsID = res.backups[0].StreamStoreID
             res.backups[0].StreamStoreID = ""

+            res.assistBackups[0].DetailsID = res.assistBackups[0].StreamStoreID
+            res.assistBackups[0].StreamStoreID = ""
+
+            res.assistBases = append(res.mergeBases, res.assistBases...)
+
             return res
         }(),
     },

@ -598,7 +691,10 @@ func (suite *BackupBasesUnitSuite) TestFixupAndVerify() {
             res.mergeBases[0].Reasons = append(
                 res.mergeBases[0].Reasons,
                 NewReason("", ro, path.ExchangeService, path.ContactsCategory))
-            res.assistBases = res.mergeBases
+            res.assistBases[0].Reasons = append(
+                res.assistBases[0].Reasons,
+                NewReason("", ro, path.ExchangeService, path.ContactsCategory))
+
             return res
         }(),

@ -607,7 +703,12 @@ func (suite *BackupBasesUnitSuite) TestFixupAndVerify() {
             res.mergeBases[0].Reasons = append(
                 res.mergeBases[0].Reasons,
                 NewReason("", ro, path.ExchangeService, path.ContactsCategory))
-            res.assistBases = res.mergeBases
+            res.assistBases[0].Reasons = append(
+                res.assistBases[0].Reasons,
+                NewReason("", ro, path.ExchangeService, path.ContactsCategory))
+
+            res.assistBases = append(res.mergeBases, res.assistBases...)
+
             return res
         }(),

@ -618,14 +719,17 @@ func (suite *BackupBasesUnitSuite) TestFixupAndVerify() {
             res := validMail1()
             res.mergeBases = append(
                 res.mergeBases,
-                makeMan(path.EmailCategory, "id2", "", "bid2"))
-            res.assistBases = res.mergeBases
+                makeMan(path.EmailCategory, "id3", "", "bid3"))

+            res.assistBases = append(
+                res.assistBases,
+                makeMan(path.EmailCategory, "id4", "", "bid4"))
+
             return res
         }(),
     },
     {
-        name: "Three Entries One Invalid",
+        name: "Merge Backup, Three Entries One Invalid",
         bb: func() *backupBases {
             res := validMail1()
             res.backups = append(

@ -633,24 +737,23 @@ func (suite *BackupBasesUnitSuite) TestFixupAndVerify() {
                 BackupEntry{
                     Backup: &backup.Backup{
                         BaseModel: model.BaseModel{
-                            ID: "bid2",
+                            ID: "bid3",
                         },
                     },
                 },
                 BackupEntry{
                     Backup: &backup.Backup{
                         BaseModel: model.BaseModel{
-                            ID: "bid3",
+                            ID: "bid4",
                         },
-                        SnapshotID:    "id3",
-                        StreamStoreID: "ssid3",
+                        SnapshotID:    "id4",
+                        StreamStoreID: "ssid4",
                     },
                 })
             res.mergeBases = append(
                 res.mergeBases,
-                makeMan(path.ContactsCategory, "id2", "checkpoint", "bid2"),
-                makeMan(path.EventsCategory, "id3", "", "bid3"))
-            res.assistBases = res.mergeBases
+                makeMan(path.ContactsCategory, "id3", "checkpoint", "bid3"),
+                makeMan(path.EventsCategory, "id4", "", "bid4"))

             return res
         }(),

@ -661,16 +764,70 @@ func (suite *BackupBasesUnitSuite) TestFixupAndVerify() {
                 BackupEntry{
                     Backup: &backup.Backup{
                         BaseModel: model.BaseModel{
-                            ID: "bid3",
+                            ID: "bid4",
                         },
-                        SnapshotID:    "id3",
-                        StreamStoreID: "ssid3",
+                        SnapshotID:    "id4",
+                        StreamStoreID: "ssid4",
                     },
                 })
             res.mergeBases = append(
                 res.mergeBases,
-                makeMan(path.EventsCategory, "id3", "", "bid3"))
-            res.assistBases = res.mergeBases
+                makeMan(path.EventsCategory, "id4", "", "bid4"))
+            res.assistBases = append(res.mergeBases, res.assistBases...)

+            return res
+        }(),
+    },
+    {
+        name: "Assist Backup, Three Entries One Invalid",
+        bb: func() *backupBases {
+            res := validMail1()
+            res.assistBackups = append(
+                res.assistBackups,
+                BackupEntry{
+                    Backup: &backup.Backup{
+                        BaseModel: model.BaseModel{
+                            ID:   "bid3",
+                            Tags: map[string]string{model.BackupTypeTag: model.AssistBackup},
+                        },
+                    },
+                },
+                BackupEntry{
+                    Backup: &backup.Backup{
+                        BaseModel: model.BaseModel{
+                            ID:   "bid4",
+                            Tags: map[string]string{model.BackupTypeTag: model.AssistBackup},
+                        },
+                        SnapshotID:    "id4",
+                        StreamStoreID: "ssid4",
+                    },
+                })
+            res.assistBases = append(
+                res.assistBases,
+                makeMan(path.ContactsCategory, "id3", "checkpoint", "bid3"),
+                makeMan(path.EventsCategory, "id4", "", "bid4"))
+
+            return res
+        }(),
+        expect: func() *backupBases {
+            res := validMail1()
+            res.assistBackups = append(
+                res.assistBackups,
+                BackupEntry{
+                    Backup: &backup.Backup{
+                        BaseModel: model.BaseModel{
+                            ID:   "bid4",
+                            Tags: map[string]string{model.BackupTypeTag: model.AssistBackup},
+                        },
+                        SnapshotID:    "id4",
+                        StreamStoreID: "ssid4",
+                    },
+                })
+            res.assistBases = append(
+                res.assistBases,
+                makeMan(path.EventsCategory, "id4", "", "bid4"))
+
+            res.assistBases = append(res.mergeBases, res.assistBases...)
+
             return res
         }(),

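A recurring expectation in the fixture table above is res.assistBases = append(res.mergeBases, res.assistBases...): after fixup, every surviving merge base is also counted as a kopia assist base. A minimal sketch of that invariant under a simplified manifest type (the real ManifestEntry carries a kopia manifest and its reasons) follows.

package main

import "fmt"

// manifestEntry is a simplified stand-in for ManifestEntry; only an ID is
// needed to show the invariant.
type manifestEntry struct{ ID string }

func main() {
    mergeBases := []manifestEntry{{"m1"}, {"m2"}}
    assistBases := []manifestEntry{{"a1"}}

    // The expected post-fixup state in the tests above: every surviving
    // merge base is also treated as a kopia assist base, so the assist
    // list becomes the merge list plus any true assist bases.
    assistBases = append(mergeBases, assistBases...)

    fmt.Println(assistBases) // [{m1} {m2} {a1}]
}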
@ -12,6 +12,7 @@ import (
     "github.com/alcionai/corso/src/internal/model"
     "github.com/alcionai/corso/src/internal/operations/inject"
     "github.com/alcionai/corso/src/pkg/backup"
+    "github.com/alcionai/corso/src/pkg/backup/identity"
     "github.com/alcionai/corso/src/pkg/logger"
     "github.com/alcionai/corso/src/pkg/path"
 )

@ -29,23 +30,11 @@ const (
     userTagPrefix = "tag:"
 )

-// TODO(ashmrtn): Move this into some inject package. Here to avoid import
-// cycles.
-type Reasoner interface {
-    Tenant() string
-    ProtectedResource() string
-    Service() path.ServiceType
-    Category() path.CategoryType
-    // SubtreePath returns the path prefix for data in existing backups that have
-    // parameters (tenant, protected resourced, etc) that match this Reasoner.
-    SubtreePath() (path.Path, error)
-}
-
 func NewReason(
     tenant, resource string,
     service path.ServiceType,
     category path.CategoryType,
-) Reasoner {
+) identity.Reasoner {
     return reason{
         tenant:   tenant,
         resource: resource,

@ -90,7 +79,7 @@ func (r reason) SubtreePath() (path.Path, error) {
     return p, clues.Wrap(err, "building path").OrNil()
 }

-func tagKeys(r Reasoner) []string {
+func tagKeys(r identity.Reasoner) []string {
     return []string{
         r.ProtectedResource(),
         serviceCatString(r.Service(), r.Category()),

@ -98,13 +87,13 @@ func tagKeys(r Reasoner) []string {
 }

 // reasonKey returns the concatenation of the ProtectedResource, Service, and Category.
-func reasonKey(r Reasoner) string {
+func reasonKey(r identity.Reasoner) string {
     return r.ProtectedResource() + r.Service().String() + r.Category().String()
 }

 type BackupEntry struct {
     *backup.Backup
-    Reasons []Reasoner
+    Reasons []identity.Reasoner
 }

 type ManifestEntry struct {

@ -116,7 +105,7 @@ type ManifestEntry struct {
     // 1. backup user1 email,contacts -> B1
     // 2. backup user1 contacts -> B2 (uses B1 as base)
     // 3. backup user1 email,contacts,events (uses B1 for email, B2 for contacts)
-    Reasons []Reasoner
+    Reasons []identity.Reasoner
 }

 func (me ManifestEntry) GetTag(key string) (string, bool) {

@ -204,17 +193,20 @@ func (b *baseFinder) getBackupModel(
     return bup, nil
 }

+type backupBase struct {
+    backup   BackupEntry
+    manifest ManifestEntry
+}
+
 // findBasesInSet goes through manifest metadata entries and sees if they're
-// incomplete or not. If an entry is incomplete and we don't already have a
-// complete or incomplete manifest add it to the set for kopia assisted
-// incrementals. If it's complete, fetch the backup model and see if it
-// corresponds to a successful backup. If it does, return it as we only need the
-// most recent complete backup as the base.
+// incomplete or not. Manifests which don't have an associated backup
+// are discarded as incomplete. Manifests are then checked to see if they
+// are associated with an assist backup or merge backup.
 func (b *baseFinder) findBasesInSet(
     ctx context.Context,
-    reason Reasoner,
+    reason identity.Reasoner,
     metas []*manifest.EntryMetadata,
-) (*BackupEntry, *ManifestEntry, []ManifestEntry, error) {
+) (*backupBase, *backupBase, error) {
     // Sort manifests by time so we can go through them sequentially. The code in
     // kopia appears to sort them already, but add sorting here just so we're not
     // reliant on undocumented behavior.

@ -223,8 +215,8 @@ func (b *baseFinder) findBasesInSet(
     })

     var (
-        kopiaAssistSnaps []ManifestEntry
-        foundIncomplete  bool
+        mergeBase  *backupBase
+        assistBase *backupBase
     )

     for i := len(metas) - 1; i >= 0; i-- {

@ -240,16 +232,10 @@ func (b *baseFinder) findBasesInSet(
         }

         if len(man.IncompleteReason) > 0 {
-            if !foundIncomplete {
-                foundIncomplete = true
-
-                kopiaAssistSnaps = append(kopiaAssistSnaps, ManifestEntry{
-                    Manifest: man,
-                    Reasons:  []Reasoner{reason},
-                })
-
-                logger.Ctx(ictx).Info("found incomplete backup")
-            }
+            // Skip here since this snapshot cannot be considered an assist base.
+            logger.Ctx(ictx).Debugw(
+                "Incomplete snapshot",
+                "incomplete_reason", man.IncompleteReason)

             continue
         }

@ -259,19 +245,7 @@ func (b *baseFinder) findBasesInSet(
         if err != nil {
             // Safe to continue here as we'll just end up attempting to use an older
             // backup as the base.
-            logger.CtxErr(ictx, err).Debug("searching for base backup")
-
-            if !foundIncomplete {
-                foundIncomplete = true
-
-                kopiaAssistSnaps = append(kopiaAssistSnaps, ManifestEntry{
-                    Manifest: man,
-                    Reasons:  []Reasoner{reason},
-                })
-
-                logger.Ctx(ictx).Info("found incomplete backup")
-            }
+            logger.CtxErr(ictx, err).Debug("searching for backup model")

             continue
         }

@ -285,49 +259,118 @@ func (b *baseFinder) findBasesInSet(
                 "empty backup stream store ID",
                 "search_backup_id", bup.ID)

-            if !foundIncomplete {
-                foundIncomplete = true
-
-                kopiaAssistSnaps = append(kopiaAssistSnaps, ManifestEntry{
-                    Manifest: man,
-                    Reasons:  []Reasoner{reason},
-                })
-
-                logger.Ctx(ictx).Infow(
-                    "found incomplete backup",
-                    "search_backup_id", bup.ID)
-            }
-
             continue
         }

         // If we've made it to this point then we're considering the backup
         // complete as it has both an item data snapshot and a backup details
         // snapshot.
-        logger.Ctx(ictx).Infow("found complete backup", "base_backup_id", bup.ID)
+        //
+        // Check first if this is an assist base. Criteria for selecting an
+        // assist base are:
+        // 1. most recent assist base for the reason.
+        // 2. at most one assist base per reason.
+        // 3. it must be more recent than the merge backup for the reason, if
+        //    a merge backup exists.

-        me := ManifestEntry{
-            Manifest: man,
-            Reasons:  []Reasoner{reason},
+        if b.isAssistBackupModel(ictx, bup) {
+            if assistBase == nil {
+                assistModel := BackupEntry{
+                    Backup:  bup,
+                    Reasons: []identity.Reasoner{reason},
+                }
+                assistSnap := ManifestEntry{
+                    Manifest: man,
+                    Reasons:  []identity.Reasoner{reason},
+                }
+
+                assistBase = &backupBase{
+                    backup:   assistModel,
+                    manifest: assistSnap,
+                }
+
+                logger.Ctx(ictx).Infow(
+                    "found assist base",
+                    "search_backup_id", bup.ID,
+                    "search_snapshot_id", meta.ID,
+                    "ssid", ssid)
+            }
+
+            // Skip if an assist base has already been selected.
+            continue
         }
-        kopiaAssistSnaps = append(kopiaAssistSnaps, me)

-        return &BackupEntry{
-            Backup:  bup,
-            Reasons: []Reasoner{reason},
-        }, &me, kopiaAssistSnaps, nil
+        logger.Ctx(ictx).Infow("found merge base",
+            "search_backup_id", bup.ID,
+            "search_snapshot_id", meta.ID,
+            "ssid", ssid)
+
+        mergeSnap := ManifestEntry{
+            Manifest: man,
+            Reasons:  []identity.Reasoner{reason},
+        }
+
+        mergeModel := BackupEntry{
+            Backup:  bup,
+            Reasons: []identity.Reasoner{reason},
+        }
+
+        mergeBase = &backupBase{
+            backup:   mergeModel,
+            manifest: mergeSnap,
+        }
+
+        break
     }

-    logger.Ctx(ctx).Info("no base backups for reason")
+    if mergeBase == nil && assistBase == nil {
+        logger.Ctx(ctx).Info("no merge or assist base found for reason")
+    }

-    return nil, nil, kopiaAssistSnaps, nil
+    return mergeBase, assistBase, nil
+}
+
+// isAssistBackupModel checks if the provided backup is an assist backup.
+func (b *baseFinder) isAssistBackupModel(
+    ctx context.Context,
+    bup *backup.Backup,
+) bool {
+    allTags := map[string]string{
+        model.BackupTypeTag: model.AssistBackup,
+    }
+
+    for k, v := range allTags {
+        if bup.Tags[k] != v {
+            // This is not an assist backup so we can just exit here.
+            logger.Ctx(ctx).Debugw(
+                "assist backup model missing tags",
+                "backup_id", bup.ID,
+                "tag", k,
+                "expected_value", v,
+                "actual_value", bup.Tags[k])
+
+            return false
+        }
+    }
+
+    // Check if it has a valid streamstore id and snapshot id.
+    if len(bup.StreamStoreID) == 0 || len(bup.SnapshotID) == 0 {
+        logger.Ctx(ctx).Infow(
+            "nil ssid or snapshot id in assist base",
+            "ssid", bup.StreamStoreID,
+            "snapshot_id", bup.SnapshotID)
+
+        return false
+    }
+
+    return true
 }
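The comment block in findBasesInSet above lays out the assist-base criteria in prose; the control flow reduces to a newest-first scan that keeps the first assist-tagged backup and stops at the first merge base. Here is a minimal sketch under simplified types: `snapshot` stands in for the manifest-plus-backup pair, and the real code additionally validates tags and IDs via isAssistBackupModel.

package main

import "fmt"

// snapshot is a simplified stand-in for a kopia manifest plus its backup
// model; Assist marks backups tagged as assist backups.
type snapshot struct {
    ID     string
    Assist bool
}

// pickBases mirrors the selection loop in findBasesInSet: walk snapshots
// from newest to oldest, keep at most one assist base (the newest one),
// and stop at the first merge base since only the most recent one is used.
func pickBases(newestFirst []snapshot) (merge, assist *snapshot) {
    for i := range newestFirst {
        s := newestFirst[i]

        if s.Assist {
            if assist == nil {
                assist = &s
            }
            // Skip if an assist base has already been selected.
            continue
        }

        merge = &s

        break
    }

    return merge, assist
}

func main() {
    snaps := []snapshot{
        {ID: "s4", Assist: true},  // newest: becomes the assist base
        {ID: "s3", Assist: true},  // ignored: one assist base per reason
        {ID: "s2", Assist: false}, // merge base: search stops here
        {ID: "s1", Assist: false}, // never inspected
    }

    merge, assist := pickBases(snaps)
    fmt.Println(merge.ID, assist.ID) // s2 s4
}

Note how criterion 3 (the assist base must be newer than the merge base) falls out of the scan order for free: anything found after the first merge base is never inspected.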
 func (b *baseFinder) getBase(
     ctx context.Context,
-    r Reasoner,
+    r identity.Reasoner,
     tags map[string]string,
-) (*BackupEntry, *ManifestEntry, []ManifestEntry, error) {
+) (*backupBase, *backupBase, error) {
     allTags := map[string]string{}

     for _, k := range tagKeys(r) {

@ -339,12 +382,12 @@ func (b *baseFinder) getBase(

     metas, err := b.sm.FindManifests(ctx, allTags)
     if err != nil {
-        return nil, nil, nil, clues.Wrap(err, "getting snapshots")
+        return nil, nil, clues.Wrap(err, "getting snapshots")
     }

     // No snapshots means no backups so we can just exit here.
     if len(metas) == 0 {
-        return nil, nil, nil, nil
+        return nil, nil, nil
     }

     return b.findBasesInSet(ctx, r, metas)

@ -352,7 +395,7 @@ func (b *baseFinder) getBase(

 func (b *baseFinder) FindBases(
     ctx context.Context,
-    reasons []Reasoner,
+    reasons []identity.Reasoner,
     tags map[string]string,
 ) BackupBases {
     var (

@ -360,9 +403,10 @@ func (b *baseFinder) FindBases(
         // the reason for selecting something. Kopia assisted snapshots also use
         // ManifestEntry so we have the reasons for selecting them to aid in
         // debugging.
-        baseBups         = map[model.StableID]BackupEntry{}
-        baseSnaps        = map[manifest.ID]ManifestEntry{}
-        kopiaAssistSnaps = map[manifest.ID]ManifestEntry{}
+        mergeBups   = map[model.StableID]BackupEntry{}
+        assistBups  = map[model.StableID]BackupEntry{}
+        mergeSnaps  = map[manifest.ID]ManifestEntry{}
+        assistSnaps = map[manifest.ID]ManifestEntry{}
     )

     for _, searchReason := range reasons {

@ -372,7 +416,10 @@ func (b *baseFinder) FindBases(
             "search_category", searchReason.Category().String())
         logger.Ctx(ictx).Info("searching for previous manifests")

-        baseBackup, baseSnap, assistSnaps, err := b.getBase(ictx, searchReason, tags)
+        mergeBase, assistBase, err := b.getBase(
+            ictx,
+            searchReason,
+            tags)
         if err != nil {
             logger.Ctx(ctx).Info(
                 "getting base, falling back to full backup for reason",

@ -381,47 +428,60 @@ func (b *baseFinder) FindBases(
             continue
         }

-        if baseBackup != nil {
-            bs, ok := baseBups[baseBackup.ID]
+        if mergeBase != nil {
+            mergeSnap := mergeBase.manifest
+            mergeBackup := mergeBase.backup
+
+            ms, ok := mergeSnaps[mergeSnap.ID]
             if ok {
-                bs.Reasons = append(bs.Reasons, baseSnap.Reasons...)
+                ms.Reasons = append(ms.Reasons, mergeSnap.Reasons...)
             } else {
-                bs = *baseBackup
+                ms = mergeSnap
             }

-            // Reassign since it's structs not pointers to structs.
-            baseBups[baseBackup.ID] = bs
+            mergeSnaps[mergeSnap.ID] = ms
+
+            mb, ok := mergeBups[mergeBackup.ID]
+            if ok {
+                mb.Reasons = append(mb.Reasons, mergeSnap.Reasons...)
+            } else {
+                mb = mergeBackup
+            }
+
+            mergeBups[mergeBackup.ID] = mb
         }

-        if baseSnap != nil {
-            bs, ok := baseSnaps[baseSnap.ID]
+        if assistBase != nil {
+            assistSnap := assistBase.manifest
+            assistBackup := assistBase.backup
+
+            as, ok := assistSnaps[assistSnap.ID]
             if ok {
-                bs.Reasons = append(bs.Reasons, baseSnap.Reasons...)
+                as.Reasons = append(as.Reasons, assistSnap.Reasons...)
             } else {
-                bs = *baseSnap
+                as = assistSnap
             }

-            // Reassign since it's structs not pointers to structs.
-            baseSnaps[baseSnap.ID] = bs
-        }
+            assistSnaps[assistSnap.ID] = as

-        for _, s := range assistSnaps {
-            bs, ok := kopiaAssistSnaps[s.ID]
+            ab, ok := assistBups[assistBackup.ID]
             if ok {
-                bs.Reasons = append(bs.Reasons, s.Reasons...)
+                ab.Reasons = append(ab.Reasons, assistBackup.Reasons...)
             } else {
-                bs = s
+                ab = assistBackup
             }

-            // Reassign since it's structs not pointers to structs.
-            kopiaAssistSnaps[s.ID] = bs
+            assistBups[assistBackup.ID] = ab
         }
     }

+    // TODO(pandeyabs): Fix the terminology used in backupBases to go with
+    // new definitions i.e. mergeSnaps instead of mergeBases, etc.
     res := &backupBases{
-        backups:     maps.Values(baseBups),
-        mergeBases:  maps.Values(baseSnaps),
-        assistBases: maps.Values(kopiaAssistSnaps),
+        backups:       maps.Values(mergeBups),
+        assistBackups: maps.Values(assistBups),
+        mergeBases:    maps.Values(mergeSnaps),
+        assistBases:   maps.Values(assistSnaps),
     }

     res.fixupAndVerify(ctx)

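FindBases above accumulates results per reason into maps keyed by stable ID, appending reasons onto an existing entry when the same base is selected for several reasons. A minimal sketch of that pattern follows; `entry` is a simplified stand-in for ManifestEntry/BackupEntry, and the reassignment step is the same "structs, not pointers to structs" detail the original comments called out.

package main

import "fmt"

// entry is a simplified stand-in for ManifestEntry/BackupEntry: a stable
// ID plus the list of reasons it was selected for.
type entry struct {
    ID      string
    Reasons []string
}

// accumulate mirrors the map-based dedup in FindBases: when the same base
// is found for several reasons, the reasons are appended onto one entry
// rather than storing the base twice.
func accumulate(into map[string]entry, found entry) {
    e, ok := into[found.ID]
    if ok {
        e.Reasons = append(e.Reasons, found.Reasons...)
    } else {
        e = found
    }

    // Reassign since the map stores structs, not pointers to structs.
    into[found.ID] = e
}

func main() {
    mergeSnaps := map[string]entry{}

    accumulate(mergeSnaps, entry{ID: "snap1", Reasons: []string{"user1-email"}})
    accumulate(mergeSnaps, entry{ID: "snap1", Reasons: []string{"user1-events"}})

    fmt.Println(mergeSnaps["snap1"].Reasons) // [user1-email user1-events]
}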
@ -14,6 +14,7 @@ import (
     "github.com/alcionai/corso/src/internal/model"
     "github.com/alcionai/corso/src/internal/tester"
     "github.com/alcionai/corso/src/pkg/backup"
+    "github.com/alcionai/corso/src/pkg/backup/identity"
     "github.com/alcionai/corso/src/pkg/path"
 )

@ -23,14 +24,19 @@ const (
 )

 var (
     testT1 = time.Now()
     testT2 = testT1.Add(1 * time.Hour)
+    testT3 = testT2.Add(1 * time.Hour)
+    testT4 = testT3.Add(1 * time.Hour)
     testID1 = manifest.ID("snap1")
     testID2 = manifest.ID("snap2")
+    testID3 = manifest.ID("snap3")
+    testID4 = manifest.ID("snap4")

     testBackup1 = "backupID1"
     testBackup2 = "backupID2"
+    testBackup3 = "backupID3"
+    testBackup4 = "backupID4"

     testMail   = path.ExchangeService.String() + path.EmailCategory.String()
     testEvents = path.ExchangeService.String() + path.EventsCategory.String()

@ -39,7 +45,7 @@ var (
     testUser2 = "user2"
     testUser3 = "user3"

-    testAllUsersAllCats = []Reasoner{
+    testAllUsersAllCats = []identity.Reasoner{
         // User1 email and events.
         NewReason("", testUser1, path.ExchangeService, path.EmailCategory),
         NewReason("", testUser1, path.ExchangeService, path.EventsCategory),

@ -50,12 +56,12 @@ var (
         NewReason("", testUser3, path.ExchangeService, path.EmailCategory),
         NewReason("", testUser3, path.ExchangeService, path.EventsCategory),
     }
-    testAllUsersMail = []Reasoner{
+    testAllUsersMail = []identity.Reasoner{
         NewReason("", testUser1, path.ExchangeService, path.EmailCategory),
         NewReason("", testUser2, path.ExchangeService, path.EmailCategory),
         NewReason("", testUser3, path.ExchangeService, path.EmailCategory),
     }
-    testUser1Mail = []Reasoner{
+    testUser1Mail = []identity.Reasoner{
         NewReason("", testUser1, path.ExchangeService, path.EmailCategory),
     }
 )

@ -212,12 +218,14 @@ func newBackupModel(
     hasItemSnap bool,
     hasDetailsSnap bool,
     oldDetailsID bool,
+    tags map[string]string,
     err error,
 ) backupInfo {
     res := backupInfo{
         b: backup.Backup{
             BaseModel: model.BaseModel{
                 ID: model.StableID(id),
+                Tags: tags,
             },
             SnapshotID: "iid",
         },

@ -285,7 +293,7 @@ func (suite *BaseFinderUnitSuite) TestNoResult_NoBackupsOrSnapshots() {
         sm: mockEmptySnapshotManager{},
         bg: mockEmptyModelGetter{},
     }
-    reasons := []Reasoner{
+    reasons := []identity.Reasoner{
         NewReason("", "a-user", path.ExchangeService, path.EmailCategory),
     }

@ -304,7 +312,7 @@ func (suite *BaseFinderUnitSuite) TestNoResult_ErrorListingSnapshots() {
         sm: &mockSnapshotManager{findErr: assert.AnError},
         bg: mockEmptyModelGetter{},
     }
-    reasons := []Reasoner{
+    reasons := []identity.Reasoner{
         NewReason("", "a-user", path.ExchangeService, path.EmailCategory),
     }

@ -316,18 +324,21 @@ func (suite *BaseFinderUnitSuite) TestNoResult_ErrorListingSnapshots() {
 func (suite *BaseFinderUnitSuite) TestGetBases() {
     table := []struct {
         name         string
-        input        []Reasoner
+        input        []identity.Reasoner
         manifestData []manifestInfo
         // Use this to denote the Reasons a base backup or base manifest is
         // selected. The int maps to the index of the backup or manifest in data.
-        expectedBaseReasons map[int][]Reasoner
+        expectedBaseReasons map[int][]identity.Reasoner
         // Use this to denote the Reasons a kopia assised incrementals manifest is
         // selected. The int maps to the index of the manifest in data.
-        expectedAssistManifestReasons map[int][]Reasoner
+        // TODO(pandeyabs): Remove this once we have 1:1 mapping between snapshots
+        // and backup models.
+        expectedAssistManifestReasons map[int][]identity.Reasoner
+        expectedAssistReasons         map[int][]identity.Reasoner
         backupData []backupInfo
     }{
         {
-            name:  "Return Older Base If Fail To Get Manifest",
+            name:  "Return Older Merge Base If Fail To Get Manifest",
             input: testUser1Mail,
             manifestData: []manifestInfo{
                 newManifestInfo(

@ -349,19 +360,61 @@ func (suite *BaseFinderUnitSuite) TestGetBases() {
                     testUser1,
                 ),
             },
-            expectedBaseReasons: map[int][]Reasoner{
+            expectedBaseReasons: map[int][]identity.Reasoner{
                 1: testUser1Mail,
             },
-            expectedAssistManifestReasons: map[int][]Reasoner{
+            expectedAssistManifestReasons: map[int][]identity.Reasoner{
                 1: testUser1Mail,
             },
+            expectedAssistReasons: map[int][]identity.Reasoner{},
             backupData: []backupInfo{
-                newBackupModel(testBackup2, true, true, false, nil),
-                newBackupModel(testBackup1, true, true, false, nil),
+                newBackupModel(testBackup2, true, true, false, nil, nil),
+                newBackupModel(testBackup1, true, true, false, nil, nil),
             },
         },
         {
-            name:  "Return Older Base If Fail To Get Backup",
+            name:  "Return Older Assist Base If Fail To Get Manifest",
+            input: testUser1Mail,
+            manifestData: []manifestInfo{
+                newManifestInfo(
+                    testID2,
+                    testT2,
+                    testCompleteMan,
+                    testBackup2,
+                    assert.AnError,
+                    testMail,
+                    testUser1,
+                ),
+                newManifestInfo(
+                    testID1,
+                    testT1,
+                    testCompleteMan,
+                    testBackup1,
+                    nil,
+                    testMail,
+                    testUser1,
+                ),
+            },
+            expectedBaseReasons: map[int][]identity.Reasoner{},
+            expectedAssistManifestReasons: map[int][]identity.Reasoner{
+                1: testUser1Mail,
+            },
+            expectedAssistReasons: map[int][]identity.Reasoner{
+                1: testUser1Mail,
+            },
+            backupData: []backupInfo{
+                newBackupModel(testBackup2, true, true, false, nil, nil),
+                newBackupModel(
+                    testBackup1,
+                    true,
+                    true,
+                    false,
+                    map[string]string{model.BackupTypeTag: model.AssistBackup},
+                    nil),
+            },
+        },
+        {
+            name:  "Return Older Merge Base If Fail To Get Backup",
             input: testUser1Mail,
             manifestData: []manifestInfo{
                 newManifestInfo(

@ -383,16 +436,15 @@ func (suite *BaseFinderUnitSuite) TestGetBases() {
                     testUser1,
                 ),
             },
-            expectedBaseReasons: map[int][]Reasoner{
+            expectedBaseReasons: map[int][]identity.Reasoner{
                 1: testUser1Mail,
             },
-            expectedAssistManifestReasons: map[int][]Reasoner{
-                0: testUser1Mail,
+            expectedAssistManifestReasons: map[int][]identity.Reasoner{
                 1: testUser1Mail,
             },
             backupData: []backupInfo{
-                newBackupModel(testBackup2, false, false, false, assert.AnError),
-                newBackupModel(testBackup1, true, true, false, nil),
+                newBackupModel(testBackup2, false, false, false, nil, assert.AnError),
+                newBackupModel(testBackup1, true, true, false, nil, nil),
             },
         },
         {

@ -418,16 +470,16 @@ func (suite *BaseFinderUnitSuite) TestGetBases() {
                     testUser1,
                 ),
             },
-            expectedBaseReasons: map[int][]Reasoner{
+            expectedBaseReasons: map[int][]identity.Reasoner{
                 1: testUser1Mail,
             },
-            expectedAssistManifestReasons: map[int][]Reasoner{
-                0: testUser1Mail,
+            expectedAssistManifestReasons: map[int][]identity.Reasoner{
                 1: testUser1Mail,
             },
+            expectedAssistReasons: map[int][]identity.Reasoner{},
             backupData: []backupInfo{
-                newBackupModel(testBackup2, true, false, false, nil),
-                newBackupModel(testBackup1, true, true, false, nil),
+                newBackupModel(testBackup2, true, false, false, nil, nil),
+                newBackupModel(testBackup1, true, true, false, nil, nil),
             },
         },
         {

@ -447,18 +499,19 @@ func (suite *BaseFinderUnitSuite) TestGetBases() {
                     testUser3,
                 ),
             },
-            expectedBaseReasons: map[int][]Reasoner{
+            expectedBaseReasons: map[int][]identity.Reasoner{
                 0: testUser1Mail,
             },
-            expectedAssistManifestReasons: map[int][]Reasoner{
+            expectedAssistManifestReasons: map[int][]identity.Reasoner{
                 0: testUser1Mail,
             },
+            expectedAssistReasons: map[int][]identity.Reasoner{},
             backupData: []backupInfo{
-                newBackupModel(testBackup1, true, true, true, nil),
+                newBackupModel(testBackup1, true, true, true, nil, nil),
             },
         },
         {
-            name:  "All One Snapshot",
+            name:  "All One Snapshot With Merge Base",
            input: testAllUsersAllCats,
            manifestData: []manifestInfo{
                 newManifestInfo(

@ -474,14 +527,49 @@ func (suite *BaseFinderUnitSuite) TestGetBases() {
                     testUser3,
                 ),
             },
-            expectedBaseReasons: map[int][]Reasoner{
+            expectedBaseReasons: map[int][]identity.Reasoner{
                 0: testAllUsersAllCats,
             },
-            expectedAssistManifestReasons: map[int][]Reasoner{
+            expectedAssistManifestReasons: map[int][]identity.Reasoner{
+                0: testAllUsersAllCats,
+            },
+            expectedAssistReasons: map[int][]identity.Reasoner{},
+            backupData: []backupInfo{
+                newBackupModel(testBackup1, true, true, false, nil, nil),
+            },
+        },
+        {
||||||
|
name: "All One Snapshot with Assist Base",
|
||||||
|
input: testAllUsersAllCats,
|
||||||
|
manifestData: []manifestInfo{
|
||||||
|
newManifestInfo(
|
||||||
|
testID1,
|
||||||
|
testT1,
|
||||||
|
testCompleteMan,
|
||||||
|
testBackup1,
|
||||||
|
nil,
|
||||||
|
testMail,
|
||||||
|
testEvents,
|
||||||
|
testUser1,
|
||||||
|
testUser2,
|
||||||
|
testUser3,
|
||||||
|
),
|
||||||
|
},
|
||||||
|
expectedBaseReasons: map[int][]identity.Reasoner{},
|
||||||
|
expectedAssistManifestReasons: map[int][]identity.Reasoner{
|
||||||
|
0: testAllUsersAllCats,
|
||||||
|
},
|
||||||
|
expectedAssistReasons: map[int][]identity.Reasoner{
|
||||||
0: testAllUsersAllCats,
|
0: testAllUsersAllCats,
|
||||||
},
|
},
|
||||||
backupData: []backupInfo{
|
backupData: []backupInfo{
|
||||||
newBackupModel(testBackup1, true, true, false, nil),
|
newBackupModel(
|
||||||
|
testBackup1,
|
||||||
|
true,
|
||||||
|
true,
|
||||||
|
false,
|
||||||
|
map[string]string{model.BackupTypeTag: model.AssistBackup},
|
||||||
|
nil),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@ -512,7 +600,7 @@ func (suite *BaseFinderUnitSuite) TestGetBases() {
|
|||||||
testUser3,
|
testUser3,
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
expectedBaseReasons: map[int][]Reasoner{
|
expectedBaseReasons: map[int][]identity.Reasoner{
|
||||||
0: {
|
0: {
|
||||||
NewReason("", testUser1, path.ExchangeService, path.EmailCategory),
|
NewReason("", testUser1, path.ExchangeService, path.EmailCategory),
|
||||||
NewReason("", testUser2, path.ExchangeService, path.EmailCategory),
|
NewReason("", testUser2, path.ExchangeService, path.EmailCategory),
|
||||||
@ -524,7 +612,7 @@ func (suite *BaseFinderUnitSuite) TestGetBases() {
|
|||||||
NewReason("", testUser3, path.ExchangeService, path.EventsCategory),
|
NewReason("", testUser3, path.ExchangeService, path.EventsCategory),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
expectedAssistManifestReasons: map[int][]Reasoner{
|
expectedAssistManifestReasons: map[int][]identity.Reasoner{
|
||||||
0: {
|
0: {
|
||||||
NewReason("", testUser1, path.ExchangeService, path.EmailCategory),
|
NewReason("", testUser1, path.ExchangeService, path.EmailCategory),
|
||||||
NewReason("", testUser2, path.ExchangeService, path.EmailCategory),
|
NewReason("", testUser2, path.ExchangeService, path.EmailCategory),
|
||||||
@ -537,8 +625,96 @@ func (suite *BaseFinderUnitSuite) TestGetBases() {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
backupData: []backupInfo{
|
backupData: []backupInfo{
|
||||||
newBackupModel(testBackup1, true, true, false, nil),
|
newBackupModel(testBackup1, true, true, false, nil, nil),
|
||||||
newBackupModel(testBackup2, true, true, false, nil),
|
newBackupModel(testBackup2, true, true, false, nil, nil),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Unique assist bases with common merge Base, overlapping reasons",
|
||||||
|
input: testAllUsersAllCats,
|
||||||
|
manifestData: []manifestInfo{
|
||||||
|
newManifestInfo(
|
||||||
|
testID3,
|
||||||
|
testT3,
|
||||||
|
testCompleteMan,
|
||||||
|
testBackup3,
|
||||||
|
nil,
|
||||||
|
testEvents,
|
||||||
|
testUser1,
|
||||||
|
testUser2,
|
||||||
|
),
|
||||||
|
newManifestInfo(
|
||||||
|
testID2,
|
||||||
|
testT2,
|
||||||
|
testCompleteMan,
|
||||||
|
testBackup2,
|
||||||
|
nil,
|
||||||
|
testMail,
|
||||||
|
testUser1,
|
||||||
|
testUser2,
|
||||||
|
),
|
||||||
|
newManifestInfo(
|
||||||
|
testID1,
|
||||||
|
testT1,
|
||||||
|
testCompleteMan,
|
||||||
|
testBackup1,
|
||||||
|
nil,
|
||||||
|
testMail,
|
||||||
|
testEvents,
|
||||||
|
testUser1,
|
||||||
|
testUser2,
|
||||||
|
),
|
||||||
|
},
|
||||||
|
expectedBaseReasons: map[int][]identity.Reasoner{
|
||||||
|
2: {
|
||||||
|
NewReason("", testUser1, path.ExchangeService, path.EmailCategory),
|
||||||
|
NewReason("", testUser2, path.ExchangeService, path.EmailCategory),
|
||||||
|
NewReason("", testUser1, path.ExchangeService, path.EventsCategory),
|
||||||
|
NewReason("", testUser2, path.ExchangeService, path.EventsCategory),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
expectedAssistManifestReasons: map[int][]identity.Reasoner{
|
||||||
|
0: {
|
||||||
|
NewReason("", testUser1, path.ExchangeService, path.EventsCategory),
|
||||||
|
NewReason("", testUser2, path.ExchangeService, path.EventsCategory),
|
||||||
|
},
|
||||||
|
1: {
|
||||||
|
NewReason("", testUser1, path.ExchangeService, path.EmailCategory),
|
||||||
|
NewReason("", testUser2, path.ExchangeService, path.EmailCategory),
|
||||||
|
},
|
||||||
|
2: {
|
||||||
|
NewReason("", testUser1, path.ExchangeService, path.EmailCategory),
|
||||||
|
NewReason("", testUser2, path.ExchangeService, path.EmailCategory),
|
||||||
|
NewReason("", testUser1, path.ExchangeService, path.EventsCategory),
|
||||||
|
NewReason("", testUser2, path.ExchangeService, path.EventsCategory),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
expectedAssistReasons: map[int][]identity.Reasoner{
|
||||||
|
0: {
|
||||||
|
NewReason("", testUser1, path.ExchangeService, path.EventsCategory),
|
||||||
|
NewReason("", testUser2, path.ExchangeService, path.EventsCategory),
|
||||||
|
},
|
||||||
|
1: {
|
||||||
|
NewReason("", testUser1, path.ExchangeService, path.EmailCategory),
|
||||||
|
NewReason("", testUser2, path.ExchangeService, path.EmailCategory),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
backupData: []backupInfo{
|
||||||
|
newBackupModel(
|
||||||
|
testBackup3,
|
||||||
|
true,
|
||||||
|
true,
|
||||||
|
false,
|
||||||
|
map[string]string{model.BackupTypeTag: model.AssistBackup},
|
||||||
|
nil),
|
||||||
|
newBackupModel(
|
||||||
|
testBackup2,
|
||||||
|
true,
|
||||||
|
true,
|
||||||
|
false,
|
||||||
|
map[string]string{model.BackupTypeTag: model.AssistBackup},
|
||||||
|
nil),
|
||||||
|
newBackupModel(testBackup1, true, true, false, nil, nil),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@ -564,17 +740,16 @@ func (suite *BaseFinderUnitSuite) TestGetBases() {
|
|||||||
testUser1,
|
testUser1,
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
expectedBaseReasons: map[int][]Reasoner{
|
expectedBaseReasons: map[int][]identity.Reasoner{
|
||||||
0: testUser1Mail,
|
0: testUser1Mail,
|
||||||
},
|
},
|
||||||
expectedAssistManifestReasons: map[int][]Reasoner{
|
expectedAssistManifestReasons: map[int][]identity.Reasoner{
|
||||||
0: testUser1Mail,
|
0: testUser1Mail,
|
||||||
1: testUser1Mail,
|
|
||||||
},
|
},
|
||||||
backupData: []backupInfo{
|
backupData: []backupInfo{
|
||||||
newBackupModel(testBackup1, true, true, false, nil),
|
newBackupModel(testBackup1, true, true, false, nil, nil),
|
||||||
// Shouldn't be returned but have here just so we can see.
|
// Shouldn't be returned but have here just so we can see.
|
||||||
newBackupModel(testBackup2, true, true, false, nil),
|
newBackupModel(testBackup2, true, true, false, nil, nil),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@ -600,16 +775,16 @@ func (suite *BaseFinderUnitSuite) TestGetBases() {
|
|||||||
testUser1,
|
testUser1,
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
expectedBaseReasons: map[int][]Reasoner{
|
expectedBaseReasons: map[int][]identity.Reasoner{
|
||||||
1: testUser1Mail,
|
1: testUser1Mail,
|
||||||
},
|
},
|
||||||
expectedAssistManifestReasons: map[int][]Reasoner{
|
expectedAssistManifestReasons: map[int][]identity.Reasoner{
|
||||||
1: testUser1Mail,
|
1: testUser1Mail,
|
||||||
},
|
},
|
||||||
backupData: []backupInfo{
|
backupData: []backupInfo{
|
||||||
// Shouldn't be returned but have here just so we can see.
|
// Shouldn't be returned but have here just so we can see.
|
||||||
newBackupModel(testBackup1, true, true, false, nil),
|
newBackupModel(testBackup1, true, true, false, nil, nil),
|
||||||
newBackupModel(testBackup2, true, true, false, nil),
|
newBackupModel(testBackup2, true, true, false, nil, nil),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@ -635,14 +810,12 @@ func (suite *BaseFinderUnitSuite) TestGetBases() {
|
|||||||
testUser1,
|
testUser1,
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
expectedBaseReasons: map[int][]Reasoner{},
|
expectedBaseReasons: map[int][]identity.Reasoner{},
|
||||||
expectedAssistManifestReasons: map[int][]Reasoner{
|
expectedAssistManifestReasons: map[int][]identity.Reasoner{},
|
||||||
1: testUser1Mail,
|
|
||||||
},
|
|
||||||
backupData: []backupInfo{
|
backupData: []backupInfo{
|
||||||
// Shouldn't be returned but have here just so we can see.
|
// Shouldn't be returned but have here just so we can see.
|
||||||
newBackupModel(testBackup1, true, true, false, nil),
|
newBackupModel(testBackup1, true, true, false, nil, nil),
|
||||||
newBackupModel(testBackup2, true, true, false, nil),
|
newBackupModel(testBackup2, true, true, false, nil, nil),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@ -659,14 +832,14 @@ func (suite *BaseFinderUnitSuite) TestGetBases() {
|
|||||||
testUser1,
|
testUser1,
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
expectedBaseReasons: map[int][]Reasoner{
|
expectedBaseReasons: map[int][]identity.Reasoner{
|
||||||
0: testUser1Mail,
|
0: testUser1Mail,
|
||||||
},
|
},
|
||||||
expectedAssistManifestReasons: map[int][]Reasoner{
|
expectedAssistManifestReasons: map[int][]identity.Reasoner{
|
||||||
0: testUser1Mail,
|
0: testUser1Mail,
|
||||||
},
|
},
|
||||||
backupData: []backupInfo{
|
backupData: []backupInfo{
|
||||||
newBackupModel(testBackup1, true, true, false, nil),
|
newBackupModel(testBackup1, true, true, false, nil, nil),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@ -694,16 +867,206 @@ func (suite *BaseFinderUnitSuite) TestGetBases() {
|
|||||||
testUser1,
|
testUser1,
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
expectedBaseReasons: map[int][]Reasoner{
|
expectedBaseReasons: map[int][]identity.Reasoner{
|
||||||
0: testUser1Mail,
|
0: testUser1Mail,
|
||||||
},
|
},
|
||||||
expectedAssistManifestReasons: map[int][]Reasoner{
|
expectedAssistManifestReasons: map[int][]identity.Reasoner{
|
||||||
0: testUser1Mail,
|
0: testUser1Mail,
|
||||||
},
|
},
|
||||||
backupData: []backupInfo{
|
backupData: []backupInfo{
|
||||||
newBackupModel(testBackup2, true, true, false, nil),
|
newBackupModel(testBackup2, true, true, false, nil, nil),
|
||||||
// Shouldn't be returned but here just so we can check.
|
// Shouldn't be returned but here just so we can check.
|
||||||
newBackupModel(testBackup1, true, true, false, nil),
|
newBackupModel(testBackup1, true, true, false, nil, nil),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Return latest assist & merge base pair",
|
||||||
|
input: testUser1Mail,
|
||||||
|
manifestData: []manifestInfo{
|
||||||
|
newManifestInfo(
|
||||||
|
testID4,
|
||||||
|
testT4,
|
||||||
|
testCompleteMan,
|
||||||
|
testBackup4,
|
||||||
|
nil,
|
||||||
|
testMail,
|
||||||
|
testUser1,
|
||||||
|
),
|
||||||
|
newManifestInfo(
|
||||||
|
testID3,
|
||||||
|
testT3,
|
||||||
|
testCompleteMan,
|
||||||
|
testBackup3,
|
||||||
|
nil,
|
||||||
|
testMail,
|
||||||
|
testUser1,
|
||||||
|
),
|
||||||
|
newManifestInfo(
|
||||||
|
testID2,
|
||||||
|
testT2,
|
||||||
|
testCompleteMan,
|
||||||
|
testBackup2,
|
||||||
|
nil,
|
||||||
|
testMail,
|
||||||
|
testUser1,
|
||||||
|
),
|
||||||
|
newManifestInfo(
|
||||||
|
testID1,
|
||||||
|
testT1,
|
||||||
|
testCompleteMan,
|
||||||
|
testBackup1,
|
||||||
|
nil,
|
||||||
|
testMail,
|
||||||
|
testUser1,
|
||||||
|
),
|
||||||
|
},
|
||||||
|
expectedBaseReasons: map[int][]identity.Reasoner{
|
||||||
|
2: testUser1Mail,
|
||||||
|
},
|
||||||
|
expectedAssistManifestReasons: map[int][]identity.Reasoner{
|
||||||
|
0: testUser1Mail,
|
||||||
|
2: testUser1Mail,
|
||||||
|
},
|
||||||
|
expectedAssistReasons: map[int][]identity.Reasoner{
|
||||||
|
0: testUser1Mail,
|
||||||
|
},
|
||||||
|
backupData: []backupInfo{
|
||||||
|
newBackupModel(
|
||||||
|
testBackup4,
|
||||||
|
true,
|
||||||
|
true,
|
||||||
|
false,
|
||||||
|
map[string]string{model.BackupTypeTag: model.AssistBackup},
|
||||||
|
nil),
|
||||||
|
newBackupModel(
|
||||||
|
testBackup3,
|
||||||
|
true,
|
||||||
|
true,
|
||||||
|
false,
|
||||||
|
map[string]string{model.BackupTypeTag: model.AssistBackup},
|
||||||
|
nil),
|
||||||
|
newBackupModel(testBackup2, true, true, false, nil, nil),
|
||||||
|
newBackupModel(testBackup1, true, true, false, nil, nil),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Newer merge base than assist base",
|
||||||
|
input: testUser1Mail,
|
||||||
|
manifestData: []manifestInfo{
|
||||||
|
newManifestInfo(
|
||||||
|
testID2,
|
||||||
|
testT2,
|
||||||
|
testCompleteMan,
|
||||||
|
testBackup2,
|
||||||
|
nil,
|
||||||
|
testMail,
|
||||||
|
testUser1,
|
||||||
|
),
|
||||||
|
newManifestInfo(
|
||||||
|
testID1,
|
||||||
|
testT1,
|
||||||
|
testCompleteMan,
|
||||||
|
testBackup1,
|
||||||
|
nil,
|
||||||
|
testMail,
|
||||||
|
testUser1,
|
||||||
|
),
|
||||||
|
},
|
||||||
|
expectedBaseReasons: map[int][]identity.Reasoner{
|
||||||
|
0: testUser1Mail,
|
||||||
|
},
|
||||||
|
expectedAssistManifestReasons: map[int][]identity.Reasoner{
|
||||||
|
0: testUser1Mail,
|
||||||
|
},
|
||||||
|
expectedAssistReasons: map[int][]identity.Reasoner{},
|
||||||
|
backupData: []backupInfo{
|
||||||
|
newBackupModel(testBackup2, true, true, false, nil, nil),
|
||||||
|
newBackupModel(
|
||||||
|
testBackup1,
|
||||||
|
true,
|
||||||
|
true,
|
||||||
|
false,
|
||||||
|
map[string]string{model.BackupTypeTag: model.AssistBackup},
|
||||||
|
nil),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Only assist bases",
|
||||||
|
input: testUser1Mail,
|
||||||
|
manifestData: []manifestInfo{
|
||||||
|
newManifestInfo(
|
||||||
|
testID2,
|
||||||
|
testT2,
|
||||||
|
testCompleteMan,
|
||||||
|
testBackup2,
|
||||||
|
nil,
|
||||||
|
testMail,
|
||||||
|
testUser1,
|
||||||
|
),
|
||||||
|
newManifestInfo(
|
||||||
|
testID1,
|
||||||
|
testT1,
|
||||||
|
testCompleteMan,
|
||||||
|
testBackup1,
|
||||||
|
nil,
|
||||||
|
testMail,
|
||||||
|
testUser1,
|
||||||
|
),
|
||||||
|
},
|
||||||
|
expectedBaseReasons: map[int][]identity.Reasoner{},
|
||||||
|
expectedAssistManifestReasons: map[int][]identity.Reasoner{
|
||||||
|
0: testUser1Mail,
|
||||||
|
},
|
||||||
|
expectedAssistReasons: map[int][]identity.Reasoner{
|
||||||
|
0: testUser1Mail,
|
||||||
|
},
|
||||||
|
backupData: []backupInfo{
|
||||||
|
newBackupModel(
|
||||||
|
testBackup2,
|
||||||
|
true,
|
||||||
|
true,
|
||||||
|
false,
|
||||||
|
map[string]string{model.BackupTypeTag: model.AssistBackup},
|
||||||
|
nil),
|
||||||
|
newBackupModel(
|
||||||
|
testBackup1,
|
||||||
|
true,
|
||||||
|
true,
|
||||||
|
false,
|
||||||
|
map[string]string{model.BackupTypeTag: model.AssistBackup},
|
||||||
|
nil),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Merge base with tag",
|
||||||
|
input: testUser1Mail,
|
||||||
|
manifestData: []manifestInfo{
|
||||||
|
newManifestInfo(
|
||||||
|
testID2,
|
||||||
|
testT2,
|
||||||
|
testCompleteMan,
|
||||||
|
testBackup2,
|
||||||
|
nil,
|
||||||
|
testMail,
|
||||||
|
testUser1,
|
||||||
|
),
|
||||||
|
},
|
||||||
|
expectedBaseReasons: map[int][]identity.Reasoner{
|
||||||
|
0: testUser1Mail,
|
||||||
|
},
|
||||||
|
expectedAssistManifestReasons: map[int][]identity.Reasoner{
|
||||||
|
0: testUser1Mail,
|
||||||
|
},
|
||||||
|
expectedAssistReasons: map[int][]identity.Reasoner{},
|
||||||
|
backupData: []backupInfo{
|
||||||
|
newBackupModel(testBackup2, true, true, false, nil, nil),
|
||||||
|
newBackupModel(
|
||||||
|
testBackup1,
|
||||||
|
true,
|
||||||
|
true,
|
||||||
|
false,
|
||||||
|
map[string]string{model.BackupTypeTag: model.MergeBackup},
|
||||||
|
nil),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
@ -730,6 +1093,12 @@ func (suite *BaseFinderUnitSuite) TestGetBases() {
|
|||||||
bb.Backups(),
|
bb.Backups(),
|
||||||
test.backupData,
|
test.backupData,
|
||||||
test.expectedBaseReasons)
|
test.expectedBaseReasons)
|
||||||
|
checkBackupEntriesMatch(
|
||||||
|
t,
|
||||||
|
bb.AssistBackups(),
|
||||||
|
test.backupData,
|
||||||
|
test.expectedAssistReasons)
|
||||||
|
|
||||||
checkManifestEntriesMatch(
|
checkManifestEntriesMatch(
|
||||||
t,
|
t,
|
||||||
bb.MergeBases(),
|
bb.MergeBases(),
|
||||||
@ -759,22 +1128,22 @@ func (suite *BaseFinderUnitSuite) TestFindBases_CustomTags() {
|
|||||||
),
|
),
|
||||||
}
|
}
|
||||||
backupData := []backupInfo{
|
backupData := []backupInfo{
|
||||||
newBackupModel(testBackup1, true, true, false, nil),
|
newBackupModel(testBackup1, true, true, false, nil, nil),
|
||||||
}
|
}
|
||||||
|
|
||||||
table := []struct {
|
table := []struct {
|
||||||
name string
|
name string
|
||||||
input []Reasoner
|
input []identity.Reasoner
|
||||||
tags map[string]string
|
tags map[string]string
|
||||||
// Use this to denote which manifests in data should be expected. Allows
|
// Use this to denote which manifests in data should be expected. Allows
|
||||||
// defining data in a table while not repeating things between data and
|
// defining data in a table while not repeating things between data and
|
||||||
// expected.
|
// expected.
|
||||||
expectedIdxs map[int][]Reasoner
|
expectedIdxs map[int][]identity.Reasoner
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
name: "no tags specified",
|
name: "no tags specified",
|
||||||
tags: nil,
|
tags: nil,
|
||||||
expectedIdxs: map[int][]Reasoner{
|
expectedIdxs: map[int][]identity.Reasoner{
|
||||||
0: testUser1Mail,
|
0: testUser1Mail,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@ -784,14 +1153,14 @@ func (suite *BaseFinderUnitSuite) TestFindBases_CustomTags() {
|
|||||||
"fnords": "",
|
"fnords": "",
|
||||||
"smarf": "",
|
"smarf": "",
|
||||||
},
|
},
|
||||||
expectedIdxs: map[int][]Reasoner{
|
expectedIdxs: map[int][]identity.Reasoner{
|
||||||
0: testUser1Mail,
|
0: testUser1Mail,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "subset of custom tags",
|
name: "subset of custom tags",
|
||||||
tags: map[string]string{"fnords": ""},
|
tags: map[string]string{"fnords": ""},
|
||||||
expectedIdxs: map[int][]Reasoner{
|
expectedIdxs: map[int][]identity.Reasoner{
|
||||||
0: testUser1Mail,
|
0: testUser1Mail,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@ -832,7 +1201,7 @@ func checkManifestEntriesMatch(
|
|||||||
t *testing.T,
|
t *testing.T,
|
||||||
retSnaps []ManifestEntry,
|
retSnaps []ManifestEntry,
|
||||||
allExpected []manifestInfo,
|
allExpected []manifestInfo,
|
||||||
expectedIdxsAndReasons map[int][]Reasoner,
|
expectedIdxsAndReasons map[int][]identity.Reasoner,
|
||||||
) {
|
) {
|
||||||
// Check the proper snapshot manifests were returned.
|
// Check the proper snapshot manifests were returned.
|
||||||
expected := make([]*snapshot.Manifest, 0, len(expectedIdxsAndReasons))
|
expected := make([]*snapshot.Manifest, 0, len(expectedIdxsAndReasons))
|
||||||
@ -848,7 +1217,7 @@ func checkManifestEntriesMatch(
|
|||||||
assert.ElementsMatch(t, expected, got)
|
assert.ElementsMatch(t, expected, got)
|
||||||
|
|
||||||
// Check the reasons for selecting each manifest are correct.
|
// Check the reasons for selecting each manifest are correct.
|
||||||
expectedReasons := make(map[manifest.ID][]Reasoner, len(expectedIdxsAndReasons))
|
expectedReasons := make(map[manifest.ID][]identity.Reasoner, len(expectedIdxsAndReasons))
|
||||||
for idx, reasons := range expectedIdxsAndReasons {
|
for idx, reasons := range expectedIdxsAndReasons {
|
||||||
expectedReasons[allExpected[idx].man.ID] = reasons
|
expectedReasons[allExpected[idx].man.ID] = reasons
|
||||||
}
|
}
|
||||||
@ -874,7 +1243,7 @@ func checkBackupEntriesMatch(
|
|||||||
t *testing.T,
|
t *testing.T,
|
||||||
retBups []BackupEntry,
|
retBups []BackupEntry,
|
||||||
allExpected []backupInfo,
|
allExpected []backupInfo,
|
||||||
expectedIdxsAndReasons map[int][]Reasoner,
|
expectedIdxsAndReasons map[int][]identity.Reasoner,
|
||||||
) {
|
) {
|
||||||
// Check the proper snapshot manifests were returned.
|
// Check the proper snapshot manifests were returned.
|
||||||
expected := make([]*backup.Backup, 0, len(expectedIdxsAndReasons))
|
expected := make([]*backup.Backup, 0, len(expectedIdxsAndReasons))
|
||||||
@ -890,7 +1259,7 @@ func checkBackupEntriesMatch(
|
|||||||
assert.ElementsMatch(t, expected, got)
|
assert.ElementsMatch(t, expected, got)
|
||||||
|
|
||||||
// Check the reasons for selecting each manifest are correct.
|
// Check the reasons for selecting each manifest are correct.
|
||||||
expectedReasons := make(map[model.StableID][]Reasoner, len(expectedIdxsAndReasons))
|
expectedReasons := make(map[model.StableID][]identity.Reasoner, len(expectedIdxsAndReasons))
|
||||||
for idx, reasons := range expectedIdxsAndReasons {
|
for idx, reasons := range expectedIdxsAndReasons {
|
||||||
expectedReasons[allExpected[idx].b.ID] = reasons
|
expectedReasons[allExpected[idx].b.ID] = reasons
|
||||||
}
|
}
|
||||||
|
|||||||
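These tables follow the convention spelled out in the struct-field comments above: each `expected*Reasons` map is keyed by an index into `manifestData` (or `backupData`), and the value lists the Reasons for which that entry should be selected. A runnable toy sketch of that convention, with a hypothetical `reasoner` string type standing in for `identity.Reasoner` and plain strings standing in for the `manifestInfo` fixtures:

```go
package main

import "fmt"

// reasoner is a hypothetical stand-in for identity.Reasoner; the real suite
// builds Reasons with NewReason(tenant, user, service, category).
type reasoner string

func main() {
	// Ordered like the manifestData slice in a table entry.
	manifestData := []string{"manifest-newer", "manifest-older"}

	// Keys index into manifestData; values are the Reasons the manifest at
	// that index should be selected for.
	expectedBaseReasons := map[int][]reasoner{
		1: {"user1-mail"},
	}

	for idx, reasons := range expectedBaseReasons {
		fmt.Printf("%s selected as merge base for %v\n", manifestData[idx], reasons)
	}
}
```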
@@ -14,7 +14,7 @@ import (
   "github.com/stretchr/testify/suite"
 
   "github.com/alcionai/corso/src/internal/data"
-  exchMock "github.com/alcionai/corso/src/internal/m365/exchange/mock"
+  exchMock "github.com/alcionai/corso/src/internal/m365/service/exchange/mock"
   "github.com/alcionai/corso/src/internal/tester"
   "github.com/alcionai/corso/src/pkg/fault"
   "github.com/alcionai/corso/src/pkg/path"
@@ -7,6 +7,7 @@ import (
   "github.com/alcionai/corso/src/internal/data"
   "github.com/alcionai/corso/src/internal/kopia"
   "github.com/alcionai/corso/src/pkg/backup/details"
+  "github.com/alcionai/corso/src/pkg/backup/identity"
   "github.com/alcionai/corso/src/pkg/fault"
   "github.com/alcionai/corso/src/pkg/path"
 )
@@ -15,7 +16,7 @@ type (
   BackupConsumer interface {
     ConsumeBackupCollections(
       ctx context.Context,
-      backupReasons []kopia.Reasoner,
+      backupReasons []identity.Reasoner,
       bases kopia.BackupBases,
       cs []data.BackupCollection,
       pmr prefixmatcher.StringSetReader,
@@ -38,7 +39,7 @@ type (
   BaseFinder interface {
     FindBases(
       ctx context.Context,
-      reasons []kopia.Reasoner,
+      reasons []identity.Reasoner,
       tags map[string]string,
     ) kopia.BackupBases
   }
@@ -13,7 +13,7 @@ import (
   "github.com/stretchr/testify/suite"
 
   "github.com/alcionai/corso/src/internal/data"
-  "github.com/alcionai/corso/src/internal/m365/exchange/mock"
+  "github.com/alcionai/corso/src/internal/m365/service/exchange/mock"
   "github.com/alcionai/corso/src/internal/tester"
   "github.com/alcionai/corso/src/pkg/fault"
   "github.com/alcionai/corso/src/pkg/path"
@@ -1,6 +1,8 @@
 package kopia
 
 import (
+  "time"
+
   "github.com/alcionai/clues"
 
   "github.com/alcionai/corso/src/internal/common/prefixmatcher"
@@ -12,14 +14,11 @@ type DetailsMergeInfoer interface {
   // ItemsToMerge returns the number of items that need to be merged.
   ItemsToMerge() int
   // GetNewPathRefs takes the old RepoRef and old LocationRef of an item and
-  // returns the new RepoRef, a prefix of the old LocationRef to replace, and
-  // the new LocationRefPrefix of the item if the item should be merged. If the
+  // returns the new RepoRef and the new location of the item the item. If the
   // item shouldn't be merged nils are returned.
-  //
-  // If the returned old LocationRef prefix is equal to the old LocationRef then
-  // the entire LocationRef should be replaced with the returned value.
   GetNewPathRefs(
     oldRef *path.Builder,
+    modTime time.Time,
     oldLoc details.LocationIDer,
   ) (path.Path, *path.Builder, error)
 }
@@ -27,6 +26,7 @@ type DetailsMergeInfoer interface {
 type prevRef struct {
   repoRef path.Path
   locRef *path.Builder
+  modTime *time.Time
 }
 
 type mergeDetails struct {
@@ -42,8 +42,12 @@ func (m *mergeDetails) ItemsToMerge() int {
   return len(m.repoRefs)
 }
 
+// addRepoRef adds an entry in mergeDetails that can be looked up later. If
+// modTime is non-nil then it's checked during lookup. If it is nil then the
+// mod time provided during lookup is ignored.
 func (m *mergeDetails) addRepoRef(
   oldRef *path.Builder,
+  modTime *time.Time,
   newRef path.Path,
   newLocRef *path.Builder,
 ) error {
@@ -58,6 +62,7 @@ func (m *mergeDetails) addRepoRef(
   pr := prevRef{
     repoRef: newRef,
     locRef: newLocRef,
+    modTime: modTime,
   }
 
   m.repoRefs[oldRef.ShortRef()] = pr
@@ -67,6 +72,7 @@ func (m *mergeDetails) addRepoRef(
 
 func (m *mergeDetails) GetNewPathRefs(
   oldRef *path.Builder,
+  modTime time.Time,
   oldLoc details.LocationIDer,
 ) (path.Path, *path.Builder, error) {
   pr, ok := m.repoRefs[oldRef.ShortRef()]
@@ -74,6 +80,14 @@ func (m *mergeDetails) GetNewPathRefs(
     return nil, nil, nil
   }
 
+  // ModTimes don't match which means we're attempting to merge a different
+  // version of the item (i.e. an older version from an assist base). We
+  // shouldn't return a match because it could cause us to source out-of-date
+  // details for the item.
+  if pr.modTime != nil && !pr.modTime.Equal(modTime) {
+    return nil, nil, nil
+  }
+
   // This was a location specified directly by a collection.
   if pr.locRef != nil {
     return pr.repoRef, pr.locRef, nil
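The hunk above gates detail merging on the item's modification time: a stored `prevRef` with a non-nil `modTime` only matches a lookup whose mod time is equal, so a stale copy from an assist base can't supply details for a newer version of the item. A minimal, self-contained sketch of that guard (hypothetical `prevRefSketch` and `lookup` names, not the actual Corso API):

```go
package main

import (
	"fmt"
	"time"
)

// prevRefSketch mirrors the shape of prevRef above: a nil modTime means
// "don't check", while a non-nil value must match the lookup exactly.
type prevRefSketch struct {
	newRef  string
	modTime *time.Time
}

func lookup(entries map[string]prevRefSketch, oldRef string, modTime time.Time) (string, bool) {
	pr, ok := entries[oldRef]
	if !ok {
		return "", false
	}
	// Mismatched mod times mean a different version of the item (e.g. an
	// older copy from an assist base); returning it could source stale details.
	if pr.modTime != nil && !pr.modTime.Equal(modTime) {
		return "", false
	}
	return pr.newRef, true
}

func main() {
	t1 := time.Now()
	entries := map[string]prevRefSketch{"old/item": {newRef: "new/item", modTime: &t1}}

	if ref, ok := lookup(entries, "old/item", t1); ok {
		fmt.Println("match:", ref)
	}
	if _, ok := lookup(entries, "old/item", t1.Add(time.Second)); !ok {
		fmt.Println("newer version: no match, details sourced elsewhere")
	}
}
```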
@@ -2,6 +2,7 @@ package kopia
 
 import (
   "testing"
+  "time"
 
   "github.com/alcionai/clues"
   "github.com/stretchr/testify/assert"
@@ -47,10 +48,10 @@ func (suite *DetailsMergeInfoerUnitSuite) TestAddRepoRef_DuplicateFails() {
 
   dm := newMergeDetails()
 
-  err := dm.addRepoRef(oldRef1.ToBuilder(), oldRef1, nil)
+  err := dm.addRepoRef(oldRef1.ToBuilder(), nil, oldRef1, nil)
   require.NoError(t, err, clues.ToCore(err))
 
-  err = dm.addRepoRef(oldRef1.ToBuilder(), oldRef1, nil)
+  err = dm.addRepoRef(oldRef1.ToBuilder(), nil, oldRef1, nil)
   require.Error(t, err, clues.ToCore(err))
 }
 
@@ -58,6 +59,10 @@ func (suite *DetailsMergeInfoerUnitSuite) TestAddRepoRef_DuplicateFails() {
 // for stored RepoRefs.
 func (suite *DetailsMergeInfoerUnitSuite) TestGetNewPathRefs() {
   t := suite.T()
 
+  t1 := time.Now()
+  t2 := t1.Add(time.Second * 30)
+
   oldRef1 := makePath(
     t,
     []string{
@@ -110,10 +115,13 @@ func (suite *DetailsMergeInfoerUnitSuite) TestGetNewPathRefs() {
 
   dm := newMergeDetails()
 
-  err := dm.addRepoRef(oldRef1.ToBuilder(), newRef1, newLoc1)
+  err := dm.addRepoRef(oldRef1.ToBuilder(), &t1, newRef1, newLoc1)
   require.NoError(t, err, clues.ToCore(err))
 
-  err = dm.addRepoRef(oldRef2.ToBuilder(), newRef2, nil)
+  err = dm.addRepoRef(oldRef2.ToBuilder(), &t2, newRef2, nil)
+  require.NoError(t, err, clues.ToCore(err))
+
+  err = dm.addRepoRef(newRef1.ToBuilder(), nil, oldRef1, oldLoc1)
   require.NoError(t, err, clues.ToCore(err))
 
   // Add prefix matcher entry.
@@ -121,58 +129,89 @@ func (suite *DetailsMergeInfoerUnitSuite) TestGetNewPathRefs() {
   require.NoError(t, err, clues.ToCore(err))
 
   table := []struct {
     name string
     searchRef *path.Builder
-    searchLoc mockLocationIDer
-    errCheck require.ErrorAssertionFunc
-    expectedRef path.Path
-    expectedLoc *path.Builder
+    searchModTime time.Time
+    searchLoc mockLocationIDer
+    errCheck require.ErrorAssertionFunc
+    expectFound bool
+    expectedRef path.Path
+    expectedLoc *path.Builder
   }{
     {
       name: "Exact Match With Loc",
       searchRef: oldRef1.ToBuilder(),
-      searchLoc: searchLoc1,
-      errCheck: require.NoError,
-      expectedRef: newRef1,
-      expectedLoc: newLoc1,
+      searchModTime: t1,
+      searchLoc: searchLoc1,
+      errCheck: require.NoError,
+      expectFound: true,
+      expectedRef: newRef1,
+      expectedLoc: newLoc1,
     },
     {
       name: "Exact Match Without Loc",
       searchRef: oldRef1.ToBuilder(),
-      errCheck: require.NoError,
-      expectedRef: newRef1,
-      expectedLoc: newLoc1,
+      searchModTime: t1,
+      errCheck: require.NoError,
+      expectFound: true,
+      expectedRef: newRef1,
+      expectedLoc: newLoc1,
     },
     {
-      name: "Prefix Match",
-      searchRef: oldRef2.ToBuilder(),
-      searchLoc: searchLoc2,
+      name: "Exact Match Without Loc ModTime Not In Merger",
+      searchRef: newRef1.ToBuilder(),
+      searchModTime: time.Now(),
       errCheck: require.NoError,
-      expectedRef: newRef2,
-      expectedLoc: newLoc2,
+      expectFound: true,
+      expectedRef: oldRef1,
+      expectedLoc: oldLoc1,
     },
     {
-      name: "Would Be Prefix Match Without Old Loc Errors",
+      name: "Prefix Match",
       searchRef: oldRef2.ToBuilder(),
-      errCheck: require.Error,
+      searchModTime: t2,
+      searchLoc: searchLoc2,
+      errCheck: require.NoError,
+      expectFound: true,
+      expectedRef: newRef2,
+      expectedLoc: newLoc2,
     },
     {
-      name: "Not Found With Old Loc",
-      searchRef: newRef1.ToBuilder(),
-      searchLoc: searchLoc2,
-      errCheck: require.NoError,
+      name: "Would Be Prefix Match Without Old Loc Errors",
+      searchRef: oldRef2.ToBuilder(),
+      searchModTime: t2,
+      errCheck: require.Error,
     },
     {
-      name: "Not Found Without Old Loc",
-      searchRef: newRef1.ToBuilder(),
-      errCheck: require.NoError,
+      name: "Not Found With Old Loc",
+      searchRef: newRef2.ToBuilder(),
+      searchModTime: t1,
+      searchLoc: searchLoc2,
+      errCheck: require.NoError,
+    },
+    {
+      name: "Not Found Without Old Loc",
+      searchRef: newRef2.ToBuilder(),
+      searchModTime: t1,
+      errCheck: require.NoError,
+    },
+    {
+      name: "Not Found Due To Mod Time",
+      searchRef: oldRef1.ToBuilder(),
+      searchModTime: time.Now(),
+      searchLoc: searchLoc1,
+      errCheck: require.NoError,
     },
   }
 
   for _, test := range table {
     suite.Run(test.name, func() {
       t := suite.T()
 
-      newRef, newLoc, err := dm.GetNewPathRefs(test.searchRef, test.searchLoc)
+      newRef, newLoc, err := dm.GetNewPathRefs(
+        test.searchRef,
+        test.searchModTime,
+        test.searchLoc)
       test.errCheck(t, err, clues.ToCore(err))
 
       assert.Equal(t, test.expectedRef, newRef, "RepoRef")
@@ -14,13 +14,17 @@ func AssertBackupBasesEqual(t *testing.T, expect, got BackupBases) {
   if expect == nil {
     assert.Empty(t, got.Backups(), "backups")
     assert.Empty(t, got.MergeBases(), "merge bases")
+    assert.Empty(t, got.AssistBackups(), "assist backups")
     assert.Empty(t, got.AssistBases(), "assist bases")
 
     return
   }
 
   if got == nil {
-    if len(expect.Backups()) > 0 && len(expect.MergeBases()) > 0 && len(expect.AssistBases()) > 0 {
+    if len(expect.Backups()) > 0 &&
+      len(expect.MergeBases()) > 0 &&
+      len(expect.AssistBackups()) > 0 &&
+      len(expect.AssistBases()) > 0 {
       assert.Fail(t, "got was nil but expected non-nil result %v", expect)
     }
 
@@ -29,6 +33,7 @@ func AssertBackupBasesEqual(t *testing.T, expect, got BackupBases) {
 
   assert.ElementsMatch(t, expect.Backups(), got.Backups(), "backups")
   assert.ElementsMatch(t, expect.MergeBases(), got.MergeBases(), "merge bases")
+  assert.ElementsMatch(t, expect.AssistBackups(), got.AssistBackups(), "assist backups")
   assert.ElementsMatch(t, expect.AssistBases(), got.AssistBases(), "assist bases")
 }
 
@@ -52,6 +57,11 @@ func (bb *MockBackupBases) WithMergeBases(m ...ManifestEntry) *MockBackupBases {
   return bb
 }
 
+func (bb *MockBackupBases) WithAssistBackups(b ...BackupEntry) *MockBackupBases {
+  bb.backupBases.assistBackups = append(bb.AssistBackups(), b...)
+  return bb
+}
+
 func (bb *MockBackupBases) WithAssistBases(m ...ManifestEntry) *MockBackupBases {
   bb.backupBases.assistBases = append(bb.AssistBases(), m...)
   return bb
@@ -23,6 +23,7 @@ import (
   "golang.org/x/exp/maps"
 
   "github.com/alcionai/corso/src/internal/common/prefixmatcher"
+  "github.com/alcionai/corso/src/internal/common/ptr"
   "github.com/alcionai/corso/src/internal/data"
   "github.com/alcionai/corso/src/internal/diagnostics"
   "github.com/alcionai/corso/src/internal/m365/graph"
@@ -137,6 +138,7 @@ type itemDetails struct {
   prevPath path.Path
   locationPath *path.Builder
   cached bool
+  modTime *time.Time
 }
 
 type corsoProgress struct {
@@ -148,9 +150,11 @@ type corsoProgress struct {
 
   snapshotfs.UploadProgress
   pending map[string]*itemDetails
-  deets *details.Builder
-  // toMerge represents items that we don't have in-memory item info for. The
-  // item info for these items should be sourced from a base snapshot later on.
+  // deets contains entries that are complete and don't need merged with base
+  // backup data at all.
+  deets *details.Builder
+  // toMerge represents items that we either don't have in-memory item info or
+  // that need sourced from a base backup due to caching etc.
   toMerge *mergeDetails
   mu sync.RWMutex
   totalBytes int64
@@ -194,7 +198,7 @@ func (cp *corsoProgress) FinishedFile(relativePath string, err error) {
 
   // These items were sourced from a base snapshot or were cached in kopia so we
   // never had to materialize their details in-memory.
-  if d.info == nil {
+  if d.info == nil || d.cached {
     if d.prevPath == nil {
       cp.errs.AddRecoverable(cp.ctx, clues.New("item sourced from previous backup with no previous path").
         With(
@@ -208,7 +212,11 @@ func (cp *corsoProgress) FinishedFile(relativePath string, err error) {
     cp.mu.Lock()
     defer cp.mu.Unlock()
 
-    err := cp.toMerge.addRepoRef(d.prevPath.ToBuilder(), d.repoPath, d.locationPath)
+    err := cp.toMerge.addRepoRef(
+      d.prevPath.ToBuilder(),
+      d.modTime,
+      d.repoPath,
+      d.locationPath)
     if err != nil {
       cp.errs.AddRecoverable(cp.ctx, clues.Wrap(err, "adding item to merge list").
         With(
@@ -375,6 +383,11 @@ func collectionEntries(
         continue
       }
 
+      modTime := time.Now()
+      if smt, ok := e.(data.StreamModTime); ok {
+        modTime = smt.ModTime()
+      }
+
       // Not all items implement StreamInfo. For example, the metadata files
       // do not because they don't contain information directly backed up or
       // used for restore. If progress does not contain information about a
@@ -391,18 +404,22 @@ func collectionEntries(
         // info nil.
         itemInfo := ei.Info()
         d := &itemDetails{
           info: &itemInfo,
           repoPath: itemPath,
+          // Also use the current path as the previous path for this item. This
+          // is so that if the item is marked as cached and we need to merge
+          // details with an assist backup base which sourced the cached item we
+          // can find it with the lookup in DetailsMergeInfoer.
+          //
+          // This all works out because cached item checks in kopia are direct
+          // path + metadata comparisons.
+          prevPath: itemPath,
           locationPath: locationPath,
+          modTime: &modTime,
         }
         progress.put(encodeAsPath(itemPath.PopFront().Elements()...), d)
       }
 
-      modTime := time.Now()
-      if smt, ok := e.(data.StreamModTime); ok {
-        modTime = smt.ModTime()
-      }
-
       entry := virtualfs.StreamingFileWithModTimeFromReader(
         encodedName,
         modTime,
@@ -508,6 +525,7 @@ func streamBaseEntries(
       repoPath: itemPath,
       prevPath: prevItemPath,
       locationPath: locationPath,
+      modTime: ptr.To(entry.ModTime()),
     }
     progress.put(encodeAsPath(itemPath.PopFront().Elements()...), d)
   }
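With this change, `FinishedFile` treats kopia-cached items like base-sourced items: anything with `d.info == nil || d.cached` is routed into the `toMerge` set so its details get sourced from a base backup instead of being rebuilt. A runnable toy sketch of that routing decision (hypothetical `itemSketch` and `route` names, not the actual Corso types):

```go
package main

import "fmt"

// itemSketch stands in for itemDetails above: info == nil means the item's
// details live in a base backup; cached means kopia skipped re-uploading it.
type itemSketch struct {
	name   string
	info   *struct{}
	cached bool
}

func route(d itemSketch) string {
	// Mirrors the updated check `if d.info == nil || d.cached`: both kinds of
	// item get their details merged from a base backup rather than rebuilt.
	if d.info == nil || d.cached {
		return "toMerge (details sourced from base backup)"
	}
	return "deets (details built in-memory)"
}

func main() {
	items := []itemSketch{
		{name: "changed item", info: &struct{}{}},
		{name: "cached item", info: &struct{}{}, cached: true},
		{name: "base item", info: nil},
	}
	for _, d := range items {
		fmt.Printf("%s -> %s\n", d.name, route(d))
	}
}
```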
@@ -21,9 +21,10 @@ import (
 
   pmMock "github.com/alcionai/corso/src/internal/common/prefixmatcher/mock"
   "github.com/alcionai/corso/src/internal/data"
-  exchMock "github.com/alcionai/corso/src/internal/m365/exchange/mock"
+  exchMock "github.com/alcionai/corso/src/internal/m365/service/exchange/mock"
   "github.com/alcionai/corso/src/internal/tester"
   "github.com/alcionai/corso/src/pkg/backup/details"
+  "github.com/alcionai/corso/src/pkg/backup/identity"
   "github.com/alcionai/corso/src/pkg/fault"
   "github.com/alcionai/corso/src/pkg/path"
 )
@@ -386,7 +387,9 @@ var finishedFileTable = []struct {
   cachedItems func(fname string, fpath path.Path) map[string]testInfo
   expectedBytes int64
   expectedNumEntries int
-  err error
+  // Non-folder items.
+  expectedNumItems int
+  err error
 }{
   {
     name: "DetailsExist",
@@ -410,6 +413,7 @@ var finishedFileTable = []struct {
     expectedBytes: 100,
     // 1 file and 5 folders.
     expectedNumEntries: 2,
+    expectedNumItems: 1,
   },
   {
     name: "PendingNoDetails",
@@ -453,16 +457,34 @@
 
 func (suite *CorsoProgressUnitSuite) TestFinishedFile() {
   table := []struct {
     name string
     cached bool
+    differentPrevPath bool
+    dropInfo bool
+    expectToMergeEntries bool
   }{
     {
       name: "all updated",
       cached: false,
     },
     {
-      name: "all cached",
+      name: "all cached from assist base",
       cached: true,
+      expectToMergeEntries: true,
+    },
+    {
+      name: "all cached from merge base",
+      cached: true,
+      differentPrevPath: true,
+      dropInfo: true,
+      expectToMergeEntries: true,
+    },
+    {
+      name: "all not cached from merge base",
+      cached: false,
+      differentPrevPath: true,
+      dropInfo: true,
+      expectToMergeEntries: true,
     },
   }
 
@@ -480,6 +502,7 @@ func (suite *CorsoProgressUnitSuite) TestFinishedFile() {
         ctx: ctx,
         UploadProgress: &snapshotfs.NullUploadProgress{},
         deets: bd,
+        toMerge: newMergeDetails(),
         pending: map[string]*itemDetails{},
         errs: fault.New(true),
       }
@@ -487,6 +510,29 @@
       ci := test.cachedItems(suite.targetFileName, suite.targetFilePath)
 
       for k, v := range ci {
+        if v.info != nil {
+          v.info.prevPath = v.info.repoPath
+
+          if cachedTest.differentPrevPath {
+            // Doesn't really matter how we change the path as long as it's
+            // different somehow.
+            p, err := path.FromDataLayerPath(
+              suite.targetFilePath.String()+"2",
+              true)
+            require.NoError(
+              t,
+              err,
+              "making prevPath: %v",
+              clues.ToCore(err))
+
+            v.info.prevPath = p
+          }
+
+          if cachedTest.dropInfo {
+            v.info.info = nil
+          }
+        }
+
         cp.put(k, v.info)
       }
 
@@ -509,6 +555,17 @@
       assert.Empty(t, cp.pending)
 
       entries := bd.Details().Entries
+
+      if cachedTest.expectToMergeEntries {
+        assert.Equal(
+          t,
+          test.expectedNumItems,
+          cp.toMerge.ItemsToMerge(),
+          "merge entries")
+
+        return
+      }
+
       assert.Len(t, entries, test.expectedNumEntries)
 
       for _, entry := range entries {
@@ -616,7 +673,10 @@ func (suite *CorsoProgressUnitSuite) TestFinishedFileBaseItemDoesntBuildHierarch
   assert.Empty(t, cp.deets)
 
   for _, expected := range expectedToMerge {
-    gotRef, _, _ := cp.toMerge.GetNewPathRefs(expected.oldRef, nil)
+    gotRef, _, _ := cp.toMerge.GetNewPathRefs(
+      expected.oldRef,
+      time.Now(),
+      nil)
     if !assert.NotNil(t, gotRef) {
       continue
     }
@@ -951,7 +1011,7 @@ func makeManifestEntry(
   service path.ServiceType,
   categories ...path.CategoryType,
 ) ManifestEntry {
-  var reasons []Reasoner
+  var reasons []identity.Reasoner
 
   for _, c := range categories {
     reasons = append(reasons, NewReason(tenant, resourceOwner, service, c))
@@ -23,6 +23,7 @@ import (
   "github.com/alcionai/corso/src/internal/operations/inject"
   "github.com/alcionai/corso/src/internal/stats"
   "github.com/alcionai/corso/src/pkg/backup/details"
+  "github.com/alcionai/corso/src/pkg/backup/identity"
   "github.com/alcionai/corso/src/pkg/control/repository"
   "github.com/alcionai/corso/src/pkg/fault"
   "github.com/alcionai/corso/src/pkg/logger"
@@ -137,7 +138,7 @@ func (w *Wrapper) Close(ctx context.Context) error {
 // complete backup of all data.
 func (w Wrapper) ConsumeBackupCollections(
   ctx context.Context,
-  backupReasons []Reasoner,
+  backupReasons []identity.Reasoner,
   bases BackupBases,
   collections []data.BackupCollection,
   globalExcludeSet prefixmatcher.StringSetReader,
@ -25,10 +25,11 @@ import (
|
|||||||
"github.com/alcionai/corso/src/internal/common/ptr"
|
"github.com/alcionai/corso/src/internal/common/ptr"
|
||||||
"github.com/alcionai/corso/src/internal/data"
|
"github.com/alcionai/corso/src/internal/data"
|
||||||
"github.com/alcionai/corso/src/internal/data/mock"
|
"github.com/alcionai/corso/src/internal/data/mock"
|
||||||
exchMock "github.com/alcionai/corso/src/internal/m365/exchange/mock"
|
"github.com/alcionai/corso/src/internal/m365/collection/drive/metadata"
|
||||||
"github.com/alcionai/corso/src/internal/m365/onedrive/metadata"
|
exchMock "github.com/alcionai/corso/src/internal/m365/service/exchange/mock"
|
||||||
"github.com/alcionai/corso/src/internal/tester"
|
"github.com/alcionai/corso/src/internal/tester"
|
||||||
"github.com/alcionai/corso/src/pkg/backup/details"
|
"github.com/alcionai/corso/src/pkg/backup/details"
|
||||||
|
"github.com/alcionai/corso/src/pkg/backup/identity"
|
||||||
"github.com/alcionai/corso/src/pkg/control/repository"
|
"github.com/alcionai/corso/src/pkg/control/repository"
|
||||||
"github.com/alcionai/corso/src/pkg/fault"
|
"github.com/alcionai/corso/src/pkg/fault"
|
||||||
"github.com/alcionai/corso/src/pkg/logger"
|
"github.com/alcionai/corso/src/pkg/logger"
|
||||||
@ -800,7 +801,7 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections() {
|
|||||||
"brunhilda": "",
|
"brunhilda": "",
|
||||||
}
|
}
|
||||||
|
|
||||||
reasons := []Reasoner{
|
reasons := []identity.Reasoner{
|
||||||
NewReason(
|
NewReason(
|
||||||
testTenant,
|
testTenant,
|
||||||
suite.storePath1.ResourceOwner(),
|
suite.storePath1.ResourceOwner(),
|
||||||
@ -964,9 +965,11 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections() {
|
|||||||
collections: collections,
|
collections: collections,
|
||||||
expectedUploadedFiles: 0,
|
expectedUploadedFiles: 0,
|
||||||
expectedCachedFiles: 47,
|
expectedCachedFiles: 47,
|
||||||
deetsUpdated: assert.False,
|
// Entries go to details merger since cached files are merged too.
|
||||||
hashedBytesCheck: assert.Zero,
|
expectMerge: true,
|
||||||
uploadedBytes: []int64{4000, 6000},
|
deetsUpdated: assert.False,
|
||||||
|
hashedBytesCheck: assert.Zero,
|
||||||
|
uploadedBytes: []int64{4000, 6000},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "Kopia Assist And Merge No Files Changed",
|
name: "Kopia Assist And Merge No Files Changed",
|
||||||
@ -998,6 +1001,7 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections() {
|
|||||||
collections: collections,
|
collections: collections,
|
||||||
expectedUploadedFiles: 0,
|
expectedUploadedFiles: 0,
|
||||||
expectedCachedFiles: 47,
|
expectedCachedFiles: 47,
|
||||||
|
expectMerge: true,
|
||||||
deetsUpdated: assert.False,
|
deetsUpdated: assert.False,
|
||||||
hashedBytesCheck: assert.Zero,
|
hashedBytesCheck: assert.Zero,
|
||||||
uploadedBytes: []int64{4000, 6000},
|
uploadedBytes: []int64{4000, 6000},
|
||||||
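Both hunks above extend this table-driven test with an `expectMerge` expectation: runs whose files are all kopia-cached now still push their entries through the details merger. A minimal sketch of the table struct these cases imply (field set inferred from the literals in the hunks, not copied from the file):

    // Sketch: the test table presumably gains a boolean knob alongside the
    // existing expectations. Types are inferred from the assertions used.
    type backupCollectionsCase struct {
    	name                  string
    	collections           []data.BackupCollection
    	expectedUploadedFiles int
    	expectedCachedFiles   int
    	expectMerge           bool // cached entries must reach the details merger
    	deetsUpdated          assert.BoolAssertionFunc  // e.g. assert.False
    	hashedBytesCheck      assert.ValueAssertionFunc // e.g. assert.Zero
    	uploadedBytes         []int64
    }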
@@ -1072,7 +1076,7 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_NoDetailsForMeta() {
 		"brunhilda": "",
 	}

-	reasons := []Reasoner{
+	reasons := []identity.Reasoner{
 		NewReason(
 			testTenant,
 			storePath.ResourceOwner(),

@@ -1267,7 +1271,7 @@ func (suite *KopiaIntegrationSuite) TestRestoreAfterCompressionChange() {

 	stats, _, _, err := w.ConsumeBackupCollections(
 		ctx,
-		[]Reasoner{r},
+		[]identity.Reasoner{r},
 		nil,
 		[]data.BackupCollection{dc1, dc2},
 		nil,

@@ -1296,6 +1300,7 @@ func (suite *KopiaIntegrationSuite) TestRestoreAfterCompressionChange() {
 	testForFiles(t, ctx, expected, result)
 }

+// TODO(pandeyabs): Switch to m365/mock/BackupCollection.
 type mockBackupCollection struct {
 	path path.Path
 	loc  *path.Builder

@@ -1385,7 +1390,7 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_ReaderError() {

 	stats, deets, _, err := suite.w.ConsumeBackupCollections(
 		suite.ctx,
-		[]Reasoner{r},
+		[]identity.Reasoner{r},
 		nil,
 		collections,
 		nil,

@@ -1618,7 +1623,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) SetupTest() {

 	stats, deets, _, err := suite.w.ConsumeBackupCollections(
 		suite.ctx,
-		[]Reasoner{r},
+		[]identity.Reasoner{r},
 		nil,
 		collections,
 		nil,

@@ -1745,11 +1750,11 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() {

 	stats, _, _, err := suite.w.ConsumeBackupCollections(
 		suite.ctx,
-		[]Reasoner{r},
+		[]identity.Reasoner{r},
 		NewMockBackupBases().WithMergeBases(
 			ManifestEntry{
 				Manifest: man,
-				Reasons:  []Reasoner{r},
+				Reasons:  []identity.Reasoner{r},
 			},
 		),
 		test.cols(),

--- next file (path not shown in this view) ---

@@ -8,10 +8,10 @@ import (
 	"github.com/alcionai/corso/src/internal/common/prefixmatcher"
 	"github.com/alcionai/corso/src/internal/data"
 	"github.com/alcionai/corso/src/internal/diagnostics"
-	"github.com/alcionai/corso/src/internal/m365/exchange"
 	"github.com/alcionai/corso/src/internal/m365/graph"
-	"github.com/alcionai/corso/src/internal/m365/onedrive"
-	"github.com/alcionai/corso/src/internal/m365/sharepoint"
+	"github.com/alcionai/corso/src/internal/m365/service/exchange"
+	"github.com/alcionai/corso/src/internal/m365/service/onedrive"
+	"github.com/alcionai/corso/src/internal/m365/service/sharepoint"
 	"github.com/alcionai/corso/src/internal/operations/inject"
 	"github.com/alcionai/corso/src/pkg/fault"
 	"github.com/alcionai/corso/src/pkg/filters"

@@ -110,7 +110,7 @@ func (ctrl *Controller) ProduceBackupCollections(
 		bpc,
 		ctrl.AC,
 		ctrl.credentials,
-		ctrl,
+		ctrl.UpdateStatus,
 		errs)
 	if err != nil {
 		return nil, nil, false, err
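Note the narrowing in the last hunk: ProduceBackupCollections now receives `ctrl.UpdateStatus` rather than the whole `*Controller`. The callback's real signature is not visible in this diff, so the sketch below only illustrates the method-value pattern with assumed types:

    package main

    import "fmt"

    // Status stands in for corso's support.ControllerOperationStatus; the
    // callback signature here is an assumption made for illustration.
    type Status struct{ Completed int }

    type statusUpdater func(Status)

    type controller struct{ seen int }

    func (c *controller) UpdateStatus(s Status) { c.seen += s.Completed }

    // produce depends only on the callback, never on the controller type.
    func produce(update statusUpdater) {
    	update(Status{Completed: 1})
    }

    func main() {
    	ctrl := &controller{}
    	produce(ctrl.UpdateStatus) // a method value satisfies the func type
    	fmt.Println(ctrl.seen)     // 1
    }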
--- next file (path not shown in this view) ---

@@ -11,9 +11,9 @@ import (
 	"github.com/stretchr/testify/suite"

 	inMock "github.com/alcionai/corso/src/internal/common/idname/mock"
-	"github.com/alcionai/corso/src/internal/m365/exchange"
 	"github.com/alcionai/corso/src/internal/m365/resource"
-	"github.com/alcionai/corso/src/internal/m365/sharepoint"
+	"github.com/alcionai/corso/src/internal/m365/service/exchange"
+	"github.com/alcionai/corso/src/internal/m365/service/sharepoint"
 	"github.com/alcionai/corso/src/internal/operations/inject"
 	"github.com/alcionai/corso/src/internal/tester"
 	"github.com/alcionai/corso/src/internal/tester/tconfig"

@@ -307,7 +307,7 @@ func (suite *DataCollectionIntgSuite) TestSharePointDataCollection() {
 		bpc,
 		suite.ac,
 		ctrl.credentials,
-		ctrl,
+		ctrl.UpdateStatus,
 		fault.New(true))
 	require.NoError(t, err, clues.ToCore(err))
 	assert.True(t, canUsePreviousBackup, "can use previous backup")

--- next file (path not shown in this view) ---

@@ -1,5 +1,5 @@
-// Package onedrive provides support for retrieving M365 OneDrive objects
-package onedrive
+// Package drive provides support for retrieving M365 Drive objects
+package drive

 import (
 	"context"

@@ -15,8 +15,8 @@ import (

 	"github.com/alcionai/corso/src/internal/common/ptr"
 	"github.com/alcionai/corso/src/internal/data"
+	"github.com/alcionai/corso/src/internal/m365/collection/drive/metadata"
 	"github.com/alcionai/corso/src/internal/m365/graph"
-	"github.com/alcionai/corso/src/internal/m365/onedrive/metadata"
 	"github.com/alcionai/corso/src/internal/m365/support"
 	"github.com/alcionai/corso/src/internal/observe"
 	"github.com/alcionai/corso/src/pkg/backup/details"

@@ -512,10 +512,14 @@ func (oc *Collection) populateDriveItem(
 			metaSuffix = metadata.DirMetaFileSuffix
 		}

-		// Fetch metadata for the file
+		// Fetch metadata for the item
 		itemMeta, itemMetaSize, err = downloadItemMeta(ctx, oc.handler, oc.driveID, item)
 		if err != nil {
-			errs.AddRecoverable(ctx, clues.Wrap(err, "getting item metadata").Label(fault.LabelForceNoBackupCreation))
+			// Skip deleted items
+			if !clues.HasLabel(err, graph.LabelStatus(http.StatusNotFound)) && !graph.IsErrDeletedInFlight(err) {
+				errs.AddRecoverable(ctx, clues.Wrap(err, "getting item metadata").Label(fault.LabelForceNoBackupCreation))
+			}

 			return
 		}

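The populateDriveItem hunk above makes the metadata fetch tolerant of items deleted while the backup runs: a 404 status label or a deleted-in-flight marker is skipped silently, while any other error still lands on the fault bus. Reduced to a standalone sketch (the label helpers below stand in for clues/graph and are simplified, not the real API):

    package main

    import (
    	"errors"
    	"fmt"
    	"net/http"
    )

    // errDeletedInFlight stands in for graph.IsErrDeletedInFlight.
    var errDeletedInFlight = errors.New("deleted in flight")

    type statusErr struct{ code int }

    func (e statusErr) Error() string { return fmt.Sprintf("status %d", e.code) }

    // statusOf mimics a clues.HasLabel(err, graph.LabelStatus(...)) check.
    func statusOf(err error) int {
    	var se statusErr
    	if errors.As(err, &se) {
    		return se.code
    	}
    	return 0
    }

    // shouldRecord mirrors the new guard: only report errors that are neither
    // 404s nor deletions that raced the backup.
    func shouldRecord(err error) bool {
    	return statusOf(err) != http.StatusNotFound && !errors.Is(err, errDeletedInFlight)
    }

    func main() {
    	fmt.Println(shouldRecord(statusErr{http.StatusNotFound})) // false: skip
    	fmt.Println(shouldRecord(errDeletedInFlight))             // false: skip
    	fmt.Println(shouldRecord(errors.New("throttled")))        // true: record
    }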
--- next file (path not shown in this view) ---

@@ -1,4 +1,4 @@
-package onedrive
+package drive

 import (
 	"bytes"

@@ -20,11 +20,11 @@ import (

 	"github.com/alcionai/corso/src/internal/common/ptr"
 	"github.com/alcionai/corso/src/internal/data"
+	"github.com/alcionai/corso/src/internal/m365/collection/drive/metadata"
+	metaTD "github.com/alcionai/corso/src/internal/m365/collection/drive/metadata/testdata"
 	"github.com/alcionai/corso/src/internal/m365/graph"
-	"github.com/alcionai/corso/src/internal/m365/onedrive/metadata"
-	metaTD "github.com/alcionai/corso/src/internal/m365/onedrive/metadata/testdata"
-	"github.com/alcionai/corso/src/internal/m365/onedrive/mock"
-	odTD "github.com/alcionai/corso/src/internal/m365/onedrive/testdata"
+	"github.com/alcionai/corso/src/internal/m365/service/onedrive/mock"
+	odTD "github.com/alcionai/corso/src/internal/m365/service/onedrive/testdata"
 	"github.com/alcionai/corso/src/internal/m365/support"
 	"github.com/alcionai/corso/src/internal/tester"
 	"github.com/alcionai/corso/src/pkg/backup/details"

--- next file (path not shown in this view) ---

@@ -1,4 +1,4 @@
-package onedrive
+package drive

 import (
 	"context"

@@ -14,9 +14,9 @@ import (
 	"github.com/alcionai/corso/src/internal/common/prefixmatcher"
 	"github.com/alcionai/corso/src/internal/common/ptr"
 	"github.com/alcionai/corso/src/internal/data"
+	"github.com/alcionai/corso/src/internal/m365/collection/drive/metadata"
 	"github.com/alcionai/corso/src/internal/m365/graph"
-	odConsts "github.com/alcionai/corso/src/internal/m365/onedrive/consts"
-	"github.com/alcionai/corso/src/internal/m365/onedrive/metadata"
+	odConsts "github.com/alcionai/corso/src/internal/m365/service/onedrive/consts"
 	"github.com/alcionai/corso/src/internal/m365/support"
 	"github.com/alcionai/corso/src/internal/observe"
 	"github.com/alcionai/corso/src/pkg/control"

--- next file (path not shown in this view) ---

@@ -1,4 +1,4 @@
-package onedrive
+package drive

 import (
 	"context"

@@ -17,10 +17,10 @@ import (
 	"github.com/alcionai/corso/src/internal/common/prefixmatcher"
 	pmMock "github.com/alcionai/corso/src/internal/common/prefixmatcher/mock"
 	"github.com/alcionai/corso/src/internal/data"
+	"github.com/alcionai/corso/src/internal/m365/collection/drive/metadata"
 	"github.com/alcionai/corso/src/internal/m365/graph"
-	odConsts "github.com/alcionai/corso/src/internal/m365/onedrive/consts"
-	"github.com/alcionai/corso/src/internal/m365/onedrive/metadata"
-	"github.com/alcionai/corso/src/internal/m365/onedrive/mock"
+	odConsts "github.com/alcionai/corso/src/internal/m365/service/onedrive/consts"
+	"github.com/alcionai/corso/src/internal/m365/service/onedrive/mock"
 	"github.com/alcionai/corso/src/internal/m365/support"
 	"github.com/alcionai/corso/src/internal/tester"
 	"github.com/alcionai/corso/src/pkg/control"

--- next file (path not shown in this view) ---

@@ -1,4 +1,4 @@
-package onedrive
+package drive

 import (
 	"github.com/microsoftgraph/msgraph-sdk-go/models"

--- next file (path not shown in this view) ---

@@ -1,4 +1,4 @@
-package onedrive
+package drive

 import (
 	"context"

--- next file (path not shown in this view) ---

@@ -1,4 +1,4 @@
-package onedrive
+package drive

 import (
 	"testing"

--- next file (path not shown in this view) ---

@@ -1,4 +1,4 @@
-package onedrive
+package drive

 import (
 	"bytes"

@@ -8,14 +8,21 @@ import (

 	"github.com/alcionai/clues"
 	"github.com/microsoftgraph/msgraph-sdk-go/models"
+	"golang.org/x/exp/maps"

 	"github.com/alcionai/corso/src/internal/common/ptr"
+	"github.com/alcionai/corso/src/internal/common/readers"
 	"github.com/alcionai/corso/src/internal/common/str"
+	"github.com/alcionai/corso/src/internal/m365/collection/drive/metadata"
 	"github.com/alcionai/corso/src/internal/m365/graph"
-	"github.com/alcionai/corso/src/internal/m365/onedrive/metadata"
 	"github.com/alcionai/corso/src/pkg/services/m365/api"
 )

+const (
+	acceptHeaderKey   = "Accept"
+	acceptHeaderValue = "*/*"
+)
+
 // downloadUrlKeys is used to find the download URL in a DriveItem response.
 var downloadURLKeys = []string{
 	"@microsoft.graph.downloadUrl",

@@ -59,25 +66,42 @@ func downloadItem(
 	return rc, nil
 }

-func downloadFile(
-	ctx context.Context,
-	ag api.Getter,
-	url string,
-) (io.ReadCloser, error) {
-	if len(url) == 0 {
-		return nil, clues.New("empty file url")
-	}
-
-	resp, err := ag.Get(ctx, url, nil)
+type downloadWithRetries struct {
+	getter api.Getter
+	url    string
+}
+
+func (dg *downloadWithRetries) SupportsRange() bool {
+	return true
+}
+
+func (dg *downloadWithRetries) Get(
+	ctx context.Context,
+	additionalHeaders map[string]string,
+) (io.ReadCloser, error) {
+	headers := maps.Clone(additionalHeaders)
+	// Set the accept header like curl does. Local testing showed range headers
+	// wouldn't work without it (get 416 responses instead of 206).
+	headers[acceptHeaderKey] = acceptHeaderValue
+
+	resp, err := dg.getter.Get(ctx, dg.url, headers)
 	if err != nil {
 		return nil, clues.Wrap(err, "getting file")
 	}

 	if graph.IsMalwareResp(ctx, resp) {
+		if resp != nil && resp.Body != nil {
+			resp.Body.Close()
+		}
+
 		return nil, clues.New("malware detected").Label(graph.LabelsMalware)
 	}

 	if resp != nil && (resp.StatusCode/100) != 2 {
+		if resp.Body != nil {
+			resp.Body.Close()
+		}
+
 		// upstream error checks can compare the status with
 		// clues.HasLabel(err, graph.LabelStatus(http.KnownStatusCode))
 		return nil, clues.

@@ -88,6 +112,25 @@ func downloadFile(
 	return resp.Body, nil
 }

+func downloadFile(
+	ctx context.Context,
+	ag api.Getter,
+	url string,
+) (io.ReadCloser, error) {
+	if len(url) == 0 {
+		return nil, clues.New("empty file url").WithClues(ctx)
+	}
+
+	rc, err := readers.NewResetRetryHandler(
+		ctx,
+		&downloadWithRetries{
+			getter: ag,
+			url:    url,
+		})
+
+	return rc, clues.Stack(err).OrNil()
+}
+
 func downloadItemMeta(
 	ctx context.Context,
 	gip GetItemPermissioner,
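The two hunks above are the heart of this file's change: the one-shot downloadFile becomes a downloadWithRetries getter handed to readers.NewResetRetryHandler, which (since SupportsRange() reports true) can re-issue the request and resume mid-stream, and Accept: */* is pinned because, per the in-diff comment, ranged requests otherwise answered 416 instead of 206. Below is a self-contained sketch of the same resume-on-reset idea without corso's readers package; the retry and offset bookkeeping is illustrative, not the library's actual implementation:

    package download

    import (
    	"bytes"
    	"fmt"
    	"io"
    	"net/http"
    )

    // rangedGetter re-issues the request with a Range header so a retry can
    // resume where the previous body died.
    type rangedGetter struct {
    	client *http.Client
    	url    string
    }

    func (g rangedGetter) get(offset int64) (io.ReadCloser, error) {
    	req, err := http.NewRequest(http.MethodGet, g.url, nil)
    	if err != nil {
    		return nil, err
    	}

    	// Without an explicit Accept header, ranged requests were observed to
    	// fail (416 instead of 206) in the change this sketch is based on.
    	req.Header.Set("Accept", "*/*")

    	if offset > 0 {
    		req.Header.Set("Range", fmt.Sprintf("bytes=%d-", offset))
    	}

    	resp, err := g.client.Do(req)
    	if err != nil {
    		return nil, err
    	}

    	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
    		resp.Body.Close()
    		return nil, fmt.Errorf("unexpected status %d", resp.StatusCode)
    	}

    	return resp.Body, nil
    }

    // ReadAllWithResume drains the file, restarting from the last good offset
    // on transient read errors, for up to maxRetries extra attempts.
    func ReadAllWithResume(g rangedGetter, maxRetries int) ([]byte, error) {
    	var (
    		buf bytes.Buffer
    		err error
    	)

    	for attempt := 0; attempt <= maxRetries; attempt++ {
    		var rc io.ReadCloser

    		rc, err = g.get(int64(buf.Len()))
    		if err != nil {
    			continue
    		}

    		// io.Copy returns a nil error once the body hits EOF; a non-nil
    		// error here means the stream died and we retry from buf.Len().
    		_, err = io.Copy(&buf, rc)
    		rc.Close()

    		if err == nil {
    			return buf.Bytes(), nil
    		}
    	}

    	return nil, err
    }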
@@ -1,14 +1,11 @@
-package onedrive
+package drive

 import (
 	"context"
-	"strings"

-	"github.com/alcionai/clues"
 	"github.com/microsoftgraph/msgraph-sdk-go/models"
 	"golang.org/x/exp/maps"

-	"github.com/alcionai/corso/src/internal/common/ptr"
 	"github.com/alcionai/corso/src/internal/m365/graph"
 	"github.com/alcionai/corso/src/pkg/fault"
 	"github.com/alcionai/corso/src/pkg/logger"

@@ -148,105 +145,3 @@ func newItem(name string, folder bool) *models.DriveItem {

 	return itemToCreate
 }
-
-type Displayable struct {
-	models.DriveItemable
-}
-
-func (op *Displayable) GetDisplayName() *string {
-	return op.GetName()
-}
-
-// GetAllFolders returns all folders in all drives for the given user. If a
-// prefix is given, returns all folders with that prefix, regardless of if they
-// are a subfolder or top-level folder in the hierarchy.
-func GetAllFolders(
-	ctx context.Context,
-	bh BackupHandler,
-	pager api.DrivePager,
-	prefix string,
-	errs *fault.Bus,
-) ([]*Displayable, error) {
-	ds, err := api.GetAllDrives(ctx, pager, true, maxDrivesRetries)
-	if err != nil {
-		return nil, clues.Wrap(err, "getting OneDrive folders")
-	}
-
-	var (
-		folders = map[string]*Displayable{}
-		el      = errs.Local()
-	)
-
-	for _, drive := range ds {
-		if el.Failure() != nil {
-			break
-		}
-
-		var (
-			id   = ptr.Val(drive.GetId())
-			name = ptr.Val(drive.GetName())
-		)
-
-		ictx := clues.Add(ctx, "drive_id", id, "drive_name", clues.Hide(name))
-		collector := func(
-			_ context.Context,
-			_, _ string,
-			items []models.DriveItemable,
-			_ map[string]string,
-			_ map[string]string,
-			_ map[string]struct{},
-			_ map[string]map[string]string,
-			_ bool,
-			_ *fault.Bus,
-		) error {
-			for _, item := range items {
-				// Skip the root item.
-				if item.GetRoot() != nil {
-					continue
-				}
-
-				// Only selecting folders right now, not packages.
-				if item.GetFolder() == nil {
-					continue
-				}
-
-				itemID := ptr.Val(item.GetId())
-				if len(itemID) == 0 {
-					logger.Ctx(ctx).Info("folder missing ID")
-					continue
-				}
-
-				if !strings.HasPrefix(ptr.Val(item.GetName()), prefix) {
-					continue
-				}
-
-				// Add the item instead of the folder because the item has more
-				// functionality.
-				folders[itemID] = &Displayable{item}
-			}
-
-			return nil
-		}
-
-		_, _, _, err = collectItems(
-			ictx,
-			bh.NewItemPager(id, "", nil),
-			id,
-			name,
-			collector,
-			map[string]string{},
-			"",
-			errs)
-		if err != nil {
-			el.AddRecoverable(ctx, clues.Wrap(err, "enumerating items in drive"))
-		}
-	}
-
-	res := make([]*Displayable, 0, len(folders))
-
-	for _, f := range folders {
-		res = append(res, f)
-	}
-
-	return res, el.Failure()
-}

--- next file (path not shown in this view) ---

@@ -1,8 +1,7 @@
-package onedrive
+package drive

 import (
 	"context"
-	"strings"
 	"testing"

@@ -13,17 +12,13 @@ import (
 	"github.com/stretchr/testify/require"
 	"github.com/stretchr/testify/suite"

-	"github.com/alcionai/corso/src/internal/common/dttm"
 	"github.com/alcionai/corso/src/internal/common/prefixmatcher"
-	"github.com/alcionai/corso/src/internal/common/ptr"
 	"github.com/alcionai/corso/src/internal/m365/graph"
 	"github.com/alcionai/corso/src/internal/tester"
 	"github.com/alcionai/corso/src/internal/tester/tconfig"
 	"github.com/alcionai/corso/src/pkg/account"
 	"github.com/alcionai/corso/src/pkg/control"
 	"github.com/alcionai/corso/src/pkg/fault"
-	"github.com/alcionai/corso/src/pkg/logger"
-	"github.com/alcionai/corso/src/pkg/path"
 	"github.com/alcionai/corso/src/pkg/selectors"
 	"github.com/alcionai/corso/src/pkg/services/m365/api"
 	"github.com/alcionai/corso/src/pkg/services/m365/api/mock"

@@ -317,113 +312,6 @@ func (suite *OneDriveIntgSuite) SetupSuite() {
 	require.NoError(t, err, clues.ToCore(err))
 }
-
-func (suite *OneDriveIntgSuite) TestCreateGetDeleteFolder() {
-	t := suite.T()
-
-	ctx, flush := tester.NewContext(t)
-	defer flush()
-
-	var (
-		folderIDs      = []string{}
-		folderName1    = "Corso_Folder_Test_" + dttm.FormatNow(dttm.SafeForTesting)
-		folderElements = []string{folderName1}
-	)
-
-	pager := suite.ac.Drives().NewUserDrivePager(suite.userID, nil)
-
-	drives, err := api.GetAllDrives(ctx, pager, true, maxDrivesRetries)
-	require.NoError(t, err, clues.ToCore(err))
-	require.NotEmpty(t, drives)
-
-	// TODO: Verify the intended drive
-	driveID := ptr.Val(drives[0].GetId())
-
-	defer func() {
-		for _, id := range folderIDs {
-			ictx := clues.Add(ctx, "folder_id", id)
-
-			// deletes require unique http clients
-			// https://github.com/alcionai/corso/issues/2707
-			err := suite.ac.Drives().DeleteItem(ictx, driveID, id)
-			if err != nil {
-				logger.CtxErr(ictx, err).Errorw("deleting folder")
-			}
-		}
-	}()
-
-	rootFolder, err := suite.ac.Drives().GetRootFolder(ctx, driveID)
-	require.NoError(t, err, clues.ToCore(err))
-
-	restoreDir := path.Builder{}.Append(folderElements...)
-	drivePath := path.DrivePath{
-		DriveID: driveID,
-		Root:    "root:",
-		Folders: folderElements,
-	}
-
-	caches := NewRestoreCaches(nil)
-	caches.DriveIDToDriveInfo.Store(driveID, driveInfo{rootFolderID: ptr.Val(rootFolder.GetId())})
-
-	rh := NewRestoreHandler(suite.ac)
-
-	folderID, err := createRestoreFolders(ctx, rh, &drivePath, restoreDir, caches)
-	require.NoError(t, err, clues.ToCore(err))
-
-	folderIDs = append(folderIDs, folderID)
-
-	folderName2 := "Corso_Folder_Test_" + dttm.FormatNow(dttm.SafeForTesting)
-	restoreDir = restoreDir.Append(folderName2)
-
-	folderID, err = createRestoreFolders(ctx, rh, &drivePath, restoreDir, caches)
-	require.NoError(t, err, clues.ToCore(err))
-
-	folderIDs = append(folderIDs, folderID)
-
-	table := []struct {
-		name   string
-		prefix string
-	}{
-		{
-			name:   "NoPrefix",
-			prefix: "",
-		},
-		{
-			name:   "Prefix",
-			prefix: "Corso_Folder_Test",
-		},
-	}
-
-	for _, test := range table {
-		suite.Run(test.name, func() {
-			t := suite.T()
-			bh := itemBackupHandler{
-				suite.ac.Drives(),
-				(&selectors.OneDriveBackup{}).Folders(selectors.Any())[0],
-			}
-			pager := suite.ac.Drives().NewUserDrivePager(suite.userID, nil)
-
-			ctx, flush := tester.NewContext(t)
-			defer flush()
-
-			allFolders, err := GetAllFolders(ctx, bh, pager, test.prefix, fault.New(true))
-			require.NoError(t, err, clues.ToCore(err))
-
-			foundFolderIDs := []string{}
-
-			for _, f := range allFolders {
-				if ptr.Val(f.GetName()) == folderName1 || ptr.Val(f.GetName()) == folderName2 {
-					foundFolderIDs = append(foundFolderIDs, ptr.Val(f.GetId()))
-				}
-
-				assert.True(t, strings.HasPrefix(ptr.Val(f.GetName()), test.prefix), "folder prefix")
-			}
-
-			assert.ElementsMatch(t, folderIDs, foundFolderIDs)
-		})
-	}
-}

 func (suite *OneDriveIntgSuite) TestOneDriveNewCollections() {
 	creds, err := tconfig.NewM365Account(suite.T()).M365Config()
 	require.NoError(suite.T(), err, clues.ToCore(err))

--- next file (path not shown in this view) ---

@@ -1,4 +1,4 @@
-package onedrive
+package drive

 import (
 	"context"

@@ -10,7 +10,7 @@ import (
 	"github.com/microsoftgraph/msgraph-sdk-go/models"

 	"github.com/alcionai/corso/src/internal/common/ptr"
-	odConsts "github.com/alcionai/corso/src/internal/m365/onedrive/consts"
+	odConsts "github.com/alcionai/corso/src/internal/m365/service/onedrive/consts"
 	"github.com/alcionai/corso/src/pkg/backup/details"
 	"github.com/alcionai/corso/src/pkg/control"
 	"github.com/alcionai/corso/src/pkg/path"

@@ -29,6 +29,10 @@ type itemBackupHandler struct {
 	scope selectors.OneDriveScope
 }

+func NewItemBackupHandler(ac api.Drives, scope selectors.OneDriveScope) *itemBackupHandler {
+	return &itemBackupHandler{ac, scope}
+}
+
 func (h itemBackupHandler) Get(
 	ctx context.Context,
 	url string,
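With the handler type staying unexported in its new home, NewItemBackupHandler (added above) is how other packages now obtain one. A usage sketch, building the scope the same way the deleted integration test did (ac is an assumed api.Client value in scope):

    scope := (&selectors.OneDriveBackup{}).Folders(selectors.Any())[0]
    h := drive.NewItemBackupHandler(ac.Drives(), scope)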
--- next file (path not shown in this view) ---

@@ -1,4 +1,4 @@
-package onedrive
+package drive

 import (
 	"testing"

--- next file (path not shown in this view) ---

@@ -1,14 +1,16 @@
-package onedrive
+package drive

 import (
 	"bytes"
 	"context"
 	"io"
 	"net/http"
+	"syscall"
 	"testing"

 	"github.com/alcionai/clues"
 	"github.com/microsoftgraph/msgraph-sdk-go/models"
+	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"github.com/stretchr/testify/suite"

@@ -438,3 +440,64 @@ func (suite *ItemUnitTestSuite) TestDownloadItem() {
 		})
 	}
 }
+
+type errReader struct{}
+
+func (r errReader) Read(p []byte) (int, error) {
+	return 0, syscall.ECONNRESET
+}
+
+func (suite *ItemUnitTestSuite) TestDownloadItem_ConnectionResetErrorOnFirstRead() {
+	var (
+		callCount int
+
+		testData = []byte("test")
+		testRc   = io.NopCloser(bytes.NewReader(testData))
+		url      = "https://example.com"
+
+		itemFunc = func() models.DriveItemable {
+			di := newItem("test", false)
+			di.SetAdditionalData(map[string]any{
+				"@microsoft.graph.downloadUrl": url,
+			})
+
+			return di
+		}
+
+		GetFunc = func(ctx context.Context, url string) (*http.Response, error) {
+			defer func() {
+				callCount++
+			}()
+
+			if callCount == 0 {
+				return &http.Response{
+					StatusCode: http.StatusOK,
+					Body:       io.NopCloser(errReader{}),
+				}, nil
+			}
+
+			return &http.Response{
+				StatusCode: http.StatusOK,
+				Body:       testRc,
+			}, nil
+		}
+		errorExpected = require.NoError
+		rcExpected    = require.NotNil
+	)
+
+	t := suite.T()
+
+	ctx, flush := tester.NewContext(t)
+	defer flush()
+
+	mg := mockGetter{
+		GetFunc: GetFunc,
+	}
+	rc, err := downloadItem(ctx, mg, itemFunc())
+	errorExpected(t, err, clues.ToCore(err))
+	rcExpected(t, rc)
+
+	data, err := io.ReadAll(rc)
+	require.NoError(t, err, clues.ToCore(err))
+	assert.Equal(t, testData, data)
+}
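The new test above fakes a connection reset by serving a body whose Read always fails with syscall.ECONNRESET, then asserts downloadItem still hands back the full payload once the retry succeeds. The same helper composes with io.MultiReader when the failure should occur only after some bytes have arrived, which would exercise resume-from-offset rather than retry-from-zero (a sketch, not part of the change):

    // Sketch: a body that yields "par" and then dies with ECONNRESET.
    // errReader is the helper added in the hunk above.
    body := io.NopCloser(io.MultiReader(
    	strings.NewReader("par"),
    	errReader{},
    ))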
--- next file (path not shown in this view) ---

@@ -1,4 +1,4 @@
-package sharepoint
+package drive

 import (
 	"context"

@@ -9,8 +9,7 @@ import (
 	"github.com/microsoftgraph/msgraph-sdk-go/models"

 	"github.com/alcionai/corso/src/internal/common/ptr"
-	"github.com/alcionai/corso/src/internal/m365/onedrive"
-	odConsts "github.com/alcionai/corso/src/internal/m365/onedrive/consts"
+	odConsts "github.com/alcionai/corso/src/internal/m365/service/onedrive/consts"
 	"github.com/alcionai/corso/src/pkg/backup/details"
 	"github.com/alcionai/corso/src/pkg/control"
 	"github.com/alcionai/corso/src/pkg/path"

@@ -18,13 +17,17 @@ import (
 	"github.com/alcionai/corso/src/pkg/services/m365/api"
 )

-var _ onedrive.BackupHandler = &libraryBackupHandler{}
+var _ BackupHandler = &libraryBackupHandler{}

 type libraryBackupHandler struct {
 	ac    api.Drives
 	scope selectors.SharePointScope
 }

+func NewLibraryBackupHandler(ac api.Drives, scope selectors.SharePointScope) libraryBackupHandler {
+	return libraryBackupHandler{ac, scope}
+}
+
 func (h libraryBackupHandler) Get(
 	ctx context.Context,
 	url string,

@@ -78,7 +81,7 @@ func (h libraryBackupHandler) AugmentItemInfo(
 	size int64,
 	parentPath *path.Builder,
 ) details.ItemInfo {
-	return augmentItemInfo(dii, item, size, parentPath)
+	return augmentLibraryItemInfo(dii, item, size, parentPath)
 }

 // constructWebURL is a helper function for recreating the webURL

@@ -154,12 +157,16 @@ func (h libraryBackupHandler) IncludesDir(dir string) bool {
 // Restore
 // ---------------------------------------------------------------------------

-var _ onedrive.RestoreHandler = &libraryRestoreHandler{}
+var _ RestoreHandler = &libraryRestoreHandler{}

 type libraryRestoreHandler struct {
 	ac api.Client
 }

+func NewLibraryRestoreHandler(ac api.Client) libraryRestoreHandler {
+	return libraryRestoreHandler{ac}
+}
+
 func (h libraryRestoreHandler) PostDrive(
 	ctx context.Context,
 	siteID, driveName string,

@@ -167,10 +174,6 @@ func (h libraryRestoreHandler) PostDrive(
 	return h.ac.Lists().PostDrive(ctx, siteID, driveName)
 }

-func NewRestoreHandler(ac api.Client) *libraryRestoreHandler {
-	return &libraryRestoreHandler{ac}
-}
-
 func (h libraryRestoreHandler) NewDrivePager(
 	resourceOwner string,
 	fields []string,
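Two related constructor moves above: NewLibraryBackupHandler and NewLibraryRestoreHandler are added for callers outside the merged package, while the old NewRestoreHandler is dropped; the restore constructor now returns the value type rather than a pointer, matching the value receivers on libraryRestoreHandler's methods. Usage sketch (ac and siteID are assumed to be in scope; NewDrivePager's parameters come from the hunk above):

    rh := drive.NewLibraryRestoreHandler(ac)
    pager := rh.NewDrivePager(siteID, nil)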
@@ -184,7 +187,7 @@ func (h libraryRestoreHandler) AugmentItemInfo(
 	size int64,
 	parentPath *path.Builder,
 ) details.ItemInfo {
-	return augmentItemInfo(dii, item, size, parentPath)
+	return augmentLibraryItemInfo(dii, item, size, parentPath)
 }

 func (h libraryRestoreHandler) DeleteItem(

@@ -263,7 +266,7 @@ func (h libraryRestoreHandler) GetRootFolder(
 // Common
 // ---------------------------------------------------------------------------

-func augmentItemInfo(
+func augmentLibraryItemInfo(
 	dii details.ItemInfo,
 	item models.DriveItemable,
 	size int64,

--- next file (path not shown in this view) ---

@@ -1,4 +1,4 @@
-package sharepoint
+package drive

 import (
 	"testing"

@@ -6,7 +6,7 @@ import (
 	"github.com/microsoftgraph/msgraph-sdk-go/models"
 	"github.com/stretchr/testify/assert"

-	"github.com/alcionai/corso/src/internal/m365/onedrive/metadata"
+	"github.com/alcionai/corso/src/internal/m365/collection/drive/metadata"
 )

 func AssertMetadataEqual(t *testing.T, expect, got metadata.Metadata) {

--- next file (path not shown in this view) ---

@@ -1,4 +1,4 @@
-package onedrive
+package drive

 import (
 	"context"

@@ -11,7 +11,7 @@ import (

 	"github.com/alcionai/corso/src/internal/common/ptr"
 	"github.com/alcionai/corso/src/internal/data"
-	"github.com/alcionai/corso/src/internal/m365/onedrive/metadata"
+	"github.com/alcionai/corso/src/internal/m365/collection/drive/metadata"
 	"github.com/alcionai/corso/src/internal/version"
 	"github.com/alcionai/corso/src/pkg/logger"
 	"github.com/alcionai/corso/src/pkg/path"

@@ -76,7 +76,7 @@ func getCollectionMetadata(
 		metaName = metadata.DirMetaFileSuffix
 	}

-	meta, err := fetchAndReadMetadata(ctx, dc, metaName)
+	meta, err := FetchAndReadMetadata(ctx, dc, metaName)
 	if err != nil {
 		return metadata.Metadata{}, clues.Wrap(err, "collection metadata")
 	}

--- next file (path not shown in this view) ---

@@ -1,4 +1,4 @@
-package onedrive
+package drive

 import (
 	"strings"

@@ -9,8 +9,8 @@ import (
 	"github.com/stretchr/testify/require"
 	"github.com/stretchr/testify/suite"

-	odConsts "github.com/alcionai/corso/src/internal/m365/onedrive/consts"
-	"github.com/alcionai/corso/src/internal/m365/onedrive/metadata"
+	"github.com/alcionai/corso/src/internal/m365/collection/drive/metadata"
+	odConsts "github.com/alcionai/corso/src/internal/m365/service/onedrive/consts"
 	"github.com/alcionai/corso/src/internal/tester"
 	"github.com/alcionai/corso/src/pkg/path"
 )

--- next file (path not shown in this view) ---

@@ -1,4 +1,4 @@
-package onedrive
+package drive

 import (
 	"context"

@@ -6,7 +6,6 @@ import (
 	"fmt"
 	"io"
 	"runtime/trace"
-	"sort"
 	"strings"
 	"sync"
 	"sync/atomic"

@@ -15,12 +14,11 @@ import (
 	"github.com/microsoftgraph/msgraph-sdk-go/models"
 	"github.com/pkg/errors"

-	"github.com/alcionai/corso/src/internal/common/idname"
 	"github.com/alcionai/corso/src/internal/common/ptr"
 	"github.com/alcionai/corso/src/internal/data"
 	"github.com/alcionai/corso/src/internal/diagnostics"
+	"github.com/alcionai/corso/src/internal/m365/collection/drive/metadata"
 	"github.com/alcionai/corso/src/internal/m365/graph"
-	"github.com/alcionai/corso/src/internal/m365/onedrive/metadata"
 	"github.com/alcionai/corso/src/internal/m365/support"
 	"github.com/alcionai/corso/src/internal/observe"
 	"github.com/alcionai/corso/src/internal/operations/inject"

@@ -39,81 +37,6 @@ const (
 	maxUploadRetries = 3
 )

-// ConsumeRestoreCollections will restore the specified data collections into OneDrive
-func ConsumeRestoreCollections(
-	ctx context.Context,
-	rh RestoreHandler,
-	rcc inject.RestoreConsumerConfig,
-	backupDriveIDNames idname.Cacher,
-	dcs []data.RestoreCollection,
-	deets *details.Builder,
-	errs *fault.Bus,
-	ctr *count.Bus,
-) (*support.ControllerOperationStatus, error) {
-	var (
-		restoreMetrics    support.CollectionMetrics
-		el                = errs.Local()
-		caches            = NewRestoreCaches(backupDriveIDNames)
-		fallbackDriveName = rcc.RestoreConfig.Location
-	)
-
-	ctx = clues.Add(ctx, "backup_version", rcc.BackupVersion)
-
-	err := caches.Populate(ctx, rh, rcc.ProtectedResource.ID())
-	if err != nil {
-		return nil, clues.Wrap(err, "initializing restore caches")
-	}
-
-	// Reorder collections so that the parents directories are created
-	// before the child directories; a requirement for permissions.
-	data.SortRestoreCollections(dcs)
-
-	// Iterate through the data collections and restore the contents of each
-	for _, dc := range dcs {
-		if el.Failure() != nil {
-			break
-		}
-
-		var (
-			err     error
-			metrics support.CollectionMetrics
-			ictx    = clues.Add(
-				ctx,
-				"category", dc.FullPath().Category(),
-				"full_path", dc.FullPath())
-		)
-
-		metrics, err = RestoreCollection(
-			ictx,
-			rh,
-			rcc,
-			dc,
-			caches,
-			deets,
-			fallbackDriveName,
-			errs,
-			ctr.Local())
-		if err != nil {
-			el.AddRecoverable(ctx, err)
-		}
-
-		restoreMetrics = support.CombineMetrics(restoreMetrics, metrics)
-
-		if errors.Is(err, context.Canceled) {
-			break
-		}
-	}
-
-	status := support.CreateStatus(
-		ctx,
-		support.Restore,
-		len(dcs),
-		restoreMetrics,
-		rcc.RestoreConfig.Location)
-
-	return status, el.Failure()
-}
-
 // RestoreCollection handles restoration of an individual collection.
 // returns:
 //   - the collection's item and byte count metrics

@@ -518,7 +441,7 @@ func restoreV1File(
 	// Fetch item permissions from the collection and restore them.
 	metaName := trimmedName + metadata.MetaFileSuffix

-	meta, err := fetchAndReadMetadata(ctx, fibn, metaName)
+	meta, err := FetchAndReadMetadata(ctx, fibn, metaName)
 	if err != nil {
 		return details.ItemInfo{}, clues.Wrap(err, "restoring file")
 	}

@@ -556,7 +479,7 @@ func restoreV6File(
 	// Get metadata file so we can determine the file name.
 	metaName := trimmedName + metadata.MetaFileSuffix

-	meta, err := fetchAndReadMetadata(ctx, fibn, metaName)
+	meta, err := FetchAndReadMetadata(ctx, fibn, metaName)
 	if err != nil {
 		return details.ItemInfo{}, clues.Wrap(err, "restoring file")
 	}

@@ -932,7 +855,7 @@ func restoreFile(
 	return ptr.Val(newItem.GetId()), dii, nil
 }

-func fetchAndReadMetadata(
+func FetchAndReadMetadata(
 	ctx context.Context,
 	fibn data.FetchItemByNamer,
 	metaName string,

@@ -974,132 +897,6 @@ func getMetadata(metar io.ReadCloser) (metadata.Metadata, error) {
 	return meta, nil
 }
-
-// Augment restore path to add extra files(meta) needed for restore as
-// well as do any other ordering operations on the paths
-//
-// Only accepts StoragePath/RestorePath pairs where the RestorePath is
-// at least as long as the StoragePath. If the RestorePath is longer than the
-// StoragePath then the first few (closest to the root) directories will use
-// default permissions during restore.
-func AugmentRestorePaths(
-	backupVersion int,
-	paths []path.RestorePaths,
-) ([]path.RestorePaths, error) {
-	// Keyed by each value's StoragePath.String() which corresponds to the RepoRef
-	// of the directory.
-	colPaths := map[string]path.RestorePaths{}
-
-	for _, p := range paths {
-		first := true
-
-		for {
-			sp, err := p.StoragePath.Dir()
-			if err != nil {
-				return nil, err
-			}
-
-			drivePath, err := path.ToDrivePath(sp)
-			if err != nil {
-				return nil, err
-			}
-
-			if len(drivePath.Folders) == 0 {
-				break
-			}
-
-			if len(p.RestorePath.Elements()) < len(sp.Elements()) {
-				return nil, clues.New("restorePath shorter than storagePath").
-					With("restore_path", p.RestorePath, "storage_path", sp)
-			}
-
-			rp := p.RestorePath
-
-			// Make sure the RestorePath always points to the level of the current
-			// collection. We need to track if it's the first iteration because the
-			// RestorePath starts out at the collection level to begin with.
-			if !first {
-				rp, err = p.RestorePath.Dir()
-				if err != nil {
-					return nil, err
-				}
-			}
-
-			paths := path.RestorePaths{
-				StoragePath: sp,
-				RestorePath: rp,
-			}
-
-			colPaths[sp.String()] = paths
-			p = paths
-			first = false
-		}
-	}
-
-	// Adds dirmeta files as we need to make sure collections for all
-	// directories involved are created and not just the final one. No
-	// need to add `.meta` files (metadata for files) as they will
-	// anyways be looked up automatically.
-	// TODO: Stop populating .dirmeta for newer versions once we can
-	// get files from parent directory via `Fetch` in a collection.
-	// As of now look up metadata for parent directories from a
-	// collection.
-	for _, p := range colPaths {
-		el := p.StoragePath.Elements()
-
-		if backupVersion >= version.OneDrive6NameInMeta {
-			mPath, err := p.StoragePath.AppendItem(".dirmeta")
-			if err != nil {
-				return nil, err
-			}
-
-			paths = append(
-				paths,
-				path.RestorePaths{StoragePath: mPath, RestorePath: p.RestorePath})
-		} else if backupVersion >= version.OneDrive4DirIncludesPermissions {
-			mPath, err := p.StoragePath.AppendItem(el.Last() + ".dirmeta")
-			if err != nil {
-				return nil, err
-			}
-
-			paths = append(
-				paths,
-				path.RestorePaths{StoragePath: mPath, RestorePath: p.RestorePath})
-		} else if backupVersion >= version.OneDrive1DataAndMetaFiles {
-			pp, err := p.StoragePath.Dir()
-			if err != nil {
-				return nil, err
-			}
-
-			mPath, err := pp.AppendItem(el.Last() + ".dirmeta")
-			if err != nil {
-				return nil, err
-			}
-
-			prp, err := p.RestorePath.Dir()
-			if err != nil {
-				return nil, err
-			}
-
-			paths = append(
-				paths,
-				path.RestorePaths{StoragePath: mPath, RestorePath: prp})
-		}
-	}
-
-	// This sort is done primarily to order `.meta` files after `.data`
-	// files. This is only a necessity for OneDrive as we are storing
-	// metadata for files/folders in separate meta files and we the
-	// data to be restored before we can restore the metadata.
-	//
-	// This sorting assumes stuff in the same StoragePath directory end up in the
-	// same RestorePath collection.
-	sort.Slice(paths, func(i, j int) bool {
-		return paths[i].StoragePath.String() < paths[j].StoragePath.String()
-	})
-
-	return paths, nil
-}

 type PostDriveAndGetRootFolderer interface {
 	PostDriver
 	GetRootFolderer
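The bulk deletion above removes AugmentRestorePaths from this file, and the last hunks of this diff remove its unit tests; the destination of the logic is not shown in this view, so it is presumably relocated as part of the package split rather than abandoned. The deleted "nested folders v6" test case doubles as a worked example of what the function did: a .dirmeta entry is injected per directory, and sorting puts each .dirmeta ahead of the files beside it so metadata restores after the data it describes:

    // Quoted from the v6 test case removed further down:
    input := []string{
    	"folder/file.txt.data",
    	"folder/folder2/file.txt.data",
    }
    want := []string{
    	"folder/.dirmeta",
    	"folder/file.txt.data",
    	"folder/folder2/.dirmeta",
    	"folder/folder2/file.txt.data",
    }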
--- next file (path not shown in this view) ---

@@ -1,4 +1,4 @@
-package onedrive
+package drive

 import (
 	"context"

@@ -10,8 +10,8 @@ import (

 	"github.com/alcionai/corso/src/internal/common/idname"
 	"github.com/alcionai/corso/src/internal/common/ptr"
+	"github.com/alcionai/corso/src/internal/m365/collection/drive/metadata"
 	"github.com/alcionai/corso/src/internal/m365/graph"
-	"github.com/alcionai/corso/src/internal/m365/onedrive/metadata"
 	"github.com/alcionai/corso/src/pkg/services/m365/api"
 )

--- next file (path not shown in this view) ---

@@ -1,4 +1,4 @@
-package onedrive
+package drive

 import (
 	"context"

@@ -14,8 +14,8 @@ import (
 	"github.com/alcionai/corso/src/internal/common/idname"
 	"github.com/alcionai/corso/src/internal/common/ptr"
 	"github.com/alcionai/corso/src/internal/m365/graph"
-	odConsts "github.com/alcionai/corso/src/internal/m365/onedrive/consts"
-	"github.com/alcionai/corso/src/internal/m365/onedrive/mock"
+	odConsts "github.com/alcionai/corso/src/internal/m365/service/onedrive/consts"
+	"github.com/alcionai/corso/src/internal/m365/service/onedrive/mock"
 	"github.com/alcionai/corso/src/internal/operations/inject"
 	"github.com/alcionai/corso/src/internal/tester"
 	"github.com/alcionai/corso/src/internal/version"

@@ -34,301 +34,6 @@ func TestRestoreUnitSuite(t *testing.T) {
 	suite.Run(t, &RestoreUnitSuite{Suite: tester.NewUnitSuite(t)})
 }
-
-func (suite *RestoreUnitSuite) TestAugmentRestorePaths() {
-	// Adding a simple test here so that we can be sure that this
-	// function gets updated whenever we add a new version.
-	require.LessOrEqual(suite.T(), version.Backup, version.All8MigrateUserPNToID, "unsupported backup version")
-
-	table := []struct {
-		name    string
-		version int
-		input   []string
-		output  []string
-	}{
-		{
-			name:    "no change v0",
-			version: 0,
-			input: []string{
-				"file.txt.data",
-				"file.txt", // v0 does not have `.data`
-			},
-			output: []string{
-				"file.txt", // ordering artifact of sorting
-				"file.txt.data",
-			},
-		},
-		{
-			name:    "one folder v0",
-			version: 0,
-			input: []string{
-				"folder/file.txt.data",
-				"folder/file.txt",
-			},
-			output: []string{
-				"folder/file.txt",
-				"folder/file.txt.data",
-			},
-		},
-		{
-			name:    "no change v1",
-			version: version.OneDrive1DataAndMetaFiles,
-			input: []string{
-				"file.txt.data",
-			},
-			output: []string{
-				"file.txt.data",
-			},
-		},
-		{
-			name:    "one folder v1",
-			version: version.OneDrive1DataAndMetaFiles,
-			input: []string{
-				"folder/file.txt.data",
-			},
-			output: []string{
-				"folder.dirmeta",
-				"folder/file.txt.data",
-			},
-		},
-		{
-			name:    "nested folders v1",
-			version: version.OneDrive1DataAndMetaFiles,
-			input: []string{
-				"folder/file.txt.data",
-				"folder/folder2/file.txt.data",
-			},
-			output: []string{
-				"folder.dirmeta",
-				"folder/file.txt.data",
-				"folder/folder2.dirmeta",
-				"folder/folder2/file.txt.data",
-			},
-		},
-		{
-			name:    "no change v4",
-			version: version.OneDrive4DirIncludesPermissions,
-			input: []string{
-				"file.txt.data",
-			},
-			output: []string{
-				"file.txt.data",
-			},
-		},
-		{
-			name:    "one folder v4",
-			version: version.OneDrive4DirIncludesPermissions,
-			input: []string{
-				"folder/file.txt.data",
-			},
-			output: []string{
-				"folder/file.txt.data",
-				"folder/folder.dirmeta",
-			},
-		},
-		{
-			name:    "nested folders v4",
-			version: version.OneDrive4DirIncludesPermissions,
-			input: []string{
-				"folder/file.txt.data",
-				"folder/folder2/file.txt.data",
-			},
-			output: []string{
-				"folder/file.txt.data",
-				"folder/folder.dirmeta",
-				"folder/folder2/file.txt.data",
-				"folder/folder2/folder2.dirmeta",
-			},
-		},
-		{
-			name:    "no change v6",
-			version: version.OneDrive6NameInMeta,
-			input: []string{
-				"file.txt.data",
-			},
-			output: []string{
-				"file.txt.data",
-			},
-		},
-		{
-			name:    "one folder v6",
-			version: version.OneDrive6NameInMeta,
-			input: []string{
-				"folder/file.txt.data",
-			},
-			output: []string{
-				"folder/.dirmeta",
-				"folder/file.txt.data",
-			},
-		},
-		{
-			name:    "nested folders v6",
-			version: version.OneDrive6NameInMeta,
-			input: []string{
-				"folder/file.txt.data",
-				"folder/folder2/file.txt.data",
-			},
-			output: []string{
-				"folder/.dirmeta",
-				"folder/file.txt.data",
-				"folder/folder2/.dirmeta",
-				"folder/folder2/file.txt.data",
-			},
-		},
-	}
-
-	for _, test := range table {
-		suite.Run(test.name, func() {
-			t := suite.T()
-
-			_, flush := tester.NewContext(t)
-			defer flush()
-
-			base := "id/onedrive/user/files/drives/driveID/root:/"
-
-			inPaths := []path.RestorePaths{}
-			for _, ps := range test.input {
-				p, err := path.FromDataLayerPath(base+ps, true)
-				require.NoError(t, err, "creating path", clues.ToCore(err))
-
-				pd, err := p.Dir()
-				require.NoError(t, err, "creating collection path", clues.ToCore(err))
-
-				inPaths = append(
-					inPaths,
-					path.RestorePaths{StoragePath: p, RestorePath: pd})
-			}
-
-			outPaths := []path.RestorePaths{}
-			for _, ps := range test.output {
-				p, err := path.FromDataLayerPath(base+ps, true)
-				require.NoError(t, err, "creating path", clues.ToCore(err))
-
-				pd, err := p.Dir()
-				require.NoError(t, err, "creating collection path", clues.ToCore(err))
-
-				outPaths = append(
-					outPaths,
-					path.RestorePaths{StoragePath: p, RestorePath: pd})
-			}
-
-			actual, err := AugmentRestorePaths(test.version, inPaths)
-			require.NoError(t, err, "augmenting paths", clues.ToCore(err))
-
-			// Ordering of paths matter here as we need dirmeta files
-			// to show up before file in dir
-			assert.Equal(t, outPaths, actual, "augmented paths")
-		})
-	}
-}
-
-// TestAugmentRestorePaths_DifferentRestorePath tests that RestorePath
-// substitution works properly. Since it's only possible for future backup
-// versions to need restore path substitution (i.e. due to storing folders by
-// ID instead of name) this is only tested against the most recent backup
-// version at the moment.
-func (suite *RestoreUnitSuite) TestAugmentRestorePaths_DifferentRestorePath() {
-	// Adding a simple test here so that we can be sure that this
-	// function gets updated whenever we add a new version.
-	require.LessOrEqual(suite.T(), version.Backup, version.All8MigrateUserPNToID, "unsupported backup version")
-
-	type pathPair struct {
-		storage string
-		restore string
-	}
-
-	table := []struct {
-		name     string
-		version  int
-		input    []pathPair
-		output   []pathPair
-		errCheck assert.ErrorAssertionFunc
-	}{
-		{
-			name:    "nested folders",
-			version: version.Backup,
-			input: []pathPair{
-				{storage: "folder-id/file.txt.data", restore: "folder"},
-				{storage: "folder-id/folder2-id/file.txt.data", restore: "folder/folder2"},
-			},
-			output: []pathPair{
-				{storage: "folder-id/.dirmeta", restore: "folder"},
-				{storage: "folder-id/file.txt.data", restore: "folder"},
-				{storage: "folder-id/folder2-id/.dirmeta", restore: "folder/folder2"},
-				{storage: "folder-id/folder2-id/file.txt.data", restore: "folder/folder2"},
-			},
-			errCheck: assert.NoError,
-		},
-		{
-			name:    "restore path longer one folder",
-			version: version.Backup,
-			input: []pathPair{
-				{storage: "folder-id/file.txt.data", restore: "corso_restore/folder"},
-			},
-			output: []pathPair{
-				{storage: "folder-id/.dirmeta", restore: "corso_restore/folder"},
-				{storage: "folder-id/file.txt.data", restore: "corso_restore/folder"},
|
|
||||||
},
|
|
||||||
errCheck: assert.NoError,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "restore path shorter one folder",
|
|
||||||
version: version.Backup,
|
|
||||||
input: []pathPair{
|
|
||||||
{storage: "folder-id/file.txt.data", restore: ""},
|
|
||||||
},
|
|
||||||
errCheck: assert.Error,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, test := range table {
|
|
||||||
suite.Run(test.name, func() {
|
|
||||||
t := suite.T()
|
|
||||||
|
|
||||||
_, flush := tester.NewContext(t)
|
|
||||||
defer flush()
|
|
||||||
|
|
||||||
base := "id/onedrive/user/files/drives/driveID/root:/"
|
|
||||||
|
|
||||||
inPaths := []path.RestorePaths{}
|
|
||||||
for _, ps := range test.input {
|
|
||||||
p, err := path.FromDataLayerPath(base+ps.storage, true)
|
|
||||||
require.NoError(t, err, "creating path", clues.ToCore(err))
|
|
||||||
|
|
||||||
r, err := path.FromDataLayerPath(base+ps.restore, false)
|
|
||||||
require.NoError(t, err, "creating path", clues.ToCore(err))
|
|
||||||
|
|
||||||
inPaths = append(
|
|
||||||
inPaths,
|
|
||||||
path.RestorePaths{StoragePath: p, RestorePath: r})
|
|
||||||
}
|
|
||||||
|
|
||||||
outPaths := []path.RestorePaths{}
|
|
||||||
for _, ps := range test.output {
|
|
||||||
p, err := path.FromDataLayerPath(base+ps.storage, true)
|
|
||||||
require.NoError(t, err, "creating path", clues.ToCore(err))
|
|
||||||
|
|
||||||
r, err := path.FromDataLayerPath(base+ps.restore, false)
|
|
||||||
require.NoError(t, err, "creating path", clues.ToCore(err))
|
|
||||||
|
|
||||||
outPaths = append(
|
|
||||||
outPaths,
|
|
||||||
path.RestorePaths{StoragePath: p, RestorePath: r})
|
|
||||||
}
|
|
||||||
|
|
||||||
actual, err := AugmentRestorePaths(test.version, inPaths)
|
|
||||||
test.errCheck(t, err, "augmenting paths", clues.ToCore(err))
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ordering of paths matter here as we need dirmeta files
|
|
||||||
// to show up before file in dir
|
|
||||||
assert.Equal(t, outPaths, actual, "augmented paths")
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (suite *RestoreUnitSuite) TestRestoreItem_collisionHandling() {
|
func (suite *RestoreUnitSuite) TestRestoreItem_collisionHandling() {
|
||||||
const mndiID = "mndi-id"
|
const mndiID = "mndi-id"
|
||||||
|
|
||||||
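The tables above pin down an ordering contract as much as a content contract: for every backup version that writes folder metadata, the folder's dirmeta entry must sort ahead of the files stored under that folder. A minimal, self-contained sketch of why plain lexicographic ordering satisfies this for the v6 layout (hypothetical helper, not Corso's AugmentRestorePaths):

package main

import (
	"fmt"
	"sort"
)

// orderForRestore sorts restore paths so that each folder's ".dirmeta"
// entry precedes the files inside that folder. For the v6 layout a plain
// lexicographic sort is sufficient: "folder/.dirmeta" sorts before
// "folder/file.txt.data" because '.' < 'f'.
func orderForRestore(paths []string) []string {
	out := append([]string{}, paths...)
	sort.Strings(out)

	return out
}

func main() {
	in := []string{
		"folder/folder2/file.txt.data",
		"folder/.dirmeta",
		"folder/file.txt.data",
		"folder/folder2/.dirmeta",
	}

	// Prints each .dirmeta before its folder's files, matching the
	// expected output ordering in the v6 test cases above.
	for _, p := range orderForRestore(in) {
		fmt.Println(p)
	}
}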
@@ -1,4 +1,4 @@
-package onedrive
+package drive
 
 import (
 	"context"
@@ -1,4 +1,4 @@
-package onedrive
+package drive
 
 import (
 	"context"
src/internal/m365/collection/site/backup.go (new file, 168 lines)

package site

import (
	"context"

	"github.com/alcionai/clues"

	"github.com/alcionai/corso/src/internal/common/prefixmatcher"
	"github.com/alcionai/corso/src/internal/data"
	"github.com/alcionai/corso/src/internal/m365/collection/drive"
	"github.com/alcionai/corso/src/internal/m365/graph"
	betaAPI "github.com/alcionai/corso/src/internal/m365/service/sharepoint/api"
	"github.com/alcionai/corso/src/internal/m365/support"
	"github.com/alcionai/corso/src/internal/operations/inject"
	"github.com/alcionai/corso/src/pkg/account"
	"github.com/alcionai/corso/src/pkg/fault"
	"github.com/alcionai/corso/src/pkg/logger"
	"github.com/alcionai/corso/src/pkg/path"
	"github.com/alcionai/corso/src/pkg/selectors"
	"github.com/alcionai/corso/src/pkg/services/m365/api"
)

// CollectLibraries constructs a onedrive Collections struct and Get()s
// all the drives associated with the site.
func CollectLibraries(
	ctx context.Context,
	bpc inject.BackupProducerConfig,
	ad api.Drives,
	tenantID string,
	ssmb *prefixmatcher.StringSetMatchBuilder,
	scope selectors.SharePointScope,
	su support.StatusUpdater,
	errs *fault.Bus,
) ([]data.BackupCollection, bool, error) {
	logger.Ctx(ctx).Debug("creating SharePoint Library collections")

	var (
		collections = []data.BackupCollection{}
		colls       = drive.NewCollections(
			drive.NewLibraryBackupHandler(ad, scope),
			tenantID,
			bpc.ProtectedResource.ID(),
			su,
			bpc.Options)
	)

	odcs, canUsePreviousBackup, err := colls.Get(ctx, bpc.MetadataCollections, ssmb, errs)
	if err != nil {
		return nil, false, graph.Wrap(ctx, err, "getting library")
	}

	return append(collections, odcs...), canUsePreviousBackup, nil
}

// CollectPages constructs a sharepoint Collections struct and Get()s the associated
// M365 IDs for the associated Pages.
func CollectPages(
	ctx context.Context,
	bpc inject.BackupProducerConfig,
	creds account.M365Config,
	ac api.Client,
	su support.StatusUpdater,
	errs *fault.Bus,
) ([]data.BackupCollection, error) {
	logger.Ctx(ctx).Debug("creating SharePoint Pages collections")

	var (
		el   = errs.Local()
		spcs = make([]data.BackupCollection, 0)
	)

	// make the betaClient
	// Need to receive From DataCollection Call
	adpt, err := graph.CreateAdapter(
		creds.AzureTenantID,
		creds.AzureClientID,
		creds.AzureClientSecret)
	if err != nil {
		return nil, clues.Wrap(err, "creating azure client adapter")
	}

	betaService := betaAPI.NewBetaService(adpt)

	tuples, err := betaAPI.FetchPages(ctx, betaService, bpc.ProtectedResource.ID())
	if err != nil {
		return nil, err
	}

	for _, tuple := range tuples {
		if el.Failure() != nil {
			break
		}

		dir, err := path.Build(
			creds.AzureTenantID,
			bpc.ProtectedResource.ID(),
			path.SharePointService,
			path.PagesCategory,
			false,
			tuple.Name)
		if err != nil {
			el.AddRecoverable(ctx, clues.Wrap(err, "creating page collection path").WithClues(ctx))
		}

		collection := NewCollection(
			dir,
			ac,
			Pages,
			su,
			bpc.Options)
		collection.SetBetaService(betaService)
		collection.AddJob(tuple.ID)

		spcs = append(spcs, collection)
	}

	return spcs, el.Failure()
}

func CollectLists(
	ctx context.Context,
	bpc inject.BackupProducerConfig,
	ac api.Client,
	tenantID string,
	su support.StatusUpdater,
	errs *fault.Bus,
) ([]data.BackupCollection, error) {
	logger.Ctx(ctx).Debug("Creating SharePoint List Collections")

	var (
		el   = errs.Local()
		spcs = make([]data.BackupCollection, 0)
	)

	lists, err := PreFetchLists(ctx, ac.Stable, bpc.ProtectedResource.ID())
	if err != nil {
		return nil, err
	}

	for _, tuple := range lists {
		if el.Failure() != nil {
			break
		}

		dir, err := path.Build(
			tenantID,
			bpc.ProtectedResource.ID(),
			path.SharePointService,
			path.ListsCategory,
			false,
			tuple.Name)
		if err != nil {
			el.AddRecoverable(ctx, clues.Wrap(err, "creating list collection path").WithClues(ctx))
		}

		collection := NewCollection(
			dir,
			ac,
			List,
			su,
			bpc.Options)
		collection.AddJob(tuple.ID)

		spcs = append(spcs, collection)
	}

	return spcs, el.Failure()
}
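All three collectors share one shape: prefetch item IDs, build a collection path per item, and record per-item path failures on the fault bus rather than aborting the whole backup. A runnable miniature of that recoverable-error loop, using a stand-in type rather than Corso's fault.Bus; note that this sketch continues past a failed item, while the committed loop records the error and still proceeds to build the collection:

package main

import (
	"errors"
	"fmt"
)

// bus mimics the shape of fault.Bus in miniature: recoverable errors
// accumulate, while Failure() reports a hard stop for the loop.
type bus struct {
	recoverable []error
	failure     error
}

func (b *bus) AddRecoverable(err error) { b.recoverable = append(b.recoverable, err) }
func (b *bus) Failure() error           { return b.failure }

func main() {
	el := &bus{}
	lists := []string{"announcements", "", "tasks"}
	collected := []string{}

	for _, name := range lists {
		// Same control flow as CollectLists: stop once a hard failure
		// is recorded, but keep going past per-item problems.
		if el.Failure() != nil {
			break
		}

		if name == "" {
			// In the real code this is el.AddRecoverable on a path.Build error.
			el.AddRecoverable(errors.New("creating list collection path"))
			continue
		}

		collected = append(collected, name)
	}

	fmt.Println(collected, len(el.recoverable)) // [announcements tasks] 1
}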
src/internal/m365/collection/site/backup_test.go (new file, 73 lines)

package site

import (
	"testing"

	"github.com/alcionai/clues"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/stretchr/testify/suite"

	"github.com/alcionai/corso/src/internal/common/idname/mock"
	"github.com/alcionai/corso/src/internal/m365/graph"
	"github.com/alcionai/corso/src/internal/operations/inject"
	"github.com/alcionai/corso/src/internal/tester"
	"github.com/alcionai/corso/src/internal/tester/tconfig"
	"github.com/alcionai/corso/src/internal/version"
	"github.com/alcionai/corso/src/pkg/control"
	"github.com/alcionai/corso/src/pkg/fault"
	"github.com/alcionai/corso/src/pkg/services/m365/api"
)

type SharePointPagesSuite struct {
	tester.Suite
}

func TestSharePointPagesSuite(t *testing.T) {
	suite.Run(t, &SharePointPagesSuite{
		Suite: tester.NewIntegrationSuite(
			t,
			[][]string{tconfig.M365AcctCredEnvs}),
	})
}

func (suite *SharePointPagesSuite) SetupSuite() {
	ctx, flush := tester.NewContext(suite.T())
	defer flush()

	graph.InitializeConcurrencyLimiter(ctx, false, 4)
}

func (suite *SharePointPagesSuite) TestCollectPages() {
	t := suite.T()

	ctx, flush := tester.NewContext(t)
	defer flush()

	var (
		siteID = tconfig.M365SiteID(t)
		a      = tconfig.NewM365Account(t)
	)

	creds, err := a.M365Config()
	require.NoError(t, err, clues.ToCore(err))

	ac, err := api.NewClient(creds, control.DefaultOptions())
	require.NoError(t, err, clues.ToCore(err))

	bpc := inject.BackupProducerConfig{
		LastBackupVersion: version.NoBackup,
		Options:           control.DefaultOptions(),
		ProtectedResource: mock.NewProvider(siteID, siteID),
	}

	col, err := CollectPages(
		ctx,
		bpc,
		creds,
		ac,
		(&MockGraphService{}).UpdateStatus,
		fault.New(true))
	assert.NoError(t, err, clues.ToCore(err))
	assert.NotEmpty(t, col)
}
@@ -1,4 +1,4 @@
-package sharepoint
+package site
 
 import (
 	"bytes"
@@ -13,7 +13,7 @@ import (
 	"github.com/alcionai/corso/src/internal/common/ptr"
 	"github.com/alcionai/corso/src/internal/data"
 	"github.com/alcionai/corso/src/internal/m365/graph"
-	betaAPI "github.com/alcionai/corso/src/internal/m365/sharepoint/api"
+	betaAPI "github.com/alcionai/corso/src/internal/m365/service/sharepoint/api"
 	"github.com/alcionai/corso/src/internal/m365/support"
 	"github.com/alcionai/corso/src/internal/observe"
 	"github.com/alcionai/corso/src/pkg/backup/details"
@@ -81,6 +81,10 @@ func NewCollection(
 	return c
 }
 
+func (sc *Collection) SetBetaService(betaService *betaAPI.BetaService) {
+	sc.betaService = betaService
+}
+
 // AddJob appends additional objectID to job field
 func (sc *Collection) AddJob(objID string) {
 	sc.jobs = append(sc.jobs, objID)
@@ -254,7 +258,7 @@ func (sc *Collection) retrieveLists(
 		sc.data <- &Item{
 			id:      ptr.Val(lst.GetId()),
 			data:    io.NopCloser(bytes.NewReader(byteArray)),
-			info:    listToSPInfo(lst, size),
+			info:    ListToSPInfo(lst, size),
 			modTime: t,
 		}
 
@@ -1,4 +1,4 @@
-package sharepoint
+package site
 
 import (
 	"bytes"
@@ -14,8 +14,8 @@ import (
 
 	"github.com/alcionai/corso/src/internal/common/ptr"
 	"github.com/alcionai/corso/src/internal/data"
-	betaAPI "github.com/alcionai/corso/src/internal/m365/sharepoint/api"
-	spMock "github.com/alcionai/corso/src/internal/m365/sharepoint/mock"
+	betaAPI "github.com/alcionai/corso/src/internal/m365/service/sharepoint/api"
+	spMock "github.com/alcionai/corso/src/internal/m365/service/sharepoint/mock"
 	"github.com/alcionai/corso/src/internal/tester"
 	"github.com/alcionai/corso/src/internal/tester/tconfig"
 	"github.com/alcionai/corso/src/pkg/account"
@@ -118,7 +118,7 @@ func (suite *SharePointCollectionSuite) TestCollection_Items() {
 			data := &Item{
 				id:   name,
 				data: io.NopCloser(bytes.NewReader(byteArray)),
-				info: listToSPInfo(listing, int64(len(byteArray))),
+				info: ListToSPInfo(listing, int64(len(byteArray))),
 			}
 
 			return data
@@ -207,7 +207,7 @@ func (suite *SharePointCollectionSuite) TestListCollection_Restore() {
 	listData := &Item{
 		id:   testName,
 		data: io.NopCloser(bytes.NewReader(byteArray)),
-		info: listToSPInfo(listing, int64(len(byteArray))),
+		info: ListToSPInfo(listing, int64(len(byteArray))),
 	}
 
 	destName := testdata.DefaultRestoreConfig("").Location
@@ -1,6 +1,6 @@
 // Code generated by "stringer -type=DataCategory"; DO NOT EDIT.
 
-package sharepoint
+package site
 
 import "strconv"
 
@@ -1,4 +1,4 @@
-package sharepoint
+package site
 
 import (
 	"testing"
@@ -43,7 +43,7 @@ func (ms *MockGraphService) UpdateStatus(*support.ControllerOperationStatus) {
 }
 
 // ---------------------------------------------------------------------------
-// Helper Functions
+// Helper functions
 // ---------------------------------------------------------------------------
 
 func createTestService(t *testing.T, credentials account.M365Config) *graph.Service {
@@ -1,4 +1,4 @@
-package sharepoint
+package site
 
 import (
 	"context"
@@ -14,9 +14,9 @@ import (
 	"github.com/alcionai/corso/src/pkg/fault"
 )
 
-// listToSPInfo translates models.Listable metadata into searchable content
+// ListToSPInfo translates models.Listable metadata into searchable content
 // List Details: https://learn.microsoft.com/en-us/graph/api/resources/list?view=graph-rest-1.0
-func listToSPInfo(lst models.Listable, size int64) *details.SharePointInfo {
+func ListToSPInfo(lst models.Listable, size int64) *details.SharePointInfo {
 	var (
 		name   = ptr.Val(lst.GetDisplayName())
 		webURL = ptr.Val(lst.GetWebUrl())
@@ -34,9 +34,9 @@ func ListToSPInfo(lst models.Listable, size int64) *details.SharePointInfo {
 	}
 }
 
-type listTuple struct {
-	name string
-	id   string
-}
+type ListTuple struct {
+	ID   string
+	Name string
+}
 
 func preFetchListOptions() *sites.ItemListsRequestBuilderGetRequestConfiguration {
@@ -51,15 +51,15 @@ func preFetchListOptions() *sites.ItemListsRequestBuilderGetRequestConfiguration
 	return options
 }
 
-func preFetchLists(
+func PreFetchLists(
 	ctx context.Context,
 	gs graph.Servicer,
 	siteID string,
-) ([]listTuple, error) {
+) ([]ListTuple, error) {
 	var (
 		builder = gs.Client().Sites().BySiteId(siteID).Lists()
 		options = preFetchListOptions()
-		listTuples = make([]listTuple, 0)
+		listTuples = make([]ListTuple, 0)
 	)
 
 	for {
@@ -72,11 +72,11 @@ func PreFetchLists(
 		var (
 			id   = ptr.Val(entry.GetId())
 			name = ptr.Val(entry.GetDisplayName())
-			temp = listTuple{id: id, name: name}
+			temp = ListTuple{ID: id, Name: name}
 		)
 
 		if len(name) == 0 {
-			temp.name = id
+			temp.Name = id
 		}
 
 		listTuples = append(listTuples, temp)
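Besides the export renames (preFetchLists to PreFetchLists, listTuple to ListTuple), the behavior worth remembering is the naming fallback: a list whose display name is empty is keyed by its ID. A small restatement of that rule as a standalone helper (hypothetical; the committed code inlines it in the paging loop):

// tupleFor builds a ListTuple, falling back to the ID when the
// display name is empty, so a tuple is never nameless.
func tupleFor(id, displayName string) ListTuple {
	t := ListTuple{ID: id, Name: displayName}
	if len(t.Name) == 0 {
		t.Name = id
	}

	return t
}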
@@ -1,4 +1,4 @@
-package sharepoint
+package site
 
 import (
 	"testing"
@@ -9,6 +9,7 @@ import (
 	"github.com/stretchr/testify/require"
 	"github.com/stretchr/testify/suite"
 
+	"github.com/alcionai/corso/src/internal/m365/graph"
 	"github.com/alcionai/corso/src/internal/tester"
 	"github.com/alcionai/corso/src/internal/tester/tconfig"
 	"github.com/alcionai/corso/src/pkg/account"
@@ -28,6 +29,11 @@ func (suite *ListsUnitSuite) SetupSuite() {
 	require.NoError(t, err, clues.ToCore(err))
 
 	suite.creds = m365
+
+	ctx, flush := tester.NewContext(suite.T())
+	defer flush()
+
+	graph.InitializeConcurrencyLimiter(ctx, false, 4)
 }
 
 func TestListsUnitSuite(t *testing.T) {
@@ -57,10 +63,10 @@ func (suite *ListsUnitSuite) TestLoadList() {
 	defer flush()
 
 	service := createTestService(t, suite.creds)
-	tuples, err := preFetchLists(ctx, service, "root")
+	tuples, err := PreFetchLists(ctx, service, "root")
 	require.NoError(t, err, clues.ToCore(err))
 
-	job := []string{tuples[0].id}
+	job := []string{tuples[0].ID}
 	lists, err := loadSiteLists(ctx, service, "root", job, fault.New(true))
 	assert.NoError(t, err, clues.ToCore(err))
 	assert.Greater(t, len(lists), 0)
@@ -98,7 +104,7 @@ func (suite *ListsUnitSuite) TestSharePointInfo() {
 			t := suite.T()
 
 			list, expected := test.listAndDeets()
-			info := listToSPInfo(list, 10)
+			info := ListToSPInfo(list, 10)
 			assert.Equal(t, expected.ItemType, info.ItemType)
 			assert.Equal(t, expected.ItemName, info.ItemName)
 			assert.Equal(t, expected.WebURL, info.WebURL)
@@ -1,4 +1,4 @@
-package sharepoint
+package site
 
 import (
 	"time"
@@ -1,4 +1,4 @@
-package sharepoint
+package site
 
 import (
 	"testing"
@@ -1,4 +1,4 @@
-package sharepoint
+package site
 
 import (
 	"context"
@@ -15,9 +15,9 @@ import (
 	"github.com/alcionai/corso/src/internal/common/ptr"
 	"github.com/alcionai/corso/src/internal/data"
 	"github.com/alcionai/corso/src/internal/diagnostics"
+	"github.com/alcionai/corso/src/internal/m365/collection/drive"
 	"github.com/alcionai/corso/src/internal/m365/graph"
-	"github.com/alcionai/corso/src/internal/m365/onedrive"
-	betaAPI "github.com/alcionai/corso/src/internal/m365/sharepoint/api"
+	betaAPI "github.com/alcionai/corso/src/internal/m365/service/sharepoint/api"
 	"github.com/alcionai/corso/src/internal/m365/support"
 	"github.com/alcionai/corso/src/internal/operations/inject"
 	"github.com/alcionai/corso/src/pkg/backup/details"
@@ -41,9 +41,9 @@ func ConsumeRestoreCollections(
 	ctr *count.Bus,
 ) (*support.ControllerOperationStatus, error) {
 	var (
-		lrh            = libraryRestoreHandler{ac}
+		lrh            = drive.NewLibraryRestoreHandler(ac)
 		restoreMetrics support.CollectionMetrics
-		caches         = onedrive.NewRestoreCaches(backupDriveIDNames)
+		caches         = drive.NewRestoreCaches(backupDriveIDNames)
 		el             = errs.Local()
 	)
 
@@ -75,7 +75,7 @@ func ConsumeRestoreCollections(
 
 		switch dc.FullPath().Category() {
 		case path.LibrariesCategory:
-			metrics, err = onedrive.RestoreCollection(
+			metrics, err = drive.RestoreCollection(
 				ictx,
 				lrh,
 				rcc,
@@ -200,7 +200,7 @@ func restoreListItem(
 		}
 	}
 
-	dii.SharePoint = listToSPInfo(restoredList, int64(len(byteArray)))
+	dii.SharePoint = ListToSPInfo(restoredList, int64(len(byteArray)))
 
 	return dii, nil
 }
@@ -17,10 +17,10 @@ import (
 	inMock "github.com/alcionai/corso/src/internal/common/idname/mock"
 	"github.com/alcionai/corso/src/internal/data"
 	dataMock "github.com/alcionai/corso/src/internal/data/mock"
-	exchMock "github.com/alcionai/corso/src/internal/m365/exchange/mock"
 	"github.com/alcionai/corso/src/internal/m365/graph"
 	"github.com/alcionai/corso/src/internal/m365/mock"
 	"github.com/alcionai/corso/src/internal/m365/resource"
+	exchMock "github.com/alcionai/corso/src/internal/m365/service/exchange/mock"
 	"github.com/alcionai/corso/src/internal/m365/stub"
 	"github.com/alcionai/corso/src/internal/m365/support"
 	"github.com/alcionai/corso/src/internal/operations/inject"
@@ -8,7 +8,7 @@ import (
 	"github.com/alcionai/corso/src/internal/data"
 	"github.com/alcionai/corso/src/internal/diagnostics"
 	"github.com/alcionai/corso/src/internal/m365/graph"
-	"github.com/alcionai/corso/src/internal/m365/onedrive"
+	"github.com/alcionai/corso/src/internal/m365/service/onedrive"
 	"github.com/alcionai/corso/src/internal/m365/support"
 	"github.com/alcionai/corso/src/pkg/backup/details"
 	"github.com/alcionai/corso/src/pkg/control"
@@ -4,11 +4,12 @@ import (
 	"context"
 	"io"
 	"net/http"
-	"regexp"
 	"time"
 
 	"github.com/alcionai/clues"
 	khttp "github.com/microsoft/kiota-http-go"
+	"github.com/pkg/errors"
+	"golang.org/x/net/http2"
 
 	"github.com/alcionai/corso/src/internal/events"
 	"github.com/alcionai/corso/src/internal/version"
@@ -56,7 +57,7 @@ func NewHTTPWrapper(opts ...Option) *httpWrapper {
 
 	cc.apply(hc)
 
-	return &httpWrapper{hc}
+	return &httpWrapper{hc, cc}
 }
 
 // NewNoTimeoutHTTPWrapper constructs a http wrapper with no context timeout.
@@ -74,8 +75,6 @@ func NewNoTimeoutHTTPWrapper(opts ...Option) *httpWrapper {
 // requests
 // ---------------------------------------------------------------------------
 
-var streamErrRE = regexp.MustCompile(`stream error: stream ID \d+; .+; received from peer`)
-
 // Request does the provided request.
 func (hw httpWrapper) Request(
 	ctx context.Context,
@@ -105,18 +104,20 @@ func (hw httpWrapper) Request(
 	// retry wrapper is unsophisticated, but should only
 	// retry in the event of a `stream error`, which is not
 	// a common expectation.
-	for i := 0; i < 3; i++ {
+	for i := 0; i < hw.config.maxConnectionRetries+1; i++ {
 		ictx := clues.Add(ctx, "request_retry_iter", i)
 
 		resp, err = hw.client.Do(req)
-		if err != nil && !streamErrRE.MatchString(err.Error()) {
-			return nil, Stack(ictx, err)
-		}
-
 		if err == nil {
 			break
 		}
 
+		var http2StreamErr http2.StreamError
+		if !errors.As(err, &http2StreamErr) {
+			return nil, Stack(ictx, err)
+		}
+
 		logger.Ctx(ictx).Debug("http2 stream error")
 		events.Inc(events.APICall, "streamerror")
 
@@ -137,6 +138,7 @@ func (hw httpWrapper) Request(
 type (
 	httpWrapper struct {
 		client *http.Client
+		config *clientConfig
 	}
 
 	customTransport struct {
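The retry rewrite above swaps string-matching (a regexp over err.Error()) for a typed check. errors.As walks the wrap chain, so a stream error stays retryable even after other layers wrap it, which the regexp could miss. A self-contained sketch of the detection, using the same libraries the wrapper imports:

package main

import (
	"fmt"

	"github.com/pkg/errors"
	"golang.org/x/net/http2"
)

// isStreamError reports whether err is (or wraps) an http2 stream
// error, the condition the wrapper now treats as retryable.
func isStreamError(err error) bool {
	var streamErr http2.StreamError
	return errors.As(err, &streamErr)
}

func main() {
	err := errors.Wrap(http2.StreamError{
		StreamID: 1,
		Code:     http2.ErrCodeEnhanceYourCalm,
	}, "sending request")

	fmt.Println(isStreamError(err)) // true, even through the wrap
}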
@@ -7,8 +7,10 @@ import (
 
 	"github.com/alcionai/clues"
 	khttp "github.com/microsoft/kiota-http-go"
+	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"github.com/stretchr/testify/suite"
+	"golang.org/x/net/http2"
 
 	"github.com/alcionai/corso/src/internal/tester"
 	"github.com/alcionai/corso/src/internal/tester/tconfig"
@@ -116,3 +118,70 @@ func (suite *HTTPWrapperUnitSuite) TestNewHTTPWrapper_redirectMiddleware() {
 	// require.Equal(t, 1, calledCorrectly, "test server was called with expected path")
 	require.Equal(t, http.StatusOK, resp.StatusCode)
 }
+
+func (suite *HTTPWrapperUnitSuite) TestNewHTTPWrapper_http2StreamErrorRetries() {
+	var (
+		url       = "https://graph.microsoft.com/fnords/beaux/regard"
+		streamErr = http2.StreamError{
+			StreamID: 1,
+			Code:     http2.ErrCodeEnhanceYourCalm,
+			Cause:    assert.AnError,
+		}
+	)
+
+	table := []struct {
+		name          string
+		retries       int
+		expectRetries int
+	}{
+		{
+			name:          "zero retries",
+			retries:       0,
+			expectRetries: 0,
+		},
+		{
+			name:          "negative max",
+			retries:       -1,
+			expectRetries: 0,
+		},
+		{
+			name:          "upper limit",
+			retries:       9001,
+			expectRetries: 5,
+		},
+		{
+			name:          "four",
+			retries:       4,
+			expectRetries: 4,
+		},
+	}
+	for _, test := range table {
+		suite.Run(test.name, func() {
+			t := suite.T()
+
+			ctx, flush := tester.NewContext(t)
+			defer flush()
+
+			// -1 to account for the first try,
+			// which isn't a retry.
+			tries := -1
+
+			mwResp := mwForceResp{
+				err: streamErr,
+				alternate: func(*http.Request) (bool, *http.Response, error) {
+					tries++
+					return false, nil, nil
+				},
+			}
+
+			hw := NewHTTPWrapper(
+				appendMiddleware(&mwResp),
+				MaxConnectionRetries(test.retries))
+
+			_, err := hw.Request(ctx, http.MethodGet, url, nil, nil)
+			require.ErrorAs(t, err, &http2.StreamError{}, clues.ToCore(err))
+
+			require.Equal(t, test.expectRetries, tries, "count of retries")
+		})
+	}
+}
@@ -1,7 +1,7 @@
 package metadata
 
 import (
-	"github.com/alcionai/corso/src/internal/m365/onedrive/metadata"
+	"github.com/alcionai/corso/src/internal/m365/collection/drive/metadata"
 	"github.com/alcionai/corso/src/pkg/path"
 )
 
@@ -9,8 +9,8 @@ import (
 	"github.com/stretchr/testify/require"
 	"github.com/stretchr/testify/suite"
 
+	odmetadata "github.com/alcionai/corso/src/internal/m365/collection/drive/metadata"
 	"github.com/alcionai/corso/src/internal/m365/graph/metadata"
-	odmetadata "github.com/alcionai/corso/src/internal/m365/onedrive/metadata"
 	"github.com/alcionai/corso/src/internal/tester"
 	"github.com/alcionai/corso/src/pkg/path"
 )
@@ -33,7 +33,7 @@ func CreateAdapter(
 		return nil, err
 	}
 
-	httpClient := graph.KiotaHTTPClient(opts...)
+	httpClient, _ := graph.KiotaHTTPClient(opts...)
 
 	// This makes sure that we are able to intercept any requests via
 	// gock. Only necessary for testing.
@@ -120,7 +120,7 @@ func CreateAdapter(
 		return nil, err
 	}
 
-	httpClient := KiotaHTTPClient(opts...)
+	httpClient, cc := KiotaHTTPClient(opts...)
 
 	adpt, err := msgraphsdkgo.NewGraphRequestAdapterWithParseNodeFactoryAndSerializationWriterFactoryAndHttpClient(
 		auth,
@@ -130,7 +130,7 @@ func CreateAdapter(
 		return nil, clues.Stack(err)
 	}
 
-	return wrapAdapter(adpt), nil
+	return wrapAdapter(adpt, cc), nil
 }
 
 func GetAuth(tenant string, client string, secret string) (*kauth.AzureIdentityAuthenticationProvider, error) {
@@ -158,7 +158,7 @@ func GetAuth(tenant string, client string, secret string) (*kauth.AzureIdentityAuthenticationProvider, error) {
 // and consume relatively unbound socket connections. It is important
 // to centralize this client to be passed downstream where api calls
 // can utilize it on a per-download basis.
-func KiotaHTTPClient(opts ...Option) *http.Client {
+func KiotaHTTPClient(opts ...Option) (*http.Client, *clientConfig) {
 	var (
 		clientOptions = msgraphsdkgo.GetDefaultClientOptions()
 		cc            = populateConfig(opts...)
@@ -170,7 +170,7 @@ func KiotaHTTPClient(opts ...Option) (*http.Client, *clientConfig) {
 
 	cc.apply(httpClient)
 
-	return httpClient
+	return httpClient, cc
 }
 
 // ---------------------------------------------------------------------------
@@ -179,11 +179,14 @@ func KiotaHTTPClient(opts ...Option) (*http.Client, *clientConfig) {
 
 type clientConfig struct {
 	noTimeout bool
-	// MaxRetries before failure
+	// MaxConnectionRetries is the number of connection-level retries that
+	// attempt to re-run the request due to a broken or closed connection.
+	maxConnectionRetries int
+	// MaxRetries is the number of middleware retries attempted
+	// before returning with failure
 	maxRetries int
 	// The minimum delay in seconds between retries
 	minDelay time.Duration
-	overrideRetryCount bool
 
 	appendMiddleware []khttp.Middleware
 }
@@ -193,8 +196,9 @@ type Option func(*clientConfig)
 // populate constructs a clientConfig according to the provided options.
 func populateConfig(opts ...Option) *clientConfig {
 	cc := clientConfig{
-		maxRetries: defaultMaxRetries,
-		minDelay:   defaultDelay,
+		maxConnectionRetries: defaultMaxRetries,
+		maxRetries:           defaultMaxRetries,
+		minDelay:             defaultDelay,
 	}
 
 	for _, opt := range opts {
@@ -227,14 +231,25 @@ func NoTimeout() Option {
 
 func MaxRetries(max int) Option {
 	return func(c *clientConfig) {
-		c.overrideRetryCount = true
+		if max < 0 {
+			max = 0
+		} else if max > 5 {
+			max = 5
+		}
+
 		c.maxRetries = max
 	}
 }
 
-func MinimumBackoff(dur time.Duration) Option {
+func MinimumBackoff(min time.Duration) Option {
 	return func(c *clientConfig) {
-		c.minDelay = dur
+		if min < 100*time.Millisecond {
+			min = 100 * time.Millisecond
+		} else if min > 5*time.Second {
+			min = 5 * time.Second
+		}
+
+		c.minDelay = min
 	}
 }
 
@@ -246,6 +261,18 @@ func appendMiddleware(mw ...khttp.Middleware) Option {
 	}
 }
 
+func MaxConnectionRetries(max int) Option {
+	return func(c *clientConfig) {
+		if max < 0 {
+			max = 0
+		} else if max > 5 {
+			max = 5
+		}
+
+		c.maxConnectionRetries = max
+	}
+}
+
 // ---------------------------------------------------------------------------
 // Middleware Control
 // ---------------------------------------------------------------------------
@@ -302,10 +329,11 @@ var _ abstractions.RequestAdapter = &adapterWrap{}
 // 3. Error and debug conditions are logged.
 type adapterWrap struct {
 	abstractions.RequestAdapter
+	config *clientConfig
}
 
-func wrapAdapter(gra *msgraphsdkgo.GraphRequestAdapter) *adapterWrap {
-	return &adapterWrap{gra}
+func wrapAdapter(gra *msgraphsdkgo.GraphRequestAdapter, cc *clientConfig) *adapterWrap {
+	return &adapterWrap{gra, cc}
 }
 
 var connectionEnded = filters.Contains([]string{
@@ -331,7 +359,7 @@ func (aw *adapterWrap) Send(
 	// retry wrapper is unsophisticated, but should only
 	// retry in the event of a `stream error`, which is not
 	// a common expectation.
-	for i := 0; i < 3; i++ {
+	for i := 0; i < aw.config.maxConnectionRetries+1; i++ {
 		ictx := clues.Add(ctx, "request_retry_iter", i)
 
 		sp, err = aw.RequestAdapter.Send(ctx, requestInfo, constructor, errorMappings)
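Each option now clamps its argument into a fixed band (0-5 retries, 100ms-5s backoff) instead of trusting the caller. The repeated bounds-checks could be factored into one helper; a sketch of that refactor (not what the commit does, which inlines the checks):

// clamp pins v into [lo, hi]; the option funcs above effectively
// inline this for their own bounds.
func clamp(v, lo, hi int) int {
	if v < lo {
		return lo
	}

	if v > hi {
		return hi
	}

	return v
}

// e.g. MaxConnectionRetries' body could then read:
//   c.maxConnectionRetries = clamp(max, 0, 5)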
@@ -67,9 +67,10 @@ func (suite *GraphIntgSuite) TestCreateAdapter() {
 
 func (suite *GraphIntgSuite) TestHTTPClient() {
 	table := []struct {
-		name  string
-		opts  []Option
-		check func(*testing.T, *http.Client)
+		name        string
+		opts        []Option
+		check       func(*testing.T, *http.Client)
+		checkConfig func(*testing.T, *clientConfig)
 	}{
 		{
 			name: "no options",
@@ -77,23 +78,75 @@ func (suite *GraphIntgSuite) TestHTTPClient() {
 			check: func(t *testing.T, c *http.Client) {
 				assert.Equal(t, defaultHTTPClientTimeout, c.Timeout, "default timeout")
 			},
+			checkConfig: func(t *testing.T, c *clientConfig) {
+				assert.Equal(t, defaultDelay, c.minDelay, "default delay")
+				assert.Equal(t, defaultMaxRetries, c.maxRetries, "max retries")
+				assert.Equal(t, defaultMaxRetries, c.maxConnectionRetries, "max connection retries")
+			},
 		},
 		{
-			name: "no timeout",
-			opts: []Option{NoTimeout()},
+			name: "configured options",
+			opts: []Option{
+				NoTimeout(),
+				MaxRetries(4),
+				MaxConnectionRetries(2),
+				MinimumBackoff(999 * time.Millisecond),
+			},
 			check: func(t *testing.T, c *http.Client) {
 				// FIXME: Change to 0 once upstream issue is fixed
 				assert.Equal(t, time.Duration(48*time.Hour), c.Timeout, "unlimited timeout")
 			},
+			checkConfig: func(t *testing.T, c *clientConfig) {
+				assert.Equal(t, 999*time.Millisecond, c.minDelay, "minimum delay")
+				assert.Equal(t, 4, c.maxRetries, "max retries")
+				assert.Equal(t, 2, c.maxConnectionRetries, "max connection retries")
+			},
+		},
+		{
+			name: "below minimums",
+			opts: []Option{
+				NoTimeout(),
+				MaxRetries(-1),
+				MaxConnectionRetries(-1),
+				MinimumBackoff(0),
+			},
+			check: func(t *testing.T, c *http.Client) {
+				// FIXME: Change to 0 once upstream issue is fixed
+				assert.Equal(t, time.Duration(48*time.Hour), c.Timeout, "unlimited timeout")
+			},
+			checkConfig: func(t *testing.T, c *clientConfig) {
+				assert.Equal(t, 100*time.Millisecond, c.minDelay, "minimum delay")
+				assert.Equal(t, 0, c.maxRetries, "max retries")
+				assert.Equal(t, 0, c.maxConnectionRetries, "max connection retries")
+			},
+		},
+		{
+			name: "above maximums",
+			opts: []Option{
+				NoTimeout(),
+				MaxRetries(9001),
+				MaxConnectionRetries(9001),
+				MinimumBackoff(999 * time.Second),
+			},
+			check: func(t *testing.T, c *http.Client) {
+				// FIXME: Change to 0 once upstream issue is fixed
+				assert.Equal(t, time.Duration(48*time.Hour), c.Timeout, "unlimited timeout")
+			},
+			checkConfig: func(t *testing.T, c *clientConfig) {
+				assert.Equal(t, 5*time.Second, c.minDelay, "minimum delay")
+				assert.Equal(t, 5, c.maxRetries, "max retries")
+				assert.Equal(t, 5, c.maxConnectionRetries, "max connection retries")
+			},
 		},
 	}
 	for _, test := range table {
 		suite.Run(test.name, func() {
 			t := suite.T()
 
-			cli := KiotaHTTPClient(test.opts...)
+			cli, cc := KiotaHTTPClient(test.opts...)
 			assert.NotNil(t, cli)
 			test.check(t, cli)
+			test.checkConfig(t, cc)
 		})
 	}
 }
@@ -178,12 +231,12 @@ func (suite *GraphIntgSuite) TestAdapterWrap_retriesConnectionClose() {
 	// the query doesn't matter
 	_, err = users.NewItemCalendarsItemEventsDeltaRequestBuilder(url, adpt).Get(ctx, nil)
 	require.ErrorIs(t, err, syscall.ECONNRESET, clues.ToCore(err))
-	require.Equal(t, 12, count, "number of retries")
+	require.Equal(t, 16, count, "number of retries")
 
 	count = 0
 
 	// the query doesn't matter
 	_, err = NewService(adpt).Client().Users().Get(ctx, nil)
 	require.ErrorIs(t, err, syscall.ECONNRESET, clues.ToCore(err))
-	require.Equal(t, 12, count, "number of retries")
+	require.Equal(t, 16, count, "number of retries")
 }
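The expected call count moving from 12 to 16 is consistent with the retry layering: assuming the middleware still issues 4 attempts per request (1 try plus 3 retries), the old hard-coded connection loop ran 3 iterations (3 x 4 = 12), while the new maxConnectionRetries+1 loop runs 4 iterations under the default of 3 connection retries (4 x 4 = 16).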
@@ -17,10 +17,10 @@ import (
 
 	"github.com/alcionai/corso/src/internal/common/ptr"
 	"github.com/alcionai/corso/src/internal/data"
-	"github.com/alcionai/corso/src/internal/m365/onedrive"
-	"github.com/alcionai/corso/src/internal/m365/onedrive/metadata"
-	odStub "github.com/alcionai/corso/src/internal/m365/onedrive/stub"
+	"github.com/alcionai/corso/src/internal/m365/collection/drive"
+	"github.com/alcionai/corso/src/internal/m365/collection/drive/metadata"
 	"github.com/alcionai/corso/src/internal/m365/resource"
+	odStub "github.com/alcionai/corso/src/internal/m365/service/onedrive/stub"
 	m365Stub "github.com/alcionai/corso/src/internal/m365/stub"
 	"github.com/alcionai/corso/src/internal/tester/tconfig"
 	"github.com/alcionai/corso/src/pkg/control"
@@ -737,7 +737,7 @@ func compareDriveItem(
 	)
 
 	if !isMeta {
-		oitem := item.(*onedrive.Item)
+		oitem := item.(*drive.Item)
 		info := oitem.Info()
 
 		if info.OneDrive != nil {
@@ -4,6 +4,8 @@ import (
 	"context"
 
 	"github.com/alcionai/corso/src/internal/data"
+	"github.com/alcionai/corso/src/pkg/fault"
+	"github.com/alcionai/corso/src/pkg/path"
 )
 
 type RestoreCollection struct {
@@ -22,3 +24,44 @@ func (rc RestoreCollection) FetchItemByName(
 
 	return res, nil
 }
+
+type BackupCollection struct {
+	Path    path.Path
+	Loc     *path.Builder
+	Streams []data.Stream
+	CState  data.CollectionState
+}
+
+func (c *BackupCollection) Items(context.Context, *fault.Bus) <-chan data.Stream {
+	res := make(chan data.Stream)
+
+	go func() {
+		defer close(res)
+
+		for _, s := range c.Streams {
+			res <- s
+		}
+	}()
+
+	return res
+}
+
+func (c BackupCollection) FullPath() path.Path {
+	return c.Path
+}
+
+func (c BackupCollection) PreviousPath() path.Path {
+	return c.Path
+}
+
+func (c BackupCollection) LocationPath() *path.Builder {
+	return c.Loc
+}
+
+func (c BackupCollection) State() data.CollectionState {
+	return c.CState
+}
+
+func (c BackupCollection) DoNotMergeItems() bool {
+	return false
+}
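The new BackupCollection mock streams its canned items and closes the channel from the producing goroutine, so consumers can simply range over Items. A hypothetical test fragment (p, s1, and s2 stand in for a path.Path and data.Streams built elsewhere; dataMock is the import alias used in the tests above):

c := &dataMock.BackupCollection{
	Path:    p,
	Streams: []data.Stream{s1, s2},
}

// The goroutine inside Items closes the channel after the last
// stream, so this range terminates on its own.
for item := range c.Items(ctx, fault.New(true)) {
	_ = item // consume the stream
}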
@@ -14,11 +14,11 @@ import (
 
 	"github.com/alcionai/corso/src/internal/common/dttm"
 	"github.com/alcionai/corso/src/internal/common/ptr"
+	"github.com/alcionai/corso/src/internal/m365/collection/drive/metadata"
 	"github.com/alcionai/corso/src/internal/m365/graph"
-	odConsts "github.com/alcionai/corso/src/internal/m365/onedrive/consts"
-	"github.com/alcionai/corso/src/internal/m365/onedrive/metadata"
-	"github.com/alcionai/corso/src/internal/m365/onedrive/stub"
 	"github.com/alcionai/corso/src/internal/m365/resource"
+	odConsts "github.com/alcionai/corso/src/internal/m365/service/onedrive/consts"
+	"github.com/alcionai/corso/src/internal/m365/service/onedrive/stub"
 	"github.com/alcionai/corso/src/internal/tester"
 	"github.com/alcionai/corso/src/internal/tester/tconfig"
 	"github.com/alcionai/corso/src/internal/version"
@@ -7,10 +7,11 @@ import (
 
 	"github.com/alcionai/corso/src/internal/data"
 	"github.com/alcionai/corso/src/internal/diagnostics"
-	"github.com/alcionai/corso/src/internal/m365/exchange"
+	"github.com/alcionai/corso/src/internal/m365/collection/drive"
 	"github.com/alcionai/corso/src/internal/m365/graph"
-	"github.com/alcionai/corso/src/internal/m365/onedrive"
-	"github.com/alcionai/corso/src/internal/m365/sharepoint"
+	"github.com/alcionai/corso/src/internal/m365/service/exchange"
+	"github.com/alcionai/corso/src/internal/m365/service/onedrive"
+	"github.com/alcionai/corso/src/internal/m365/service/sharepoint"
 	"github.com/alcionai/corso/src/internal/m365/support"
 	"github.com/alcionai/corso/src/internal/operations/inject"
 	"github.com/alcionai/corso/src/pkg/backup/details"
@@ -71,7 +72,7 @@ func (ctrl *Controller) ConsumeRestoreCollections(
 		case path.OneDriveService:
 			status, err = onedrive.ConsumeRestoreCollections(
 				ctx,
-				onedrive.NewRestoreHandler(ctrl.AC),
+				drive.NewRestoreHandler(ctrl.AC),
 				rcc,
 				ctrl.backupDriveIDNames,
 				dcs,
|||||||
@ -10,8 +10,8 @@ import (
|
|||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
"github.com/stretchr/testify/suite"
|
"github.com/stretchr/testify/suite"
|
||||||
|
|
||||||
"github.com/alcionai/corso/src/internal/m365/exchange/mock"
|
|
||||||
"github.com/alcionai/corso/src/internal/m365/graph"
|
"github.com/alcionai/corso/src/internal/m365/graph"
|
||||||
|
"github.com/alcionai/corso/src/internal/m365/service/exchange/mock"
|
||||||
"github.com/alcionai/corso/src/internal/tester"
|
"github.com/alcionai/corso/src/internal/tester"
|
||||||
"github.com/alcionai/corso/src/internal/tester/tconfig"
|
"github.com/alcionai/corso/src/internal/tester/tconfig"
|
||||||
"github.com/alcionai/corso/src/pkg/control"
|
"github.com/alcionai/corso/src/pkg/control"
|
||||||
Some files were not shown because too many files have changed in this diff.