Merge branch 'main' into sharepoint-restore-selectors

commit 8d5ee37c53

.github/workflows/ci.yml (vendored): 2 changes
@@ -501,7 +501,7 @@ jobs:
 
       # deploy the image
       - name: Build image and push to GitHub Container Registry
-        uses: docker/build-push-action@v3
+        uses: docker/build-push-action@v4
         with:
          context: .
          file: ./build/Dockerfile
CHANGELOG.md: 17 changes
@@ -6,6 +6,17 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
 ## [Unreleased] (alpha)
+### Added
+
+### Fixed
+- Support for item.Attachment:Mail restore
+
+### Changed
+
+### Known Issues
+- Nested attachments are currently not restored due to an [issue](https://github.com/microsoft/kiota-serialization-json-go/issues/61) discovered in the Graph APIs
+
+## [v0.3.0] (alpha) - 2023-2-07
 
 ### Added
 

@@ -17,9 +28,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 - Add versions to backups so that we can understand/handle older backup formats
 
 ### Fixed
-- Backing up a calendar that has the same name as the default calendar
 - Added additional backoff-retry to all OneDrive queries.
 - Users with `null` userType values are no longer excluded from user queries.
+- Fix bug when backing up a calendar that has the same name as the default calendar
+
 ### Known Issues
 

@@ -156,7 +168,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 - Miscellaneous
   - Optional usage statistics reporting ([RM-35](https://github.com/alcionai/corso-roadmap/issues/35))
 
-[Unreleased]: https://github.com/alcionai/corso/compare/v0.2.0...HEAD
+[Unreleased]: https://github.com/alcionai/corso/compare/v0.3.0...HEAD
+[v0.3.0]: https://github.com/alcionai/corso/compare/v0.2.0...v0.3.0
 [v0.2.0]: https://github.com/alcionai/corso/compare/v0.1.0...v0.2.0
 [v0.1.0]: https://github.com/alcionai/corso/compare/v0.0.4...v0.1.0
 [v0.0.4]: https://github.com/alcionai/corso/compare/v0.0.3...v0.0.4
@@ -6,7 +6,7 @@ COPY src .
 ARG CORSO_BUILD_LDFLAGS=""
 RUN go build -o corso -ldflags "$CORSO_BUILD_LDFLAGS"
 
-FROM alpine:3.16
+FROM alpine:3.17
 
 LABEL org.opencontainers.image.title="Corso"
 LABEL org.opencontainers.image.description="Free, Secure, and Open-Source Backup for Microsoft 365"
@@ -15,6 +15,11 @@ lint: check-lint-version
 	golangci-lint run
 	staticcheck ./...
 
+fmt:
+	gofumpt -w .
+	goimports -w .
+	gci write --skip-generated -s 'standard,default,prefix(github.com/alcionai/corso)' .
+
 check-lint-version: check-lint
 	@if [ "$(LINT_VERSION)" != "$(WANTED_LINT_VERSION)" ]; then \
 		echo >&2 $(BAD_LINT_MSG); \

@@ -74,4 +79,4 @@ load-test:
 		-mutexprofile=mutex.prof \
 		-trace=trace.out \
 		-outputdir=test_results \
-		./pkg/repository/repository_load_test.go
+		./pkg/repository/loadtest/repository_load_test.go
@@ -12,10 +12,11 @@ import (
 	"github.com/alcionai/corso/src/cli/options"
 	. "github.com/alcionai/corso/src/cli/print"
 	"github.com/alcionai/corso/src/cli/utils"
-	"github.com/alcionai/corso/src/internal/kopia"
+	"github.com/alcionai/corso/src/internal/data"
 	"github.com/alcionai/corso/src/internal/model"
 	"github.com/alcionai/corso/src/pkg/backup"
 	"github.com/alcionai/corso/src/pkg/backup/details"
+	"github.com/alcionai/corso/src/pkg/fault"
 	"github.com/alcionai/corso/src/pkg/path"
 	"github.com/alcionai/corso/src/pkg/repository"
 	"github.com/alcionai/corso/src/pkg/selectors"

@@ -272,20 +273,23 @@ func createExchangeCmd(cmd *cobra.Command, args []string) error {
 
 	sel := exchangeBackupCreateSelectors(user, exchangeData)
 
-	users, err := m365.UserPNs(ctx, acct)
+	// TODO: log/print recoverable errors
+	errs := fault.New(false)
+
+	users, err := m365.UserPNs(ctx, acct, errs)
 	if err != nil {
 		return Only(ctx, errors.Wrap(err, "Failed to retrieve M365 user(s)"))
 	}
 
 	var (
-		errs *multierror.Error
+		merrs *multierror.Error
 		bIDs  []model.StableID
 	)
 
 	for _, discSel := range sel.SplitByResourceOwner(users) {
 		bo, err := r.NewBackup(ctx, discSel.Selector)
 		if err != nil {
-			errs = multierror.Append(errs, errors.Wrapf(
+			merrs = multierror.Append(merrs, errors.Wrapf(
 				err,
 				"Failed to initialize Exchange backup for user %s",
 				discSel.DiscreteOwner,

@@ -296,7 +300,7 @@ func createExchangeCmd(cmd *cobra.Command, args []string) error {
 
 		err = bo.Run(ctx)
 		if err != nil {
-			errs = multierror.Append(errs, errors.Wrapf(
+			merrs = multierror.Append(merrs, errors.Wrapf(
 				err,
 				"Failed to run Exchange backup for user %s",
 				discSel.DiscreteOwner,

@@ -308,30 +312,31 @@ func createExchangeCmd(cmd *cobra.Command, args []string) error {
 		bIDs = append(bIDs, bo.Results.BackupID)
 	}
 
-	bups, err := r.Backups(ctx, bIDs)
-	if err != nil {
-		return Only(ctx, errors.Wrap(err, "Unable to retrieve backup results from storage"))
+	bups, ferrs := r.Backups(ctx, bIDs)
+	// TODO: print/log recoverable errors
+	if ferrs.Err() != nil {
+		return Only(ctx, errors.Wrap(ferrs.Err(), "Unable to retrieve backup results from storage"))
 	}
 
 	backup.PrintAll(ctx, bups)
 
-	if e := errs.ErrorOrNil(); e != nil {
+	if e := merrs.ErrorOrNil(); e != nil {
 		return Only(ctx, e)
 	}
 
 	return nil
 }
 
-func exchangeBackupCreateSelectors(userIDs, data []string) *selectors.ExchangeBackup {
+func exchangeBackupCreateSelectors(userIDs, cats []string) *selectors.ExchangeBackup {
 	sel := selectors.NewExchangeBackup(userIDs)
 
-	if len(data) == 0 {
+	if len(cats) == 0 {
 		sel.Include(sel.ContactFolders(selectors.Any()))
 		sel.Include(sel.MailFolders(selectors.Any()))
 		sel.Include(sel.EventCalendars(selectors.Any()))
 	}
 
-	for _, d := range data {
+	for _, d := range cats {
 		switch d {
 		case dataContacts:
 			sel.Include(sel.ContactFolders(selectors.Any()))

@@ -345,12 +350,12 @@ func exchangeBackupCreateSelectors(userIDs, data []string) *selectors.ExchangeBa
 	return sel
 }
 
-func validateExchangeBackupCreateFlags(userIDs, data []string) error {
+func validateExchangeBackupCreateFlags(userIDs, cats []string) error {
 	if len(userIDs) == 0 {
 		return errors.New("--user requires one or more email addresses or the wildcard '*'")
 	}
 
-	for _, d := range data {
+	for _, d := range cats {
 		if d != dataContacts && d != dataEmail && d != dataEvents {
 			return errors.New(
 				d + " is an unrecognized data type; must be one of " + dataContacts + ", " + dataEmail + ", or " + dataEvents)

@@ -393,7 +398,7 @@ func listExchangeCmd(cmd *cobra.Command, args []string) error {
 	if len(backupID) > 0 {
 		b, err := r.Backup(ctx, model.StableID(backupID))
 		if err != nil {
-			if errors.Is(err, kopia.ErrNotFound) {
+			if errors.Is(err, data.ErrNotFound) {
 				return Only(ctx, errors.Errorf("No backup exists with the id %s", backupID))
 			}
 

@@ -486,6 +491,8 @@ func detailsExchangeCmd(cmd *cobra.Command, args []string) error {
 }
 
 // runDetailsExchangeCmd actually performs the lookup in backup details.
+// the fault.Errors return is always non-nil. Callers should check if
+// errs.Err() == nil.
 func runDetailsExchangeCmd(
 	ctx context.Context,
 	r repository.BackupGetter,

@@ -496,19 +503,20 @@ func runDetailsExchangeCmd(
 		return nil, err
 	}
 
-	d, _, err := r.BackupDetails(ctx, backupID)
-	if err != nil {
-		if errors.Is(err, kopia.ErrNotFound) {
+	d, _, errs := r.BackupDetails(ctx, backupID)
+	// TODO: log/track recoverable errors
+	if errs.Err() != nil {
+		if errors.Is(errs.Err(), data.ErrNotFound) {
 			return nil, errors.Errorf("No backup exists with the id %s", backupID)
 		}
 
-		return nil, errors.Wrap(err, "Failed to get backup details in the repository")
+		return nil, errors.Wrap(errs.Err(), "Failed to get backup details in the repository")
 	}
 
 	sel := utils.IncludeExchangeRestoreDataSelectors(opts)
 	utils.FilterExchangeRestoreInfoSelectors(sel, opts)
 
-	return sel.Reduce(ctx, d), nil
+	return sel.Reduce(ctx, d, errs), nil
 }
 
 // ------------------------------------------------------------------------------------------------
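A note on the errs-to-merrs rename that runs through createExchangeCmd above (and repeats in the OneDrive and SharePoint commands below): the local hashicorp/go-multierror aggregate that collects per-owner loop failures is now called merrs, freeing the errs name for the new fault.Errors bucket. Below is a minimal, self-contained sketch of the multierror half of that pattern; the owner values and the runBackup stand-in are illustrative, not from the commit:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/hashicorp/go-multierror"
)

// runBackup stands in for bo.Run(ctx); it fails for one owner.
func runBackup(owner string) error {
	if owner == "b@example.com" {
		return errors.New("mailbox not found")
	}

	return nil
}

func main() {
	var merrs *multierror.Error // a nil *multierror.Error is a valid empty aggregate

	for _, owner := range []string{"a@example.com", "b@example.com"} {
		if err := runBackup(owner); err != nil {
			// record the failure against this owner and keep going
			merrs = multierror.Append(merrs, fmt.Errorf("backup for %s: %w", owner, err))
		}
	}

	// ErrorOrNil is nil-safe and collapses "no failures" to a plain nil error
	if e := merrs.ErrorOrNil(); e != nil {
		fmt.Println(e)
	}
}
```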
@@ -296,8 +296,9 @@ func (suite *PreparedBackupExchangeIntegrationSuite) SetupSuite() {
 		b, err := suite.repo.Backup(ctx, bop.Results.BackupID)
 		require.NoError(t, err, "retrieving recent backup by ID")
 		require.Equal(t, bIDs, string(b.ID), "repo backup matches results id")
-		_, b, err = suite.repo.BackupDetails(ctx, bIDs)
-		require.NoError(t, err, "retrieving recent backup details by ID")
+		_, b, errs := suite.repo.BackupDetails(ctx, bIDs)
+		require.NoError(t, errs.Err(), "retrieving recent backup details by ID")
+		require.Empty(t, errs.Errs(), "retrieving recent backup details by ID")
 		require.Equal(t, bIDs, string(b.ID), "repo details matches results id")
 
 		suite.backupOps[set] = string(b.ID)

@@ -396,8 +397,9 @@ func (suite *PreparedBackupExchangeIntegrationSuite) TestExchangeDetailsCmd() {
 		bID := suite.backupOps[set]
 
 		// fetch the details from the repo first
-		deets, _, err := suite.repo.BackupDetails(ctx, string(bID))
-		require.NoError(t, err)
+		deets, _, errs := suite.repo.BackupDetails(ctx, string(bID))
+		require.NoError(t, errs.Err())
+		require.Empty(t, errs.Errs())
 
 		cmd := tester.StubRootCmd(
 			"backup", "details", "exchange",
@@ -223,33 +223,13 @@ func (suite *ExchangeSuite) TestExchangeBackupDetailsSelectors() {
 				ctx,
 				test.BackupGetter,
 				"backup-ID",
-				test.Opts,
-			)
-			assert.NoError(t, err)
-
+				test.Opts)
+			assert.NoError(t, err, "failure")
 			assert.ElementsMatch(t, test.Expected, output.Entries)
 		})
 	}
 }
 
-func (suite *ExchangeSuite) TestExchangeBackupDetailsSelectorsBadBackupID() {
-	t := suite.T()
-	ctx, flush := tester.NewContext()
-	backupGetter := &testdata.MockBackupGetter{}
-
-	defer flush()
-
-	output, err := runDetailsExchangeCmd(
-		ctx,
-		backupGetter,
-		"backup-ID",
-		utils.ExchangeOpts{},
-	)
-	assert.Error(t, err)
-
-	assert.Empty(t, output)
-}
-
 func (suite *ExchangeSuite) TestExchangeBackupDetailsSelectorsBadFormats() {
 	ctx, flush := tester.NewContext()
 	defer flush()

@@ -260,10 +240,8 @@ func (suite *ExchangeSuite) TestExchangeBackupDetailsSelectorsBadFormats() {
 				ctx,
 				test.BackupGetter,
 				"backup-ID",
-				test.Opts,
-			)
-
-			assert.Error(t, err)
+				test.Opts)
+			assert.Error(t, err, "failure")
 			assert.Empty(t, output)
 		})
 	}
@@ -12,10 +12,11 @@ import (
 	"github.com/alcionai/corso/src/cli/options"
 	. "github.com/alcionai/corso/src/cli/print"
 	"github.com/alcionai/corso/src/cli/utils"
-	"github.com/alcionai/corso/src/internal/kopia"
+	"github.com/alcionai/corso/src/internal/data"
 	"github.com/alcionai/corso/src/internal/model"
 	"github.com/alcionai/corso/src/pkg/backup"
 	"github.com/alcionai/corso/src/pkg/backup/details"
+	"github.com/alcionai/corso/src/pkg/fault"
 	"github.com/alcionai/corso/src/pkg/path"
 	"github.com/alcionai/corso/src/pkg/repository"
 	"github.com/alcionai/corso/src/pkg/selectors"

@@ -195,20 +196,23 @@ func createOneDriveCmd(cmd *cobra.Command, args []string) error {
 
 	sel := oneDriveBackupCreateSelectors(user)
 
-	users, err := m365.UserPNs(ctx, acct)
+	// TODO: log/print recoverable errors
+	errs := fault.New(false)
+
+	users, err := m365.UserPNs(ctx, acct, errs)
 	if err != nil {
 		return Only(ctx, errors.Wrap(err, "Failed to retrieve M365 users"))
 	}
 
 	var (
-		errs *multierror.Error
+		merrs *multierror.Error
 		bIDs  []model.StableID
 	)
 
 	for _, discSel := range sel.SplitByResourceOwner(users) {
 		bo, err := r.NewBackup(ctx, discSel.Selector)
 		if err != nil {
-			errs = multierror.Append(errs, errors.Wrapf(
+			merrs = multierror.Append(merrs, errors.Wrapf(
 				err,
 				"Failed to initialize OneDrive backup for user %s",
 				discSel.DiscreteOwner,

@@ -219,7 +223,7 @@ func createOneDriveCmd(cmd *cobra.Command, args []string) error {
 
 		err = bo.Run(ctx)
 		if err != nil {
-			errs = multierror.Append(errs, errors.Wrapf(
+			merrs = multierror.Append(merrs, errors.Wrapf(
 				err,
 				"Failed to run OneDrive backup for user %s",
 				discSel.DiscreteOwner,

@@ -231,14 +235,15 @@ func createOneDriveCmd(cmd *cobra.Command, args []string) error {
 		bIDs = append(bIDs, bo.Results.BackupID)
 	}
 
-	bups, err := r.Backups(ctx, bIDs)
-	if err != nil {
-		return Only(ctx, errors.Wrap(err, "Unable to retrieve backup results from storage"))
+	bups, ferrs := r.Backups(ctx, bIDs)
+	// TODO: print/log recoverable errors
+	if ferrs.Err() != nil {
+		return Only(ctx, errors.Wrap(ferrs.Err(), "Unable to retrieve backup results from storage"))
 	}
 
 	backup.PrintAll(ctx, bups)
 
-	if e := errs.ErrorOrNil(); e != nil {
+	if e := merrs.ErrorOrNil(); e != nil {
 		return Only(ctx, e)
 	}
 

@@ -293,7 +298,7 @@ func listOneDriveCmd(cmd *cobra.Command, args []string) error {
 	if len(backupID) > 0 {
 		b, err := r.Backup(ctx, model.StableID(backupID))
 		if err != nil {
-			if errors.Is(err, kopia.ErrNotFound) {
+			if errors.Is(err, data.ErrNotFound) {
 				return Only(ctx, errors.Errorf("No backup exists with the id %s", backupID))
 			}
 

@@ -378,6 +383,8 @@ func detailsOneDriveCmd(cmd *cobra.Command, args []string) error {
 }
 
 // runDetailsOneDriveCmd actually performs the lookup in backup details.
+// the fault.Errors return is always non-nil. Callers should check if
+// errs.Err() == nil.
 func runDetailsOneDriveCmd(
 	ctx context.Context,
 	r repository.BackupGetter,

@@ -388,19 +395,20 @@ func runDetailsOneDriveCmd(
 		return nil, err
 	}
 
-	d, _, err := r.BackupDetails(ctx, backupID)
-	if err != nil {
-		if errors.Is(err, kopia.ErrNotFound) {
+	d, _, errs := r.BackupDetails(ctx, backupID)
+	// TODO: log/track recoverable errors
+	if errs.Err() != nil {
+		if errors.Is(errs.Err(), data.ErrNotFound) {
 			return nil, errors.Errorf("no backup exists with the id %s", backupID)
 		}
 
-		return nil, errors.Wrap(err, "Failed to get backup details in the repository")
+		return nil, errors.Wrap(errs.Err(), "Failed to get backup details in the repository")
 	}
 
 	sel := utils.IncludeOneDriveRestoreDataSelectors(opts)
 	utils.FilterOneDriveRestoreInfoSelectors(sel, opts)
 
-	return sel.Reduce(ctx, d), nil
+	return sel.Reduce(ctx, d, errs), nil
 }
 
 // `corso backup delete onedrive [<flag>...]`
@@ -98,10 +98,8 @@ func (suite *OneDriveSuite) TestOneDriveBackupDetailsSelectors() {
 				ctx,
 				test.BackupGetter,
 				"backup-ID",
-				test.Opts,
-			)
+				test.Opts)
 			assert.NoError(t, err)
 
 			assert.ElementsMatch(t, test.Expected, output.Entries)
 		})
 	}

@@ -117,9 +115,7 @@ func (suite *OneDriveSuite) TestOneDriveBackupDetailsSelectorsBadFormats() {
 				ctx,
 				test.BackupGetter,
 				"backup-ID",
-				test.Opts,
-			)
-
+				test.Opts)
 			assert.Error(t, err)
 			assert.Empty(t, output)
 		})
@@ -14,10 +14,11 @@ import (
 	"github.com/alcionai/corso/src/cli/utils"
 	"github.com/alcionai/corso/src/internal/connector"
 	"github.com/alcionai/corso/src/internal/connector/graph"
-	"github.com/alcionai/corso/src/internal/kopia"
+	"github.com/alcionai/corso/src/internal/data"
 	"github.com/alcionai/corso/src/internal/model"
 	"github.com/alcionai/corso/src/pkg/backup"
 	"github.com/alcionai/corso/src/pkg/backup/details"
+	"github.com/alcionai/corso/src/pkg/fault"
 	"github.com/alcionai/corso/src/pkg/path"
 	"github.com/alcionai/corso/src/pkg/repository"
 	"github.com/alcionai/corso/src/pkg/selectors"

@@ -210,7 +211,10 @@ func createSharePointCmd(cmd *cobra.Command, args []string) error {
 
 	defer utils.CloseRepo(ctx, r)
 
-	gc, err := connector.NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), acct, connector.Sites)
+	// TODO: log/print recoverable errors
+	errs := fault.New(false)
+
+	gc, err := connector.NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), acct, connector.Sites, errs)
 	if err != nil {
 		return Only(ctx, errors.Wrap(err, "Failed to connect to Microsoft APIs"))
 	}

@@ -221,14 +225,14 @@ func createSharePointCmd(cmd *cobra.Command, args []string) error {
 	}
 
 	var (
-		errs *multierror.Error
+		merrs *multierror.Error
 		bIDs  []model.StableID
 	)
 
 	for _, discSel := range sel.SplitByResourceOwner(gc.GetSiteIDs()) {
 		bo, err := r.NewBackup(ctx, discSel.Selector)
 		if err != nil {
-			errs = multierror.Append(errs, errors.Wrapf(
+			merrs = multierror.Append(merrs, errors.Wrapf(
 				err,
 				"Failed to initialize SharePoint backup for site %s",
 				discSel.DiscreteOwner,

@@ -239,7 +243,7 @@ func createSharePointCmd(cmd *cobra.Command, args []string) error {
 
 		err = bo.Run(ctx)
 		if err != nil {
-			errs = multierror.Append(errs, errors.Wrapf(
+			merrs = multierror.Append(merrs, errors.Wrapf(
 				err,
 				"Failed to run SharePoint backup for site %s",
 				discSel.DiscreteOwner,

@@ -251,21 +255,22 @@ func createSharePointCmd(cmd *cobra.Command, args []string) error {
 		bIDs = append(bIDs, bo.Results.BackupID)
 	}
 
-	bups, err := r.Backups(ctx, bIDs)
-	if err != nil {
-		return Only(ctx, errors.Wrap(err, "Unable to retrieve backup results from storage"))
+	bups, ferrs := r.Backups(ctx, bIDs)
+	// TODO: print/log recoverable errors
+	if ferrs.Err() != nil {
+		return Only(ctx, errors.Wrap(ferrs.Err(), "Unable to retrieve backup results from storage"))
 	}
 
 	backup.PrintAll(ctx, bups)
 
-	if e := errs.ErrorOrNil(); e != nil {
+	if e := merrs.ErrorOrNil(); e != nil {
 		return Only(ctx, e)
 	}
 
 	return nil
 }
 
-func validateSharePointBackupCreateFlags(sites, weburls, data []string) error {
+func validateSharePointBackupCreateFlags(sites, weburls, cats []string) error {
 	if len(sites) == 0 && len(weburls) == 0 {
 		return errors.New(
 			"requires one or more --" +

@@ -275,7 +280,7 @@ func validateSharePointBackupCreateFlags(sites, weburls, data []string) error {
 		)
 	}
 
-	for _, d := range data {
+	for _, d := range cats {
 		if d != dataLibraries && d != dataPages {
 			return errors.New(
 				d + " is an unrecognized data type; either " + dataLibraries + "or " + dataPages,

@@ -289,7 +294,7 @@ func validateSharePointBackupCreateFlags(sites, weburls, data []string) error {
 // TODO: users might specify a data type, this only supports AllData().
 func sharePointBackupCreateSelectors(
 	ctx context.Context,
-	sites, weburls, data []string,
+	sites, weburls, cats []string,
 	gc *connector.GraphConnector,
 ) (*selectors.SharePointBackup, error) {
 	if len(sites) == 0 && len(weburls) == 0 {

@@ -314,19 +319,22 @@ func sharePointBackupCreateSelectors(
 		}
 	}
 
-	union, err := gc.UnionSiteIDsAndWebURLs(ctx, sites, weburls)
+	// TODO: log/print recoverable errors
+	errs := fault.New(false)
+
+	union, err := gc.UnionSiteIDsAndWebURLs(ctx, sites, weburls, errs)
 	if err != nil {
 		return nil, err
 	}
 
 	sel := selectors.NewSharePointBackup(union)
-	if len(data) == 0 {
+	if len(cats) == 0 {
 		sel.Include(sel.AllData())
 
 		return sel, nil
 	}
 
-	for _, d := range data {
+	for _, d := range cats {
 		switch d {
 		case dataLibraries:
 			sel.Include(sel.Libraries(selectors.Any()))

@@ -371,7 +379,7 @@ func listSharePointCmd(cmd *cobra.Command, args []string) error {
 	if len(backupID) > 0 {
 		b, err := r.Backup(ctx, model.StableID(backupID))
 		if err != nil {
-			if errors.Is(err, kopia.ErrNotFound) {
+			if errors.Is(err, data.ErrNotFound) {
 				return Only(ctx, errors.Errorf("No backup exists with the id %s", backupID))
 			}
 

@@ -497,6 +505,8 @@ func detailsSharePointCmd(cmd *cobra.Command, args []string) error {
 }
 
 // runDetailsSharePointCmd actually performs the lookup in backup details.
+// the fault.Errors return is always non-nil. Callers should check if
+// errs.Err() == nil.
 func runDetailsSharePointCmd(
 	ctx context.Context,
 	r repository.BackupGetter,

@@ -507,17 +517,18 @@ func runDetailsSharePointCmd(
 		return nil, err
 	}
 
-	d, _, err := r.BackupDetails(ctx, backupID)
-	if err != nil {
-		if errors.Is(err, kopia.ErrNotFound) {
+	d, _, errs := r.BackupDetails(ctx, backupID)
+	// TODO: log/track recoverable errors
+	if errs.Err() != nil {
+		if errors.Is(errs.Err(), data.ErrNotFound) {
 			return nil, errors.Errorf("no backup exists with the id %s", backupID)
 		}
 
-		return nil, errors.Wrap(err, "Failed to get backup details in the repository")
+		return nil, errors.Wrap(errs.Err(), "Failed to get backup details in the repository")
 	}
 
 	sel := utils.IncludeSharePointRestoreDataSelectors(opts)
 	utils.FilterSharePointRestoreInfoSelectors(sel, opts)
 
-	return sel.Reduce(ctx, d), nil
+	return sel.Reduce(ctx, d, errs), nil
 }
@@ -213,10 +213,8 @@ func (suite *SharePointSuite) TestSharePointBackupDetailsSelectors() {
 				ctx,
 				test.BackupGetter,
 				"backup-ID",
-				test.Opts,
-			)
+				test.Opts)
 			assert.NoError(t, err)
 
 			assert.ElementsMatch(t, test.Expected, output.Entries)
 		})
 	}

@@ -232,9 +230,7 @@ func (suite *SharePointSuite) TestSharePointBackupDetailsSelectorsBadFormats() {
 				ctx,
 				test.BackupGetter,
 				"backup-ID",
-				test.Opts,
-			)
-
+				test.Opts)
 			assert.Error(t, err)
 			assert.Empty(t, output)
 		})
@@ -10,7 +10,7 @@ import (
 	. "github.com/alcionai/corso/src/cli/print"
 	"github.com/alcionai/corso/src/cli/utils"
 	"github.com/alcionai/corso/src/internal/common"
-	"github.com/alcionai/corso/src/internal/kopia"
+	"github.com/alcionai/corso/src/internal/data"
 	"github.com/alcionai/corso/src/pkg/control"
 	"github.com/alcionai/corso/src/pkg/repository"
 )

@@ -228,7 +228,7 @@ func restoreExchangeCmd(cmd *cobra.Command, args []string) error {
 
 	ds, err := ro.Run(ctx)
 	if err != nil {
-		if errors.Is(err, kopia.ErrNotFound) {
+		if errors.Is(err, data.ErrNotFound) {
 			return Only(ctx, errors.Errorf("Backup or backup details missing for id %s", backupID))
 		}
 
@@ -110,8 +110,10 @@ func (suite *RestoreExchangeIntegrationSuite) SetupSuite() {
 		// sanity check, ensure we can find the backup and its details immediately
 		_, err = suite.repo.Backup(ctx, bop.Results.BackupID)
 		require.NoError(t, err, "retrieving recent backup by ID")
-		_, _, err = suite.repo.BackupDetails(ctx, string(bop.Results.BackupID))
-		require.NoError(t, err, "retrieving recent backup details by ID")
+		_, _, errs := suite.repo.BackupDetails(ctx, string(bop.Results.BackupID))
+		require.NoError(t, errs.Err(), "retrieving recent backup details by ID")
+		require.Empty(t, errs.Errs(), "retrieving recent backup details by ID")
 	}
 }
 
@@ -10,7 +10,7 @@ import (
 	. "github.com/alcionai/corso/src/cli/print"
 	"github.com/alcionai/corso/src/cli/utils"
 	"github.com/alcionai/corso/src/internal/common"
-	"github.com/alcionai/corso/src/internal/kopia"
+	"github.com/alcionai/corso/src/internal/data"
 	"github.com/alcionai/corso/src/pkg/control"
 	"github.com/alcionai/corso/src/pkg/repository"
 )

@@ -171,7 +171,7 @@ func restoreOneDriveCmd(cmd *cobra.Command, args []string) error {
 
 	ds, err := ro.Run(ctx)
 	if err != nil {
-		if errors.Is(err, kopia.ErrNotFound) {
+		if errors.Is(err, data.ErrNotFound) {
 			return Only(ctx, errors.Errorf("Backup or backup details missing for id %s", backupID))
 		}
 
@@ -10,7 +10,7 @@ import (
 	. "github.com/alcionai/corso/src/cli/print"
 	"github.com/alcionai/corso/src/cli/utils"
 	"github.com/alcionai/corso/src/internal/common"
-	"github.com/alcionai/corso/src/internal/kopia"
+	"github.com/alcionai/corso/src/internal/data"
 	"github.com/alcionai/corso/src/pkg/control"
 	"github.com/alcionai/corso/src/pkg/repository"
 )

@@ -182,7 +182,7 @@ func restoreSharePointCmd(cmd *cobra.Command, args []string) error {
 
 	ds, err := ro.Run(ctx)
 	if err != nil {
-		if errors.Is(err, kopia.ErrNotFound) {
+		if errors.Is(err, data.ErrNotFound) {
 			return Only(ctx, errors.Errorf("Backup or backup details missing for id %s", backupID))
 		}
 
src/cli/utils/testdata/opts.go (vendored): 14 changes
@@ -10,6 +10,7 @@ import (
 	"github.com/alcionai/corso/src/internal/model"
 	"github.com/alcionai/corso/src/pkg/backup"
 	"github.com/alcionai/corso/src/pkg/backup/details"
+	"github.com/alcionai/corso/src/pkg/fault"
 	"github.com/alcionai/corso/src/pkg/selectors"
 	"github.com/alcionai/corso/src/pkg/selectors/testdata"
 	"github.com/alcionai/corso/src/pkg/store"

@@ -497,8 +498,11 @@ func (MockBackupGetter) Backup(
 	return nil, errors.New("unexpected call to mock")
 }
 
-func (MockBackupGetter) Backups(context.Context, []model.StableID) ([]*backup.Backup, error) {
-	return nil, errors.New("unexpected call to mock")
+func (MockBackupGetter) Backups(
+	context.Context,
+	[]model.StableID,
+) ([]*backup.Backup, *fault.Errors) {
+	return nil, fault.New(false).Fail(errors.New("unexpected call to mock"))
 }
 
 func (MockBackupGetter) BackupsByTag(

@@ -511,10 +515,10 @@ func (MockBackupGetter) BackupsByTag(
 func (bg *MockBackupGetter) BackupDetails(
 	ctx context.Context,
 	backupID string,
-) (*details.Details, *backup.Backup, error) {
+) (*details.Details, *backup.Backup, *fault.Errors) {
 	if bg == nil {
-		return testdata.GetDetailsSet(), nil, nil
+		return testdata.GetDetailsSet(), nil, fault.New(true)
 	}
 
-	return nil, nil, errors.New("unexpected call to mock")
+	return nil, nil, fault.New(false).Fail(errors.New("unexpected call to mock"))
 }
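The mock mirrors the new repository contract: Backups and BackupDetails return a *fault.Errors that is documented as always non-nil, so success is an empty bucket (fault.New(true) above) and failure is fault.New(false).Fail(err). The sketch below is a hypothetical stand-in for that API, modeling only the semantics visible in this diff: Err() carries the hard failure, Errs() accumulates recoverable ones, and the boolean selects fail-fast behavior.

```go
package main

import (
	"errors"
	"fmt"
)

// Errors is a simplified stand-in for corso's fault.Errors, not the real type.
type Errors struct {
	failFast bool
	err      error   // non-recoverable failure
	errs     []error // recoverable failures
}

func New(failFast bool) *Errors          { return &Errors{failFast: failFast} }
func (e *Errors) Fail(err error) *Errors { e.err = err; return e }
func (e *Errors) Err() error             { return e.err }
func (e *Errors) Errs() []error          { return e.errs }

// Add records a recoverable error; under failFast it also becomes the
// hard failure, which is one plausible reading of fault.New(true).
func (e *Errors) Add(err error) {
	if e.failFast && e.err == nil {
		e.err = err
	}

	e.errs = append(e.errs, err)
}

func main() {
	errs := New(false)
	errs.Add(errors.New("item 42 skipped")) // recoverable: the run continues

	if errs.Err() != nil { // callers check the hard failure first
		fmt.Println("fatal:", errs.Err())
		return
	}

	for _, e := range errs.Errs() { // then report recoverable errors
		fmt.Println("recoverable:", e)
	}
}
```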
@@ -20,6 +20,7 @@ import (
 	"github.com/alcionai/corso/src/pkg/backup/details"
 	"github.com/alcionai/corso/src/pkg/control"
 	"github.com/alcionai/corso/src/pkg/credentials"
+	"github.com/alcionai/corso/src/pkg/fault"
 	"github.com/alcionai/corso/src/pkg/path"
 	"github.com/alcionai/corso/src/pkg/selectors"
 )

@@ -114,7 +115,10 @@ func getGCAndVerifyUser(ctx context.Context, userID string) (*connector.GraphCon
 	}
 
 	// build a graph connector
-	gc, err := connector.NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), acct, connector.Users)
+	// TODO: log/print recoverable errors
+	errs := fault.New(false)
+
+	gc, err := connector.NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), acct, connector.Users, errs)
 	if err != nil {
 		return nil, account.Account{}, errors.Wrap(err, "connecting to graph api")
 	}

@@ -152,8 +156,8 @@ func buildCollections(
 	tenant, user string,
 	dest control.RestoreDestination,
 	colls []collection,
-) ([]data.Collection, error) {
-	collections := make([]data.Collection, 0, len(colls))
+) ([]data.RestoreCollection, error) {
+	collections := make([]data.RestoreCollection, 0, len(colls))
 
 	for _, c := range colls {
 		pth, err := toDataLayerPath(

@@ -175,7 +179,7 @@ func buildCollections(
 			mc.Data[i] = c.items[i].data
 		}
 
-		collections = append(collections, mc)
+		collections = append(collections, data.NotFoundRestoreCollection{Collection: mc})
 	}
 
 	return collections, nil
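buildCollections now returns data.RestoreCollection values, wrapping each mock in data.NotFoundRestoreCollection. The sketch below models the apparent intent with simplified stand-in types (the names mirror the diff, but the interfaces here are invented for illustration): interface embedding upgrades a plain collection to the wider restore interface by answering not-found for item fetches.

```go
package main

import (
	"errors"
	"fmt"
)

var ErrNotFound = errors.New("not found")

// Collection is a simplified stand-in for the data-layer collection interface.
type Collection interface {
	Name() string
}

// RestoreCollection adds item lookup on top of a plain Collection.
type RestoreCollection interface {
	Collection
	Fetch(itemID string) ([]byte, error)
}

// NotFoundRestoreCollection upgrades any Collection to a RestoreCollection
// whose lookups always report not-found; enough for callers that never Fetch.
type NotFoundRestoreCollection struct {
	Collection
}

func (NotFoundRestoreCollection) Fetch(string) ([]byte, error) {
	return nil, ErrNotFound
}

type mockCollection struct{ name string }

func (m mockCollection) Name() string { return m.name }

func main() {
	rcs := make([]RestoreCollection, 0, 2)
	for _, mc := range []mockCollection{{"inbox"}, {"contacts"}} {
		rcs = append(rcs, NotFoundRestoreCollection{Collection: mc})
	}

	_, err := rcs[0].Fetch("item-1")
	fmt.Println(rcs[0].Name(), errors.Is(err, ErrNotFound)) // inbox true
}
```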
@@ -23,6 +23,7 @@ import (
 	"github.com/alcionai/corso/src/pkg/account"
 	"github.com/alcionai/corso/src/pkg/backup/details"
 	"github.com/alcionai/corso/src/pkg/credentials"
+	"github.com/alcionai/corso/src/pkg/fault"
 	"github.com/alcionai/corso/src/pkg/logger"
 	"github.com/alcionai/corso/src/pkg/path"
 )

@@ -178,7 +179,10 @@ func getGC(ctx context.Context) (*connector.GraphConnector, account.M365Config,
 		return nil, m365Cfg, Only(ctx, errors.Wrap(err, "finding m365 account details"))
 	}
 
-	gc, err := connector.NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), acct, connector.Users)
+	// TODO: log/print recoverable errors
+	errs := fault.New(false)
+
+	gc, err := connector.NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), acct, connector.Users, errs)
 	if err != nil {
 		return nil, m365Cfg, Only(ctx, errors.Wrap(err, "connecting to graph API"))
 	}
@@ -17,6 +17,7 @@ import (
 	"github.com/alcionai/corso/src/internal/connector/onedrive"
 	"github.com/alcionai/corso/src/pkg/account"
 	"github.com/alcionai/corso/src/pkg/credentials"
+	"github.com/alcionai/corso/src/pkg/fault"
 	"github.com/alcionai/corso/src/pkg/logger"
 )
 

@@ -260,7 +261,10 @@ func getGC(ctx context.Context) (*connector.GraphConnector, error) {
 	}
 
 	// build a graph connector
-	gc, err := connector.NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), acct, connector.Users)
+	// TODO: log/print recoverable errors
+	errs := fault.New(false)
+
+	gc, err := connector.NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), acct, connector.Users, errs)
 	if err != nil {
 		return nil, Only(ctx, errors.Wrap(err, "connecting to graph api"))
 	}
src/go.mod: 10 changes
|
|||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0
|
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0
|
||||||
github.com/alcionai/clues v0.0.0-20230131232239-cee86233b005
|
github.com/alcionai/clues v0.0.0-20230202001016-cbda58c9de9e
|
||||||
github.com/aws/aws-sdk-go v1.44.192
|
github.com/aws/aws-sdk-go v1.44.197
|
||||||
github.com/aws/aws-xray-sdk-go v1.8.0
|
github.com/aws/aws-xray-sdk-go v1.8.0
|
||||||
github.com/google/uuid v1.3.0
|
github.com/google/uuid v1.3.0
|
||||||
github.com/hashicorp/go-multierror v1.1.1
|
github.com/hashicorp/go-multierror v1.1.1
|
||||||
@ -26,7 +26,7 @@ require (
|
|||||||
github.com/stretchr/testify v1.8.1
|
github.com/stretchr/testify v1.8.1
|
||||||
github.com/tidwall/pretty v1.2.1
|
github.com/tidwall/pretty v1.2.1
|
||||||
github.com/tomlazar/table v0.1.2
|
github.com/tomlazar/table v0.1.2
|
||||||
github.com/vbauerster/mpb/v8 v8.1.4
|
github.com/vbauerster/mpb/v8 v8.1.6
|
||||||
go.uber.org/zap v1.24.0
|
go.uber.org/zap v1.24.0
|
||||||
golang.org/x/exp v0.0.0-20221217163422-3c43f8badb15
|
golang.org/x/exp v0.0.0-20221217163422-3c43f8badb15
|
||||||
golang.org/x/tools v0.5.0
|
golang.org/x/tools v0.5.0
|
||||||
@ -97,7 +97,7 @@ require (
|
|||||||
github.com/prometheus/client_model v0.3.0 // indirect
|
github.com/prometheus/client_model v0.3.0 // indirect
|
||||||
github.com/prometheus/common v0.37.0 // indirect
|
github.com/prometheus/common v0.37.0 // indirect
|
||||||
github.com/prometheus/procfs v0.8.0 // indirect
|
github.com/prometheus/procfs v0.8.0 // indirect
|
||||||
github.com/rivo/uniseg v0.2.0 // indirect
|
github.com/rivo/uniseg v0.4.3 // indirect
|
||||||
github.com/rs/xid v1.4.0 // indirect
|
github.com/rs/xid v1.4.0 // indirect
|
||||||
github.com/segmentio/backo-go v1.0.0 // indirect
|
github.com/segmentio/backo-go v1.0.0 // indirect
|
||||||
github.com/sirupsen/logrus v1.9.0 // indirect
|
github.com/sirupsen/logrus v1.9.0 // indirect
|
||||||
@ -114,7 +114,7 @@ require (
|
|||||||
golang.org/x/mod v0.7.0 // indirect
|
golang.org/x/mod v0.7.0 // indirect
|
||||||
golang.org/x/net v0.5.0 // indirect
|
golang.org/x/net v0.5.0 // indirect
|
||||||
golang.org/x/sync v0.1.0 // indirect
|
golang.org/x/sync v0.1.0 // indirect
|
||||||
golang.org/x/sys v0.4.0 // indirect
|
golang.org/x/sys v0.5.0 // indirect
|
||||||
golang.org/x/text v0.6.0 // indirect
|
golang.org/x/text v0.6.0 // indirect
|
||||||
google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef // indirect
|
google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef // indirect
|
||||||
google.golang.org/grpc v1.52.0 // indirect
|
google.golang.org/grpc v1.52.0 // indirect
|
||||||
|
|||||||
src/go.sum: 19 changes
@ -52,8 +52,8 @@ github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1o
|
|||||||
github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
|
github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
|
||||||
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8=
|
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8=
|
||||||
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo=
|
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo=
|
||||||
github.com/alcionai/clues v0.0.0-20230131232239-cee86233b005 h1:eTgICcmcydEWG8J+hgnidf0pzujV3Gd2XqmknykZkzA=
|
github.com/alcionai/clues v0.0.0-20230202001016-cbda58c9de9e h1:KMRGDB9lh0wC/WYVmQ28MJ07qiHszCSH2PRwkw2YElM=
|
||||||
github.com/alcionai/clues v0.0.0-20230131232239-cee86233b005/go.mod h1:UlAs8jkWIpsOMakiC8NxPgQQVQRdvyf1hYMszlYYLb4=
|
github.com/alcionai/clues v0.0.0-20230202001016-cbda58c9de9e/go.mod h1:UlAs8jkWIpsOMakiC8NxPgQQVQRdvyf1hYMszlYYLb4=
|
||||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||||
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||||
@ -62,8 +62,8 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5
|
|||||||
github.com/alessio/shellescape v1.4.1 h1:V7yhSDDn8LP4lc4jS8pFkt0zCnzVJlG5JXy9BVKJUX0=
|
github.com/alessio/shellescape v1.4.1 h1:V7yhSDDn8LP4lc4jS8pFkt0zCnzVJlG5JXy9BVKJUX0=
|
||||||
github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY3JY=
|
github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY3JY=
|
||||||
github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
|
github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
|
||||||
github.com/aws/aws-sdk-go v1.44.192 h1:KL54vCxRd5v5XBGjnF3FelzXXwl+aWHDmDTihFmRNgM=
|
github.com/aws/aws-sdk-go v1.44.197 h1:pkg/NZsov9v/CawQWy+qWVzJMIZRQypCtYjUBXFomF8=
|
||||||
github.com/aws/aws-sdk-go v1.44.192/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
|
github.com/aws/aws-sdk-go v1.44.197/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
|
||||||
github.com/aws/aws-xray-sdk-go v1.8.0 h1:0xncHZ588wB/geLjbM/esoW3FOEThWy2TJyb4VXfLFY=
|
github.com/aws/aws-xray-sdk-go v1.8.0 h1:0xncHZ588wB/geLjbM/esoW3FOEThWy2TJyb4VXfLFY=
|
||||||
github.com/aws/aws-xray-sdk-go v1.8.0/go.mod h1:7LKe47H+j3evfvS1+q0wzpoaGXGrF3mUsfM+thqVO+A=
|
github.com/aws/aws-xray-sdk-go v1.8.0/go.mod h1:7LKe47H+j3evfvS1+q0wzpoaGXGrF3mUsfM+thqVO+A=
|
||||||
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
|
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
|
||||||
@ -342,8 +342,9 @@ github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1
|
|||||||
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||||
github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
|
github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
|
||||||
github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
|
github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
|
||||||
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
|
|
||||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||||
|
github.com/rivo/uniseg v0.4.3 h1:utMvzDsuh3suAEnhH0RdHmoPbU648o6CvXxTx4SBMOw=
|
||||||
|
github.com/rivo/uniseg v0.4.3/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
|
||||||
 github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
 github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k=
 github.com/rs/xid v1.4.0 h1:qd7wPTDkN6KQx2VmMBLrpHkiyQwgFXRnkOLacUiaSNY=
@ -402,8 +403,8 @@ github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyC
 github.com/valyala/fasthttp v1.34.0 h1:d3AAQJ2DRcxJYHm7OXNXtXt2as1vMDfxeIcFvhmGGm4=
 github.com/valyala/fasthttp v1.34.0/go.mod h1:epZA5N+7pY6ZaEKRmstzOuYJx9HI8DI1oaCGZpdH4h0=
 github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc=
-github.com/vbauerster/mpb/v8 v8.1.4 h1:MOcLTIbbAA892wVjRiuFHa1nRlNvifQMDVh12Bq/xIs=
-github.com/vbauerster/mpb/v8 v8.1.4/go.mod h1:2fRME8lCLU9gwJwghZb1bO9A3Plc8KPeQ/ayGj+Ek4I=
+github.com/vbauerster/mpb/v8 v8.1.6 h1:EswHDkAsy4OQ7QBAmU1MUPz4vHzl6KlINjlh7vJoxvY=
+github.com/vbauerster/mpb/v8 v8.1.6/go.mod h1:O9/Wl8X9dUbR63tZ41MLIAxrtNfwlpwUhGkeYugUPW8=
 github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c h1:3lbZUMbMiGUW/LMkfsEABsc5zNT9+b1CvsJx47JzJ8g=
 github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c/go.mod h1:UrdRz5enIKZ63MEE3IF9l2/ebyx59GyGgPi+tICQdmM=
 github.com/yosida95/uritemplate/v3 v3.0.2 h1:Ed3Oyj9yrmi9087+NczuL5BwkIc4wvTb5zIM+UJPGz4=
@ -610,8 +611,8 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18=
-golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=

@ -5,6 +5,8 @@ import (
 	"io"
 )
 
+// TODO: Remove in favor of clues.Stack()
+
 // Err provides boiler-plate functions that other types of errors can use
 // if they wish to be compared with `errors.As()`. This struct ensures that
 // stack traces are printed when requested (if present) and that Err

14
src/internal/common/ptr/pointer.go
Normal file
@ -0,0 +1,14 @@
+package ptr
+
+// Val helper method for unwrapping strings
+// Microsoft Graph saves many variables as string pointers.
+// Function will safely check if the pointer is nil prior to
+// dereferencing the pointer. If the pointer is nil,
+// an empty string is returned.
+func Val(ptr *string) string {
+	if ptr == nil {
+		return ""
+	}
+
+	return *ptr
+}
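
The new ptr.Val helper removes per-call-site nil checks on the string pointers the Graph SDK returns. A minimal standalone sketch of the call pattern (not part of the commit):

```go
package main

import (
	"fmt"

	"github.com/alcionai/corso/src/internal/common/ptr"
)

func main() {
	// Graph SDK getters such as GetName() or GetId() return *string and
	// may be nil; ptr.Val collapses the nil check into a single call.
	var name *string
	fmt.Printf("%q\n", ptr.Val(name)) // ""

	s := "Inbox"
	name = &s
	fmt.Printf("%q\n", ptr.Val(name)) // "Inbox"
}
```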
@ -1,5 +1,6 @@
 package common
 
+// TODO: can be replaced with slices.Contains()
 func ContainsString(super []string, sub string) bool {
 	for _, s := range super {
 		if s == sub {
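
The TODO points at the generics-based replacement; a sketch of the swap, assuming the golang.org/x/exp/slices package (the same one hasErrorCode uses later in this diff):

```go
package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	super := []string{"contacts", "events", "mail"}

	// Drop-in equivalent of common.ContainsString(super, "events").
	fmt.Println(slices.Contains(super, "events"))   // true
	fmt.Println(slices.Contains(super, "calendar")) // false
}
```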

@ -4,6 +4,7 @@ import (
 	"regexp"
 	"time"
 
+	"github.com/alcionai/clues"
 	"github.com/pkg/errors"
 )
 
@ -85,7 +86,10 @@ var (
 	}
 )
 
-var ErrNoTimeString = errors.New("no substring contains a known time format")
+var (
+	ErrNoTimeString        = errors.New("no substring contains a known time format")
+	errParsingStringToTime = errors.New("parsing string as time.Time")
+)
 
 // Now produces the current time as a string in the standard format.
 func Now() string {
@ -132,7 +136,7 @@ func FormatLegacyTime(t time.Time) string {
 // the provided string. Always returns a UTC timezone value.
 func ParseTime(s string) (time.Time, error) {
 	if len(s) == 0 {
-		return time.Time{}, errors.New("cannot interpret an empty string as time.Time")
+		return time.Time{}, clues.Stack(errParsingStringToTime, errors.New("empty string"))
 	}
 
 	for _, form := range formats {
@ -142,14 +146,14 @@ func ParseTime(s string) (time.Time, error) {
 		}
 	}
 
-	return time.Time{}, errors.New("unable to parse time string: " + s)
+	return time.Time{}, clues.Stack(errParsingStringToTime, errors.New(s))
 }
 
 // ExtractTime greedily retrieves a timestamp substring from the provided string.
 // returns ErrNoTimeString if no match is found.
 func ExtractTime(s string) (time.Time, error) {
 	if len(s) == 0 {
-		return time.Time{}, errors.New("cannot extract time.Time from an empty string")
+		return time.Time{}, clues.Stack(errParsingStringToTime, errors.New("empty string"))
 	}
 
 	for _, re := range regexes {
@ -159,5 +163,5 @@ func ExtractTime(s string) (time.Time, error) {
 		}
 	}
 
-	return time.Time{}, errors.Wrap(ErrNoTimeString, s)
+	return time.Time{}, clues.Stack(ErrNoTimeString, errors.New(s))
 }
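
Because the new returns wrap the sentinels with clues.Stack rather than replacing them, errors.Is still matches through the wrap. A sketch, assuming these helpers live in the exported common package:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/alcionai/corso/src/internal/common"
)

func main() {
	// ExtractTime wraps ErrNoTimeString via clues.Stack, so callers can
	// still branch on the sentinel without string matching.
	_, err := common.ExtractTime("no timestamp in here")
	if errors.Is(err, common.ErrNoTimeString) {
		fmt.Println("no recognizable time substring:", err)
	}
}
```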

@ -16,6 +16,8 @@ import (
 	"github.com/alcionai/corso/src/internal/connector/support"
 	"github.com/alcionai/corso/src/internal/data"
 	D "github.com/alcionai/corso/src/internal/diagnostics"
+	"github.com/alcionai/corso/src/pkg/account"
+	"github.com/alcionai/corso/src/pkg/backup/details"
 	"github.com/alcionai/corso/src/pkg/control"
 	"github.com/alcionai/corso/src/pkg/logger"
 	"github.com/alcionai/corso/src/pkg/path"
@ -34,9 +36,9 @@ import (
 func (gc *GraphConnector) DataCollections(
 	ctx context.Context,
 	sels selectors.Selector,
-	metadata []data.Collection,
+	metadata []data.RestoreCollection,
 	ctrlOpts control.Options,
-) ([]data.Collection, map[string]struct{}, error) {
+) ([]data.BackupCollection, map[string]struct{}, error) {
 	ctx, end := D.Span(ctx, "gc:dataCollections", D.Index("service", sels.Service.String()))
 	defer end()
 
@ -51,7 +53,7 @@ func (gc *GraphConnector) DataCollections(
 	}
 
 	if !serviceEnabled {
-		return []data.Collection{}, nil, nil
+		return []data.BackupCollection{}, nil, nil
 	}
 
 	switch sels.Service {
@ -90,7 +92,7 @@ func (gc *GraphConnector) DataCollections(
 			ctx,
 			gc.itemClient,
 			sels,
-			gc.credentials.AzureTenantID,
+			gc.credentials,
 			gc.Service,
 			gc,
 			ctrlOpts)
@ -182,9 +184,9 @@ func (fm odFolderMatcher) Matches(dir string) bool {
 func (gc *GraphConnector) OneDriveDataCollections(
 	ctx context.Context,
 	selector selectors.Selector,
-	metadata []data.Collection,
+	metadata []data.RestoreCollection,
 	ctrlOpts control.Options,
-) ([]data.Collection, map[string]struct{}, error) {
+) ([]data.BackupCollection, map[string]struct{}, error) {
 	odb, err := selector.ToOneDriveBackup()
 	if err != nil {
 		return nil, nil, errors.Wrap(err, "oneDriveDataCollection: parsing selector")
@ -192,7 +194,7 @@ func (gc *GraphConnector) OneDriveDataCollections(
 
 	var (
 		user        = selector.DiscreteOwner
-		collections = []data.Collection{}
+		collections = []data.BackupCollection{}
 		allExcludes = map[string]struct{}{}
 		errs        error
 	)
@ -226,3 +228,46 @@ func (gc *GraphConnector) OneDriveDataCollections(
 
 	return collections, allExcludes, errs
 }
+
+// RestoreDataCollections restores data from the specified collections
+// into M365 using the GraphAPI.
+// SideEffect: gc.status is updated at the completion of operation
+func (gc *GraphConnector) RestoreDataCollections(
+	ctx context.Context,
+	backupVersion int,
+	acct account.Account,
+	selector selectors.Selector,
+	dest control.RestoreDestination,
+	opts control.Options,
+	dcs []data.RestoreCollection,
+) (*details.Details, error) {
+	ctx, end := D.Span(ctx, "connector:restore")
+	defer end()
+
+	var (
+		status *support.ConnectorOperationStatus
+		err    error
+		deets  = &details.Builder{}
+	)
+
+	creds, err := acct.M365Config()
+	if err != nil {
+		return nil, errors.Wrap(err, "malformed azure credentials")
+	}
+
+	switch selector.Service {
+	case selectors.ServiceExchange:
+		status, err = exchange.RestoreExchangeDataCollections(ctx, creds, gc.Service, dest, dcs, deets)
+	case selectors.ServiceOneDrive:
+		status, err = onedrive.RestoreCollections(ctx, backupVersion, gc.Service, dest, opts, dcs, deets)
+	case selectors.ServiceSharePoint:
+		status, err = sharepoint.RestoreCollections(ctx, backupVersion, creds, gc.Service, dest, dcs, deets)
+	default:
+		err = errors.Errorf("restore data from service %s not supported", selector.Service.String())
+	}
+
+	gc.incrementAwaitingMessages()
+	gc.UpdateStatus(status)
+
+	return deets.Details(), err
+}
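
RestoreDataCollections moves here from graph_connector.go (see the removal further down) and now takes []data.RestoreCollection. A sketch of a caller, with every argument supplied from outside; the backup version constant is illustrative:

```go
package example

import (
	"context"

	"github.com/alcionai/corso/src/internal/connector"
	"github.com/alcionai/corso/src/internal/data"
	"github.com/alcionai/corso/src/pkg/account"
	"github.com/alcionai/corso/src/pkg/backup/details"
	"github.com/alcionai/corso/src/pkg/control"
	"github.com/alcionai/corso/src/pkg/selectors"
)

// runRestore drives the relocated entry point. backupVersion 1 is a
// placeholder; real callers read the version from the backup record.
func runRestore(
	ctx context.Context,
	gc *connector.GraphConnector,
	acct account.Account,
	sel selectors.Selector,
	dest control.RestoreDestination,
	dcs []data.RestoreCollection,
) (*details.Details, error) {
	return gc.RestoreDataCollections(ctx, 1, acct, sel, dest, control.Options{}, dcs)
}
```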

@ -249,7 +249,7 @@ func (suite *ConnectorDataCollectionIntegrationSuite) TestSharePointDataCollecti
 				ctx,
 				graph.HTTPClient(graph.NoTimeout()),
 				test.getSelector(),
-				connector.credentials.AzureTenantID,
+				connector.credentials,
 				connector.Service,
 				connector,
 				control.Options{})

@ -22,7 +22,7 @@ func TestBetaUnitSuite(t *testing.T) {
 
 func (suite *BetaUnitSuite) TestBetaService_Adapter() {
 	t := suite.T()
-	a := tester.NewM365Account(t)
+	a := tester.NewMockM365Account(t)
 	m365, err := a.M365Config()
 	require.NoError(t, err)
 

@ -291,6 +291,8 @@ func (c Contacts) Serialize(
 		return nil, fmt.Errorf("expected Contactable, got %T", item)
 	}
 
+	ctx = clues.Add(ctx, "item_id", *contact.GetId())
+
 	var (
 		err    error
 		writer = kioser.NewJsonSerializationWriter()
@ -299,7 +301,7 @@ func (c Contacts) Serialize(
 	defer writer.Close()
 
 	if err = writer.WriteObjectValue("", contact); err != nil {
-		return nil, support.SetNonRecoverableError(errors.Wrap(err, itemID))
+		return nil, clues.Stack(err).WithClues(ctx)
 	}
 
 	bs, err := writer.GetSerializedContent()

@ -340,6 +340,8 @@ func (c Events) Serialize(
 		return nil, fmt.Errorf("expected Eventable, got %T", item)
 	}
 
+	ctx = clues.Add(ctx, "item_id", *event.GetId())
+
 	var (
 		err    error
 		writer = kioser.NewJsonSerializationWriter()
@ -348,7 +350,7 @@ func (c Events) Serialize(
 	defer writer.Close()
 
 	if err = writer.WriteObjectValue("", event); err != nil {
-		return nil, support.SetNonRecoverableError(errors.Wrap(err, itemID))
+		return nil, clues.Stack(err).WithClues(ctx)
 	}
 
 	bs, err := writer.GetSerializedContent()

@ -321,6 +321,8 @@ func (c Mail) Serialize(
 		return nil, fmt.Errorf("expected Messageable, got %T", item)
 	}
 
+	ctx = clues.Add(ctx, "item_id", *msg.GetId())
+
 	var (
 		err    error
 		writer = kioser.NewJsonSerializationWriter()
@ -329,7 +331,7 @@ func (c Mail) Serialize(
 	defer writer.Close()
 
 	if err = writer.WriteObjectValue("", msg); err != nil {
-		return nil, support.SetNonRecoverableError(errors.Wrap(err, itemID))
+		return nil, clues.Stack(err).WithClues(ctx)
 	}
 
 	bs, err := writer.GetSerializedContent()
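
All three Serialize implementations pick up the same two-step pattern: clues.Add pins the item id to the context up front, and WithClues attaches those values to any error raised later. A distilled sketch:

```go
package example

import (
	"context"

	"github.com/alcionai/clues"
)

// serializeItem mirrors the pattern above: annotate ctx once, then every
// error path inherits item_id via WithClues instead of hand-built messages.
func serializeItem(ctx context.Context, itemID string, write func() error) error {
	ctx = clues.Add(ctx, "item_id", itemID)

	if err := write(); err != nil {
		return clues.Stack(err).WithClues(ctx)
	}

	return nil
}
```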

@ -8,6 +8,7 @@ import (
 	"github.com/microsoftgraph/msgraph-sdk-go/models"
 	"github.com/pkg/errors"
 
+	"github.com/alcionai/corso/src/internal/common/ptr"
 	"github.com/alcionai/corso/src/internal/connector/support"
 	"github.com/alcionai/corso/src/internal/connector/uploadsession"
 	"github.com/alcionai/corso/src/pkg/logger"
@ -63,18 +64,16 @@ func uploadAttachment(
 
 	attachment, err = support.ToItemAttachment(attachment)
 	if err != nil {
-		name := ""
-		if prev.GetName() != nil {
-			name = *prev.GetName()
-		}
+		name := ptr.Val(prev.GetName())
+		msg := "item attachment restore not supported for this type. skipping upload."
 
-		// TODO: Update to support PII protection
-		logger.Ctx(ctx).Infow("item attachment uploads are not supported ",
+		// TODO: (rkeepers) Update to support PII protection
+		logger.Ctx(ctx).Infow(msg,
 			"err", err,
 			"attachment_name", name,
 			"attachment_type", attachmentType,
 			"internal_item_type", getItemAttachmentItemType(prev),
-			"attachment_id", *prev.GetId(),
+			"attachment_id", ptr.Val(prev.GetId()),
 		)
 
 		return nil
@ -128,9 +127,6 @@ func getItemAttachmentItemType(query models.Attachmentable) string {
 	}
 
 	item := attachment.GetItem()
-	if item.GetOdataType() == nil {
-		return empty
-	}
 
-	return *item.GetOdataType()
+	return ptr.Val(item.GetOdataType())
 }

@ -63,7 +63,7 @@ type DeltaPath struct {
 // and path lookup maps.
 func parseMetadataCollections(
 	ctx context.Context,
-	colls []data.Collection,
+	colls []data.RestoreCollection,
 ) (CatDeltaPaths, error) {
 	// cdp stores metadata
 	cdp := CatDeltaPaths{
@ -163,11 +163,11 @@
 func DataCollections(
 	ctx context.Context,
 	selector selectors.Selector,
-	metadata []data.Collection,
+	metadata []data.RestoreCollection,
 	acct account.M365Config,
 	su support.StatusUpdater,
 	ctrlOpts control.Options,
-) ([]data.Collection, map[string]struct{}, error) {
+) ([]data.BackupCollection, map[string]struct{}, error) {
 	eb, err := selector.ToExchangeBackup()
 	if err != nil {
 		return nil, nil, errors.Wrap(err, "exchangeDataCollection: parsing selector")
@ -175,7 +175,7 @@ func DataCollections(
 
 	var (
 		user        = selector.DiscreteOwner
-		collections = []data.Collection{}
+		collections = []data.BackupCollection{}
 		errs        error
 	)
 
@ -231,10 +231,10 @@ func createCollections(
 	dps DeltaPaths,
 	ctrlOpts control.Options,
 	su support.StatusUpdater,
-) ([]data.Collection, error) {
+) ([]data.BackupCollection, error) {
 	var (
 		errs           *multierror.Error
-		allCollections = make([]data.Collection, 0)
+		allCollections = make([]data.BackupCollection, 0)
 		ac             = api.Client{Credentials: creds}
 		category       = scope.Category().PathType()
 	)
@ -245,7 +245,7 @@
 	}
 
 	// Create collection of ExchangeDataCollection
-	collections := make(map[string]data.Collection)
+	collections := make(map[string]data.BackupCollection)
 
 	qp := graph.QueryParams{
 		Category: category,
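
The signature churn in this file is the Collection split propagating: producers now return data.BackupCollection, consumers accept data.RestoreCollection. The sketch below is a hypothetical reading of that split, for orientation only; the real definitions live in src/internal/data and may differ:

```go
package data

// Stream and CollectionState are stand-ins so the sketch compiles.
type Stream interface{ UUID() string }

type CollectionState int

const (
	NewState CollectionState = iota
	NotMovedState
	MovedState
	DeletedState
)

// Hypothetical shapes inferred from the call sites in this diff.
type Collection interface {
	Items() <-chan Stream
}

// BackupCollection is what backup producers hand to the storage layer;
// state tracking (new/moved/deleted) only matters on this side.
type BackupCollection interface {
	Collection
	State() CollectionState
}

// RestoreCollection is what restore and metadata-parsing consumers read;
// NotFoundRestoreCollection (used in the tests below) adapts a plain
// Collection into one.
type RestoreCollection interface {
	Collection
}
```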

@ -174,7 +174,9 @@ func (suite *DataCollectionsUnitSuite) TestParseMetadataCollections() {
 			)
 			require.NoError(t, err)
 
-			cdps, err := parseMetadataCollections(ctx, []data.Collection{coll})
+			cdps, err := parseMetadataCollections(ctx, []data.RestoreCollection{
+				data.NotFoundRestoreCollection{Collection: coll},
+			})
 			test.expectError(t, err)
 
 			emails := cdps[path.EmailCategory]
@ -335,7 +337,7 @@ func (suite *DataCollectionsIntegrationSuite) TestDelta() {
 			require.NoError(t, err)
 			assert.Less(t, 1, len(collections), "retrieved metadata and data collections")
 
-			var metadata data.Collection
+			var metadata data.BackupCollection
 
 			for _, coll := range collections {
 				if coll.FullPath().Service() == path.ExchangeMetadataService {
@ -345,7 +347,9 @@
 
 			require.NotNil(t, metadata, "collections contains a metadata collection")
 
-			cdps, err := parseMetadataCollections(ctx, []data.Collection{metadata})
+			cdps, err := parseMetadataCollections(ctx, []data.RestoreCollection{
+				data.NotFoundRestoreCollection{Collection: metadata},
+			})
 			require.NoError(t, err)
 
 			dps := cdps[test.scope.Category().PathType()]

@ -24,10 +24,10 @@ import (
 )
 
 var (
-	_ data.Collection = &Collection{}
+	_ data.BackupCollection = &Collection{}
 	_ data.Stream = &Stream{}
 	_ data.StreamInfo = &Stream{}
 	_ data.StreamModTime = &Stream{}
 )
 
 const (
@ -107,7 +107,7 @@ func NewCollection(
 		added:         make(map[string]struct{}, 0),
 		removed:       make(map[string]struct{}, 0),
 		prevPath:      prev,
-		state:         stateOf(prev, curr),
+		state:         data.StateOf(prev, curr),
 		statusUpdater: statusUpdater,
 		user:          user,
 		items:         items,
@ -116,22 +116,6 @@ func NewCollection(
 	return collection
 }
 
-func stateOf(prev, curr path.Path) data.CollectionState {
-	if curr == nil || len(curr.String()) == 0 {
-		return data.DeletedState
-	}
-
-	if prev == nil || len(prev.String()) == 0 {
-		return data.NewState
-	}
-
-	if curr.Folder() != prev.Folder() {
-		return data.MovedState
-	}
-
-	return data.NotMovedState
-}
-
 // Items utility function to asynchronously execute process to fill data channel with
 // M365 exchange objects and returns the data channel
 func (col *Collection) Items() <-chan data.Stream {
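
Continuing the hypothetical data package sketch from earlier: stateOf leaves the exchange package, and judging from the removed body plus the data.StateOf call above, the moved function presumably keeps the same decision table.

```go
package data

import "github.com/alcionai/corso/src/pkg/path"

// Presumed logic of data.StateOf, mirroring the removed stateOf:
// no current path -> deleted; no previous path -> new; folder changed
// -> moved; otherwise not moved.
func StateOf(prev, curr path.Path) CollectionState {
	if curr == nil || len(curr.String()) == 0 {
		return DeletedState
	}

	if prev == nil || len(prev.String()) == 0 {
		return NewState
	}

	if curr.Folder() != prev.Folder() {
		return MovedState
	}

	return NotMovedState
}
```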

@ -12,10 +12,8 @@ import (
 
 	"github.com/alcionai/corso/src/internal/common"
 	"github.com/alcionai/corso/src/internal/connector/graph"
-	"github.com/alcionai/corso/src/internal/data"
 	"github.com/alcionai/corso/src/internal/tester"
 	"github.com/alcionai/corso/src/pkg/backup/details"
-	"github.com/alcionai/corso/src/pkg/control"
 	"github.com/alcionai/corso/src/pkg/path"
 )
 
@ -118,59 +116,6 @@ func (suite *ExchangeDataCollectionSuite) TestExchangeDataCollection_NewExchange
 	suite.Equal(fullPath, edc.FullPath())
 }
 
-func (suite *ExchangeDataCollectionSuite) TestNewCollection_state() {
-	fooP, err := path.Builder{}.
-		Append("foo").
-		ToDataLayerExchangePathForCategory("t", "u", path.EmailCategory, false)
-	require.NoError(suite.T(), err)
-	barP, err := path.Builder{}.
-		Append("bar").
-		ToDataLayerExchangePathForCategory("t", "u", path.EmailCategory, false)
-	require.NoError(suite.T(), err)
-
-	table := []struct {
-		name   string
-		prev   path.Path
-		curr   path.Path
-		expect data.CollectionState
-	}{
-		{
-			name:   "new",
-			curr:   fooP,
-			expect: data.NewState,
-		},
-		{
-			name:   "not moved",
-			prev:   fooP,
-			curr:   fooP,
-			expect: data.NotMovedState,
-		},
-		{
-			name:   "moved",
-			prev:   fooP,
-			curr:   barP,
-			expect: data.MovedState,
-		},
-		{
-			name:   "deleted",
-			prev:   fooP,
-			expect: data.DeletedState,
-		},
-	}
-	for _, test := range table {
-		suite.T().Run(test.name, func(t *testing.T) {
-			c := NewCollection(
-				"u",
-				test.curr, test.prev,
-				0,
-				&mockItemer{}, nil,
-				control.Options{},
-				false)
-			assert.Equal(t, test.expect, c.State())
-		})
-	}
-}
-
 func (suite *ExchangeDataCollectionSuite) TestGetItemWithRetries() {
 	table := []struct {
 		name string

@ -130,12 +130,13 @@ type containerDeleter interface {
 
 // TestRestoreExchangeObject verifies path.Category usage for restored objects
 func (suite *ExchangeRestoreSuite) TestRestoreExchangeObject() {
-	a := tester.NewM365Account(suite.T())
+	t := suite.T()
+	a := tester.NewM365Account(t)
 	m365, err := a.M365Config()
-	require.NoError(suite.T(), err)
+	require.NoError(t, err)
 
 	service, err := createService(m365)
-	require.NoError(suite.T(), err)
+	require.NoError(t, err)
 
 	deleters := map[path.CategoryType]containerDeleter{
 		path.EmailCategory: suite.ac.Mail(),
@ -187,6 +188,63 @@ func (suite *ExchangeRestoreSuite) TestRestoreExchangeObject() {
 				return *folder.GetId()
 			},
 		},
+		{
+			name:     "Test Mail: Item Attachment_Mail",
+			bytes:    mockconnector.GetMockMessageWithItemAttachmentMail("Mail Item Attachment"),
+			category: path.EmailCategory,
+			destination: func(t *testing.T, ctx context.Context) string {
+				folderName := "TestRestoreMailItemAttachment: " + common.FormatSimpleDateTime(now)
+				folder, err := suite.ac.Mail().CreateMailFolder(ctx, userID, folderName)
+				require.NoError(t, err)
+
+				return *folder.GetId()
+			},
+		},
+		{
+			name: "Test Mail: Hydrated Item Attachment Mail",
+			bytes: mockconnector.GetMockMessageWithNestedItemAttachmentMail(t,
+				mockconnector.GetMockMessageBytes("Basic Item Attachment"),
+				"Mail Item Attachment",
+			),
+			category: path.EmailCategory,
+			destination: func(t *testing.T, ctx context.Context) string {
+				folderName := "TestRestoreMailBasicItemAttachment: " + common.FormatSimpleDateTime(now)
+				folder, err := suite.ac.Mail().CreateMailFolder(ctx, userID, folderName)
+				require.NoError(t, err)
+
+				return *folder.GetId()
+			},
+		},
+		{
+			name: "Test Mail: Hydrated Item Attachment Mail One Attach",
+			bytes: mockconnector.GetMockMessageWithNestedItemAttachmentMail(t,
+				mockconnector.GetMockMessageWithDirectAttachment("Item Attachment Included"),
+				"Mail Item Attachment",
+			),
+			category: path.EmailCategory,
+			destination: func(t *testing.T, ctx context.Context) string {
+				folderName := "ItemMailAttachmentwAttachment " + common.FormatSimpleDateTime(now)
+				folder, err := suite.ac.Mail().CreateMailFolder(ctx, userID, folderName)
+				require.NoError(t, err)
+
+				return *folder.GetId()
+			},
+		},
+		{
+			name: "Test Mail: Item Attachment_Contact",
+			bytes: mockconnector.GetMockMessageWithNestedItemAttachmentContact(t,
+				mockconnector.GetMockContactBytes("Victor"),
+				"Contact Item Attachment",
+			),
+			category: path.EmailCategory,
+			destination: func(t *testing.T, ctx context.Context) string {
+				folderName := "ItemMailAttachment_Contact " + common.FormatSimpleDateTime(now)
+				folder, err := suite.ac.Mail().CreateMailFolder(ctx, userID, folderName)
+				require.NoError(t, err)
+
+				return *folder.GetId()
+			},
+		},
 		{ // Restore will upload the Message without uploading the attachment
 			name:  "Test Mail: Item Attachment_NestedEvent",
 			bytes: mockconnector.GetMockMessageWithNestedItemAttachmentEvent("Nested Item Attachment"),
@ -291,6 +349,7 @@ func (suite *ExchangeRestoreSuite) TestRestoreExchangeObject() {
 			)
 			assert.NoError(t, err, support.ConnectorStackErrorTrace(err))
 			assert.NotNil(t, info, "item info was not populated")
+			assert.NotNil(t, deleters)
 			assert.NoError(t, deleters[test.category].DeleteContainer(ctx, userID, destination))
 		})
 	}

@ -25,14 +25,14 @@ type addedAndRemovedItemIDsGetter interface {
 
 // filterContainersAndFillCollections is a utility function
 // that places the M365 object ids belonging to specific directories
-// into a Collection. Messages outside of those directories are omitted.
+// into a BackupCollection. Messages outside of those directories are omitted.
 // @param collection is filled with during this function.
 // Supports all exchange applications: Contacts, Events, and Mail
 func filterContainersAndFillCollections(
 	ctx context.Context,
 	qp graph.QueryParams,
 	getter addedAndRemovedItemIDsGetter,
-	collections map[string]data.Collection,
+	collections map[string]data.BackupCollection,
 	statusUpdater support.StatusUpdater,
 	resolver graph.ContainerResolver,
 	scope selectors.ExchangeScope,
@ -280,7 +280,7 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections() {
 	ctx, flush := tester.NewContext()
 	defer flush()
 
-	collections := map[string]data.Collection{}
+	collections := map[string]data.BackupCollection{}
 
 	err := filterContainersAndFillCollections(
 		ctx,
@ -433,7 +433,7 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections_repea
 		resolver = newMockResolver(container1)
 	)
 
-	collections := map[string]data.Collection{}
+	collections := map[string]data.BackupCollection{}
 
 	err := filterContainersAndFillCollections(
 		ctx,
@ -785,7 +785,7 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections_incre
 	ctx, flush := tester.NewContext()
 	defer flush()
 
-	collections := map[string]data.Collection{}
+	collections := map[string]data.BackupCollection{}
 
 	err := filterContainersAndFillCollections(
 		ctx,

@ -283,6 +283,20 @@ func SendMailToBackStore(
 
 	for _, attachment := range attached {
 		if err := uploadAttachment(ctx, uploader, attachment); err != nil {
+			if attachment.GetOdataType() != nil &&
+				*attachment.GetOdataType() == "#microsoft.graph.itemAttachment" {
+				var name string
+				if attachment.GetName() != nil {
+					name = *attachment.GetName()
+				}
+
+				logger.Ctx(ctx).Infow(
+					"item attachment upload not successful. content not accepted by M365 server",
+					"Attachment Name", name)
+
+				continue
+			}
+
 			errs = support.WrapAndAppend(
 				fmt.Sprintf("uploading attachment for message %s: %s",
 					id, support.ConnectorStackErrorTrace(err)),
@ -297,7 +311,7 @@ func SendMailToBackStore(
 	return errs
 }
 
-// RestoreExchangeDataCollections restores M365 objects in data.Collection to MSFT
+// RestoreExchangeDataCollections restores M365 objects in data.RestoreCollection to MSFT
 // store through GraphAPI.
 // @param dest: container destination to M365
 func RestoreExchangeDataCollections(
@ -305,7 +319,7 @@ func RestoreExchangeDataCollections(
 	creds account.M365Config,
 	gs graph.Servicer,
 	dest control.RestoreDestination,
-	dcs []data.Collection,
+	dcs []data.RestoreCollection,
 	deets *details.Builder,
 ) (*support.ConnectorOperationStatus, error) {
 	var (
@ -364,7 +378,7 @@ func RestoreExchangeDataCollections(
 func restoreCollection(
 	ctx context.Context,
 	gs graph.Servicer,
-	dc data.Collection,
+	dc data.RestoreCollection,
 	folderID string,
 	policy control.CollisionPolicy,
 	deets *details.Builder,

@ -18,6 +18,12 @@ type BetaClientSuite struct {
 }
 
 func TestBetaClientSuite(t *testing.T) {
+	tester.RunOnAny(
+		t,
+		tester.CorsoCITests,
+		tester.CorsoGraphConnectorTests,
+	)
+
 	suite.Run(t, new(BetaClientSuite))
 }
 

@ -21,6 +21,7 @@ const (
 func (i HorizontalSectionLayoutType) String() string {
 	return []string{"none", "oneColumn", "twoColumns", "threeColumns", "oneThirdLeftColumn", "oneThirdRightColumn", "fullWidth", "unknownFutureValue"}[i]
 }
+
 func ParseHorizontalSectionLayoutType(v string) (interface{}, error) {
 	result := NONE_HORIZONTALSECTIONLAYOUTTYPE
 	switch v {
@ -45,6 +46,7 @@ func ParseHorizontalSectionLayoutType(v string) (interface{}, error) {
 	}
 	return &result, nil
 }
+
 func SerializeHorizontalSectionLayoutType(values []HorizontalSectionLayoutType) []string {
 	result := make([]string, len(values))
 	for i, v := range values {

@ -17,6 +17,7 @@ const (
 func (i PageLayoutType) String() string {
 	return []string{"microsoftReserved", "article", "home", "unknownFutureValue"}[i]
 }
+
 func ParsePageLayoutType(v string) (interface{}, error) {
 	result := MICROSOFTRESERVED_PAGELAYOUTTYPE
 	switch v {
@ -33,6 +34,7 @@ func ParsePageLayoutType(v string) (interface{}, error) {
 	}
 	return &result, nil
 }
+
 func SerializePageLayoutType(values []PageLayoutType) []string {
 	result := make([]string, len(values))
 	for i, v := range values {

@ -17,6 +17,7 @@ const (
 func (i PagePromotionType) String() string {
 	return []string{"microsoftReserved", "page", "newsPost", "unknownFutureValue"}[i]
 }
+
 func ParsePagePromotionType(v string) (interface{}, error) {
 	result := MICROSOFTRESERVED_PAGEPROMOTIONTYPE
 	switch v {
@ -33,6 +34,7 @@ func ParsePagePromotionType(v string) (interface{}, error) {
 	}
 	return &result, nil
 }
+
 func SerializePagePromotionType(values []PagePromotionType) []string {
 	result := make([]string, len(values))
 	for i, v := range values {

@ -18,6 +18,7 @@ const (
 func (i SectionEmphasisType) String() string {
 	return []string{"none", "neutral", "soft", "strong", "unknownFutureValue"}[i]
 }
+
 func ParseSectionEmphasisType(v string) (interface{}, error) {
 	result := NONE_SECTIONEMPHASISTYPE
 	switch v {
@ -36,6 +37,7 @@ func ParseSectionEmphasisType(v string) (interface{}, error) {
 	}
 	return &result, nil
 }
+
 func SerializeSectionEmphasisType(values []SectionEmphasisType) []string {
 	result := make([]string, len(values))
 	for i, v := range values {

@ -16,6 +16,7 @@ const (
 func (i SiteAccessType) String() string {
 	return []string{"block", "full", "limited"}[i]
 }
+
 func ParseSiteAccessType(v string) (interface{}, error) {
 	result := BLOCK_SITEACCESSTYPE
 	switch v {
@ -30,6 +31,7 @@ func ParseSiteAccessType(v string) (interface{}, error) {
 	}
 	return &result, nil
 }
+
 func SerializeSiteAccessType(values []SiteAccessType) []string {
 	result := make([]string, len(values))
 	for i, v := range values {

@ -25,6 +25,7 @@ const (
 func (i SiteSecurityLevel) String() string {
 	return []string{"userDefined", "low", "mediumLow", "medium", "mediumHigh", "high"}[i]
 }
+
 func ParseSiteSecurityLevel(v string) (interface{}, error) {
 	result := USERDEFINED_SITESECURITYLEVEL
 	switch v {
@ -45,6 +46,7 @@ func ParseSiteSecurityLevel(v string) (interface{}, error) {
 	}
 	return &result, nil
 }
+
 func SerializeSiteSecurityLevel(values []SiteSecurityLevel) []string {
 	result := make([]string, len(values))
 	for i, v := range values {

@ -18,6 +18,7 @@ const (
 func (i TitleAreaLayoutType) String() string {
 	return []string{"imageAndTitle", "plain", "colorBlock", "overlap", "unknownFutureValue"}[i]
 }
+
 func ParseTitleAreaLayoutType(v string) (interface{}, error) {
 	result := IMAGEANDTITLE_TITLEAREALAYOUTTYPE
 	switch v {
@ -36,6 +37,7 @@ func ParseTitleAreaLayoutType(v string) (interface{}, error) {
 	}
 	return &result, nil
 }
+
 func SerializeTitleAreaLayoutType(values []TitleAreaLayoutType) []string {
 	result := make([]string, len(values))
 	for i, v := range values {

@ -16,6 +16,7 @@ const (
 func (i TitleAreaTextAlignmentType) String() string {
 	return []string{"left", "center", "unknownFutureValue"}[i]
 }
+
 func ParseTitleAreaTextAlignmentType(v string) (interface{}, error) {
 	result := LEFT_TITLEAREATEXTALIGNMENTTYPE
 	switch v {
@ -30,6 +31,7 @@ func ParseTitleAreaTextAlignmentType(v string) (interface{}, error) {
 	}
 	return &result, nil
 }
+
 func SerializeTitleAreaTextAlignmentType(values []TitleAreaTextAlignmentType) []string {
 	result := make([]string, len(values))
 	for i, v := range values {
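
Each of these generated beta-model enums follows the same String/Parse/Serialize triple; the change itself is only gofmt-style blank lines. A round-trip sketch, with the import path assumed (use whichever package actually hosts the generated beta models in this repo):

```go
package main

import (
	"fmt"

	// Assumed path for the vendored beta models shown above.
	models "github.com/alcionai/corso/src/internal/connector/sharepoint/betamodels"
)

func main() {
	v, err := models.ParseSiteAccessType("limited")
	if err != nil {
		panic(err)
	}

	// Parse returns *SiteAccessType behind an interface{}; String()
	// round-trips back to the wire value.
	sat := v.(*models.SiteAccessType)
	fmt.Println(sat.String()) // limited
}
```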

@ -2,6 +2,7 @@ package graph
 
 import (
 	"context"
+	"fmt"
 	"net/url"
 	"os"
 
@ -176,3 +177,50 @@ func hasErrorCode(err error, codes ...string) bool {
 
 	return slices.Contains(codes, *oDataError.GetError().GetCode())
 }
+
+// ErrData is a helper function that extracts ODataError metadata from
+// the error. If the error is not an ODataError type, returns an empty
+// slice. The returned value is guaranteed to be an even-length pairing
+// of key, value tuples.
+func ErrData(e error) []any {
+	result := make([]any, 0)
+
+	if e == nil {
+		return result
+	}
+
+	odErr, ok := e.(odataerrors.ODataErrorable)
+	if !ok {
+		return result
+	}
+
+	// Get MainError
+	mainErr := odErr.GetError()
+
+	result = appendIf(result, "odataerror_code", mainErr.GetCode())
+	result = appendIf(result, "odataerror_message", mainErr.GetMessage())
+	result = appendIf(result, "odataerror_target", mainErr.GetTarget())
+
+	for i, d := range mainErr.GetDetails() {
+		pfx := fmt.Sprintf("odataerror_details_%d_", i)
+		result = appendIf(result, pfx+"code", d.GetCode())
+		result = appendIf(result, pfx+"message", d.GetMessage())
+		result = appendIf(result, pfx+"target", d.GetTarget())
+	}
+
+	inner := mainErr.GetInnererror()
+	if inner != nil {
+		result = appendIf(result, "odataerror_inner_cli_req_id", inner.GetClientRequestId())
+		result = appendIf(result, "odataerror_inner_req_id", inner.GetRequestId())
+	}
+
+	return result
+}
+
+func appendIf(a []any, k string, v *string) []any {
+	if v == nil {
+		return a
+	}
+
+	return append(a, k, *v)
+}
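
ErrData's even-length key/value pairing lines up with structured loggers that take variadic key/value parameters; a sketch of the intended use, with the logger call pattern assumed from the surrounding code:

```go
package example

import (
	"context"

	"github.com/alcionai/corso/src/internal/connector/graph"
	"github.com/alcionai/corso/src/pkg/logger"
)

// logGraphErr is a hypothetical call site: ErrData returns even-length
// key/value pairs, so it can be spread directly into a sugared logger.
func logGraphErr(ctx context.Context, err error) {
	if err == nil {
		return
	}

	logger.Ctx(ctx).With(graph.ErrData(err)...).Errorw("graph call failed", "err", err)
}
```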

@ -14,8 +14,8 @@ import (
 )
 
 var (
-	_ data.Collection = &MetadataCollection{}
+	_ data.BackupCollection = &MetadataCollection{}
 	_ data.Stream = &MetadataItem{}
 )
 
 // MetadataCollection in a simple collection that assumes all items to be
@ -67,7 +67,7 @@ func MakeMetadataCollection(
 	cat path.CategoryType,
 	metadata []MetadataCollectionEntry,
 	statusUpdater support.StatusUpdater,
-) (data.Collection, error) {
+) (data.BackupCollection, error) {
 	if len(metadata) == 0 {
 		return nil, nil
 	}

@ -8,7 +8,6 @@ import (
 	"time"
 
 	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
-	"github.com/alcionai/corso/src/internal/connector/support"
 	"github.com/microsoft/kiota-abstractions-go/serialization"
 	ka "github.com/microsoft/kiota-authentication-azure-go"
 	khttp "github.com/microsoft/kiota-http-go"
@ -16,6 +15,7 @@ import (
 	msgraphgocore "github.com/microsoftgraph/msgraph-sdk-go-core"
 	"github.com/pkg/errors"
 
+	"github.com/alcionai/corso/src/internal/connector/support"
 	"github.com/alcionai/corso/src/pkg/account"
 	"github.com/alcionai/corso/src/pkg/logger"
 	"github.com/alcionai/corso/src/pkg/path"
|
|||||||
@ -4,11 +4,13 @@ package connector
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"fmt"
|
||||||
"net/http"
|
"net/http"
|
||||||
"runtime/trace"
|
"runtime/trace"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
|
"github.com/alcionai/clues"
|
||||||
"github.com/microsoft/kiota-abstractions-go/serialization"
|
"github.com/microsoft/kiota-abstractions-go/serialization"
|
||||||
msgraphgocore "github.com/microsoftgraph/msgraph-sdk-go-core"
|
msgraphgocore "github.com/microsoftgraph/msgraph-sdk-go-core"
|
||||||
"github.com/microsoftgraph/msgraph-sdk-go/models"
|
"github.com/microsoftgraph/msgraph-sdk-go/models"
|
||||||
@ -17,18 +19,13 @@ import (
|
|||||||
|
|
||||||
"github.com/alcionai/corso/src/internal/connector/discovery"
|
"github.com/alcionai/corso/src/internal/connector/discovery"
|
||||||
"github.com/alcionai/corso/src/internal/connector/discovery/api"
|
"github.com/alcionai/corso/src/internal/connector/discovery/api"
|
||||||
"github.com/alcionai/corso/src/internal/connector/exchange"
|
|
||||||
"github.com/alcionai/corso/src/internal/connector/graph"
|
"github.com/alcionai/corso/src/internal/connector/graph"
|
||||||
"github.com/alcionai/corso/src/internal/connector/onedrive"
|
|
||||||
"github.com/alcionai/corso/src/internal/connector/sharepoint"
|
"github.com/alcionai/corso/src/internal/connector/sharepoint"
|
||||||
"github.com/alcionai/corso/src/internal/connector/support"
|
"github.com/alcionai/corso/src/internal/connector/support"
|
||||||
"github.com/alcionai/corso/src/internal/data"
|
|
||||||
D "github.com/alcionai/corso/src/internal/diagnostics"
|
D "github.com/alcionai/corso/src/internal/diagnostics"
|
||||||
"github.com/alcionai/corso/src/pkg/account"
|
"github.com/alcionai/corso/src/pkg/account"
|
||||||
"github.com/alcionai/corso/src/pkg/backup/details"
|
"github.com/alcionai/corso/src/pkg/fault"
|
||||||
"github.com/alcionai/corso/src/pkg/control"
|
|
||||||
"github.com/alcionai/corso/src/pkg/filters"
|
"github.com/alcionai/corso/src/pkg/filters"
|
||||||
"github.com/alcionai/corso/src/pkg/selectors"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// ---------------------------------------------------------------------------
|
// ---------------------------------------------------------------------------
|
||||||
@ -71,10 +68,11 @@ func NewGraphConnector(
|
|||||||
itemClient *http.Client,
|
itemClient *http.Client,
|
||||||
acct account.Account,
|
acct account.Account,
|
||||||
r resource,
|
r resource,
|
||||||
|
errs *fault.Errors,
|
||||||
) (*GraphConnector, error) {
|
) (*GraphConnector, error) {
|
||||||
m365, err := acct.M365Config()
|
m365, err := acct.M365Config()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "retrieving m365 account configuration")
|
return nil, clues.Wrap(err, "retrieving m365 account configuration").WithClues(ctx)
|
||||||
}
|
}
|
||||||
|
|
||||||
gc := GraphConnector{
|
gc := GraphConnector{
|
||||||
@ -87,12 +85,12 @@ func NewGraphConnector(
|
|||||||
|
|
||||||
gc.Service, err = gc.createService()
|
gc.Service, err = gc.createService()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "creating service connection")
|
return nil, clues.Wrap(err, "creating service connection").WithClues(ctx)
|
||||||
}
|
}
|
||||||
|
|
||||||
gc.Owners, err = api.NewClient(m365)
|
gc.Owners, err = api.NewClient(m365)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "creating api client")
|
return nil, clues.Wrap(err, "creating api client").WithClues(ctx)
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO(ashmrtn): When selectors only encapsulate a single resource owner that
|
// TODO(ashmrtn): When selectors only encapsulate a single resource owner that
|
||||||
@ -106,7 +104,7 @@ func NewGraphConnector(
|
|||||||
}
|
}
|
||||||
|
|
||||||
if r == AllResources || r == Sites {
|
if r == AllResources || r == Sites {
|
||||||
if err = gc.setTenantSites(ctx); err != nil {
|
if err = gc.setTenantSites(ctx, errs); err != nil {
|
||||||
return nil, errors.Wrap(err, "retrieveing tenant site list")
|
return nil, errors.Wrap(err, "retrieveing tenant site list")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -162,7 +160,7 @@ func (gc *GraphConnector) GetUsersIds() []string {
|
|||||||
// setTenantSites queries the M365 to identify the sites in the
|
// setTenantSites queries the M365 to identify the sites in the
|
||||||
// workspace. The sites field is updated during this method
|
// workspace. The sites field is updated during this method
|
||||||
// iff the returned error is nil.
|
// iff the returned error is nil.
|
||||||
func (gc *GraphConnector) setTenantSites(ctx context.Context) error {
|
func (gc *GraphConnector) setTenantSites(ctx context.Context, errs *fault.Errors) error {
|
||||||
gc.Sites = map[string]string{}
|
gc.Sites = map[string]string{}
|
||||||
|
|
||||||
ctx, end := D.Span(ctx, "gc:setTenantSites")
|
ctx, end := D.Span(ctx, "gc:setTenantSites")
|
||||||
@ -175,7 +173,7 @@ func (gc *GraphConnector) setTenantSites(ctx context.Context) error {
|
|||||||
sharepoint.GetAllSitesForTenant,
|
sharepoint.GetAllSitesForTenant,
|
||||||
models.CreateSiteCollectionResponseFromDiscriminatorValue,
|
models.CreateSiteCollectionResponseFromDiscriminatorValue,
|
||||||
identifySite,
|
identifySite,
|
||||||
)
|
errs)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -194,22 +192,23 @@ const personalSitePath = "sharepoint.com/personal/"
|
|||||||
func identifySite(item any) (string, string, error) {
|
func identifySite(item any) (string, string, error) {
|
||||||
m, ok := item.(models.Siteable)
|
m, ok := item.(models.Siteable)
|
||||||
if !ok {
|
if !ok {
|
||||||
return "", "", errors.New("iteration retrieved non-Site item")
|
return "", "", clues.New("iteration retrieved non-Site item").With("item_type", fmt.Sprintf("%T", item))
|
||||||
}
|
}
|
||||||
|
|
||||||
if m.GetName() == nil {
|
if m.GetName() == nil {
|
||||||
// the built-in site at "https://{tenant-domain}/search" never has a name.
|
// the built-in site at "https://{tenant-domain}/search" never has a name.
|
||||||
if m.GetWebUrl() != nil && strings.HasSuffix(*m.GetWebUrl(), "/search") {
|
if m.GetWebUrl() != nil && strings.HasSuffix(*m.GetWebUrl(), "/search") {
|
||||||
return "", "", errKnownSkippableCase
|
// TODO: pii siteID, on this and all following cases
|
||||||
|
return "", "", clues.Stack(errKnownSkippableCase).With("site_id", *m.GetId())
|
||||||
}
|
}
|
||||||
|
|
||||||
return "", "", errors.Errorf("no name for Site: %s", *m.GetId())
|
return "", "", clues.New("site has no name").With("site_id", *m.GetId())
|
||||||
}
|
}
|
||||||
|
|
||||||
// personal (ie: oneDrive) sites have to be filtered out server-side.
|
// personal (ie: oneDrive) sites have to be filtered out server-side.
|
||||||
url := m.GetWebUrl()
|
url := m.GetWebUrl()
|
||||||
if url != nil && strings.Contains(*url, personalSitePath) {
|
if url != nil && strings.Contains(*url, personalSitePath) {
|
||||||
return "", "", errKnownSkippableCase
|
return "", "", clues.Stack(errKnownSkippableCase).With("site_id", *m.GetId())
|
||||||
}
|
}
|
||||||
|
|
||||||
return *m.GetWebUrl(), *m.GetId(), nil
|
return *m.GetWebUrl(), *m.GetId(), nil
|
||||||
@@ -230,9 +229,13 @@ func (gc *GraphConnector) GetSiteIDs() []string {
 // each element in the url must fully match. Ex: the webURL value "foo" will match "www.ex.com/foo",
 // but not match "www.ex.com/foobar".
 // The returned IDs are reduced to a set of unique values.
-func (gc *GraphConnector) UnionSiteIDsAndWebURLs(ctx context.Context, ids, urls []string) ([]string, error) {
+func (gc *GraphConnector) UnionSiteIDsAndWebURLs(
+	ctx context.Context,
+	ids, urls []string,
+	errs *fault.Errors,
+) ([]string, error) {
 	if len(gc.Sites) == 0 {
-		if err := gc.setTenantSites(ctx); err != nil {
+		if err := gc.setTenantSites(ctx, errs); err != nil {
 			return nil, err
 		}
 	}
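
The doc comment above pins down the matching rule: a webURL filter matches only whole path elements, so "foo" matches "www.ex.com/foo" but not "www.ex.com/foobar". A sketch of that rule as a standalone helper — this is an illustrative re-implementation of the described semantics, not the function Corso actually uses:

```go
package main

import (
	"fmt"
	"strings"
)

// matchesByElement reports whether every element of the filter appears as a
// complete trailing path element of the webURL.
func matchesByElement(webURL, filter string) bool {
	urlElems := strings.Split(strings.Trim(webURL, "/"), "/")
	filterElems := strings.Split(strings.Trim(filter, "/"), "/")

	if len(filterElems) > len(urlElems) {
		return false
	}

	// Compare trailing elements; each must match exactly, not as a prefix.
	offset := len(urlElems) - len(filterElems)
	for i, fe := range filterElems {
		if urlElems[offset+i] != fe {
			return false
		}
	}

	return true
}

func main() {
	fmt.Println(matchesByElement("www.ex.com/foo", "foo"))    // true
	fmt.Println(matchesByElement("www.ex.com/foobar", "foo")) // false
}
```
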
@@ -261,49 +264,6 @@ func (gc *GraphConnector) UnionSiteIDsAndWebURLs(ctx context.Context, ids, urls
 	return idsl, nil
 }

-// RestoreDataCollections restores data from the specified collections
-// into M365 using the GraphAPI.
-// SideEffect: gc.status is updated at the completion of operation
-func (gc *GraphConnector) RestoreDataCollections(
-	ctx context.Context,
-	backupVersion int,
-	acct account.Account,
-	selector selectors.Selector,
-	dest control.RestoreDestination,
-	opts control.Options,
-	dcs []data.Collection,
-) (*details.Details, error) {
-	ctx, end := D.Span(ctx, "connector:restore")
-	defer end()
-
-	var (
-		status *support.ConnectorOperationStatus
-		err    error
-		deets  = &details.Builder{}
-	)
-
-	creds, err := acct.M365Config()
-	if err != nil {
-		return nil, errors.Wrap(err, "malformed azure credentials")
-	}
-
-	switch selector.Service {
-	case selectors.ServiceExchange:
-		status, err = exchange.RestoreExchangeDataCollections(ctx, creds, gc.Service, dest, dcs, deets)
-	case selectors.ServiceOneDrive:
-		status, err = onedrive.RestoreCollections(ctx, backupVersion, gc.Service, dest, opts, dcs, deets)
-	case selectors.ServiceSharePoint:
-		status, err = sharepoint.RestoreCollections(ctx, backupVersion, gc.Service, dest, dcs, deets)
-	default:
-		err = errors.Errorf("restore data from service %s not supported", selector.Service.String())
-	}
-
-	gc.incrementAwaitingMessages()
-	gc.UpdateStatus(status)
-
-	return deets.Details(), err
-}
-
 // AwaitStatus waits for all gc tasks to complete and then returns status
 func (gc *GraphConnector) AwaitStatus() *support.ConnectorOperationStatus {
 	defer func() {

@@ -354,35 +314,35 @@ func getResources(
 	query func(context.Context, graph.Servicer) (serialization.Parsable, error),
 	parser func(parseNode serialization.ParseNode) (serialization.Parsable, error),
 	identify func(any) (string, string, error),
+	errs *fault.Errors,
 ) (map[string]string, error) {
 	resources := map[string]string{}

 	response, err := query(ctx, gs)
 	if err != nil {
-		return nil, errors.Wrapf(
-			err,
-			"retrieving resources for tenant %s: %s",
-			tenantID,
-			support.ConnectorStackErrorTrace(err),
-		)
+		return nil, clues.Wrap(err, "retrieving tenant's resources").
+			WithClues(ctx).
+			WithAll(graph.ErrData(err)...)
 	}

 	iter, err := msgraphgocore.NewPageIterator(response, gs.Adapter(), parser)
 	if err != nil {
-		return nil, errors.Wrap(err, support.ConnectorStackErrorTrace(err))
+		return nil, clues.Stack(err).WithClues(ctx).WithAll(graph.ErrData(err)...)
 	}

-	var iterErrs error
-
 	callbackFunc := func(item any) bool {
+		if errs.Failed() {
+			return false
+		}
+
 		k, v, err := identify(item)
 		if err != nil {
-			if errors.Is(err, errKnownSkippableCase) {
-				return true
+			if !errors.Is(err, errKnownSkippableCase) {
+				errs.Add(clues.Stack(err).
+					WithClues(ctx).
+					With("query_url", gs.Adapter().GetBaseUrl()))
 			}

-			iterErrs = support.WrapAndAppend(gs.Adapter().GetBaseUrl(), err, iterErrs)
-
 			return true
 		}

@@ -392,20 +352,8 @@ func getResources(
 	}

 	if err := iter.Iterate(ctx, callbackFunc); err != nil {
-		return nil, errors.Wrap(err, support.ConnectorStackErrorTrace(err))
+		return nil, clues.Stack(err).WithClues(ctx).WithAll(graph.ErrData(err)...)
 	}

-	return resources, iterErrs
-}
-
-// IsRecoverableError returns true iff error is a RecoverableGCEerror
-func IsRecoverableError(e error) bool {
-	var recoverable support.RecoverableGCError
-	return errors.As(e, &recoverable)
-}
-
-// IsNonRecoverableError returns true iff error is a NonRecoverableGCEerror
-func IsNonRecoverableError(e error) bool {
-	var nonRecoverable support.NonRecoverableGCError
-	return errors.As(e, &nonRecoverable)
-}
+	return resources, errs.Err()
 }
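
The getResources rewrite threads a *fault.Errors collector through the page iteration: the callback bails out once a prior failure was recorded (errs.Failed()), records non-skippable identification errors with errs.Add, and the final result comes from errs.Err(). A toy stand-in for that collector, assuming fail-fast semantics like those implied here — the real fault package has a larger API surface:

```go
package main

import (
	"errors"
	"fmt"
)

// bus is a toy stand-in for Corso's fault.Errors: it records errors and,
// in fail-fast mode, treats the first recorded error as fatal.
type bus struct {
	failFast bool
	errs     []error
}

func (b *bus) Add(err error) { b.errs = append(b.errs, err) }
func (b *bus) Failed() bool  { return b.failFast && len(b.errs) > 0 }

func (b *bus) Err() error {
	if len(b.errs) == 0 {
		return nil
	}

	return b.errs[0]
}

func main() {
	errs := &bus{failFast: true}
	items := []string{"ok", "bad", "never-reached"}

	// Mirrors the iterator callback shape: returning false stops paging.
	callback := func(item string) bool {
		if errs.Failed() {
			return false
		}
		if item == "bad" {
			errs.Add(errors.New("could not identify " + item))
		}
		return true
	}

	for _, it := range items {
		if !callback(it) {
			break
		}
	}

	fmt.Println(errs.Err()) // could not identify bad
}
```
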
@@ -14,6 +14,7 @@ import (
 	"github.com/alcionai/corso/src/internal/tester"
 	"github.com/alcionai/corso/src/pkg/account"
 	"github.com/alcionai/corso/src/pkg/credentials"
+	"github.com/alcionai/corso/src/pkg/fault"
 	"github.com/alcionai/corso/src/pkg/selectors"
 )

@@ -66,9 +67,9 @@ func (suite *DisconnectedGraphConnectorSuite) TestBadConnection() {

 	for _, test := range table {
 		suite.T().Run(test.name, func(t *testing.T) {
-			gc, err := NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), test.acct(t), Users)
+			gc, err := NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), test.acct(t), Users, fault.New(true))
 			assert.Nil(t, gc, test.name+" failed")
-			assert.NotNil(t, err, test.name+"failed")
+			assert.NotNil(t, err, test.name+" failed")
 		})
 	}
 }
@@ -116,58 +117,6 @@ func (suite *DisconnectedGraphConnectorSuite) TestGraphConnector_Status() {
 	suite.Equal(2, gc.Status().FolderCount)
 }

-func (suite *DisconnectedGraphConnectorSuite) TestGraphConnector_ErrorChecking() {
-	tests := []struct {
-		name                 string
-		err                  error
-		returnRecoverable    assert.BoolAssertionFunc
-		returnNonRecoverable assert.BoolAssertionFunc
-	}{
-		{
-			name:                 "Neither Option",
-			err:                  errors.New("regular error"),
-			returnRecoverable:    assert.False,
-			returnNonRecoverable: assert.False,
-		},
-		{
-			name:                 "Validate Recoverable",
-			err:                  support.SetRecoverableError(errors.New("Recoverable")),
-			returnRecoverable:    assert.True,
-			returnNonRecoverable: assert.False,
-		},
-		{
-			name:                 "Validate NonRecoverable",
-			err:                  support.SetNonRecoverableError(errors.New("Non-recoverable")),
-			returnRecoverable:    assert.False,
-			returnNonRecoverable: assert.True,
-		},
-		{
-			name: "Wrapped Recoverable",
-			err: support.WrapAndAppend(
-				"Wrapped Recoverable",
-				support.SetRecoverableError(errors.New("Recoverable")),
-				nil),
-			returnRecoverable:    assert.True,
-			returnNonRecoverable: assert.False,
-		},
-		{
-			name:                 "On Nil",
-			err:                  nil,
-			returnRecoverable:    assert.False,
-			returnNonRecoverable: assert.False,
-		},
-	}
-	for _, test := range tests {
-		suite.T().Run(test.name, func(t *testing.T) {
-			recoverable := IsRecoverableError(test.err)
-			nonRecoverable := IsNonRecoverableError(test.err)
-			test.returnRecoverable(suite.T(), recoverable, "Test: %s Recoverable-received %v", test.name, recoverable)
-			test.returnNonRecoverable(suite.T(), nonRecoverable, "Test: %s non-recoverable: %v", test.name, nonRecoverable)
-			t.Logf("Is nil: %v", test.err == nil)
-		})
-	}
-}
-
 func (suite *DisconnectedGraphConnectorSuite) TestVerifyBackupInputs() {
 	users := []string{
 		"elliotReid@someHospital.org",
@@ -1,6 +1,7 @@
 package connector

 import (
+	"bytes"
 	"context"
 	"encoding/json"
 	"io"
@@ -14,6 +15,7 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"golang.org/x/exp/maps"
+	"golang.org/x/exp/slices"

 	"github.com/alcionai/corso/src/internal/connector/mockconnector"
 	"github.com/alcionai/corso/src/internal/connector/onedrive"
@@ -21,6 +23,7 @@ import (
 	"github.com/alcionai/corso/src/internal/data"
 	"github.com/alcionai/corso/src/internal/tester"
 	"github.com/alcionai/corso/src/pkg/control"
+	"github.com/alcionai/corso/src/pkg/fault"
 	"github.com/alcionai/corso/src/pkg/path"
 	"github.com/alcionai/corso/src/pkg/selectors"
 )
@@ -163,6 +166,10 @@ type colInfo struct {
 	pathElements []string
 	category     path.CategoryType
 	items        []itemInfo
+	// auxItems are items that can be retrieved with Fetch but won't be returned
+	// by Items(). These files do not directly participate in comparisons at the
+	// end of a test.
+	auxItems []itemInfo
 }

 type restoreBackupInfo struct {
@@ -652,6 +659,35 @@ func compareExchangeEvent(
 	checkEvent(t, expectedEvent, itemEvent)
 }

+func permissionEqual(expected onedrive.UserPermission, got onedrive.UserPermission) bool {
+	if !strings.EqualFold(expected.Email, got.Email) {
+		return false
+	}
+
+	if (expected.Expiration == nil && got.Expiration != nil) ||
+		(expected.Expiration != nil && got.Expiration == nil) {
+		return false
+	}
+
+	if expected.Expiration != nil &&
+		got.Expiration != nil &&
+		!expected.Expiration.Equal(*got.Expiration) {
+		return false
+	}
+
+	if len(expected.Roles) != len(got.Roles) {
+		return false
+	}
+
+	for _, r := range got.Roles {
+		if !slices.Contains(expected.Roles, r) {
+			return false
+		}
+	}
+
+	return true
+}
+
 func compareOneDriveItem(
 	t *testing.T,
 	expected map[string][]byte,
@@ -695,13 +731,7 @@ func compareOneDriveItem(
 	}

 	assert.Equal(t, len(expectedMeta.Permissions), len(itemMeta.Permissions), "number of permissions after restore")
-
-	// FIXME(meain): The permissions before and after might not be in the same order.
-	for i, p := range expectedMeta.Permissions {
-		assert.Equal(t, p.Email, itemMeta.Permissions[i].Email)
-		assert.Equal(t, p.Roles, itemMeta.Permissions[i].Roles)
-		assert.Equal(t, p.Expiration, itemMeta.Permissions[i].Expiration)
-	}
+	testElementsMatch(t, expectedMeta.Permissions, itemMeta.Permissions, permissionEqual)
 }

 func compareItem(
@@ -740,7 +770,7 @@ func compareItem(
 func checkHasCollections(
 	t *testing.T,
 	expected map[string]map[string][]byte,
-	got []data.Collection,
+	got []data.BackupCollection,
 ) {
 	t.Helper()

@@ -762,10 +792,10 @@ func checkCollections(
 	t *testing.T,
 	expectedItems int,
 	expected map[string]map[string][]byte,
-	got []data.Collection,
+	got []data.BackupCollection,
 	restorePermissions bool,
 ) int {
-	collectionsWithItems := []data.Collection{}
+	collectionsWithItems := []data.BackupCollection{}

 	skipped := 0
 	gotItems := 0
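
compareOneDriveItem now delegates to testElementsMatch with permissionEqual as the comparator, replacing the index-by-index loop that the FIXME flagged as order-sensitive. testElementsMatch itself isn't shown in this diff; a plausible generic helper with that shape, offered only as a sketch (assumes Go 1.18+ generics and the testing package):

```go
// testElementsMatch asserts that two slices contain the same elements under a
// custom equality function, ignoring order. Sketch only; the real helper in
// the test suite may differ.
func testElementsMatch[T any](t *testing.T, expected, got []T, eq func(T, T) bool) {
	t.Helper()

	if len(expected) != len(got) {
		t.Fatalf("length mismatch: expected %d, got %d", len(expected), len(got))
	}

	used := make([]bool, len(got))

	for _, e := range expected {
		found := false

		for i, g := range got {
			if !used[i] && eq(e, g) {
				// Pair each expected element with at most one got element.
				used[i] = true
				found = true

				break
			}
		}

		if !found {
			t.Errorf("no unused match found for element %+v", e)
		}
	}
}
```
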
@@ -944,14 +974,33 @@ func backupOutputPathFromRestore(
 	)
 }

+// TODO(ashmrtn): Make this an actual mock class that can be used in other
+// packages.
+type mockRestoreCollection struct {
+	data.Collection
+	auxItems map[string]data.Stream
+}
+
+func (rc mockRestoreCollection) Fetch(
+	ctx context.Context,
+	name string,
+) (data.Stream, error) {
+	res := rc.auxItems[name]
+	if res == nil {
+		return nil, data.ErrNotFound
+	}
+
+	return res, nil
+}
+
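
mockRestoreCollection wraps a backup collection and serves auxiliary items by name through Fetch, returning data.ErrNotFound for misses. A caller that needs a sidecar metadata stream can probe it like this — a sketch against the names visible in the diff (onedrive.MetaFileSuffix and data.ErrNotFound appear above; the helper itself is hypothetical):

```go
// fetchMeta looks up a sidecar metadata stream for an item, treating a
// missing entry as "no metadata" rather than a hard failure.
func fetchMeta(ctx context.Context, rc mockRestoreCollection, itemName string) (data.Stream, bool, error) {
	s, err := rc.Fetch(ctx, itemName+onedrive.MetaFileSuffix)
	if errors.Is(err, data.ErrNotFound) {
		return nil, false, nil
	}

	if err != nil {
		return nil, false, err
	}

	return s, true, nil
}
```
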
 func collectionsForInfo(
 	t *testing.T,
 	service path.ServiceType,
 	tenant, user string,
 	dest control.RestoreDestination,
 	allInfo []colInfo,
-) (int, int, []data.Collection, map[string]map[string][]byte) {
-	collections := make([]data.Collection, 0, len(allInfo))
+) (int, int, []data.RestoreCollection, map[string]map[string][]byte) {
+	collections := make([]data.RestoreCollection, 0, len(allInfo))
 	expectedData := make(map[string]map[string][]byte, len(allInfo))
 	totalItems := 0
 	kopiaEntries := 0
@@ -966,7 +1015,7 @@ func collectionsForInfo(
 			info.pathElements,
 			false,
 		)
-		c := mockconnector.NewMockExchangeCollection(pth, len(info.items))
+		mc := mockconnector.NewMockExchangeCollection(pth, len(info.items))
 		baseDestPath := backupOutputPathFromRestore(t, dest, pth)

 		baseExpected := expectedData[baseDestPath.String()]
@@ -976,8 +1025,8 @@ func collectionsForInfo(
 		}

 		for i := 0; i < len(info.items); i++ {
-			c.Names[i] = info.items[i].name
-			c.Data[i] = info.items[i].data
+			mc.Names[i] = info.items[i].name
+			mc.Data[i] = info.items[i].data

 			baseExpected[info.items[i].lookupKey] = info.items[i].data

@@ -989,6 +1038,15 @@ func collectionsForInfo(
 			}
 		}

+		c := mockRestoreCollection{Collection: mc, auxItems: map[string]data.Stream{}}
+
+		for _, aux := range info.auxItems {
+			c.auxItems[aux.name] = &mockconnector.MockExchangeData{
+				ID:     aux.name,
+				Reader: io.NopCloser(bytes.NewReader(aux.data)),
+			}
+		}
+
 		collections = append(collections, c)
 		kopiaEntries += len(info.items)
 	}
@@ -1002,8 +1060,8 @@ func collectionsForInfoVersion0(
 	tenant, user string,
 	dest control.RestoreDestination,
 	allInfo []colInfo,
-) (int, int, []data.Collection, map[string]map[string][]byte) {
-	collections := make([]data.Collection, 0, len(allInfo))
+) (int, int, []data.RestoreCollection, map[string]map[string][]byte) {
+	collections := make([]data.RestoreCollection, 0, len(allInfo))
 	expectedData := make(map[string]map[string][]byte, len(allInfo))
 	totalItems := 0
 	kopiaEntries := 0
@@ -1034,7 +1092,9 @@ func collectionsForInfoVersion0(
 			baseExpected[info.items[i].lookupKey] = info.items[i].data
 		}

-		collections = append(collections, c)
+		collections = append(collections, data.NotFoundRestoreCollection{
+			Collection: c,
+		})
 		totalItems += len(info.items)
 		kopiaEntries += len(info.items)
 	}
@@ -1079,7 +1139,8 @@ func getSelectorWith(

 func loadConnector(ctx context.Context, t *testing.T, itemClient *http.Client, r resource) *GraphConnector {
 	a := tester.NewM365Account(t)
-	connector, err := NewGraphConnector(ctx, itemClient, a, r)
+
+	connector, err := NewGraphConnector(ctx, itemClient, a, r, fault.New(true))
 	require.NoError(t, err)

 	return connector
@@ -24,6 +24,7 @@ import (
 	"github.com/alcionai/corso/src/pkg/account"
 	"github.com/alcionai/corso/src/pkg/backup"
 	"github.com/alcionai/corso/src/pkg/control"
+	"github.com/alcionai/corso/src/pkg/fault"
 	"github.com/alcionai/corso/src/pkg/path"
 	"github.com/alcionai/corso/src/pkg/selectors"
 )
@@ -125,9 +126,15 @@ func (suite *GraphConnectorUnitSuite) TestUnionSiteIDsAndWebURLs() {
 	}
 	for _, test := range table {
 		suite.T().Run(test.name, func(t *testing.T) {
-			//nolint
-			result, err := gc.UnionSiteIDsAndWebURLs(context.Background(), test.ids, test.urls)
+			ctx, flush := tester.NewContext()
+			defer flush()
+
+			errs := fault.New(true)
+
+			result, err := gc.UnionSiteIDsAndWebURLs(ctx, test.ids, test.urls, errs)
 			assert.NoError(t, err)
+			assert.NoError(t, errs.Err())
+			assert.Empty(t, errs.Errs())
 			assert.ElementsMatch(t, test.expect, result)
 		})
 	}
@@ -204,18 +211,24 @@ func (suite *GraphConnectorIntegrationSuite) TestSetTenantSites() {
 	ctx, flush := tester.NewContext()
 	defer flush()

+	t := suite.T()
+
 	service, err := newConnector.createService()
-	require.NoError(suite.T(), err)
+	require.NoError(t, err)

 	newConnector.Service = service
+	assert.Equal(t, 0, len(newConnector.Sites))

-	suite.Equal(0, len(newConnector.Sites))
-	err = newConnector.setTenantSites(ctx)
-	suite.NoError(err)
-	suite.Less(0, len(newConnector.Sites))
+	errs := fault.New(true)
+
+	err = newConnector.setTenantSites(ctx, errs)
+	assert.NoError(t, err)
+	assert.NoError(t, errs.Err())
+	assert.Empty(t, errs.Errs())
+	assert.Less(t, 0, len(newConnector.Sites))

 	for _, site := range newConnector.Sites {
-		suite.NotContains("sharepoint.com/personal/", site)
+		assert.NotContains(t, "sharepoint.com/personal/", site)
 	}
 }
@@ -257,7 +270,7 @@ func (suite *GraphConnectorIntegrationSuite) TestEmptyCollections() {
 	dest := tester.DefaultTestRestoreDestination()
 	table := []struct {
 		name string
-		col  []data.Collection
+		col  []data.RestoreCollection
 		sel  selectors.Selector
 	}{
 		{
@@ -269,7 +282,7 @@ func (suite *GraphConnectorIntegrationSuite) TestEmptyCollections() {
 		},
 		{
 			name: "ExchangeEmpty",
-			col:  []data.Collection{},
+			col:  []data.RestoreCollection{},
 			sel: selectors.Selector{
 				Service: selectors.ServiceExchange,
 			},
@@ -283,7 +296,7 @@ func (suite *GraphConnectorIntegrationSuite) TestEmptyCollections() {
 		},
 		{
 			name: "OneDriveEmpty",
-			col:  []data.Collection{},
+			col:  []data.RestoreCollection{},
 			sel: selectors.Selector{
 				Service: selectors.ServiceOneDrive,
 			},
@@ -297,7 +310,7 @@ func (suite *GraphConnectorIntegrationSuite) TestEmptyCollections() {
 		},
 		{
 			name: "SharePointEmpty",
-			col:  []data.Collection{},
+			col:  []data.RestoreCollection{},
 			sel: selectors.Selector{
 				Service: selectors.ServiceSharePoint,
 			},
@@ -370,7 +383,7 @@ func runRestoreBackupTest(
 	opts control.Options,
 ) {
 	var (
-		collections     []data.Collection
+		collections     []data.RestoreCollection
 		expectedData    = map[string]map[string][]byte{}
 		totalItems      = 0
 		totalKopiaItems = 0
@@ -495,7 +508,7 @@ func runRestoreBackupTestVersion0(
 	opts control.Options,
 ) {
 	var (
-		collections     []data.Collection
+		collections     []data.RestoreCollection
 		expectedData    = map[string]map[string][]byte{}
 		totalItems      = 0
 		totalKopiaItems = 0
@@ -885,6 +898,13 @@ func (suite *GraphConnectorIntegrationSuite) TestRestoreAndBackup() {
 					lookupKey: "b" + onedrive.DirMetaFileSuffix,
 				},
 			},
+			auxItems: []itemInfo{
+				{
+					name:      "test-file.txt" + onedrive.MetaFileSuffix,
+					data:      []byte("{}"),
+					lookupKey: "test-file.txt" + onedrive.MetaFileSuffix,
+				},
+			},
 		},
 		{
 			pathElements: []string{
@@ -911,6 +931,13 @@ func (suite *GraphConnectorIntegrationSuite) TestRestoreAndBackup() {
 					lookupKey: "b" + onedrive.DirMetaFileSuffix,
 				},
 			},
+			auxItems: []itemInfo{
+				{
+					name:      "test-file.txt" + onedrive.MetaFileSuffix,
+					data:      []byte("{}"),
+					lookupKey: "test-file.txt" + onedrive.MetaFileSuffix,
+				},
+			},
 		},
 		{
 			pathElements: []string{
@@ -938,6 +965,13 @@ func (suite *GraphConnectorIntegrationSuite) TestRestoreAndBackup() {
 					lookupKey: "folder-a" + onedrive.DirMetaFileSuffix,
 				},
 			},
+			auxItems: []itemInfo{
+				{
+					name:      "test-file.txt" + onedrive.MetaFileSuffix,
+					data:      []byte("{}"),
+					lookupKey: "test-file.txt" + onedrive.MetaFileSuffix,
+				},
+			},
 		},
 		{
 			pathElements: []string{
@@ -961,6 +995,13 @@ func (suite *GraphConnectorIntegrationSuite) TestRestoreAndBackup() {
 					lookupKey: "test-file.txt" + onedrive.MetaFileSuffix,
 				},
 			},
+			auxItems: []itemInfo{
+				{
+					name:      "test-file.txt" + onedrive.MetaFileSuffix,
+					data:      []byte("{}"),
+					lookupKey: "test-file.txt" + onedrive.MetaFileSuffix,
+				},
+			},
 		},
 		{
 			pathElements: []string{
@@ -982,6 +1023,13 @@ func (suite *GraphConnectorIntegrationSuite) TestRestoreAndBackup() {
 					lookupKey: "test-file.txt" + onedrive.MetaFileSuffix,
 				},
 			},
+			auxItems: []itemInfo{
+				{
+					name:      "test-file.txt" + onedrive.MetaFileSuffix,
+					data:      []byte("{}"),
+					lookupKey: "test-file.txt" + onedrive.MetaFileSuffix,
+				},
+			},
 		},
 	},
 },
@@ -1014,6 +1062,13 @@ func (suite *GraphConnectorIntegrationSuite) TestRestoreAndBackup() {
 					lookupKey: "b" + onedrive.DirMetaFileSuffix,
 				},
 			},
+			auxItems: []itemInfo{
+				{
+					name:      "test-file.txt" + onedrive.MetaFileSuffix,
+					data:      getTestMetaJSON(suite.T(), suite.secondaryUser, []string{"write"}),
+					lookupKey: "test-file.txt" + onedrive.MetaFileSuffix,
+				},
+			},
 		},
 		{
 			pathElements: []string{
@@ -1035,6 +1090,13 @@ func (suite *GraphConnectorIntegrationSuite) TestRestoreAndBackup() {
 					lookupKey: "test-file.txt" + onedrive.MetaFileSuffix,
 				},
 			},
+			auxItems: []itemInfo{
+				{
+					name:      "test-file.txt" + onedrive.MetaFileSuffix,
+					data:      getTestMetaJSON(suite.T(), suite.secondaryUser, []string{"read"}),
+					lookupKey: "test-file.txt" + onedrive.MetaFileSuffix,
+				},
+			},
 		},
 	},
 },
@@ -1190,6 +1252,13 @@ func (suite *GraphConnectorIntegrationSuite) TestRestoreAndBackupVersion0() {
 					lookupKey: "b" + onedrive.DirMetaFileSuffix,
 				},
 			},
+			auxItems: []itemInfo{
+				{
+					name:      "test-file.txt" + onedrive.MetaFileSuffix,
+					data:      []byte("{}"),
+					lookupKey: "test-file.txt" + onedrive.MetaFileSuffix,
+				},
+			},
 		},
 		{
 			pathElements: []string{
@@ -1216,6 +1285,13 @@ func (suite *GraphConnectorIntegrationSuite) TestRestoreAndBackupVersion0() {
 					lookupKey: "b" + onedrive.DirMetaFileSuffix,
 				},
 			},
+			auxItems: []itemInfo{
+				{
+					name:      "test-file.txt" + onedrive.MetaFileSuffix,
+					data:      []byte("{}"),
+					lookupKey: "test-file.txt" + onedrive.MetaFileSuffix,
+				},
+			},
 		},
 		{
 			pathElements: []string{
@@ -1243,6 +1319,13 @@ func (suite *GraphConnectorIntegrationSuite) TestRestoreAndBackupVersion0() {
 					lookupKey: "folder-a" + onedrive.DirMetaFileSuffix,
 				},
 			},
+			auxItems: []itemInfo{
+				{
+					name:      "test-file.txt" + onedrive.MetaFileSuffix,
+					data:      []byte("{}"),
+					lookupKey: "test-file.txt" + onedrive.MetaFileSuffix,
+				},
+			},
 		},
 		{
 			pathElements: []string{
@@ -1266,6 +1349,13 @@ func (suite *GraphConnectorIntegrationSuite) TestRestoreAndBackupVersion0() {
 					lookupKey: "test-file.txt" + onedrive.MetaFileSuffix,
 				},
 			},
+			auxItems: []itemInfo{
+				{
+					name:      "test-file.txt" + onedrive.MetaFileSuffix,
+					data:      []byte("{}"),
+					lookupKey: "test-file.txt" + onedrive.MetaFileSuffix,
+				},
+			},
 		},
 		{
 			pathElements: []string{
@@ -1287,6 +1377,13 @@ func (suite *GraphConnectorIntegrationSuite) TestRestoreAndBackupVersion0() {
 					lookupKey: "test-file.txt" + onedrive.MetaFileSuffix,
 				},
 			},
+			auxItems: []itemInfo{
+				{
+					name:      "test-file.txt" + onedrive.MetaFileSuffix,
+					data:      []byte("{}"),
+					lookupKey: "test-file.txt" + onedrive.MetaFileSuffix,
+				},
+			},
 		},
 	},
 },
@@ -1508,6 +1605,13 @@ func (suite *GraphConnectorIntegrationSuite) TestPermissionsRestoreAndBackup() {
 					lookupKey: "test-file.txt" + onedrive.MetaFileSuffix,
 				},
 			},
+			auxItems: []itemInfo{
+				{
+					name:      "test-file.txt" + onedrive.MetaFileSuffix,
+					data:      getTestMetaJSON(suite.T(), suite.secondaryUser, []string{"write"}),
+					lookupKey: "test-file.txt" + onedrive.MetaFileSuffix,
+				},
+			},
 		},
 	},
 },
@@ -1541,6 +1645,13 @@ func (suite *GraphConnectorIntegrationSuite) TestPermissionsRestoreAndBackup() {
 					lookupKey: "b" + onedrive.DirMetaFileSuffix,
 				},
 			},
+			auxItems: []itemInfo{
+				{
+					name:      "test-file.txt" + onedrive.MetaFileSuffix,
+					data:      []byte("{}"),
+					lookupKey: "test-file.txt" + onedrive.MetaFileSuffix,
+				},
+			},
 		},
 		{
 			pathElements: []string{
@@ -1562,6 +1673,13 @@ func (suite *GraphConnectorIntegrationSuite) TestPermissionsRestoreAndBackup() {
 					lookupKey: "test-file.txt" + onedrive.MetaFileSuffix,
 				},
 			},
+			auxItems: []itemInfo{
+				{
+					name:      "test-file.txt" + onedrive.MetaFileSuffix,
+					data:      getTestMetaJSON(suite.T(), suite.secondaryUser, []string{"read"}),
+					lookupKey: "test-file.txt" + onedrive.MetaFileSuffix,
+				},
+			},
 		},
 	},
 },
@@ -1595,6 +1713,13 @@ func (suite *GraphConnectorIntegrationSuite) TestPermissionsRestoreAndBackup() {
 					lookupKey: "b" + onedrive.DirMetaFileSuffix,
 				},
 			},
+			auxItems: []itemInfo{
+				{
+					name:      "test-file.txt" + onedrive.MetaFileSuffix,
+					data:      getTestMetaJSON(suite.T(), suite.secondaryUser, []string{"write"}),
+					lookupKey: "test-file.txt" + onedrive.MetaFileSuffix,
+				},
+			},
 		},
 		{
 			pathElements: []string{
@@ -1616,6 +1741,13 @@ func (suite *GraphConnectorIntegrationSuite) TestPermissionsRestoreAndBackup() {
 					lookupKey: "test-file.txt" + onedrive.MetaFileSuffix,
 				},
 			},
+			auxItems: []itemInfo{
+				{
+					name:      "test-file.txt" + onedrive.MetaFileSuffix,
+					data:      getTestMetaJSON(suite.T(), suite.secondaryUser, []string{"read"}),
+					lookupKey: "test-file.txt" + onedrive.MetaFileSuffix,
+				},
+			},
 		},
 	},
 },
@@ -1660,6 +1792,13 @@ func (suite *GraphConnectorIntegrationSuite) TestPermissionsRestoreAndBackup() {
 					lookupKey: "test-file.txt" + onedrive.MetaFileSuffix,
 				},
 			},
+			auxItems: []itemInfo{
+				{
+					name:      "test-file.txt" + onedrive.MetaFileSuffix,
+					data:      getTestMetaJSON(suite.T(), suite.secondaryUser, []string{"write"}),
+					lookupKey: "test-file.txt" + onedrive.MetaFileSuffix,
+				},
+			},
 		},
 	},
 },
@@ -1704,6 +1843,13 @@ func (suite *GraphConnectorIntegrationSuite) TestPermissionsRestoreAndBackup() {
 					lookupKey: "test-file.txt" + onedrive.MetaFileSuffix,
 				},
 			},
+			auxItems: []itemInfo{
+				{
+					name:      "test-file.txt" + onedrive.MetaFileSuffix,
+					data:      []byte("{}"),
+					lookupKey: "test-file.txt" + onedrive.MetaFileSuffix,
+				},
+			},
 		},
 	},
 },
@@ -1762,6 +1908,13 @@ func (suite *GraphConnectorIntegrationSuite) TestPermissionsBackupAndNoRestore()
 					lookupKey: "test-file.txt" + onedrive.MetaFileSuffix,
 				},
 			},
+			auxItems: []itemInfo{
+				{
+					name:      "test-file.txt" + onedrive.MetaFileSuffix,
+					data:      getTestMetaJSON(suite.T(), suite.secondaryUser, []string{"write"}),
+					lookupKey: "test-file.txt" + onedrive.MetaFileSuffix,
+				},
+			},
 		},
 	},
 },
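
These table entries attach auxiliary metadata items whose contents come from getTestMetaJSON(t, user, roles), which isn't shown in this diff. A hypothetical sketch of such a helper, serializing the UserPermission shape used by permissionEqual above — the containing struct name and JSON field layout are inferred, not confirmed by the source:

```go
// getTestMetaJSON builds a serialized onedrive metadata blob granting the
// given roles to a single user. Sketch only: the onedrive.Metadata name is
// an assumption based on the Permissions field usage visible in this diff.
func getTestMetaJSON(t *testing.T, user string, roles []string) []byte {
	meta := onedrive.Metadata{
		Permissions: []onedrive.UserPermission{
			{Email: user, Roles: roles},
		},
	}

	out, err := json.Marshal(meta)
	require.NoError(t, err)

	return out
}
```
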
@@ -27,10 +27,10 @@ type MockExchangeDataCollection struct {
 }

 var (
-	_ data.Collection = &MockExchangeDataCollection{}
+	_ data.BackupCollection = &MockExchangeDataCollection{}
 	_ data.Stream = &MockExchangeData{}
 	_ data.StreamInfo = &MockExchangeData{}
 	_ data.StreamSize = &MockExchangeData{}
 )

 // NewMockExchangeDataCollection creates an data collection that will return the specified number of

@@ -14,8 +14,8 @@ import (
 )

 var (
 	_ data.Stream = &MockListData{}
-	_ data.Collection = &MockListCollection{}
+	_ data.BackupCollection = &MockListCollection{}
 )

 type MockListCollection struct {
@@ -3,6 +3,13 @@ package mockconnector
 import (
 	"encoding/base64"
 	"fmt"
+	"testing"
+
+	absser "github.com/microsoft/kiota-abstractions-go/serialization"
+	js "github.com/microsoft/kiota-serialization-json-go"
+	"github.com/microsoftgraph/msgraph-sdk-go/models"
+	"github.com/pkg/errors"
+	"github.com/stretchr/testify/require"

 	"github.com/alcionai/corso/src/internal/common"
 )
@@ -360,6 +367,143 @@ func GetMockMessageWithItemAttachmentEvent(subject string) []byte {
 	return []byte(message)
 }

+func GetMockMessageWithItemAttachmentMail(subject string) []byte {
+	//nolint:lll
+	// Order of fields:
+	// 1. subject
+	// 2. alias
+	// 3. sender address
+	// 4. from address
+	// 5. toRecipients email address
+	template := `{
+		"@odata.context": "https://graph.microsoft.com/v1.0/$metadata#users('f435c656-f8b2-4d71-93c3-6e092f52a167')/messages(attachments())/$entity",
+		"@odata.etag": "W/\"CQAAABYAAAB8wYc0thTTTYl3RpEYIUq+AADKTqr3\"",
+		"id": "AAMkAGQ1NzViZTdhLTEwMTMtNGJjNi05YWI2LTg4NWRlZDA2Y2UxOABGAAAAAAAPvVwUramXT7jlSGpVU8_7BwB8wYc0thTTTYl3RpEYIUq_AAAAAAEMAAB8wYc0thTTTYl3RpEYIUq_AADKo35SAAA=",
+		"createdDateTime": "2023-02-06T20:03:40Z",
+		"lastModifiedDateTime": "2023-02-06T20:03:42Z",
+		"changeKey": "CQAAABYAAAB8wYc0thTTTYl3RpEYIUq+AADKTqr3",
+		"categories": [],
+		"receivedDateTime": "2023-02-06T20:03:40Z",
+		"sentDateTime": "2023-02-06T20:03:37Z",
+		"hasAttachments": true,
+		"internetMessageId": "<SJ0PR17MB5622C17321AE356F5202A857C3DA9@SJ0PR17MB5622.namprd17.prod.outlook.com>",
+		"subject": "%[1]s",
+		"bodyPreview": "Nested Items are not encapsulated in a trivial manner. Review the findings.\r\n\r\nBest,\r\n\r\nYour Test Case",
+		"importance": "normal",
+		"parentFolderId": "AQMkAGQ1NzViZTdhLTEwMTMtNGJjNi05YWI2LTg4ADVkZWQwNmNlMTgALgAAAw_9XBStqZdPuOVIalVTz7sBAHzBhzS2FNNNiXdGkRghSr4AAAIBDAAAAA==",
+		"conversationId": "AAQkAGQ1NzViZTdhLTEwMTMtNGJjNi05YWI2LTg4NWRlZDA2Y2UxOAAQAPe8pEQOrBxLvFNhfDtMyEI=",
+		"conversationIndex": "AQHZOmYA97ykRA6sHEu8U2F8O0zIQg==",
+		"isDeliveryReceiptRequested": false,
+		"isReadReceiptRequested": false,
+		"isRead": false,
+		"isDraft": false,
+		"webLink": "https://outlook.office365.com/owal=ReadMessageItem",
+		"inferenceClassification": "focused",
+		"body": {
+			"contentType": "html",
+			"content": "<html><head>\r\n<meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\"><style type=\"text/css\" style=\"display:none\">\r\n<!--\r\np\r\n\t{margin-top:0;\r\n\tmargin-bottom:0}\r\n-->\r\n</style></head><body dir=\"ltr\"><div class=\"elementToProof\" style=\"font-family:Calibri,Arial,Helvetica,sans-serif; font-size:12pt; color:rgb(0,0,0); background-color:rgb(255,255,255)\">Nested Items are not encapsulated in a trivial manner. Review the findings.</div><div class=\"elementToProof\" style=\"font-family:Calibri,Arial,Helvetica,sans-serif; font-size:12pt; color:rgb(0,0,0); background-color:rgb(255,255,255)\"><br></div><div class=\"elementToProof\" style=\"font-family:Calibri,Arial,Helvetica,sans-serif; font-size:12pt; color:rgb(0,0,0); background-color:rgb(255,255,255)\">Best, </div><div class=\"elementToProof\" style=\"font-family:Calibri,Arial,Helvetica,sans-serif; font-size:12pt; color:rgb(0,0,0); background-color:rgb(255,255,255)\"><br></div><div class=\"elementToProof\" style=\"font-family:Calibri,Arial,Helvetica,sans-serif; font-size:12pt; color:rgb(0,0,0); background-color:rgb(255,255,255)\">Your Test Case</div></body></html>"
+		},
+		"sender": {
+			"emailAddress": {
+				"name": "%[2]s",
+				"address": "%[3]s"
+			}
+		},
+		"from": {
+			"emailAddress": {
+				"name": "%[2]s",
+				"address": "%[4]s"
+			}
+		},
+		"toRecipients": [
+			{
+				"emailAddress": {
+					"name": "%[2]s",
+					"address": "%[5]s"
+				}
+			}
+		],
+		"ccRecipients": [],
+		"bccRecipients": [],
+		"replyTo": [],
+		"flag": {
+			"flagStatus": "notFlagged"
+		},
+		"attachments": [
+			{
+				"@odata.context": "https://graph.microsoft.com/v1.0/$metadata#/attachments(microsoft.graph.itemAttachment/item())/$entity",
+				"@odata.type": "#microsoft.graph.itemAttachment",
+				"id": "AAMkAGQ1NzViZTdhLTEwMTMtNGJjNi05YWI2LTg4NWRlZDA2Y2UxOABGAAAAAAAPvVwUramXT7jlSGpVU8_7BwB8wYc0thTTTYl3RpEYIUq_AAAAAAEMAAB8wYc0thTTTYl3RpEYIUq_AADKo35SAAABEgAQABv3spWM8g5IriSvYJe5kO8=",
+				"lastModifiedDateTime": "2023-02-06T20:03:40Z",
+				"name": "Not Something Small. 28-Jul-2022_20:53:33 Different",
+				"contentType": null,
+				"size": 10959,
+				"isInline": false,
+				"item@odata.associationLink": "https://graph.microsoft.com/v1.0/users('f435c656-f8b2-4d71-93c3-6e092f52a167')/messages('')/$ref",
+				"item@odata.navigationLink": "https://graph.microsoft.com/v1.0/users('f435c656-f8b2-4d71-93c3-6e092f52a167')/messages('')",
+				"item": {
+					"@odata.type": "#microsoft.graph.message",
+					"id": "",
+					"createdDateTime": "2023-02-06T20:03:40Z",
+					"lastModifiedDateTime": "2023-02-06T20:03:40Z",
+					"receivedDateTime": "2022-07-28T20:53:33Z",
+					"sentDateTime": "2022-07-28T20:53:33Z",
+					"hasAttachments": false,
+					"internetMessageId": "<MWHPR1401MB1952C46D4A46B6398F562B0FA6E99@MWHPR1401MB1952.namprd14.prod.outlook.com>",
+					"subject": "Not Something Small. 28-Jul-2022_20:53:33 Different",
+					"bodyPreview": "I've been going through with the changing of messages. It shouldn't have the same calls, right? Call Me?\r\n\r\nWe want to be able to send multiple messages and we want to be able to respond and do other things that make sense for our users. In this case. Let",
+					"importance": "normal",
+					"conversationId": "AAQkAGQ1NzViZTdhLTEwMTMtNGJjNi05YWI2LTg4NWRlZDA2Y2UxOAAQAOlAM0OrVQlHkhUZeZMPxgg=",
+					"conversationIndex": "AQHYosQZ6UAzQ6tVCUeSFRl5kw/GCA==",
+					"isDeliveryReceiptRequested": false,
+					"isReadReceiptRequested": false,
+					"isRead": true,
+					"isDraft": false,
+					"webLink": "https://outlook.office365.com/owa/?AttachmentItemID=Aviewmodel=ItemAttachment",
+					"body": {
+						"contentType": "html",
+						"content": "<html><head>\r\n<meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\"><meta name=\"Generator\" content=\"Microsoft Word 15 (filtered medium)\"><style><!--@font-face{font-family:\"Cambria Math\"}@font-face{font-family:Calibri}p.MsoNormal, li.MsoNormal, div.MsoNormal{margin:0in;font-size:11.0pt;font-family:\"Calibri\",sans-serif}span.EmailStyle17{font-family:\"Calibri\",sans-serif;color:windowtext}.MsoChpDefault{font-family:\"Calibri\",sans-serif}@page WordSection1{margin:1.0in 1.0in 1.0in 1.0in}div.WordSection1{}--></style></head><body lang=\"EN-US\" link=\"#0563C1\" vlink=\"#954F72\" style=\"word-wrap:break-word\"><div class=\"WordSection1\"><p class=\"MsoNormal\">I've been going through with the changing of messages. It shouldn't have the same calls, right? Call Me? </p><p class=\"MsoNormal\"> </p><p class=\"MsoNormal\">We want to be able to send multiple messages and we want to be able to respond and do other things that make sense for our users. In this case. Let’s consider a Mailbox</p></div></body></html>"
+					},
+					"sender": {
+						"emailAddress": {
+							"name": "%[2]s",
+							"address": "%[3]s"
+						}
+					},
+					"from": {
+						"emailAddress": {
+							"name": "%[2]s",
+							"address": "%[4]s"
+						}
+					},
+					"toRecipients": [
+						{
+							"emailAddress": {
+								"name": "Direct Report",
+								"address": "notAvailable@8qzvrj.onmicrosoft.com"
+							}
+						}
+					],
+					"flag": {
+						"flagStatus": "notFlagged"
+					}
+				}
+			}
+		]
+	}`
+
+	message := fmt.Sprintf(
+		template,
+		subject,
+		defaultAlias,
+		defaultMessageSender,
+		defaultMessageFrom,
+		defaultMessageTo,
+	)
+
+	return []byte(message)
+}
+
 func GetMockMessageWithNestedItemAttachmentEvent(subject string) []byte {
 	//nolint:lll
 	// Order of fields:
@@ -545,3 +689,73 @@ func GetMockMessageWithNestedItemAttachmentEvent(subject string) []byte {

 	return []byte(message)
 }
+
+func GetMockMessageWithNestedItemAttachmentMail(t *testing.T, nested []byte, subject string) []byte {
+	base := GetMockMessageBytes(subject)
+	message, err := hydrateMessage(base)
+	require.NoError(t, err)
+
+	nestedMessage, err := hydrateMessage(nested)
+	require.NoError(t, err)
+
+	iaNode := models.NewItemAttachment()
+	attachmentSize := int32(len(nested))
+	iaNode.SetSize(&attachmentSize)
+
+	internalName := "Nested Message"
+	iaNode.SetName(&internalName)
+	iaNode.SetItem(nestedMessage)
+	message.SetAttachments([]models.Attachmentable{iaNode})
+
+	return serialize(t, message)
+}
+
+func GetMockMessageWithNestedItemAttachmentContact(t *testing.T, nested []byte, subject string) []byte {
+	base := GetMockMessageBytes(subject)
+	message, err := hydrateMessage(base)
+	require.NoError(t, err)
+
+	parseNode, err := js.NewJsonParseNodeFactory().GetRootParseNode("application/json", nested)
+	require.NoError(t, err)
+
+	anObject, err := parseNode.GetObjectValue(models.CreateContactFromDiscriminatorValue)
+	require.NoError(t, err)
+
+	contact := anObject.(models.Contactable)
+	internalName := "Nested Contact"
+	iaNode := models.NewItemAttachment()
+	attachmentSize := int32(len(nested))
+	iaNode.SetSize(&attachmentSize)
+	iaNode.SetName(&internalName)
+	iaNode.SetItem(contact)
+	message.SetAttachments([]models.Attachmentable{iaNode})
+
+	return serialize(t, message)
+}
+
+func serialize(t *testing.T, item absser.Parsable) []byte {
+	wtr := js.NewJsonSerializationWriter()
+	err := wtr.WriteObjectValue("", item)
+	require.NoError(t, err)
+
+	byteArray, err := wtr.GetSerializedContent()
+	require.NoError(t, err)
+
+	return byteArray
+}
+
+func hydrateMessage(byteArray []byte) (models.Messageable, error) {
+	parseNode, err := js.NewJsonParseNodeFactory().GetRootParseNode("application/json", byteArray)
+	if err != nil {
+		return nil, errors.Wrap(err, "deserializing bytes into base m365 object")
+	}
+
+	anObject, err := parseNode.GetObjectValue(models.CreateMessageFromDiscriminatorValue)
+	if err != nil {
+		return nil, errors.Wrap(err, "parsing m365 object factory")
+	}
+
+	message := anObject.(models.Messageable)
+
+	return message, nil
+}
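
Taken together, hydrateMessage and serialize give the mocks a JSON-to-model-to-JSON round trip, which is how the nested-attachment fixtures above are assembled. A sketch of typical use in a test, assuming only the exported helpers shown in this diff (GetMockMessageBytes appears in the new code; the test name and assertions are illustrative):

```go
func TestNestedAttachmentFixture(t *testing.T) {
	// Build an inner message fixture, then wrap it as an item attachment
	// on an outer message.
	inner := mockconnector.GetMockMessageBytes("inner subject")
	outer := mockconnector.GetMockMessageWithNestedItemAttachmentMail(t, inner, "outer subject")

	// The result is serialized JSON, ready to feed into restore tests.
	require.NotEmpty(t, outer)
}
```
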
@@ -30,6 +30,7 @@ const pageSize = int32(999)

 type driveItemPager struct {
 	gs graph.Servicer
+	driveID string
 	builder *msdrives.ItemRootDeltaRequestBuilder
 	options *msdrives.ItemRootDeltaRequestBuilderGetRequestConfiguration
 }
@@ -49,6 +50,7 @@ func NewItemPager(

 	res := &driveItemPager{
 		gs: gs,
+		driveID: driveID,
 		options: requestConfig,
 		builder: gs.Client().DrivesById(driveID).Root().Delta(),
 	}
@@ -78,6 +80,10 @@ func (p *driveItemPager) SetNext(link string) {
 	p.builder = msdrives.NewItemRootDeltaRequestBuilder(link, p.gs.Adapter())
 }

+func (p *driveItemPager) Reset() {
+	p.builder = p.gs.Client().DrivesById(p.driveID).Root().Delta()
+}
+
 func (p *driveItemPager) ValuesIn(l api.DeltaPageLinker) ([]models.DriveItemable, error) {
 	return getValues[models.DriveItemable](l)
 }
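
The pager now remembers its driveID so that Reset can rebuild the request from the drive root. That matters for delta queries: a stored delta token can expire or be invalidated server-side, at which point the caller must fall back to a full enumeration. A hedged sketch of that control flow against a minimal interface — the real collectItems plumbing differs, and isInvalidDelta is a placeholder for whatever predicate inspects the Graph error:

```go
// resettable is the minimal surface this sketch needs; driveItemPager
// satisfies a superset of it.
type resettable interface {
	Reset()
}

// enumerateWithFallback retries a delta enumeration once from scratch when
// the saved token is rejected.
func enumerateWithFallback(p resettable, run func() error, isInvalidDelta func(error) bool) error {
	err := run()
	if err != nil && isInvalidDelta(err) {
		// Drop the stale token and restart from the drive root.
		p.Reset()
		return run()
	}

	return err
}
```
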
@@ -42,10 +42,10 @@ const (
 )

 var (
-	_ data.Collection = &Collection{}
+	_ data.BackupCollection = &Collection{}
 	_ data.Stream = &Item{}
 	_ data.StreamInfo = &Item{}
 	_ data.StreamModTime = &Item{}
 )

 // Collection represents a set of OneDrive objects retrieved from M365
@@ -97,17 +97,19 @@ func NewCollection(
 	statusUpdater support.StatusUpdater,
 	source driveSource,
 	ctrlOpts control.Options,
+	doNotMergeItems bool,
 ) *Collection {
 	c := &Collection{
 		itemClient:    itemClient,
 		folderPath:    folderPath,
 		driveItems:    map[string]models.DriveItemable{},
 		driveID:       driveID,
 		source:        source,
 		service:       service,
 		data:          make(chan data.Stream, collectionChannelBufferSize),
 		statusUpdater: statusUpdater,
 		ctrl:          ctrlOpts,
+		doNotMergeItems: doNotMergeItems,
 	}

 	// Allows tests to set a mock populator
@@ -278,35 +280,23 @@ func (oc *Collection) populateItems(ctx context.Context) {

 	if oc.source == OneDriveSource {
 		// Fetch metadata for the file
-		for i := 1; i <= maxRetries; i++ {
-			if !oc.ctrl.ToggleFeatures.EnablePermissionsBackup {
-				// We are still writing the metadata file but with
-				// empty permissions as we don't have a way to
-				// signify that the permissions was explicitly
-				// not added.
-				itemMeta = io.NopCloser(strings.NewReader("{}"))
-				itemMetaSize = 2
-
-				break
-			}
-
-			itemMeta, itemMetaSize, err = oc.itemMetaReader(ctx, oc.service, oc.driveID, item)
-
-			// retry on Timeout type errors, break otherwise.
-			if err == nil ||
-				!graph.IsErrTimeout(err) ||
-				!graph.IsInternalServerError(err) {
-				break
-			}
-
-			if i < maxRetries {
-				time.Sleep(1 * time.Second)
-			}
-		}
-
-		if err != nil {
-			errUpdater(*item.GetId(), errors.Wrap(err, "failed to get item permissions"))
-			return
-		}
+		if !oc.ctrl.ToggleFeatures.EnablePermissionsBackup {
+			// We are still writing the metadata file but with
+			// empty permissions as we don't have a way to
+			// signify that the permissions was explicitly
+			// not added.
+			itemMeta = io.NopCloser(strings.NewReader("{}"))
+			itemMetaSize = 2
+		} else {
+			err = graph.RunWithRetry(func() error {
+				itemMeta, itemMetaSize, err = oc.itemMetaReader(ctx, oc.service, oc.driveID, item)
+				return err
+			})
+
+			if err != nil {
+				errUpdater(*item.GetId(), errors.Wrap(err, "failed to get item permissions"))
+				return
+			}
+		}
 	}

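
The inline retry loop (sleep one second between attempts, retry only on timeouts and internal server errors, cap at maxRetries) is replaced by a graph.RunWithRetry helper, which isn't shown in this diff. A hypothetical minimal version capturing the same behavior — the real helper's predicate and backoff likely differ:

```go
// runWithRetry retries fn up to maxRetries times, sleeping between attempts,
// and only for errors the caller deems transient. Hypothetical sketch of the
// helper this diff calls as graph.RunWithRetry.
func runWithRetry(fn func() error, isTransient func(error) bool, maxRetries int) error {
	var err error

	for i := 1; i <= maxRetries; i++ {
		err = fn()
		if err == nil || !isTransient(err) {
			return err
		}

		if i < maxRetries {
			time.Sleep(1 * time.Second)
		}
	}

	return err
}
```
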
@@ -168,7 +168,8 @@ func (suite *CollectionUnitTestSuite) TestCollection() {
 			suite,
 			suite.testStatusUpdater(&wg, &collStatus),
 			test.source,
-			control.Options{ToggleFeatures: control.Toggles{EnablePermissionsBackup: true}})
+			control.Options{ToggleFeatures: control.Toggles{EnablePermissionsBackup: true}},
+			true)
 		require.NotNil(t, coll)
 		assert.Equal(t, folderPath, coll.FullPath())

@@ -301,7 +302,8 @@ func (suite *CollectionUnitTestSuite) TestCollectionReadError() {
 			suite,
 			suite.testStatusUpdater(&wg, &collStatus),
 			test.source,
-			control.Options{ToggleFeatures: control.Toggles{EnablePermissionsBackup: true}})
+			control.Options{ToggleFeatures: control.Toggles{EnablePermissionsBackup: true}},
+			true)

 		mockItem := models.NewDriveItem()
 		mockItem.SetId(&testItemID)
@@ -372,7 +374,8 @@ func (suite *CollectionUnitTestSuite) TestCollectionDisablePermissionsBackup() {
 			suite,
 			suite.testStatusUpdater(&wg, &collStatus),
 			test.source,
-			control.Options{ToggleFeatures: control.Toggles{}})
+			control.Options{ToggleFeatures: control.Toggles{}},
+			true)

 		now := time.Now()
 		mockItem := models.NewDriveItem()
@@ -61,9 +61,9 @@ type Collections struct {

 	ctrl control.Options

-	// collectionMap allows lookup of the data.Collection
+	// collectionMap allows lookup of the data.BackupCollection
 	// for a OneDrive folder
-	CollectionMap map[string]data.Collection
+	CollectionMap map[string]data.BackupCollection

 	// Not the most ideal, but allows us to change the pager function for testing
 	// as needed. This will allow us to mock out some scenarios during testing.
@@ -100,7 +100,7 @@ func NewCollections(
 		resourceOwner:  resourceOwner,
 		source:         source,
 		matcher:        matcher,
-		CollectionMap:  map[string]data.Collection{},
+		CollectionMap:  map[string]data.BackupCollection{},
 		drivePagerFunc: PagerForSource,
 		itemPagerFunc:  defaultItemPager,
 		service:        service,
@@ -111,7 +111,7 @@ func NewCollections(

 func deserializeMetadata(
 	ctx context.Context,
-	cols []data.Collection,
+	cols []data.RestoreCollection,
 ) (map[string]string, map[string]map[string]string, error) {
 	logger.Ctx(ctx).Infow(
 		"deserialzing previous backup metadata",
@@ -249,9 +249,9 @@ func deserializeMap[T any](reader io.ReadCloser, alreadyFound map[string]T) erro
 // be excluded from the upcoming backup.
 func (c *Collections) Get(
 	ctx context.Context,
-	prevMetadata []data.Collection,
-) ([]data.Collection, map[string]struct{}, error) {
-	_, _, err := deserializeMetadata(ctx, prevMetadata)
+	prevMetadata []data.RestoreCollection,
+) ([]data.BackupCollection, map[string]struct{}, error) {
+	prevDeltas, _, err := deserializeMetadata(ctx, prevMetadata)
 	if err != nil {
 		return nil, nil, err
 	}
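The hunks above consistently split the old data.Collection type into a backup-side and a restore-side contract. As a rough sketch of how such a split could look (the method sets here are assumptions for illustration, not Corso's exact interfaces):

package data

import (
	"context"
	"io"
)

// Stream models a single item's bytes plus its identity.
type Stream interface {
	UUID() string
	ToReader() io.ReadCloser
}

// BackupCollection is what a backup producer hands to the storage layer.
type BackupCollection interface {
	Items() <-chan Stream
	// DoNotMergeItems reports whether previously backed-up state should be
	// discarded, e.g. after an invalid delta token forced a full enumeration.
	DoNotMergeItems() bool
}

// RestoreCollection is what a restore consumer reads back from storage.
// Fetch allows random access to a sibling item, such as a .meta file,
// by name, instead of relying on channel ordering.
type RestoreCollection interface {
	Items() <-chan Stream
	Fetch(ctx context.Context, name string) (Stream, error)
}

Separating the two keeps backup-only concerns (merge flags) and restore-only concerns (random item fetch) out of each other's signatures.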
@@ -287,6 +287,8 @@ func (c *Collections) Get(
 		driveID := *d.GetId()
 		driveName := *d.GetName()

+		prevDelta := prevDeltas[driveID]
+
 		delta, paths, excluded, err := collectItems(
 			ctx,
 			c.itemPagerFunc(
@@ -297,6 +299,7 @@ func (c *Collections) Get(
 			driveID,
 			driveName,
 			c.UpdateCollections,
+			prevDelta,
 		)
 		if err != nil {
 			return nil, nil, err
@@ -307,8 +310,8 @@ func (c *Collections) Get(
 		// remove entries for which there is no corresponding delta token/folder. If
 		// we leave empty delta tokens then we may end up setting the State field
 		// for collections when not actually getting delta results.
-		if len(delta) > 0 {
-			deltaURLs[driveID] = delta
+		if len(delta.URL) > 0 {
+			deltaURLs[driveID] = delta.URL
 		}

 		// Avoid the edge case where there's no paths but we do have a valid delta
@@ -324,7 +327,7 @@ func (c *Collections) Get(
 	observe.Message(ctx, observe.Safe(fmt.Sprintf("Discovered %d items to backup", c.NumItems)))

 	// Add an extra for the metadata collection.
-	collections := make([]data.Collection, 0, len(c.CollectionMap)+1)
+	collections := make([]data.BackupCollection, 0, len(c.CollectionMap)+1)
 	for _, coll := range c.CollectionMap {
 		collections = append(collections, coll)
 	}
@@ -356,7 +359,7 @@ func (c *Collections) Get(
 	}

 	// TODO(ashmrtn): Track and return the set of items to exclude.
-	return collections, nil, nil
+	return collections, excludedItems, nil
 }

 // UpdateCollections initializes and adds the provided drive items to Collections
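The Get changes above thread a per-drive delta token from the previous backup's metadata into the enumeration, and only persist non-empty tokens afterwards. A minimal stand-alone sketch of that flow, under assumed names (deltaUpdate and enumerateDrive mimic the DeltaUpdate type and collectItems call introduced later in this diff):

package main

import "fmt"

// deltaUpdate mirrors the shape introduced later in this diff: the new
// token plus whether the old one was rejected by the service.
type deltaUpdate struct {
	URL   string
	Reset bool
}

// enumerateDrive is a stand-in for collectItems: it pretends the stored
// token is still valid and returns a fresh one.
func enumerateDrive(driveID, prevDelta string) deltaUpdate {
	return deltaUpdate{URL: "token-B", Reset: prevDelta == ""}
}

func main() {
	// Tokens recovered from the previous backup's metadata, keyed by drive ID.
	prevDeltas := map[string]string{"drive-1": "token-A"}
	deltaURLs := map[string]string{}

	for _, driveID := range []string{"drive-1", "drive-2"} {
		// A missing key yields "", which the enumerator treats as
		// "no prior token": enumerate the drive from scratch.
		prevDelta := prevDeltas[driveID]

		delta := enumerateDrive(driveID, prevDelta)

		// Only record non-empty tokens so stale entries don't linger and
		// later runs don't mistake them for valid incremental state.
		if len(delta.URL) > 0 {
			deltaURLs[driveID] = delta.URL
		}
	}

	fmt.Println(deltaURLs)
}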
@@ -371,6 +374,7 @@ func (c *Collections) UpdateCollections(
 	oldPaths map[string]string,
 	newPaths map[string]string,
 	excluded map[string]struct{},
+	invalidPrevDelta bool,
 ) error {
 	for _, item := range items {
 		if item.GetRoot() != nil {
@@ -462,7 +466,9 @@ func (c *Collections) UpdateCollections(
 				c.service,
 				c.statusUpdater,
 				c.source,
-				c.ctrl)
+				c.ctrl,
+				invalidPrevDelta,
+			)

 			c.CollectionMap[collectionPath.String()] = col
 			c.NumContainers++
@@ -7,6 +7,7 @@ import (

 	"github.com/google/uuid"
 	"github.com/microsoftgraph/msgraph-sdk-go/models"
+	"github.com/microsoftgraph/msgraph-sdk-go/models/odataerrors"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"github.com/stretchr/testify/suite"
@@ -645,6 +646,7 @@ func (suite *OneDriveCollectionsSuite) TestUpdateCollections() {
 				tt.inputFolderMap,
 				outputFolderMap,
 				excludes,
+				false,
 			)
 			tt.expect(t, err)
 			assert.Equal(t, len(tt.expectedCollectionPaths), len(c.CollectionMap), "collection paths")
@@ -981,7 +983,7 @@ func (suite *OneDriveCollectionsSuite) TestDeserializeMetadata() {
 			ctx, flush := tester.NewContext()
 			defer flush()

-			cols := []data.Collection{}
+			cols := []data.RestoreCollection{}

 			for _, c := range test.cols {
 				mc, err := graph.MakeMetadataCollection(
@@ -994,7 +996,7 @@ func (suite *OneDriveCollectionsSuite) TestDeserializeMetadata() {
 				)
 				require.NoError(t, err)

-				cols = append(cols, mc)
+				cols = append(cols, data.NotFoundRestoreCollection{Collection: mc})
 			}

 			deltas, paths, err := deserializeMetadata(ctx, cols)
@@ -1047,6 +1049,7 @@ func (p *mockItemPager) GetPage(context.Context) (gapi.DeltaPageLinker, error) {
 }

 func (p *mockItemPager) SetNext(string) {}
+func (p *mockItemPager) Reset()         {}

 func (p *mockItemPager) ValuesIn(gapi.DeltaPageLinker) ([]models.DriveItemable, error) {
 	idx := p.getIdx
@@ -1131,6 +1134,7 @@ func (suite *OneDriveCollectionsSuite) TestGet() {
 		expectedDeltaURLs   map[string]string
 		expectedFolderPaths map[string]map[string]string
 		expectedDelList     map[string]struct{}
+		doNotMergeItems     bool
 	}{
 		{
 			name:   "OneDrive_OneItemPage_DelFileOnly_NoFolders_NoErrors",
@@ -1342,6 +1346,135 @@ func (suite *OneDriveCollectionsSuite) TestGet() {
 			expectedFolderPaths: nil,
 			expectedDelList:     nil,
 		},
+		{
+			name:   "OneDrive_OneItemPage_DeltaError",
+			drives: []models.Driveable{drive1},
+			items: map[string][]deltaPagerResult{
+				driveID1: {
+					{
+						err: getDeltaError(),
+					},
+					{
+						items: []models.DriveItemable{
+							driveItem("file", "file", testBaseDrivePath, true, false, false),
+						},
+						deltaLink: &delta,
+					},
+				},
+			},
+			errCheck: assert.NoError,
+			expectedCollections: map[string][]string{
+				expectedPathAsSlice(
+					suite.T(),
+					tenant,
+					user,
+					testBaseDrivePath,
+				)[0]: {"file"},
+			},
+			expectedDeltaURLs: map[string]string{
+				driveID1: delta,
+			},
+			expectedFolderPaths: map[string]map[string]string{
+				// We need an empty map here so deserializing metadata knows the delta
+				// token for this drive is valid.
+				driveID1: {},
+			},
+			expectedDelList: map[string]struct{}{},
+			doNotMergeItems: true,
+		},
+		{
+			name:   "OneDrive_MultipleCollections_DeltaError",
+			drives: []models.Driveable{drive1},
+			items: map[string][]deltaPagerResult{
+				driveID1: {
+					{
+						err: getDeltaError(),
+					},
+					{
+						items: []models.DriveItemable{
+							driveItem("file", "file", testBaseDrivePath, true, false, false),
+						},
+						nextLink: &next,
+					},
+					{
+						items: []models.DriveItemable{
+							driveItem("file", "file", testBaseDrivePath+"/folder", true, false, false),
+						},
+						deltaLink: &delta,
+					},
+				},
+			},
+			errCheck: assert.NoError,
+			expectedCollections: map[string][]string{
+				expectedPathAsSlice(
+					suite.T(),
+					tenant,
+					user,
+					testBaseDrivePath,
+				)[0]: {"file"},
+				expectedPathAsSlice(
+					suite.T(),
+					tenant,
+					user,
+					testBaseDrivePath+"/folder",
+				)[0]: {"file"},
+			},
+			expectedDeltaURLs: map[string]string{
+				driveID1: delta,
+			},
+			expectedFolderPaths: map[string]map[string]string{
+				// We need an empty map here so deserializing metadata knows the delta
+				// token for this drive is valid.
+				driveID1: {},
+			},
+			expectedDelList: map[string]struct{}{},
+			doNotMergeItems: true,
+		},
+		{
+			name:   "OneDrive_MultipleCollections_NoDeltaError",
+			drives: []models.Driveable{drive1},
+			items: map[string][]deltaPagerResult{
+				driveID1: {
+					{
+						items: []models.DriveItemable{
+							driveItem("file", "file", testBaseDrivePath, true, false, false),
+						},
+						nextLink: &next,
+					},
+					{
+						items: []models.DriveItemable{
+							driveItem("file", "file", testBaseDrivePath+"/folder", true, false, false),
+						},
+						deltaLink: &delta,
+					},
+				},
+			},
+			errCheck: assert.NoError,
+			expectedCollections: map[string][]string{
+				expectedPathAsSlice(
+					suite.T(),
+					tenant,
+					user,
+					testBaseDrivePath,
+				)[0]: {"file"},
+				expectedPathAsSlice(
+					suite.T(),
+					tenant,
+					user,
+					testBaseDrivePath+"/folder",
+				)[0]: {"file"},
+			},
+			expectedDeltaURLs: map[string]string{
+				driveID1: delta,
+			},
+			expectedFolderPaths: map[string]map[string]string{
+				// We need an empty map here so deserializing metadata knows the delta
+				// token for this drive is valid.
+				driveID1: {},
+			},
+			expectedDelList: map[string]struct{}{},
+			doNotMergeItems: false,
+		},
 	}
 	for _, test := range table {
 		suite.T().Run(test.name, func(t *testing.T) {
@@ -1386,7 +1519,7 @@ func (suite *OneDriveCollectionsSuite) TestGet() {
 			c.itemPagerFunc = itemPagerFunc

 			// TODO(ashmrtn): Allow passing previous metadata.
-			cols, _, err := c.Get(ctx, nil)
+			cols, delList, err := c.Get(ctx, nil)
 			test.errCheck(t, err)

 			if err != nil {
@@ -1396,7 +1529,9 @@ func (suite *OneDriveCollectionsSuite) TestGet() {
 			for _, baseCol := range cols {
 				folderPath := baseCol.FullPath().String()
 				if folderPath == metadataPath.String() {
-					deltas, paths, err := deserializeMetadata(ctx, []data.Collection{baseCol})
+					deltas, paths, err := deserializeMetadata(ctx, []data.RestoreCollection{
+						data.NotFoundRestoreCollection{Collection: baseCol},
+					})
 					if !assert.NoError(t, err, "deserializing metadata") {
 						continue
 					}
@@ -1421,11 +1556,10 @@ func (suite *OneDriveCollectionsSuite) TestGet() {
 				}

 				assert.ElementsMatch(t, test.expectedCollections[folderPath], itemIDs)
+				assert.Equal(t, test.doNotMergeItems, baseCol.DoNotMergeItems(), "DoNotMergeItems")
 			}

-			// TODO(ashmrtn): Uncomment this when we begin return the set of items to
-			// remove from the upcoming backup.
-			// assert.Equal(t, test.expectedDelList, delList)
+			assert.Equal(t, test.expectedDelList, delList)
 		})
 	}
 }
@@ -1482,3 +1616,98 @@ func delItem(

 	return item
 }
+
+func getDeltaError() error {
+	syncStateNotFound := "SyncStateNotFound" // TODO(meain): export graph.errCodeSyncStateNotFound
+	me := odataerrors.NewMainError()
+	me.SetCode(&syncStateNotFound)
+
+	deltaError := odataerrors.NewODataError()
+	deltaError.SetError(me)
+
+	return deltaError
+}
+
+func (suite *OneDriveCollectionsSuite) TestCollectItems() {
+	next := "next"
+	delta := "delta"
+
+	table := []struct {
+		name             string
+		items            []deltaPagerResult
+		deltaURL         string
+		prevDeltaSuccess bool
+		err              error
+	}{
+		{
+			name:     "delta on first run",
+			deltaURL: delta,
+			items: []deltaPagerResult{
+				{deltaLink: &delta},
+			},
+			prevDeltaSuccess: true,
+		},
+		{
+			name:     "next then delta",
+			deltaURL: delta,
+			items: []deltaPagerResult{
+				{nextLink: &next},
+				{deltaLink: &delta},
+			},
+			prevDeltaSuccess: true,
+		},
+		{
+			name:     "invalid prev delta",
+			deltaURL: delta,
+			items: []deltaPagerResult{
+				{err: getDeltaError()},
+				{deltaLink: &delta}, // works on retry
+			},
+			prevDeltaSuccess: false,
+		},
+		{
+			name: "fail a normal delta query",
+			items: []deltaPagerResult{
+				{nextLink: &next},
+				{err: assert.AnError},
+			},
+			prevDeltaSuccess: true,
+			err:              assert.AnError,
+		},
+	}
+	for _, test := range table {
+		suite.T().Run(test.name, func(t *testing.T) {
+			ctx, flush := tester.NewContext()
+			defer flush()

+			itemPager := &mockItemPager{
+				toReturn: test.items,
+			}
+
+			collectorFunc := func(
+				ctx context.Context,
+				driveID, driveName string,
+				driveItems []models.DriveItemable,
+				oldPaths map[string]string,
+				newPaths map[string]string,
+				excluded map[string]struct{},
+				doNotMergeItems bool,
+			) error {
+				return nil
+			}
+
+			delta, _, _, err := collectItems(
+				ctx,
+				itemPager,
+				"",
+				"General",
+				collectorFunc,
+				"",
+			)
+
+			require.ErrorIs(suite.T(), err, test.err, "delta fetch err")
+			require.Equal(suite.T(), test.deltaURL, delta.URL, "delta url")
+			require.Equal(suite.T(), !test.prevDeltaSuccess, delta.Reset, "delta reset")
+		})
+	}
+}
@@ -35,6 +35,17 @@ const (
 	contextDeadlineExceeded = "context deadline exceeded"
 )

+// DeltaUpdate holds the results of a current delta token. It normally
+// gets produced when aggregating the addition and removal of items in
+// a delta-queriable folder.
+// FIXME: This is same as exchange.api.DeltaUpdate
+type DeltaUpdate struct {
+	// the deltaLink itself
+	URL string
+	// true if the old delta was marked as invalid
+	Reset bool
+}
+
 type drivePager interface {
 	GetPage(context.Context) (gapi.PageLinker, error)
 	SetNext(nextLink string)
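A short sketch of how a caller might consume the DeltaUpdate introduced above: persist the new token, and when Reset is set, flag every collection for the drive so previously merged state is dropped. The collectionState type and doNotMerge field are hypothetical stand-ins for this diff's collections and their DoNotMergeItems flag:

package main

import "fmt"

// DeltaUpdate mirrors the struct added above.
type DeltaUpdate struct {
	URL   string
	Reset bool
}

// collectionState is a stand-in for the per-folder collection a backup
// builds; doNotMerge is the flag the tests in this diff check via
// DoNotMergeItems().
type collectionState struct {
	folder     string
	doNotMerge bool
}

func main() {
	du := DeltaUpdate{URL: "new-token", Reset: true}

	cols := []collectionState{{folder: "root"}, {folder: "root/sub"}}

	// When the previous token was invalidated, every collection for the
	// drive must drop merged state and re-upload from the fresh enumeration.
	for i := range cols {
		cols[i].doNotMerge = du.Reset
	}

	fmt.Println(du.URL, cols[0].doNotMerge, cols[1].doNotMerge)
}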
@@ -132,11 +143,13 @@ type itemCollector func(
 	oldPaths map[string]string,
 	newPaths map[string]string,
 	excluded map[string]struct{},
+	validPrevDelta bool,
 ) error

 type itemPager interface {
 	GetPage(context.Context) (gapi.DeltaPageLinker, error)
 	SetNext(nextLink string)
+	Reset()
 	ValuesIn(gapi.DeltaPageLinker) ([]models.DriveItemable, error)
 }
@@ -172,22 +185,39 @@ func collectItems(
 	pager itemPager,
 	driveID, driveName string,
 	collector itemCollector,
-) (string, map[string]string, map[string]struct{}, error) {
+	prevDelta string,
+) (DeltaUpdate, map[string]string, map[string]struct{}, error) {
 	var (
 		newDeltaURL = ""
 		// TODO(ashmrtn): Eventually this should probably be a parameter so we can
 		// take in previous paths.
 		oldPaths = map[string]string{}
 		newPaths = map[string]string{}
 		excluded = map[string]struct{}{}

+		invalidPrevDelta = false
 	)

 	maps.Copy(newPaths, oldPaths)

+	if len(prevDelta) != 0 {
+		pager.SetNext(prevDelta)
+	}
+
 	for {
 		page, err := pager.GetPage(ctx)
+
+		if graph.IsErrInvalidDelta(err) {
+			logger.Ctx(ctx).Infow("Invalid previous delta link", "link", prevDelta)
+
+			invalidPrevDelta = true
+
+			pager.Reset()
+
+			continue
+		}
+
 		if err != nil {
-			return "", nil, nil, errors.Wrapf(
+			return DeltaUpdate{}, nil, nil, errors.Wrapf(
 				err,
 				"failed to query drive items. details: %s",
 				support.ConnectorStackErrorTrace(err),
@@ -196,12 +226,12 @@ func collectItems(

 		vals, err := pager.ValuesIn(page)
 		if err != nil {
-			return "", nil, nil, errors.Wrap(err, "extracting items from response")
+			return DeltaUpdate{}, nil, nil, errors.Wrap(err, "extracting items from response")
 		}

-		err = collector(ctx, driveID, driveName, vals, oldPaths, newPaths, excluded)
+		err = collector(ctx, driveID, driveName, vals, oldPaths, newPaths, excluded, invalidPrevDelta)
 		if err != nil {
-			return "", nil, nil, err
+			return DeltaUpdate{}, nil, nil, err
 		}

 		nextLink, deltaLink := gapi.NextAndDeltaLink(page)
@@ -219,7 +249,7 @@ func collectItems(
 		pager.SetNext(nextLink)
 	}

-	return newDeltaURL, newPaths, excluded, nil
+	return DeltaUpdate{URL: newDeltaURL, Reset: invalidPrevDelta}, newPaths, excluded, nil
 }

 // getFolder will lookup the specified folder name under `parentFolderID`
@@ -351,6 +381,7 @@ func GetAllFolders(
 			oldPaths map[string]string,
 			newPaths map[string]string,
 			excluded map[string]struct{},
+			doNotMergeItems bool,
 		) error {
 			for _, item := range items {
 				// Skip the root item.
@@ -379,6 +410,7 @@ func GetAllFolders(

 				return nil
 			},
+			"",
 		)
 		if err != nil {
 			return nil, errors.Wrapf(err, "getting items for drive %s", *d.GetName())
@@ -106,6 +106,7 @@ func (suite *ItemIntegrationSuite) TestItemReader_oneDrive() {
 		oldPaths map[string]string,
 		newPaths map[string]string,
 		excluded map[string]struct{},
+		doNotMergeItems bool,
 	) error {
 		for _, item := range items {
 			if item.GetFile() != nil {
@@ -126,6 +127,7 @@ func (suite *ItemIntegrationSuite) TestItemReader_oneDrive() {
 		suite.userDriveID,
 		"General",
 		itemCollector,
+		"",
 	)
 	require.NoError(suite.T(), err)
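The collectItems change above resumes paging from a stored delta token and, if the service rejects it, resets the pager and re-enumerates from scratch while remembering that the token was invalid. A simplified, runnable sketch of that loop (the fakePager and error value are illustrative, not the real Graph pager):

package main

import (
	"errors"
	"fmt"
)

var errInvalidDelta = errors.New("SyncStateNotFound") // stand-in for the Graph error code

// fakePager simulates a pager: the stored token fails once, then a fresh
// enumeration succeeds.
type fakePager struct {
	next string
}

func (p *fakePager) GetPage() (string, error) {
	if p.next != "" {
		return "", errInvalidDelta // service rejected the saved token
	}
	return "delta-2", nil
}

func (p *fakePager) SetNext(link string) { p.next = link }
func (p *fakePager) Reset()              { p.next = "" }

func main() {
	p := &fakePager{}
	p.SetNext("delta-1") // previous backup's token

	invalidPrevDelta := false

	var deltaLink string

	for {
		link, err := p.GetPage()
		if errors.Is(err, errInvalidDelta) {
			// Remember the reset so downstream collections know not to
			// merge against prior state, then start over.
			invalidPrevDelta = true
			p.Reset()

			continue
		}

		if err != nil {
			panic(err)
		}

		deltaLink = link

		break
	}

	fmt.Println(deltaLink, invalidPrevDelta) // delta-2 true
}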
@@ -9,6 +9,7 @@ import (
 	"sort"
 	"strings"

+	"github.com/alcionai/clues"
 	msdrive "github.com/microsoftgraph/msgraph-sdk-go/drive"
 	"github.com/microsoftgraph/msgraph-sdk-go/models"
 	"github.com/pkg/errors"
@@ -64,7 +65,7 @@ func RestoreCollections(
 	service graph.Servicer,
 	dest control.RestoreDestination,
 	opts control.Options,
-	dcs []data.Collection,
+	dcs []data.RestoreCollection,
 	deets *details.Builder,
 ) (*support.ConnectorOperationStatus, error) {
 	var (
@@ -148,7 +149,7 @@ func RestoreCollection(
 	ctx context.Context,
 	backupVersion int,
 	service graph.Servicer,
-	dc data.Collection,
+	dc data.RestoreCollection,
 	parentPerms []UserPermission,
 	source driveSource,
 	restoreContainerName string,
@@ -164,7 +165,6 @@ func RestoreCollection(
 		metrics     = support.CollectionMetrics{}
 		copyBuffer  = make([]byte, copyBufferSize)
 		directory   = dc.FullPath()
-		restoredIDs = map[string]string{}
 		itemInfo    details.ItemInfo
 		itemID      string
 		folderPerms = map[string][]UserPermission{}
@@ -226,37 +226,44 @@ func RestoreCollection(
 				metrics.TotalBytes += int64(len(copyBuffer))
 				trimmedName := strings.TrimSuffix(name, DataFileSuffix)

-				itemID, itemInfo, err = restoreData(ctx, service, trimmedName, itemData,
-					drivePath.DriveID, restoreFolderID, copyBuffer, source)
+				itemID, itemInfo, err = restoreData(
+					ctx,
+					service,
+					trimmedName,
+					itemData,
+					drivePath.DriveID,
+					restoreFolderID,
+					copyBuffer,
+					source)
 				if err != nil {
 					errUpdater(itemData.UUID(), err)
 					continue
 				}

-				restoredIDs[trimmedName] = itemID
-
 				deets.Add(itemPath.String(), itemPath.ShortRef(), "", true, itemInfo)

 				// Mark it as success without processing .meta
 				// file if we are not restoring permissions
 				if !restorePerms {
 					metrics.Successes++
-				}
-			} else if strings.HasSuffix(name, MetaFileSuffix) {
-				if !restorePerms {
 					continue
 				}

-				meta, err := getMetadata(itemData.ToReader())
+				// Fetch item permissions from the collection and restore them.
+				metaName := trimmedName + MetaFileSuffix
+
+				permsFile, err := dc.Fetch(ctx, metaName)
 				if err != nil {
-					errUpdater(itemData.UUID(), err)
+					errUpdater(metaName, clues.Wrap(err, "getting item metadata"))
 					continue
 				}

-				trimmedName := strings.TrimSuffix(name, MetaFileSuffix)
-				restoreID, ok := restoredIDs[trimmedName]
-				if !ok {
-					errUpdater(itemData.UUID(), fmt.Errorf("item not available to restore permissions"))
+				metaReader := permsFile.ToReader()
+				meta, err := getMetadata(metaReader)
+				metaReader.Close()
+
+				if err != nil {
+					errUpdater(metaName, clues.Wrap(err, "deserializing item metadata"))
 					continue
 				}
@@ -264,21 +271,22 @@ func RestoreCollection(
 					ctx,
 					service,
 					drivePath.DriveID,
-					restoreID,
+					itemID,
 					parentPerms,
 					meta.Permissions,
 					permissionIDMappings,
 				)
 				if err != nil {
-					errUpdater(itemData.UUID(), err)
+					errUpdater(trimmedName, clues.Wrap(err, "restoring item permissions"))
 					continue
 				}

-				// Objects count is incremented when we restore a
-				// data file and success count is incremented when
-				// we restore a meta file as every data file
-				// should have an associated meta file
 				metrics.Successes++
+			} else if strings.HasSuffix(name, MetaFileSuffix) {
+				// Just skip this for the moment since we moved the code to the above
+				// item restore path. We haven't yet stopped fetching these items in
+				// RestoreOp, so we still need to handle them in some way.
+				continue
 			} else if strings.HasSuffix(name, DirMetaFileSuffix) {
 				trimmedName := strings.TrimSuffix(name, DirMetaFileSuffix)
 				folderID, err := createRestoreFolder(
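The restore rewrite above is worth a note: the old code restored permissions when the .meta item happened to arrive after its .data item in stream order, tracking IDs in a restoredIDs map. The new code drives everything from the .data item and fetches the matching .meta sibling by name via RestoreCollection.Fetch. A minimal stand-alone sketch of that lookup pattern, with a hypothetical fetcher standing in for the collection:

package main

import (
	"fmt"
	"strings"
)

// fetcher is a stand-in for data.RestoreCollection.Fetch: random access to
// an item in the same collection by name.
type fetcher map[string]string

func (f fetcher) Fetch(name string) (string, bool) {
	v, ok := f[name]
	return v, ok
}

func main() {
	const (
		dataSuffix = ".data"
		metaSuffix = ".meta"
	)

	col := fetcher{
		"report.data": "file bytes",
		"report.meta": `{"permissions":[]}`,
	}

	// Restore is driven by the .data item; the matching .meta is fetched
	// on demand instead of relying on channel ordering, which removes the
	// need for restoredIDs bookkeeping.
	name := "report.data"
	trimmed := strings.TrimSuffix(name, dataSuffix)

	meta, ok := col.Fetch(trimmed + metaSuffix)
	fmt.Println(ok, meta) // true {"permissions":[]}
}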
@@ -4,3 +4,5 @@ type Tuple struct {
 	Name string
 	ID   string
 }
+
+const fetchChannelSize = 5
@@ -1,15 +1,16 @@
-package api
+package api_test

 import (
 	"testing"

-	"github.com/alcionai/corso/src/internal/connector/discovery/api"
+	"github.com/stretchr/testify/require"
+
+	discover "github.com/alcionai/corso/src/internal/connector/discovery/api"
 	"github.com/alcionai/corso/src/internal/connector/graph"
 	"github.com/alcionai/corso/src/pkg/account"
-	"github.com/stretchr/testify/require"
 )

-func createTestBetaService(t *testing.T, credentials account.M365Config) *api.BetaService {
+func createTestBetaService(t *testing.T, credentials account.M365Config) *discover.BetaService {
 	adapter, err := graph.CreateAdapter(
 		credentials.AzureTenantID,
 		credentials.AzureClientID,
@@ -17,5 +18,5 @@ func createTestBetaService(t *testing.T, credentials account.M365Config) *api.Be
 	)
 	require.NoError(t, err)

-	return api.NewBetaService(adapter)
+	return discover.NewBetaService(adapter)
 }
@@ -2,46 +2,100 @@ package api

 import (
 	"context"
+	"fmt"
+	"io"
+	"sync"
+	"time"

-	"github.com/alcionai/corso/src/internal/connector/discovery/api"
+	"github.com/pkg/errors"
+
+	discover "github.com/alcionai/corso/src/internal/connector/discovery/api"
+	"github.com/alcionai/corso/src/internal/connector/graph"
 	"github.com/alcionai/corso/src/internal/connector/graph/betasdk/models"
 	"github.com/alcionai/corso/src/internal/connector/graph/betasdk/sites"
 	"github.com/alcionai/corso/src/internal/connector/support"
+	"github.com/alcionai/corso/src/internal/data"
+	D "github.com/alcionai/corso/src/internal/diagnostics"
+	"github.com/alcionai/corso/src/pkg/backup/details"
 )

 // GetSitePages retrieves a collection of Pages related to the give Site.
 // Returns error if error experienced during the call
-func GetSitePage(
+func GetSitePages(
 	ctx context.Context,
-	serv *api.BetaService,
+	serv *discover.BetaService,
 	siteID string,
 	pages []string,
 ) ([]models.SitePageable, error) {
-	col := make([]models.SitePageable, 0)
-	opts := retrieveSitePageOptions()
+	var (
+		col         = make([]models.SitePageable, 0)
+		semaphoreCh = make(chan struct{}, fetchChannelSize)
+		opts        = retrieveSitePageOptions()
+		err, errs   error
+		wg          sync.WaitGroup
+		m           sync.Mutex
+	)
+
+	defer close(semaphoreCh)
+
+	errUpdater := func(id string, err error) {
+		m.Lock()
+		errs = support.WrapAndAppend(id, err, errs)
+		m.Unlock()
+	}
+	updatePages := func(page models.SitePageable) {
+		m.Lock()
+		col = append(col, page)
+		m.Unlock()
+	}

 	for _, entry := range pages {
-		page, err := serv.Client().SitesById(siteID).PagesById(entry).Get(ctx, opts)
-		if err != nil {
-			return nil, support.ConnectorStackErrorTraceWrap(err, "fetching page: "+entry)
-		}
+		semaphoreCh <- struct{}{}

-		col = append(col, page)
+		wg.Add(1)
+
+		go func(pageID string) {
+			defer wg.Done()
+			defer func() { <-semaphoreCh }()
+
+			var page models.SitePageable
+
+			err = graph.RunWithRetry(func() error {
+				page, err = serv.Client().SitesById(siteID).PagesById(pageID).Get(ctx, opts)
+				return err
+			})
+			if err != nil {
+				errUpdater(pageID, errors.Wrap(err, support.ConnectorStackErrorTrace(err)+" fetching page"))
+			} else {
+				updatePages(page)
+			}
+		}(entry)
+	}
+
+	wg.Wait()
+
+	if errs != nil {
+		return nil, errs
 	}

 	return col, nil
 }

 // fetchPages utility function to return the tuple of item
-func FetchPages(ctx context.Context, bs *api.BetaService, siteID string) ([]Tuple, error) {
+func FetchPages(ctx context.Context, bs *discover.BetaService, siteID string) ([]Tuple, error) {
 	var (
 		builder    = bs.Client().SitesById(siteID).Pages()
 		opts       = fetchPageOptions()
 		pageTuples = make([]Tuple, 0)
+		resp       models.SitePageCollectionResponseable
+		err        error
 	)

 	for {
-		resp, err := builder.Get(ctx, opts)
+		err = graph.RunWithRetry(func() error {
+			resp, err = builder.Get(ctx, opts)
+			return err
+		})
 		if err != nil {
 			return nil, support.ConnectorStackErrorTraceWrap(err, "failed fetching site page")
 		}
@@ -80,6 +134,21 @@ func fetchPageOptions() *sites.ItemPagesRequestBuilderGetRequestConfiguration {
 	return options
 }

+// DeleteSitePage removes the selected page from the SharePoint Site
+// https://learn.microsoft.com/en-us/graph/api/sitepage-delete?view=graph-rest-beta
+func DeleteSitePage(
+	ctx context.Context,
+	serv *discover.BetaService,
+	siteID, pageID string,
+) error {
+	err := serv.Client().SitesById(siteID).PagesById(pageID).Delete(ctx, nil)
+	if err != nil {
+		return support.ConnectorStackErrorTraceWrap(err, "deleting page: "+pageID)
+	}
+
+	return nil
+}
+
 // retrievePageOptions returns options to expand
 func retrieveSitePageOptions() *sites.ItemPagesSitePageItemRequestBuilderGetRequestConfiguration {
 	fields := []string{"canvasLayout"}
@@ -91,3 +160,113 @@ func retrieveSitePageOptions() *sites.ItemPagesSitePageItemRequestBuilderGetRequ

 	return options
 }
+
+func RestoreSitePage(
+	ctx context.Context,
+	service *discover.BetaService,
+	itemData data.Stream,
+	siteID, destName string,
+) (details.ItemInfo, error) {
+	ctx, end := D.Span(ctx, "gc:sharepoint:restorePage", D.Label("item_uuid", itemData.UUID()))
+	defer end()
+
+	var (
+		dii      = details.ItemInfo{}
+		pageID   = itemData.UUID()
+		pageName = pageID
+	)
+
+	byteArray, err := io.ReadAll(itemData.ToReader())
+	if err != nil {
+		return dii, errors.Wrap(err, "reading sharepoint page bytes from stream")
+	}
+
+	// Hydrate Page
+	page, err := support.CreatePageFromBytes(byteArray)
+	if err != nil {
+		return dii, errors.Wrapf(err, "creating Page object %s", pageID)
+	}
+
+	pageNamePtr := page.GetName()
+	if pageNamePtr != nil {
+		pageName = *pageNamePtr
+	}
+
+	newName := fmt.Sprintf("%s_%s", destName, pageName)
+	page.SetName(&newName)
+
+	// Restore is a 2-Step Process in Graph API
+	// 1. Create the Page on the site
+	// 2. Publish the site
+	// See: https://learn.microsoft.com/en-us/graph/api/sitepage-create?view=graph-rest-beta
+	restoredPage, err := service.Client().SitesById(siteID).Pages().Post(ctx, page, nil)
+	if err != nil {
+		sendErr := support.ConnectorStackErrorTraceWrap(
+			err,
+			"creating page from ID: %s"+pageName+" API Error Details",
+		)
+
+		return dii, sendErr
+	}
+
+	pageID = *restoredPage.GetId()
+	// Publish page to make visible
+	// See https://learn.microsoft.com/en-us/graph/api/sitepage-publish?view=graph-rest-beta
+	if restoredPage.GetWebUrl() == nil {
+		return dii, fmt.Errorf("creating page %s incomplete. Field `webURL` not populated", pageID)
+	}
+
+	err = service.Client().
+		SitesById(siteID).
+		PagesById(pageID).
+		Publish().
+		Post(ctx, nil)
+	if err != nil {
+		return dii, support.ConnectorStackErrorTraceWrap(
+			err,
+			"publishing page ID: "+*restoredPage.GetId()+" API Error Details",
+		)
+	}
+
+	dii.SharePoint = PageInfo(restoredPage, int64(len(byteArray)))
+	// Storing new pageID in unused field.
+	dii.SharePoint.ParentPath = pageID
+
+	return dii, nil
+}
+
+// ==============================
+// Helpers
+// ==============================
+// PageInfo extracts useful metadata into struct for book keeping
+func PageInfo(page models.SitePageable, size int64) *details.SharePointInfo {
+	var (
+		name, webURL      string
+		created, modified time.Time
+	)
+
+	if page.GetTitle() != nil {
+		name = *page.GetTitle()
+	}
+
+	if page.GetWebUrl() != nil {
+		webURL = *page.GetWebUrl()
+	}
+
+	if page.GetCreatedDateTime() != nil {
+		created = *page.GetCreatedDateTime()
+	}
+
+	if page.GetLastModifiedDateTime() != nil {
+		modified = *page.GetLastModifiedDateTime()
+	}
+
+	return &details.SharePointInfo{
+		ItemType: details.SharePointItem,
+		ItemName: name,
+		Created:  created,
+		Modified: modified,
+		WebURL:   webURL,
+		Size:     size,
+	}
+}
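The GetSitePages rewrite above replaces serial page fetches with a bounded fan-out: a buffered channel acts as a counting semaphore capping in-flight goroutines at fetchChannelSize, and a mutex guards the shared result slice. A self-contained sketch of just that concurrency pattern:

package main

import (
	"fmt"
	"sync"
)

func main() {
	const fetchChannelSize = 5 // matches the constant introduced above

	ids := []string{"p1", "p2", "p3", "p4", "p5", "p6", "p7"}

	var (
		semaphoreCh = make(chan struct{}, fetchChannelSize)
		wg          sync.WaitGroup
		m           sync.Mutex
		results     []string
	)

	for _, id := range ids {
		semaphoreCh <- struct{}{} // blocks once 5 fetches are in flight

		wg.Add(1)

		go func(pageID string) {
			defer wg.Done()
			defer func() { <-semaphoreCh }() // release the slot

			// The real code performs the Graph call here; results are
			// appended under a mutex because append is not goroutine-safe.
			m.Lock()
			results = append(results, pageID)
			m.Unlock()
		}(id)
	}

	wg.Wait()
	fmt.Println(len(results)) // 7
}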
@@ -1,20 +1,28 @@
-package api
+package api_test

 import (
+	"bytes"
+	"io"
 	"testing"

 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"github.com/stretchr/testify/suite"

+	"github.com/alcionai/corso/src/internal/common"
+	discover "github.com/alcionai/corso/src/internal/connector/discovery/api"
+	"github.com/alcionai/corso/src/internal/connector/mockconnector"
+	"github.com/alcionai/corso/src/internal/connector/sharepoint"
+	"github.com/alcionai/corso/src/internal/connector/sharepoint/api"
 	"github.com/alcionai/corso/src/internal/tester"
 	"github.com/alcionai/corso/src/pkg/account"
 )

 type SharePointPageSuite struct {
 	suite.Suite
 	siteID  string
 	creds   account.M365Config
+	service *discover.BetaService
 }

 func (suite *SharePointPageSuite) SetupSuite() {
@@ -27,6 +35,7 @@ func (suite *SharePointPageSuite) SetupSuite() {
 	require.NoError(t, err)

 	suite.creds = m365
+	suite.service = createTestBetaService(t, suite.creds)
 }

 func TestSharePointPageSuite(t *testing.T) {
@@ -42,9 +51,7 @@ func (suite *SharePointPageSuite) TestFetchPages() {
 	defer flush()

 	t := suite.T()
-	service := createTestBetaService(t, suite.creds)
-
-	pgs, err := FetchPages(ctx, service, suite.siteID)
+	pgs, err := api.FetchPages(ctx, suite.service, suite.siteID)
 	assert.NoError(t, err)
 	require.NotNil(t, pgs)
 	assert.NotZero(t, len(pgs))
@@ -54,18 +61,52 @@ func (suite *SharePointPageSuite) TestFetchPages() {
 	}
 }

-func (suite *SharePointPageSuite) TestGetSitePage() {
+func (suite *SharePointPageSuite) TestGetSitePages() {
 	ctx, flush := tester.NewContext()
 	defer flush()

 	t := suite.T()
-	service := createTestBetaService(t, suite.creds)
-	tuples, err := FetchPages(ctx, service, suite.siteID)
+	tuples, err := api.FetchPages(ctx, suite.service, suite.siteID)
 	require.NoError(t, err)
 	require.NotNil(t, tuples)

 	jobs := []string{tuples[0].ID}
-	pages, err := GetSitePage(ctx, service, suite.siteID, jobs)
+	pages, err := api.GetSitePages(ctx, suite.service, suite.siteID, jobs)
 	assert.NoError(t, err)
 	assert.NotEmpty(t, pages)
 }
+
+func (suite *SharePointPageSuite) TestRestoreSinglePage() {
+	ctx, flush := tester.NewContext()
+	defer flush()
+
+	t := suite.T()
+
+	destName := "Corso_Restore_" + common.FormatNow(common.SimpleTimeTesting)
+	testName := "MockPage"
+
+	// Create Test Page
+	//nolint:lll
+	byteArray := mockconnector.GetMockPage("Byte Test")
+
+	pageData := sharepoint.NewItem(
+		testName,
+		io.NopCloser(bytes.NewReader(byteArray)),
+	)
+
+	info, err := api.RestoreSitePage(
+		ctx,
+		suite.service,
+		pageData,
+		suite.siteID,
+		destName,
+	)
+
+	require.NoError(t, err)
+	require.NotNil(t, info)
+
+	// Clean Up
+	pageID := info.SharePoint.ParentPath
+	err = api.DeleteSitePage(ctx, suite.service, suite.siteID, pageID)
+	assert.NoError(t, err)
+}
@ -3,18 +3,22 @@ package sharepoint
|
|||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
absser "github.com/microsoft/kiota-abstractions-go/serialization"
|
||||||
kw "github.com/microsoft/kiota-serialization-json-go"
|
kw "github.com/microsoft/kiota-serialization-json-go"
|
||||||
"github.com/microsoftgraph/msgraph-sdk-go/models"
|
"github.com/pkg/errors"
|
||||||
|
|
||||||
"github.com/alcionai/corso/src/internal/connector/discovery/api"
|
"github.com/alcionai/corso/src/internal/connector/discovery/api"
|
||||||
"github.com/alcionai/corso/src/internal/connector/graph"
|
"github.com/alcionai/corso/src/internal/connector/graph"
|
||||||
|
sapi "github.com/alcionai/corso/src/internal/connector/sharepoint/api"
|
||||||
"github.com/alcionai/corso/src/internal/connector/support"
|
"github.com/alcionai/corso/src/internal/connector/support"
|
||||||
"github.com/alcionai/corso/src/internal/data"
|
"github.com/alcionai/corso/src/internal/data"
|
||||||
"github.com/alcionai/corso/src/internal/observe"
|
"github.com/alcionai/corso/src/internal/observe"
|
||||||
"github.com/alcionai/corso/src/pkg/backup/details"
|
"github.com/alcionai/corso/src/pkg/backup/details"
|
||||||
|
"github.com/alcionai/corso/src/pkg/control"
|
||||||
"github.com/alcionai/corso/src/pkg/logger"
|
"github.com/alcionai/corso/src/pkg/logger"
|
||||||
"github.com/alcionai/corso/src/pkg/path"
|
"github.com/alcionai/corso/src/pkg/path"
|
||||||
)
|
)
|
||||||
@ -24,18 +28,26 @@ type DataCategory int
|
|||||||
//go:generate stringer -type=DataCategory
|
//go:generate stringer -type=DataCategory
|
||||||
const (
|
const (
|
||||||
collectionChannelBufferSize = 50
|
collectionChannelBufferSize = 50
|
||||||
|
fetchChannelSize = 5
|
||||||
Unknown DataCategory = iota
|
Unknown DataCategory = iota
|
||||||
List
|
List
|
||||||
Drive
|
Drive
|
||||||
|
Pages
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
_ data.Collection = &Collection{}
|
_ data.BackupCollection = &Collection{}
|
||||||
_ data.Stream = &Item{}
|
_ data.Stream = &Item{}
|
||||||
_ data.StreamInfo = &Item{}
|
_ data.StreamInfo = &Item{}
|
||||||
_ data.StreamModTime = &Item{}
|
_ data.StreamModTime = &Item{}
|
||||||
)
|
)
|
||||||
|
|
||||||
|
type numMetrics struct {
|
||||||
|
attempts int
|
||||||
|
success int
|
||||||
|
totalBytes int64
|
||||||
|
}
|
||||||
|
|
||||||
// Collection is the SharePoint.List implementation of data.Collection. SharePoint.Libraries collections are supported
|
// Collection is the SharePoint.List implementation of data.Collection. SharePoint.Libraries collections are supported
|
||||||
// by the oneDrive.Collection as the calls are identical for populating the Collection
|
// by the oneDrive.Collection as the calls are identical for populating the Collection
|
||||||
type Collection struct {
|
type Collection struct {
|
||||||
@ -46,7 +58,9 @@ type Collection struct {
|
|||||||
// jobs contain the SharePoint.Site.ListIDs for the associated list(s).
|
// jobs contain the SharePoint.Site.ListIDs for the associated list(s).
|
||||||
jobs []string
|
jobs []string
|
||||||
// M365 IDs of the items of this collection
|
// M365 IDs of the items of this collection
|
||||||
|
category DataCategory
|
||||||
service graph.Servicer
|
service graph.Servicer
|
||||||
|
ctrl control.Options
|
||||||
betaService *api.BetaService
|
betaService *api.BetaService
|
||||||
statusUpdater support.StatusUpdater
|
statusUpdater support.StatusUpdater
|
||||||
}
|
}
|
||||||
@ -55,7 +69,9 @@ type Collection struct {
|
|||||||
func NewCollection(
|
func NewCollection(
|
||||||
folderPath path.Path,
|
folderPath path.Path,
|
||||||
service graph.Servicer,
|
service graph.Servicer,
|
||||||
|
category DataCategory,
|
||||||
statusUpdater support.StatusUpdater,
|
statusUpdater support.StatusUpdater,
|
||||||
|
ctrlOpts control.Options,
|
||||||
) *Collection {
|
) *Collection {
|
||||||
c := &Collection{
|
c := &Collection{
|
||||||
fullPath: folderPath,
|
fullPath: folderPath,
|
||||||
@ -63,6 +79,8 @@ func NewCollection(
|
|||||||
data: make(chan data.Stream, collectionChannelBufferSize),
|
data: make(chan data.Stream, collectionChannelBufferSize),
|
||||||
service: service,
|
service: service,
|
||||||
statusUpdater: statusUpdater,
|
statusUpdater: statusUpdater,
|
||||||
|
category: category,
|
||||||
|
ctrl: ctrlOpts,
|
||||||
}
|
}
|
||||||
|
|
||||||
return c
|
return c
|
||||||
@ -106,6 +124,15 @@ type Item struct {
|
|||||||
deleted bool
|
deleted bool
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func NewItem(name string, d io.ReadCloser) *Item {
|
||||||
|
item := &Item{
|
||||||
|
id: name,
|
||||||
|
data: d,
|
||||||
|
}
|
||||||
|
|
||||||
|
return item
|
||||||
|
}
|
||||||
|
|
||||||
func (sd *Item) UUID() string {
|
func (sd *Item) UUID() string {
|
||||||
return sd.id
|
return sd.id
|
||||||
}
|
}
|
||||||
@ -133,7 +160,7 @@ func (sc *Collection) finishPopulation(ctx context.Context, attempts, success in
|
|||||||
status := support.CreateStatus(
|
status := support.CreateStatus(
|
||||||
ctx,
|
ctx,
|
||||||
support.Backup,
|
support.Backup,
|
||||||
len(sc.jobs),
|
1, // 1 folder
|
||||||
support.CollectionMetrics{
|
support.CollectionMetrics{
|
||||||
Objects: attempted,
|
Objects: attempted,
|
||||||
Successes: success,
|
Successes: success,
|
||||||
@ -151,12 +178,14 @@ func (sc *Collection) finishPopulation(ctx context.Context, attempts, success in
|
|||||||
// populate utility function to retrieve data from back store for a given collection
|
// populate utility function to retrieve data from back store for a given collection
|
||||||
func (sc *Collection) populate(ctx context.Context) {
|
func (sc *Collection) populate(ctx context.Context) {
|
||||||
var (
|
var (
|
||||||
objects, success int
|
metrics numMetrics
|
||||||
totalBytes, arrayLength int64
|
errs error
|
||||||
errs error
|
writer = kw.NewJsonSerializationWriter()
|
||||||
writer = kw.NewJsonSerializationWriter()
|
|
||||||
)
|
)
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
sc.finishPopulation(ctx, metrics.attempts, metrics.success, int64(metrics.totalBytes), errs)
|
||||||
|
}()
|
||||||
// TODO: Insert correct ID for CollectionProgress
|
// TODO: Insert correct ID for CollectionProgress
|
||||||
 	colProgress, closer := observe.CollectionProgress(
 		ctx,
@@ -167,25 +196,49 @@ func (sc *Collection) populate(ctx context.Context) {
 	defer func() {
 		close(colProgress)
-		sc.finishPopulation(ctx, objects, success, totalBytes, errs)
 	}()
 
-	// Retrieve list data from M365
+	// Switch retrieval function based on category
+	switch sc.category {
+	case List:
+		metrics, errs = sc.retrieveLists(ctx, writer, colProgress)
+	case Pages:
+		metrics, errs = sc.retrievePages(ctx, writer, colProgress)
+	}
 }
 
+// retrieveLists utility function for collection that downloads and serializes
+// models.Listable objects based on M365 IDs from the jobs field.
+func (sc *Collection) retrieveLists(
+	ctx context.Context,
+	wtr *kw.JsonSerializationWriter,
+	progress chan<- struct{},
+) (numMetrics, error) {
+	var (
+		errs    error
+		metrics numMetrics
+	)
 
 	lists, err := loadSiteLists(ctx, sc.service, sc.fullPath.ResourceOwner(), sc.jobs)
 	if err != nil {
-		errs = support.WrapAndAppend(sc.fullPath.ResourceOwner(), err, errs)
+		return metrics, errors.Wrap(err, sc.fullPath.ResourceOwner())
 	}
 
-	objects += len(lists)
-	// Write Data and Send
+	metrics.attempts += len(lists)
+	// For each models.Listable, the object is serialized and the metrics are collected.
+	// Progress is reported via the passed-in channel.
 	for _, lst := range lists {
-		byteArray, err := serializeListContent(writer, lst)
+		byteArray, err := serializeContent(wtr, lst)
 		if err != nil {
 			errs = support.WrapAndAppend(*lst.GetId(), err, errs)
+			if sc.ctrl.FailFast {
+				return metrics, errs
+			}
 
 			continue
 		}
 
-		arrayLength = int64(len(byteArray))
+		arrayLength := int64(len(byteArray))
 
 		if arrayLength > 0 {
 			t := time.Now()
@@ -193,9 +246,9 @@ func (sc *Collection) populate(ctx context.Context) {
 				t = *t1
 			}
 
-			totalBytes += arrayLength
+			metrics.totalBytes += arrayLength
 
-			success++
+			metrics.success++
 			sc.data <- &Item{
 				id:   *lst.GetId(),
 				data: io.NopCloser(bytes.NewReader(byteArray)),
@@ -203,15 +256,76 @@ func (sc *Collection) populate(ctx context.Context) {
 				modTime: t,
 			}
 
-			colProgress <- struct{}{}
+			progress <- struct{}{}
 		}
 	}
 
+	return metrics, nil
 }
 
-func serializeListContent(writer *kw.JsonSerializationWriter, lst models.Listable) ([]byte, error) {
+func (sc *Collection) retrievePages(
+	ctx context.Context,
+	wtr *kw.JsonSerializationWriter,
+	progress chan<- struct{},
+) (numMetrics, error) {
+	var (
+		errs    error
+		metrics numMetrics
+	)
+
+	betaService := sc.betaService
+	if betaService == nil {
+		return metrics, fmt.Errorf("beta service not found in collection")
+	}
+
+	pages, err := sapi.GetSitePages(ctx, betaService, sc.fullPath.ResourceOwner(), sc.jobs)
+	if err != nil {
+		return metrics, errors.Wrap(err, sc.fullPath.ResourceOwner())
+	}
+
+	metrics.attempts = len(pages)
+	// For each models.Pageable, the object is serialized and the metrics are collected and returned.
+	// Pageable objects are not supported in v1.0 of msgraph at this time.
+	// TODO: Verify Parsable interface supported with modified-Pageable
+	for _, pg := range pages {
+		byteArray, err := serializeContent(wtr, pg)
+		if err != nil {
+			errs = support.WrapAndAppend(*pg.GetId(), err, errs)
+			if sc.ctrl.FailFast {
+				return metrics, errs
+			}
+
+			continue
+		}
+
+		arrayLength := int64(len(byteArray))
+
+		if arrayLength > 0 {
+			t := time.Now()
+			if t1 := pg.GetLastModifiedDateTime(); t1 != nil {
+				t = *t1
+			}
+
+			metrics.totalBytes += arrayLength
+			metrics.success++
+			sc.data <- &Item{
+				id:      *pg.GetId(),
+				data:    io.NopCloser(bytes.NewReader(byteArray)),
+				info:    sharePointPageInfo(pg, arrayLength),
+				modTime: t,
+			}
+
+			progress <- struct{}{}
+		}
+	}
+
+	return metrics, nil
+}
+
+func serializeContent(writer *kw.JsonSerializationWriter, obj absser.Parsable) ([]byte, error) {
 	defer writer.Close()
 
-	err := writer.WriteObjectValue("", lst)
+	err := writer.WriteObjectValue("", obj)
 	if err != nil {
 		return nil, err
 	}
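The generalization from `serializeListContent` to `serializeContent` works because both lists and pages satisfy Kiota's `Parsable` interface. A minimal sketch of that serialize step, using only calls that appear elsewhere in this diff (`NewJsonSerializationWriter`, `WriteObjectValue`, `GetSerializedContent`, `Close`); unlike the patched code, which reuses a caller-supplied writer, this sketch builds a fresh writer per call:

```go
package main

import (
	absser "github.com/microsoft/kiota-abstractions-go/serialization"
	kw "github.com/microsoft/kiota-serialization-json-go"
)

// serialize renders any Kiota Parsable (a list, a page, etc.) to JSON bytes.
func serialize(obj absser.Parsable) ([]byte, error) {
	writer := kw.NewJsonSerializationWriter()
	defer writer.Close()

	// an empty key writes the object as the top-level JSON value
	if err := writer.WriteObjectValue("", obj); err != nil {
		return nil, err
	}

	// GetSerializedContent returns the accumulated JSON bytes
	return writer.GetSerializedContent()
}
```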
@@ -14,6 +14,7 @@ import (
 	"github.com/alcionai/corso/src/internal/common"
 	"github.com/alcionai/corso/src/internal/connector/mockconnector"
 	"github.com/alcionai/corso/src/internal/connector/onedrive"
+	"github.com/alcionai/corso/src/internal/connector/sharepoint/api"
 	"github.com/alcionai/corso/src/internal/connector/support"
 	"github.com/alcionai/corso/src/internal/data"
 	"github.com/alcionai/corso/src/internal/tester"
@@ -50,7 +51,7 @@ func TestSharePointCollectionSuite(t *testing.T) {
 	suite.Run(t, new(SharePointCollectionSuite))
 }
 
-func (suite *SharePointCollectionSuite) TestSharePointDataReader_Valid() {
+func (suite *SharePointCollectionSuite) TestCollection_Item_Read() {
 	t := suite.T()
 	m := []byte("test message")
 	name := "aFile"
@@ -65,73 +66,109 @@ func (suite *SharePointCollectionSuite) TestSharePointDataReader_Valid() {
 	assert.Equal(t, readData, m)
 }
 
-// TestSharePointListCollection tests basic functionality to create
+// TestListCollection tests basic functionality to create
 // SharePoint collection and to use the data stream channel.
-func (suite *SharePointCollectionSuite) TestSharePointListCollection() {
+func (suite *SharePointCollectionSuite) TestCollection_Items() {
 	t := suite.T()
-
-	ow := kioser.NewJsonSerializationWriter()
-	listing := mockconnector.GetMockListDefault("Mock List")
-	testName := "MockListing"
-	listing.SetDisplayName(&testName)
-
-	err := ow.WriteObjectValue("", listing)
-	require.NoError(t, err)
-
-	byteArray, err := ow.GetSerializedContent()
-	require.NoError(t, err)
-
-	dir, err := path.Builder{}.Append("directory").
-		ToDataLayerSharePointPath(
-			"some",
-			"user",
-			path.ListsCategory,
-			false)
-	require.NoError(t, err)
-
-	col := NewCollection(dir, nil, nil)
-	col.data <- &Item{
-		id:   testName,
-		data: io.NopCloser(bytes.NewReader(byteArray)),
-		info: sharePointListInfo(listing, int64(len(byteArray))),
-	}
-
-	readItems := []data.Stream{}
-
-	for item := range col.Items() {
-		readItems = append(readItems, item)
-	}
-
-	require.Equal(t, len(readItems), 1)
-	item := readItems[0]
-	shareInfo, ok := item.(data.StreamInfo)
-	require.True(t, ok)
-	require.NotNil(t, shareInfo.Info())
-	require.NotNil(t, shareInfo.Info().SharePoint)
-	assert.Equal(t, testName, shareInfo.Info().SharePoint.ItemName)
-}
-
-func (suite *SharePointCollectionSuite) TestCollectPages() {
-	ctx, flush := tester.NewContext()
-	defer flush()
-
-	t := suite.T()
-	col, err := collectPages(
-		ctx,
-		suite.creds,
-		nil,
-		account.AzureTenantID,
-		suite.siteID,
-		nil,
-		&MockGraphService{},
-		control.Defaults(),
-	)
-	assert.NoError(t, err)
-	assert.NotEmpty(t, col)
-}
+	tenant := "some"
+	user := "user"
+	dirRoot := "directory"
+	tables := []struct {
+		name, itemName string
+		category       DataCategory
+		getDir         func(t *testing.T) path.Path
+		getItem        func(t *testing.T, itemName string) *Item
+	}{
+		{
+			name:     "List",
+			itemName: "MockListing",
+			category: List,
+			getDir: func(t *testing.T) path.Path {
+				dir, err := path.Builder{}.Append(dirRoot).
+					ToDataLayerSharePointPath(
+						tenant,
+						user,
+						path.ListsCategory,
+						false)
+				require.NoError(t, err)
+
+				return dir
+			},
+			getItem: func(t *testing.T, name string) *Item {
+				ow := kioser.NewJsonSerializationWriter()
+				listing := mockconnector.GetMockListDefault(name)
+				listing.SetDisplayName(&name)
+
+				err := ow.WriteObjectValue("", listing)
+				require.NoError(t, err)
+
+				byteArray, err := ow.GetSerializedContent()
+				require.NoError(t, err)
+
+				data := &Item{
+					id:   name,
+					data: io.NopCloser(bytes.NewReader(byteArray)),
+					info: sharePointListInfo(listing, int64(len(byteArray))),
+				}
+
+				return data
+			},
+		},
+		{
+			name:     "Pages",
+			itemName: "MockPages",
+			category: Pages,
+			getDir: func(t *testing.T) path.Path {
+				dir, err := path.Builder{}.Append(dirRoot).
+					ToDataLayerSharePointPath(
+						tenant,
+						user,
+						path.PagesCategory,
+						false)
+				require.NoError(t, err)
+
+				return dir
+			},
+			getItem: func(t *testing.T, itemName string) *Item {
+				byteArray := mockconnector.GetMockPage(itemName)
+				page, err := support.CreatePageFromBytes(byteArray)
+				require.NoError(t, err)
+
+				data := &Item{
+					id:   itemName,
+					data: io.NopCloser(bytes.NewReader(byteArray)),
+					info: api.PageInfo(page, int64(len(byteArray))),
+				}
+
+				return data
+			},
+		},
+	}
+
+	for _, test := range tables {
+		t.Run(test.name, func(t *testing.T) {
+			col := NewCollection(test.getDir(t), nil, test.category, nil, control.Defaults())
+			col.data <- test.getItem(t, test.itemName)
+
+			readItems := []data.Stream{}
+
+			for item := range col.Items() {
+				readItems = append(readItems, item)
+			}
+
+			require.Equal(t, len(readItems), 1)
+			item := readItems[0]
+			shareInfo, ok := item.(data.StreamInfo)
+			require.True(t, ok)
+			require.NotNil(t, shareInfo.Info())
+			require.NotNil(t, shareInfo.Info().SharePoint)
+			assert.Equal(t, test.itemName, shareInfo.Info().SharePoint.ItemName)
+		})
	}
 }
 
 // TestRestoreListCollection verifies Graph Restore API for the List Collection
-func (suite *SharePointCollectionSuite) TestRestoreListCollection() {
+func (suite *SharePointCollectionSuite) TestListCollection_Restore() {
 	ctx, flush := tester.NewContext()
 	defer flush()
 
@@ -30,11 +30,11 @@ func DataCollections(
 	ctx context.Context,
 	itemClient *http.Client,
 	selector selectors.Selector,
-	tenantID string,
+	creds account.M365Config,
 	serv graph.Servicer,
 	su statusUpdater,
 	ctrlOpts control.Options,
-) ([]data.Collection, map[string]struct{}, error) {
+) ([]data.BackupCollection, map[string]struct{}, error) {
 	b, err := selector.ToSharePointBackup()
 	if err != nil {
 		return nil, nil, errors.Wrap(err, "sharePointDataCollection: parsing selector")
@@ -42,7 +42,7 @@ func DataCollections(
 
 	var (
 		site        = b.DiscreteOwner
-		collections = []data.Collection{}
+		collections = []data.BackupCollection{}
 		errs        error
 	)
 
@@ -54,14 +54,14 @@ func DataCollections(
 		defer closer()
 		defer close(foldersComplete)
 
-		var spcs []data.Collection
+		var spcs []data.BackupCollection
 
 		switch scope.Category().PathType() {
 		case path.ListsCategory:
 			spcs, err = collectLists(
 				ctx,
 				serv,
-				tenantID,
+				creds.AzureTenantID,
 				site,
 				su,
 				ctrlOpts)
@@ -74,7 +74,7 @@ func DataCollections(
 				ctx,
 				itemClient,
 				serv,
-				tenantID,
+				creds.AzureTenantID,
 				site,
 				scope,
 				su,
@@ -82,6 +82,17 @@ func DataCollections(
 			if err != nil {
 				return nil, nil, support.WrapAndAppend(site, err, errs)
 			}
+		case path.PagesCategory:
+			spcs, err = collectPages(
+				ctx,
+				creds,
+				serv,
+				site,
+				su,
+				ctrlOpts)
+			if err != nil {
+				return nil, nil, support.WrapAndAppend(site, err, errs)
+			}
 		}
 
 		collections = append(collections, spcs...)
@@ -97,10 +108,10 @@ func collectLists(
 	tenantID, siteID string,
 	updater statusUpdater,
 	ctrlOpts control.Options,
-) ([]data.Collection, error) {
+) ([]data.BackupCollection, error) {
 	logger.Ctx(ctx).With("site", siteID).Debug("Creating SharePoint List Collections")
 
-	spcs := make([]data.Collection, 0)
+	spcs := make([]data.BackupCollection, 0)
 
 	tuples, err := preFetchLists(ctx, serv, siteID)
 	if err != nil {
@@ -118,7 +129,7 @@ func collectLists(
 		return nil, errors.Wrapf(err, "failed to create collection path for site: %s", siteID)
 	}
 
-	collection := NewCollection(dir, serv, updater.UpdateStatus)
+	collection := NewCollection(dir, serv, List, updater.UpdateStatus, ctrlOpts)
 	collection.AddJob(tuple.id)
 
 	spcs = append(spcs, collection)
@@ -137,9 +148,9 @@ func collectLibraries(
 	scope selectors.SharePointScope,
 	updater statusUpdater,
 	ctrlOpts control.Options,
-) ([]data.Collection, map[string]struct{}, error) {
+) ([]data.BackupCollection, map[string]struct{}, error) {
 	var (
-		collections = []data.Collection{}
+		collections = []data.BackupCollection{}
 		errs        error
 	)
 
@@ -166,24 +177,24 @@ func collectLibraries(
 }
 
 // collectPages constructs a sharepoint Collections struct and Get()s the associated
-// M365 IDs for the associated Pages
+// M365 IDs for the associated Pages.
 func collectPages(
 	ctx context.Context,
 	creds account.M365Config,
 	serv graph.Servicer,
-	tenantID, siteID string,
-	scope selectors.SharePointScope,
+	siteID string,
 	updater statusUpdater,
 	ctrlOpts control.Options,
-) ([]data.Collection, error) {
+) ([]data.BackupCollection, error) {
 	logger.Ctx(ctx).With("site", siteID).Debug("Creating SharePoint Pages collections")
 
-	spcs := make([]data.Collection, 0)
+	spcs := make([]data.BackupCollection, 0)
 
 	// make the betaClient
+	// Need to receive From DataCollection Call
 	adpt, err := graph.CreateAdapter(creds.AzureTenantID, creds.AzureClientID, creds.AzureClientSecret)
 	if err != nil {
-		return nil, errors.Wrap(err, "adapter for betaservice not created")
+		return nil, errors.New("unable to create adapter w/ env credentials")
 	}
 
 	betaService := api.NewBetaService(adpt)
@@ -196,7 +207,7 @@ func collectPages(
 	for _, tuple := range tuples {
 		dir, err := path.Builder{}.Append(tuple.Name).
 			ToDataLayerSharePointPath(
-				tenantID,
+				creds.AzureTenantID,
 				siteID,
 				path.PagesCategory,
 				false)
@@ -204,7 +215,7 @@ func collectPages(
 		return nil, errors.Wrapf(err, "failed to create collection path for site: %s", siteID)
 	}
 
-	collection := NewCollection(dir, serv, updater.UpdateStatus)
+	collection := NewCollection(dir, serv, Pages, updater.UpdateStatus, ctrlOpts)
 	collection.betaService = betaService
 	collection.AddJob(tuple.ID)
 
@@ -5,6 +5,7 @@ import (
 
 	"github.com/microsoftgraph/msgraph-sdk-go/models"
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 	"github.com/stretchr/testify/suite"
 
 	"github.com/alcionai/corso/src/internal/connector/graph"
@@ -100,7 +101,7 @@ func (suite *SharePointLibrariesSuite) TestUpdateCollections() {
 				&MockGraphService{},
 				nil,
 				control.Options{})
-			err := c.UpdateCollections(ctx, "driveID", "General", test.items, paths, newPaths, excluded)
+			err := c.UpdateCollections(ctx, "driveID", "General", test.items, paths, newPaths, excluded, true)
 			test.expect(t, err)
 			assert.Equal(t, len(test.expectedCollectionPaths), len(c.CollectionMap), "collection paths")
 			assert.Equal(t, test.expectedItemCount, c.NumItems, "item count")
@@ -128,3 +129,38 @@ func driveItem(name string, path string, isFile bool) models.DriveItemable {
 
 	return item
 }
+
+type SharePointPagesSuite struct {
+	suite.Suite
+}
+
+func TestSharePointPagesSuite(t *testing.T) {
+	tester.RunOnAny(
+		t,
+		tester.CorsoCITests,
+		tester.CorsoGraphConnectorTests,
+		tester.CorsoGraphConnectorSharePointTests)
+	suite.Run(t, new(SharePointPagesSuite))
+}
+
+func (suite *SharePointPagesSuite) TestCollectPages() {
+	ctx, flush := tester.NewContext()
+	defer flush()
+
+	t := suite.T()
+	siteID := tester.M365SiteID(t)
+	a := tester.NewM365Account(t)
+	account, err := a.M365Config()
+	require.NoError(t, err)
+
+	col, err := collectPages(
+		ctx,
+		account,
+		nil,
+		siteID,
+		&MockGraphService{},
+		control.Defaults(),
+	)
+	assert.NoError(t, err)
+	assert.NotEmpty(t, col)
+}
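The suite added above only runs when one of the named test groups is enabled, via `tester.RunOnAny`. As a rough, generic sketch of what that kind of gate does (the env-var name below is hypothetical; corso's `tester` package encapsulates the real lookup):

```go
// runOnAny skips the calling test unless at least one of the requested
// groups is enabled through the environment.
func runOnAny(t *testing.T, groups ...string) {
	t.Helper()

	enabled := os.Getenv("ENABLED_TEST_GROUPS") // hypothetical variable name

	for _, g := range groups {
		if strings.Contains(enabled, g) {
			return // a requested group is enabled; let the suite run
		}
	}

	t.Skipf("none of %v enabled", groups)
}
```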
@@ -8,19 +8,20 @@ func _() {
 	// An "invalid array index" compiler error signifies that the constant values have changed.
 	// Re-run the stringer command to generate them again.
 	var x [1]struct{}
-	_ = x[Unknown-1]
-	_ = x[List-2]
-	_ = x[Drive-3]
+	_ = x[Unknown-2]
+	_ = x[List-3]
+	_ = x[Drive-4]
+	_ = x[Pages-5]
 }
 
-const _DataCategory_name = "UnknownListDrive"
+const _DataCategory_name = "UnknownListDrivePages"
 
-var _DataCategory_index = [...]uint8{0, 7, 11, 16}
+var _DataCategory_index = [...]uint8{0, 7, 11, 16, 21}
 
 func (i DataCategory) String() string {
-	i -= 1
+	i -= 2
 	if i < 0 || i >= DataCategory(len(_DataCategory_index)-1) {
-		return "DataCategory(" + strconv.FormatInt(int64(i+1), 10) + ")"
+		return "DataCategory(" + strconv.FormatInt(int64(i+2), 10) + ")"
 	}
 	return _DataCategory_name[_DataCategory_index[i]:_DataCategory_index[i+1]]
 }
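For context on the generated code above: `stringer` concatenates all constant names into one string and records the byte offset where each name starts in the index array, so `String()` is a pair of slice lookups. A self-contained sketch with the same values as this hunk (the constant base of 2 is taken from the generated bounds check, not from corso source shown here):

```go
package main

import "fmt"

type DataCategory int

// values assumed from the `_ = x[Unknown-2]` bounds check above
const (
	Unknown DataCategory = iota + 2
	List
	Drive
	Pages
)

const name = "UnknownListDrivePages"

// index[i] is the byte offset where the i-th name begins; index[i+1] ends it
var index = [...]uint8{0, 7, 11, 16, 21}

func (i DataCategory) String() string {
	i -= 2 // shift so the first named constant maps to index 0
	if i < 0 || i >= DataCategory(len(index)-1) {
		return fmt.Sprintf("DataCategory(%d)", int(i+2))
	}
	return name[index[i]:index[i+1]]
}

func main() {
	fmt.Println(List, Pages) // List Pages
}
```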
@@ -17,6 +17,16 @@ import (
 // ---------------------------------------------------------------------------
 type MockGraphService struct{}
 
+type MockUpdater struct {
+	UpdateState func(*support.ConnectorOperationStatus)
+}
+
+func (mu *MockUpdater) UpdateStatus(input *support.ConnectorOperationStatus) {
+	if mu.UpdateState != nil {
+		mu.UpdateState(input)
+	}
+}
+
 //------------------------------------------------------------
 // Interface Functions: @See graph.Service
 //------------------------------------------------------------
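The `UpdateState` function field makes the mock's behavior injectable per test. A hypothetical sketch of how a test could capture the reported status with it (the test name and zero-value status are illustrative, not corso code):

```go
func TestStatusIsReported(t *testing.T) {
	var got *support.ConnectorOperationStatus

	// record whatever status the code under test reports
	mu := &MockUpdater{
		UpdateState: func(s *support.ConnectorOperationStatus) { got = s },
	}

	// ...run the code under test, passing mu.UpdateStatus as its updater...
	mu.UpdateStatus(&support.ConnectorOperationStatus{})

	if got == nil {
		t.Fatal("expected an operation status to be reported")
	}
}
```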
@@ -3,6 +3,7 @@ package sharepoint
 import (
 	"context"
 	"fmt"
+	"sync"
 
 	"github.com/microsoftgraph/msgraph-sdk-go/models"
 	mssite "github.com/microsoftgraph/msgraph-sdk-go/sites"
@@ -91,33 +92,65 @@ func loadSiteLists(
 	listIDs []string,
 ) ([]models.Listable, error) {
 	var (
 		results = make([]models.Listable, 0)
-		errs error
+		semaphoreCh = make(chan struct{}, fetchChannelSize)
+		errs        error
+		wg          sync.WaitGroup
+		m           sync.Mutex
 	)
 
-	for _, listID := range listIDs {
-		entry, err := gs.Client().SitesById(siteID).ListsById(listID).Get(ctx, nil)
-		if err != nil {
-			errs = support.WrapAndAppend(
-				listID,
-				errors.Wrap(err, support.ConnectorStackErrorTrace(err)),
-				errs,
-			)
-		}
-
-		cols, cTypes, lItems, err := fetchListContents(ctx, gs, siteID, listID)
-		if err == nil {
-			entry.SetColumns(cols)
-			entry.SetContentTypes(cTypes)
-			entry.SetItems(lItems)
-		} else {
-			errs = support.WrapAndAppend("unable to fetchRelationships during loadSiteLists", err, errs)
-			continue
-		}
-
-		results = append(results, entry)
-	}
-
+	defer close(semaphoreCh)
+
+	errUpdater := func(id string, err error) {
+		m.Lock()
+		errs = support.WrapAndAppend(id, err, errs)
+		m.Unlock()
+	}
+
+	updateLists := func(list models.Listable) {
+		m.Lock()
+		results = append(results, list)
+		m.Unlock()
+	}
+
+	for _, listID := range listIDs {
+		semaphoreCh <- struct{}{}
+
+		wg.Add(1)
+
+		go func(id string) {
+			defer wg.Done()
+			defer func() { <-semaphoreCh }()
+
+			var (
+				entry models.Listable
+				err   error
+			)
+
+			err = graph.RunWithRetry(func() error {
+				entry, err = gs.Client().SitesById(siteID).ListsById(id).Get(ctx, nil)
+				return err
+			})
+			if err != nil {
+				errUpdater(id, support.ConnectorStackErrorTraceWrap(err, ""))
+				return
+			}
+
+			cols, cTypes, lItems, err := fetchListContents(ctx, gs, siteID, id)
+			if err != nil {
+				errUpdater(id, errors.Wrap(err, "unable to fetchRelationships during loadSiteLists"))
+				return
+			}
+
+			entry.SetColumns(cols)
+			entry.SetContentTypes(cTypes)
+			entry.SetItems(lItems)
+			updateLists(entry)
+		}(listID)
+	}
+
+	wg.Wait()
+
 	if errs != nil {
 		return nil, errs
 	}
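This hunk turns a serial per-list fetch into a bounded fan-out: a buffered channel acts as a semaphore to cap in-flight Graph calls, a `WaitGroup` joins the workers, and a mutex guards the shared result and error accumulators. A minimal, runnable sketch of the same pattern (`fetch` stands in for the per-list Graph call; names are illustrative):

```go
package main

import (
	"fmt"
	"sync"
)

func fetchAll(ids []string, limit int, fetch func(string) (string, error)) ([]string, error) {
	var (
		sem     = make(chan struct{}, limit) // semaphore: at most `limit` workers
		wg      sync.WaitGroup
		mu      sync.Mutex
		results []string
		errs    error
	)

	for _, id := range ids {
		sem <- struct{}{} // acquire a slot; blocks once `limit` workers are running
		wg.Add(1)

		go func(id string) {
			defer wg.Done()
			defer func() { <-sem }() // release the slot

			r, err := fetch(id)

			mu.Lock()
			defer mu.Unlock()

			if err != nil {
				errs = fmt.Errorf("%s: %w (previous: %v)", id, err, errs)
				return
			}

			results = append(results, r)
		}(id)
	}

	wg.Wait()

	if errs != nil {
		return nil, errs
	}

	return results, nil
}

func main() {
	out, err := fetchAll([]string{"a", "b", "c"}, 2, func(id string) (string, error) {
		return "list-" + id, nil
	})
	fmt.Println(out, err)
}
```

Note that, as in the patched code, results arrive in completion order rather than input order; that is acceptable here because the caller treats the lists as a set.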
@@ -9,11 +9,14 @@ import (
 	"github.com/microsoftgraph/msgraph-sdk-go/models"
 	"github.com/pkg/errors"
 
+	discover "github.com/alcionai/corso/src/internal/connector/discovery/api"
 	"github.com/alcionai/corso/src/internal/connector/graph"
 	"github.com/alcionai/corso/src/internal/connector/onedrive"
+	"github.com/alcionai/corso/src/internal/connector/sharepoint/api"
 	"github.com/alcionai/corso/src/internal/connector/support"
 	"github.com/alcionai/corso/src/internal/data"
 	D "github.com/alcionai/corso/src/internal/diagnostics"
+	"github.com/alcionai/corso/src/pkg/account"
 	"github.com/alcionai/corso/src/pkg/backup/details"
 	"github.com/alcionai/corso/src/pkg/control"
 	"github.com/alcionai/corso/src/pkg/logger"
@@ -27,7 +30,7 @@ import (
 // -- Switch:
 // ---- Libraries restored via the same workflow as oneDrive
 // ---- Lists call RestoreCollection()
-// ----> for each data.Stream within Collection.Items()
+// ----> for each data.Stream within RestoreCollection.Items()
 // ----> restoreListItems() is called
 // Restored List can be found in the Site's `Site content` page
 // Restored Libraries can be found within the Site's `Pages` page
@@ -37,9 +40,10 @@ import (
 func RestoreCollections(
 	ctx context.Context,
 	backupVersion int,
+	creds account.M365Config,
 	service graph.Servicer,
 	dest control.RestoreDestination,
-	dcs []data.Collection,
+	dcs []data.RestoreCollection,
 	deets *details.Builder,
 ) (*support.ConnectorOperationStatus, error) {
 	var (
@@ -74,7 +78,7 @@ func RestoreCollections(
 				false,
 			)
 		case path.ListsCategory:
-			metrics, canceled = RestoreCollection(
+			metrics, canceled = RestoreListCollection(
 				ctx,
 				service,
 				dc,
@@ -83,11 +87,14 @@ func RestoreCollections(
 				errUpdater,
 			)
 		case path.PagesCategory:
-			errorMessage := fmt.Sprintf("restore of %s not supported", dc.FullPath().Category())
-			logger.Ctx(ctx).Error(errorMessage)
-
-			return nil, errors.New(errorMessage)
+			metrics, canceled = RestorePageCollection(
+				ctx,
+				creds,
+				dc,
+				dest.ContainerName,
+				deets,
+				errUpdater,
+			)
 		default:
 			return nil, errors.Errorf("category %s not supported", dc.FullPath().Category())
 		}
@@ -209,15 +216,15 @@ func restoreListItem(
 	return dii, nil
 }
 
-func RestoreCollection(
+func RestoreListCollection(
 	ctx context.Context,
 	service graph.Servicer,
-	dc data.Collection,
+	dc data.RestoreCollection,
 	restoreContainerName string,
 	deets *details.Builder,
 	errUpdater func(string, error),
 ) (support.CollectionMetrics, bool) {
-	ctx, end := D.Span(ctx, "gc:sharepoint:restoreCollection", D.Label("path", dc.FullPath()))
+	ctx, end := D.Span(ctx, "gc:sharepoint:restoreListCollection", D.Label("path", dc.FullPath()))
 	defer end()
 
 	var (
@@ -225,7 +232,7 @@ func RestoreListCollection(
 		directory = dc.FullPath()
 	)
 
-	trace.Log(ctx, "gc:sharepoint:restoreCollection", directory.String())
+	trace.Log(ctx, "gc:sharepoint:restoreListCollection", directory.String())
 	siteID := directory.ResourceOwner()
 
 	// Restore items from the collection
@@ -276,3 +283,83 @@ func RestoreListCollection(
 		}
 	}
 }
+
+// RestorePageCollection handles restoration of an individual site page collection.
+// returns:
+// - the collection's item and byte count metrics
+// - the context cancellation state. True iff context is canceled.
+func RestorePageCollection(
+	ctx context.Context,
+	creds account.M365Config,
+	dc data.RestoreCollection,
+	restoreContainerName string,
+	deets *details.Builder,
+	errUpdater func(string, error),
+) (support.CollectionMetrics, bool) {
+	ctx, end := D.Span(ctx, "gc:sharepoint:restorePageCollection", D.Label("path", dc.FullPath()))
+	defer end()
+
+	var (
+		metrics   = support.CollectionMetrics{}
+		directory = dc.FullPath()
+	)
+
+	adpt, err := graph.CreateAdapter(creds.AzureTenantID, creds.AzureClientID, creds.AzureClientSecret)
+	if err != nil {
+		return metrics, false
+	}
+
+	service := discover.NewBetaService(adpt)
+
+	trace.Log(ctx, "gc:sharepoint:restorePageCollection", directory.String())
+	siteID := directory.ResourceOwner()
+
+	// Restore items from collection
+	items := dc.Items()
+
+	for {
+		select {
+		case <-ctx.Done():
+			errUpdater("context canceled", ctx.Err())
+			return metrics, true
+
+		case itemData, ok := <-items:
+			if !ok {
+				return metrics, false
+			}
+			metrics.Objects++
+
+			itemInfo, err := api.RestoreSitePage(
+				ctx,
+				service,
+				itemData,
+				siteID,
+				restoreContainerName,
+			)
+			if err != nil {
+				errUpdater(itemData.UUID(), err)
+				continue
+			}
+
+			metrics.TotalBytes += itemInfo.SharePoint.Size
+
+			itemPath, err := dc.FullPath().Append(itemData.UUID(), true)
+			if err != nil {
+				logger.Ctx(ctx).Errorw("transforming item to full path", "error", err)
+				errUpdater(itemData.UUID(), err)
+
+				continue
+			}
+
+			deets.Add(
+				itemPath.String(),
+				itemPath.ShortRef(),
+				"",
+				true,
+				itemInfo,
+			)
+
+			metrics.Successes++
+		}
+	}
+}
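The receive loop in `RestorePageCollection` uses a `select` over `ctx.Done()` and the item channel, so the restore stops promptly on cancellation while still detecting normal channel closure. A standalone sketch of that loop shape (`process` stands in for the per-item restore call):

```go
package main

import (
	"context"
	"fmt"
)

func drain(ctx context.Context, items <-chan string, process func(string) error) (done int, canceled bool) {
	for {
		select {
		case <-ctx.Done():
			// the caller canceled; report partial progress
			return done, true

		case it, ok := <-items:
			if !ok {
				// channel closed: all items consumed
				return done, false
			}

			if err := process(it); err != nil {
				continue // record-and-skip failed items; keep restoring the rest
			}

			done++
		}
	}
}

func main() {
	ch := make(chan string, 2)
	ch <- "page-1"
	ch <- "page-2"
	close(ch)

	n, canceled := drain(context.Background(), ch, func(string) error { return nil })
	fmt.Println(n, canceled) // 2 false
}
```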
@@ -8,29 +8,8 @@ import (
 	multierror "github.com/hashicorp/go-multierror"
 	msgraph_errors "github.com/microsoftgraph/msgraph-sdk-go/models/odataerrors"
 	"github.com/pkg/errors"
-
-	"github.com/alcionai/corso/src/internal/common"
 )
 
-// GraphConnector has two types of errors that are exported
-// RecoverableGCError is a query error that can be overcome with time
-type RecoverableGCError struct {
-	common.Err
-}
-
-func SetRecoverableError(e error) error {
-	return RecoverableGCError{*common.EncapsulateError(e)}
-}
-
-// NonRecoverableGCError is a permanent query error
-type NonRecoverableGCError struct {
-	common.Err
-}
-
-func SetNonRecoverableError(e error) error {
-	return NonRecoverableGCError{*common.EncapsulateError(e)}
-}
-
 // WrapErrorAndAppend helper function used to attach identifying information to an error
 // and return it as a mulitierror
 func WrapAndAppend(identifier string, e, previous error) error {
@@ -101,7 +80,7 @@ func ConnectorStackErrorTraceWrap(e error, prefix string) error {
 	return errors.Wrap(e, prefix)
 }
 
-// ConnectorStackErrorTracew is a helper function that extracts
+// ConnectorStackErrorTrace is a helper function that extracts
 // the stack trace for oDataErrors, if the error has one.
 func ConnectorStackErrorTrace(e error) string {
 	eMessage := ""
@@ -41,26 +41,6 @@ func (suite *GraphConnectorErrorSuite) TestWrapAndAppend_OnVar() {
 	suite.True(strings.Contains(received.Error(), id))
 }
 
-func (suite *GraphConnectorErrorSuite) TestAsRecoverableError() {
-	err := assert.AnError
-
-	rcv := RecoverableGCError{}
-	suite.False(errors.As(err, &rcv))
-
-	aRecoverable := SetRecoverableError(err)
-	suite.True(errors.As(aRecoverable, &rcv))
-}
-
-func (suite *GraphConnectorErrorSuite) TestAsNonRecoverableError() {
-	err := assert.AnError
-
-	noRecover := NonRecoverableGCError{}
-	suite.False(errors.As(err, &noRecover))
-
-	nonRecoverable := SetNonRecoverableError(err)
-	suite.True(errors.As(nonRecoverable, &noRecover))
-}
-
 func (suite *GraphConnectorErrorSuite) TestWrapAndAppend_Add3() {
 	errOneTwo := WrapAndAppend("user1", assert.AnError, assert.AnError)
 	combined := WrapAndAppend("unix36", assert.AnError, errOneTwo)
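For context on what was removed: the `RecoverableGCError`/`NonRecoverableGCError` types implemented the typed-error classification pattern, where a struct embedding an error lets callers sort failures with `errors.As`. A minimal sketch of that pattern with illustrative names (not corso code):

```go
package main

import (
	"errors"
	"fmt"
)

// recoverableError marks a failure the caller may retry.
type recoverableError struct{ error }

func markRecoverable(e error) error { return recoverableError{e} }

func main() {
	err := markRecoverable(errors.New("throttled"))

	var rec recoverableError
	fmt.Println(errors.As(err, &rec)) // true: caller may retry
}
```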
@@ -3,11 +3,12 @@ package support
 import (
 	"strings"
 
-	bmodels "github.com/alcionai/corso/src/internal/connector/graph/betasdk/models"
 	absser "github.com/microsoft/kiota-abstractions-go/serialization"
 	js "github.com/microsoft/kiota-serialization-json-go"
 	"github.com/microsoftgraph/msgraph-sdk-go/models"
 	"github.com/pkg/errors"
+
+	bmodels "github.com/alcionai/corso/src/internal/connector/graph/betasdk/models"
 )
 
 // CreateFromBytes helper function to initialize m365 object form bytes.
@@ -7,7 +7,11 @@ import (
 	"github.com/microsoftgraph/msgraph-sdk-go/models"
 )
 
-const itemAttachment = "#microsoft.graph.itemAttachment"
+//==========================================================
+// m365Transform.go contains utility functions that
+// either add, modify, or remove fields from M365
+// objects for interaction with M365 services
+//=========================================================
 
 // CloneMessageableFields places data from original data into new message object.
 // SingleLegacyValueProperty is not populated during this operation
@@ -282,14 +286,36 @@ func cloneColumnDefinitionable(orig models.ColumnDefinitionable) models.ColumnDe
 	return newColumn
 }
 
+// ===============================================================================================
+// Sanitization section
+// Set of functions that support ItemAttachmentable object restoration.
+// These attachments can be nested as well as possess one of the other
+// reference types. To ensure proper upload, each interior `item` requires
+// that certain fields be modified.
+// ItemAttachment:
+// https://learn.microsoft.com/en-us/graph/api/resources/itemattachment?view=graph-rest-1.0
+// https://learn.microsoft.com/en-us/exchange/client-developer/exchange-web-services/attachments-and-ews-in-exchange
+// https://learn.microsoft.com/en-us/exchange/client-developer/exchange-web-services/folders-and-items-in-ews-in-exchange
+// ===============================================================================================
+// M365 models possess a field, OData.Type, which indicates
+// the intended model in string format.
+// The constants listed here identify the itemAttachment
+// ODataType values currently supported for Restore operations.
+//
+//nolint:lll
+const (
+	itemAttachment  = "#microsoft.graph.itemAttachment"
+	eventItemType   = "#microsoft.graph.event"
+	mailItemType    = "#microsoft.graph.message"
+	contactItemType = "#microsoft.graph.contact"
+)
+
 // ToItemAttachment transforms internal item, OutlookItemables, into
 // objects that are able to be uploaded into M365.
-// Supported Internal Items:
-// - Events
 func ToItemAttachment(orig models.Attachmentable) (models.Attachmentable, error) {
 	transform, ok := orig.(models.ItemAttachmentable)
-	supported := "#microsoft.graph.event"
 
 	if !ok { // Shouldn't ever happen
 		return nil, fmt.Errorf("transforming attachment to item attachment")
 	}
@@ -298,7 +324,14 @@ func ToItemAttachment(orig models.Attachmentable) (models.Attachmentable, error)
 	itemType := item.GetOdataType()
 
 	switch *itemType {
-	case supported:
+	case contactItemType:
+		contact := item.(models.Contactable)
+		revised := sanitizeContact(contact)
+
+		transform.SetItem(revised)
+
+		return transform, nil
+	case eventItemType:
 		event := item.(models.Eventable)
 
 		newEvent, err := sanitizeEvent(event)
@@ -308,12 +341,54 @@ func ToItemAttachment(orig models.Attachmentable) (models.Attachmentable, error)
 
 		transform.SetItem(newEvent)
 
+		return transform, nil
+	case mailItemType:
+		message := item.(models.Messageable)
+
+		newMessage, err := sanitizeMessage(message)
+		if err != nil {
+			return nil, err
+		}
+
+		transform.SetItem(newMessage)
+
 		return transform, nil
 	default:
 		return nil, fmt.Errorf("exiting ToItemAttachment: %s not supported", *itemType)
 	}
 }
+
+// TODO #2428 (dadam39): re-apply nested attachments for itemAttachments
+// func sanitizeAttachments(attached []models.Attachmentable) ([]models.Attachmentable, error) {
+// 	attachments := make([]models.Attachmentable, len(attached))
+
+// 	for _, ax := range attached {
+// 		if *ax.GetOdataType() == itemAttachment {
+// 			newAttachment, err := ToItemAttachment(ax)
+// 			if err != nil {
+// 				return nil, err
+// 			}
+
+// 			attachments = append(attachments, newAttachment)
+
+// 			continue
+// 		}
+
+// 		attachments = append(attachments, ax)
+// 	}
+
+// 	return attachments, nil
+// }
+
+// sanitizeContact removes fields which prevent a Contact from
+// being uploaded as an attachment.
+func sanitizeContact(orig models.Contactable) models.Contactable {
+	orig.SetParentFolderId(nil)
+	orig.SetAdditionalData(nil)
+
+	return orig
+}
 
 // sanitizeEvent transfers data into event object and
 // removes unique IDs from the M365 object
 func sanitizeEvent(orig models.Eventable) (models.Eventable, error) {
@@ -324,7 +399,9 @@ func sanitizeEvent(orig models.Eventable) (models.Eventable, error) {
 	newEvent.SetCalendar(orig.GetCalendar())
 	newEvent.SetCreatedDateTime(orig.GetCreatedDateTime())
 	newEvent.SetEnd(orig.GetEnd())
-	newEvent.SetHasAttachments(orig.GetHasAttachments())
+	// TODO: dadams39 Nested attachments not supported
+	// Upstream: https://github.com/microsoft/kiota-serialization-json-go/issues/61
+	newEvent.SetHasAttachments(nil)
 	newEvent.SetHideAttendees(orig.GetHideAttendees())
 	newEvent.SetImportance(orig.GetImportance())
 	newEvent.SetIsAllDay(orig.GetIsAllDay())
@@ -337,7 +414,7 @@ func sanitizeEvent(orig models.Eventable) (models.Eventable, error) {
 	newEvent.SetSubject(orig.GetSubject())
 	newEvent.SetType(orig.GetType())
 
-	// Sanitation
+	// Sanitation NOTE
 	// isDraft and isOrganizer *bool ptr's have to be removed completely
 	// from JSON in order for POST method to succeed.
 	// Current as of 2/2/2023
@@ -346,25 +423,34 @@ func sanitizeEvent(orig models.Eventable) (models.Eventable, error) {
 	newEvent.SetIsDraft(nil)
 	newEvent.SetAdditionalData(orig.GetAdditionalData())
 
-	attached := orig.GetAttachments()
-	attachments := make([]models.Attachmentable, len(attached))
-
-	for _, ax := range attached {
-		if *ax.GetOdataType() == itemAttachment {
-			newAttachment, err := ToItemAttachment(ax)
-			if err != nil {
-				return nil, err
-			}
-
-			attachments = append(attachments, newAttachment)
-
-			continue
-		}
-
-		attachments = append(attachments, ax)
-	}
-
-	newEvent.SetAttachments(attachments)
-
+	// TODO #2428 (dadam39): re-apply nested attachments for itemAttachments
+	// Upstream: https://github.com/microsoft/kiota-serialization-json-go/issues/61
+	// attachments, err := sanitizeAttachments(message.GetAttachments())
+	// if err != nil {
+	// 	return nil, err
+	// }
+	newEvent.SetAttachments(nil)
+
 	return newEvent, nil
 }
+
+func sanitizeMessage(orig models.Messageable) (models.Messageable, error) {
+	message := ToMessage(orig)
+
+	// TODO #2428 (dadam39): re-apply nested attachments for itemAttachments
+	// Upstream: https://github.com/microsoft/kiota-serialization-json-go/issues/61
+	// attachments, err := sanitizeAttachments(message.GetAttachments())
+	// if err != nil {
+	// 	return nil, err
+	// }
+	message.SetAttachments(nil)
+
+	// The following fields are set to nil to
+	// not interfere with M365 guard checks.
+	message.SetHasAttachments(nil)
+	message.SetParentFolderId(nil)
+	message.SetInternetMessageHeaders(nil)
+	message.SetIsDraft(nil)
+
+	return message, nil
+}
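The shape of `ToItemAttachment` is a dispatch on the attachment's `@odata.type` string, with a type-specific sanitizer applied before upload. A small illustrative sketch of that pattern (the `item` struct and field names are hypothetical stand-ins, not Graph SDK types):

```go
package main

import "fmt"

const (
	eventType   = "#microsoft.graph.event"
	mailType    = "#microsoft.graph.message"
	contactType = "#microsoft.graph.contact"
)

type item struct {
	odataType string
	fields    map[string]string
}

// sanitize branches on the OData type and strips fields the Graph POST
// endpoint is known to reject on upload; unsupported types fail loudly.
func sanitize(it *item) (*item, error) {
	switch it.odataType {
	case contactType, mailType, eventType:
		delete(it.fields, "parentFolderId")
		delete(it.fields, "isDraft")
		return it, nil
	default:
		return nil, fmt.Errorf("%s not supported", it.odataType)
	}
}

func main() {
	it := &item{odataType: mailType, fields: map[string]string{"parentFolderId": "x"}}
	out, err := sanitize(it)
	fmt.Println(out.fields, err) // map[] <nil>
}
```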
@@ -1,6 +1,8 @@
 package data
 
 import (
+	"context"
+	"errors"
 	"io"
 	"time"
 
@@ -12,6 +14,8 @@ import (
 // standard ifaces
 // ------------------------------------------------------------------------------------------------
 
+var ErrNotFound = errors.New("not found")
+
 type CollectionState int
 
 const (
@@ -21,8 +25,8 @@ const (
 	DeletedState
 )
 
-// A Collection represents a compilation of data from the
-// same type application (e.g. mail)
+// A Collection represents the set of data within a single logical location
+// denoted by FullPath.
 type Collection interface {
 	// Items returns a channel from which items in the collection can be read.
 	// Each returned struct contains the next item in the collection
@@ -30,10 +34,13 @@ type Collection interface {
 	// an unrecoverable error caused an early termination in the sender.
 	Items() <-chan Stream
 	// FullPath returns a path struct that acts as a metadata tag for this
-	// DataCollection. Returned items should be ordered from most generic to least
-	// generic. For example, a DataCollection for emails from a specific user
-	// would be {"<tenant id>", "exchange", "<user ID>", "emails"}.
+	// Collection.
 	FullPath() path.Path
+}
+
+// BackupCollection is an extension of Collection that is used during backups.
+type BackupCollection interface {
+	Collection
 	// PreviousPath returns the path.Path this collection used to reside at
 	// (according to the M365 ID for the container) if the collection was moved or
 	// renamed. Returns nil if the collection is new.
@@ -58,6 +65,25 @@ type Collection interface {
 	DoNotMergeItems() bool
 }
 
+// RestoreCollection is an extension of Collection that is used during restores.
+type RestoreCollection interface {
+	Collection
+	// Fetch retrieves an item with the given name from the Collection if it
+	// exists. Items retrieved with Fetch may still appear in the channel returned
+	// by Items().
+	Fetch(ctx context.Context, name string) (Stream, error)
+}
+
+// NotFoundRestoreCollection is a wrapper for a Collection that returns
+// ErrNotFound for all Fetch calls.
+type NotFoundRestoreCollection struct {
+	Collection
+}
+
+func (c NotFoundRestoreCollection) Fetch(context.Context, string) (Stream, error) {
+	return nil, ErrNotFound
+}
+
 // Stream represents a single item within a Collection
 // that can be consumed as a stream (it embeds io.Reader)
 type Stream interface {
@@ -87,37 +113,20 @@ type StreamModTime interface {
 	ModTime() time.Time
 }
 
-// ------------------------------------------------------------------------------------------------
-// functionality
-// ------------------------------------------------------------------------------------------------
-
-// ResourceOwnerSet extracts the set of unique resource owners from the
-// slice of Collections.
-func ResourceOwnerSet(cs []Collection) []string {
-	rs := map[string]struct{}{}
-
-	for _, c := range cs {
-		fp := c.FullPath()
-		if fp == nil {
-			// Deleted collections have their full path set to nil but the previous
-			// path will be populated.
-			fp = c.PreviousPath()
-		}
-
-		if fp == nil {
-			// This should not happen, but keep us from hitting a nil pointer
-			// exception if it does somehow occur. Statistics will be off though.
-			continue
-		}
-
-		rs[fp.ResourceOwner()] = struct{}{}
-	}
-
-	rss := make([]string, 0, len(rs))
-
-	for k := range rs {
-		rss = append(rss, k)
-	}
-
-	return rss
-}
+// StateOf lets us figure out the state of the collection from the
+// previous and current path
+func StateOf(prev, curr path.Path) CollectionState {
+	if curr == nil || len(curr.String()) == 0 {
+		return DeletedState
+	}
+
+	if prev == nil || len(prev.String()) == 0 {
+		return NewState
+	}
+
+	if curr.Folder() != prev.Folder() {
+		return MovedState
+	}
+
+	return NotMovedState
+}
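`StateOf` encodes a small decision table, and the check order matters: deletion is tested before newness because a deleted collection has a previous path but no current one. A runnable sketch of the same table with a minimal stand-in for `path.Path` (only `Folder()` and `String()` are exercised by the logic):

```go
package main

import "fmt"

type fakePath string

func (p fakePath) String() string { return string(p) }
func (p fakePath) Folder() string { return string(p) }

type state int

const (
	newState state = iota
	notMoved
	moved
	deleted
)

func stateOf(prev, curr fakePath) state {
	if len(curr.String()) == 0 {
		return deleted // previous backup had it; current doesn't
	}
	if len(prev.String()) == 0 {
		return newState // first time we see this container
	}
	if curr.Folder() != prev.Folder() {
		return moved // container renamed or relocated
	}
	return notMoved
}

func main() {
	fmt.Println(stateOf("", "a/b"))    // 0: new
	fmt.Println(stateOf("a/b", "a/b")) // 1: not moved
	fmt.Println(stateOf("a/b", "a/c")) // 2: moved
	fmt.Println(stateOf("a/b", ""))    // 3: deleted
}
```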
@@ -10,89 +10,57 @@ import (
 	"github.com/alcionai/corso/src/pkg/path"
 )
 
-type mockColl struct {
-	p     path.Path
-	prevP path.Path
-}
-
-func (mc mockColl) Items() <-chan Stream {
-	return nil
-}
-
-func (mc mockColl) FullPath() path.Path {
-	return mc.p
-}
-
-func (mc mockColl) PreviousPath() path.Path {
-	return mc.prevP
-}
-
-func (mc mockColl) State() CollectionState {
-	return NewState
-}
-
-func (mc mockColl) DoNotMergeItems() bool {
-	return false
-}
-
-type CollectionSuite struct {
+type DataCollectionSuite struct {
 	suite.Suite
 }
 
-// ------------------------------------------------------------------------------------------------
-// tests
-// ------------------------------------------------------------------------------------------------
-
-func TestCollectionSuite(t *testing.T) {
-	suite.Run(t, new(CollectionSuite))
+func TestDataCollectionSuite(t *testing.T) {
+	suite.Run(t, new(DataCollectionSuite))
 }
 
-func (suite *CollectionSuite) TestResourceOwnerSet() {
-	t := suite.T()
-	toColl := func(t *testing.T, resource string) Collection {
-		p, err := path.Builder{}.
-			Append("foo").
-			ToDataLayerExchangePathForCategory("tid", resource, path.EventsCategory, false)
-		require.NoError(t, err)
-
-		return mockColl{p, nil}
-	}
+func (suite *DataCollectionSuite) TestStateOf() {
+	fooP, err := path.Builder{}.
+		Append("foo").
+		ToDataLayerExchangePathForCategory("t", "u", path.EmailCategory, false)
+	require.NoError(suite.T(), err)
+	barP, err := path.Builder{}.
+		Append("bar").
+		ToDataLayerExchangePathForCategory("t", "u", path.EmailCategory, false)
+	require.NoError(suite.T(), err)
 
 	table := []struct {
 		name   string
-		input  []Collection
-		expect []string
+		prev   path.Path
+		curr   path.Path
+		expect CollectionState
 	}{
 		{
-			name:   "empty",
-			input:  []Collection{},
-			expect: []string{},
+			name:   "new",
+			curr:   fooP,
+			expect: NewState,
 		},
 		{
-			name:   "nil",
-			input:  nil,
-			expect: []string{},
+			name:   "not moved",
+			prev:   fooP,
+			curr:   fooP,
+			expect: NotMovedState,
 		},
 		{
-			name:   "single resource",
-			input:  []Collection{toColl(t, "fnords")},
-			expect: []string{"fnords"},
+			name:   "moved",
+			prev:   fooP,
+			curr:   barP,
+			expect: MovedState,
 		},
 		{
-			name:   "multiple resource",
-			input:  []Collection{toColl(t, "fnords"), toColl(t, "smarfs")},
-			expect: []string{"fnords", "smarfs"},
-		},
-		{
-			name:   "duplciate resources",
-			input:  []Collection{toColl(t, "fnords"), toColl(t, "smarfs"), toColl(t, "fnords")},
-			expect: []string{"fnords", "smarfs"},
+			name:   "deleted",
+			prev:   fooP,
+			expect: DeletedState,
 		},
 	}
 	for _, test := range table {
 		suite.T().Run(test.name, func(t *testing.T) {
-			rs := ResourceOwnerSet(test.input)
-			assert.ElementsMatch(t, test.expect, rs)
+			state := StateOf(test.prev, test.curr)
+			assert.Equal(t, test.expect, state)
 		})
 	}
 }
@ -7,7 +7,7 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/pkg/errors"
|
"github.com/alcionai/clues"
|
||||||
analytics "github.com/rudderlabs/analytics-go"
|
analytics "github.com/rudderlabs/analytics-go"
|
||||||
|
|
||||||
"github.com/alcionai/corso/src/internal/version"
|
"github.com/alcionai/corso/src/internal/version"
|
||||||
@ -93,7 +93,7 @@ func NewBus(ctx context.Context, s storage.Storage, tenID string, opts control.O
|
|||||||
})
|
})
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return Bus{}, errors.Wrap(err, "configuring event bus")
|
return Bus{}, clues.Wrap(err, "configuring event bus").WithClues(ctx)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
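
Note: this is the first of many pkg/errors → clues swaps in this merge. Based only on the calls that appear in these diffs (clues.Add, clues.Wrap, clues.Stack, WithClues), the working model is that key/values accumulate on the context and WithClues copies them onto the error at the failure site. A sketch under that assumption; `dial` is a placeholder, not a real API:

```go
// dial stands in for any fallible call; illustrative only.
func dial(ctx context.Context) error { return nil }

func connectBus(ctx context.Context, tenantID string) error {
	// stash key/values on the context as they become known
	ctx = clues.Add(ctx, "tenant_id", tenantID)

	if err := dial(ctx); err != nil {
		// message via Wrap, context values via WithClues
		return clues.Wrap(err, "configuring event bus").WithClues(ctx)
	}

	return nil
}
```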

@@ -6,6 +6,7 @@ import (
 	"sync"
 	"time"
 
+	"github.com/alcionai/clues"
 	"github.com/kopia/kopia/fs"
 	"github.com/kopia/kopia/repo"
 	"github.com/kopia/kopia/repo/blob"
@@ -17,7 +18,6 @@ import (
 	"github.com/kopia/kopia/snapshot/snapshotfs"
 	"github.com/pkg/errors"
 
-	"github.com/alcionai/corso/src/internal/common"
 	"github.com/alcionai/corso/src/pkg/storage"
 )
 
@@ -29,11 +29,9 @@ const (
 	defaultSchedulingInterval = time.Second * 0
 )
 
-const defaultConfigErrTmpl = "setting default repo config values"
-
 var (
-	errInit    = errors.New("initializing repo")
-	errConnect = errors.New("connecting repo")
+	ErrSettingDefaultConfig = errors.New("setting default repo config values")
+	ErrorRepoAlreadyExists  = errors.New("repo already exists")
 )
 
 // Having all fields set to 0 causes it to keep max-int versions of snapshots.
@@ -53,19 +51,6 @@ type snapshotLoader interface {
 	SnapshotRoot(man *snapshot.Manifest) (fs.Entry, error)
 }
 
-type ErrorRepoAlreadyExists struct {
-	common.Err
-}
-
-func RepoAlreadyExistsError(e error) error {
-	return ErrorRepoAlreadyExists{*common.EncapsulateError(e)}
-}
-
-func IsRepoAlreadyExistsError(e error) bool {
-	var erae ErrorRepoAlreadyExists
-	return errors.As(e, &erae)
-}
-
 var (
 	_ snapshotManager = &conn{}
 	_ snapshotLoader  = &conn{}
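
Note: the struct-based ErrorRepoAlreadyExists, its constructor, and IsRepoAlreadyExistsError (deleted above) collapse into the exported sentinel var. clues.Stack chains errors the way fmt.Errorf's %w does, so callers switch to plain errors.Is, as the test change later in this merge shows. Sketch under that assumption; `initRepo` is a stand-in, not a real function:

```go
func initialize(ctx context.Context, initRepo func(context.Context) error) error {
	if err := initRepo(ctx); err != nil {
		if errors.Is(err, repo.ErrAlreadyInitialized) {
			// Stack chains both, so errors.Is matches either error.
			return clues.Stack(ErrorRepoAlreadyExists, err).WithClues(ctx)
		}

		return clues.Wrap(err, "initializing repo").WithClues(ctx)
	}

	return nil
}

// caller side: if errors.Is(err, ErrorRepoAlreadyExists) { ... }
```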
@@ -87,22 +72,22 @@ func NewConn(s storage.Storage) *conn {
 func (w *conn) Initialize(ctx context.Context) error {
 	bst, err := blobStoreByProvider(ctx, w.storage)
 	if err != nil {
-		return errors.Wrap(err, errInit.Error())
+		return errors.Wrap(err, "initializing storage")
 	}
 	defer bst.Close(ctx)
 
 	cfg, err := w.storage.CommonConfig()
 	if err != nil {
-		return err
+		return clues.Stack(err).WithClues(ctx)
 	}
 
 	// todo - issue #75: nil here should be a storage.NewRepoOptions()
 	if err = repo.Initialize(ctx, bst, nil, cfg.CorsoPassphrase); err != nil {
 		if errors.Is(err, repo.ErrAlreadyInitialized) {
-			return RepoAlreadyExistsError(err)
+			return clues.Stack(ErrorRepoAlreadyExists, err).WithClues(ctx)
 		}
 
-		return errors.Wrap(err, errInit.Error())
+		return clues.Wrap(err, "initializing repo").WithClues(ctx)
 	}
 
 	return w.commonConnect(
@@ -117,13 +102,13 @@ func (w *conn) Initialize(ctx context.Context) error {
 func (w *conn) Connect(ctx context.Context) error {
 	bst, err := blobStoreByProvider(ctx, w.storage)
 	if err != nil {
-		return errors.Wrap(err, errInit.Error())
+		return errors.Wrap(err, "initializing storage")
 	}
 	defer bst.Close(ctx)
 
 	cfg, err := w.storage.CommonConfig()
 	if err != nil {
-		return err
+		return clues.Stack(err).WithClues(ctx)
 	}
 
 	return w.commonConnect(
@@ -162,14 +147,18 @@ func (w *conn) commonConnect(
 		password,
 		opts,
 	); err != nil {
-		return errors.Wrap(err, errConnect.Error())
+		return clues.Wrap(err, "connecting to repo").WithClues(ctx)
 	}
 
 	if err := w.open(ctx, cfgFile, password); err != nil {
-		return err
+		return clues.Stack(err).WithClues(ctx)
 	}
 
-	return w.setDefaultConfigValues(ctx)
+	if err := w.setDefaultConfigValues(ctx); err != nil {
+		return clues.Stack(err).WithClues(ctx)
+	}
+
+	return nil
 }
 
 func blobStoreByProvider(ctx context.Context, s storage.Storage) (blob.Storage, error) {
@@ -177,7 +166,7 @@ func blobStoreByProvider(ctx context.Context, s storage.Storage) (blob.Storage,
 	case storage.ProviderS3:
 		return s3BlobStorage(ctx, s)
 	default:
-		return nil, errors.New("storage provider details are required")
+		return nil, clues.New("storage provider details are required").WithClues(ctx)
 	}
 }
 
@@ -204,7 +193,11 @@ func (w *conn) close(ctx context.Context) error {
 	err := w.Repository.Close(ctx)
 	w.Repository = nil
 
-	return errors.Wrap(err, "closing repository connection")
+	if err != nil {
+		return clues.Wrap(err, "closing repository connection").WithClues(ctx)
+	}
+
+	return nil
 }
 
 func (w *conn) open(ctx context.Context, configPath, password string) error {
@@ -216,7 +209,7 @@ func (w *conn) open(ctx context.Context, configPath, password string) error {
 	// TODO(ashmrtnz): issue #75: nil here should be storage.ConnectionOptions().
 	rep, err := repo.Open(ctx, configPath, password, nil)
 	if err != nil {
-		return errors.Wrap(err, "opening repository connection")
+		return clues.Wrap(err, "opening repository connection").WithClues(ctx)
 	}
 
 	w.Repository = rep
@@ -229,7 +222,7 @@ func (w *conn) wrap() error {
 	defer w.mu.Unlock()
 
 	if w.refCount == 0 {
-		return errors.New("conn already closed")
+		return clues.New("conn already closed")
 	}
 
 	w.refCount++
@@ -240,12 +233,12 @@ func (w *conn) wrap() error {
 func (w *conn) setDefaultConfigValues(ctx context.Context) error {
 	p, err := w.getGlobalPolicyOrEmpty(ctx)
 	if err != nil {
-		return errors.Wrap(err, defaultConfigErrTmpl)
+		return clues.Stack(ErrSettingDefaultConfig, err)
 	}
 
 	changed, err := updateCompressionOnPolicy(defaultCompressor, p)
 	if err != nil {
-		return errors.Wrap(err, defaultConfigErrTmpl)
+		return clues.Stack(ErrSettingDefaultConfig, err)
 	}
 
 	if updateRetentionOnPolicy(defaultRetention, p) {
@@ -260,10 +253,11 @@ func (w *conn) setDefaultConfigValues(ctx context.Context) error {
 		return nil
 	}
 
-	return errors.Wrap(
-		w.writeGlobalPolicy(ctx, "UpdateGlobalPolicyWithDefaults", p),
-		"updating global policy with defaults",
-	)
+	if err := w.writeGlobalPolicy(ctx, "UpdateGlobalPolicyWithDefaults", p); err != nil {
+		return clues.Wrap(err, "updating global policy with defaults")
+	}
+
+	return nil
 }
 
 // Compression attempts to set the global compression policy for the kopia repo
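
Note: several hunks in this file trade the wrap-the-call-in-one-expression form for an explicit branch. Both behave identically on success, since errors.Wrap returns nil for a nil error; the branch form just makes the success path explicit and gives clues a natural place to hang context. Side by side, using the lines from the hunk above:

```go
// Before: relies on errors.Wrap(nil, ...) == nil.
return errors.Wrap(
	w.writeGlobalPolicy(ctx, "UpdateGlobalPolicyWithDefaults", p),
	"updating global policy with defaults",
)

// After: the failure branch is explicit, and easy to extend with
// .WithClues(ctx) or extra key/values later.
if err := w.writeGlobalPolicy(ctx, "UpdateGlobalPolicyWithDefaults", p); err != nil {
	return clues.Wrap(err, "updating global policy with defaults")
}

return nil
```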
@@ -273,7 +267,7 @@ func (w *conn) Compression(ctx context.Context, compressor string) error {
 	// compressor was given.
 	comp := compression.Name(compressor)
 	if err := checkCompressor(comp); err != nil {
-		return err
+		return clues.Stack(err).WithClues(ctx)
 	}
 
 	p, err := w.getGlobalPolicyOrEmpty(ctx)
@@ -283,17 +277,18 @@ func (w *conn) Compression(ctx context.Context, compressor string) error {
 
 	changed, err := updateCompressionOnPolicy(compressor, p)
 	if err != nil {
-		return err
+		return clues.Stack(err).WithClues(ctx)
 	}
 
 	if !changed {
 		return nil
 	}
 
-	return errors.Wrap(
-		w.writeGlobalPolicy(ctx, "UpdateGlobalCompressionPolicy", p),
-		"updating global compression policy",
-	)
+	if err := w.writeGlobalPolicy(ctx, "UpdateGlobalCompressionPolicy", p); err != nil {
+		return clues.Wrap(err, "updating global compression policy")
+	}
+
+	return nil
 }
 
 func updateCompressionOnPolicy(compressor string, p *policy.Policy) (bool, error) {
@@ -349,7 +344,7 @@ func (w *conn) getPolicyOrEmpty(ctx context.Context, si snapshot.SourceInfo) (*p
 			return &policy.Policy{}, nil
 		}
 
-		return nil, errors.Wrapf(err, "getting backup policy for %+v", si)
+		return nil, clues.Wrap(err, "getting backup policy").With("source_info", si).WithClues(ctx)
 	}
 
 	return p, nil
@@ -370,16 +365,22 @@ func (w *conn) writePolicy(
 	si snapshot.SourceInfo,
 	p *policy.Policy,
 ) error {
-	err := repo.WriteSession(
-		ctx,
-		w.Repository,
-		repo.WriteSessionOptions{Purpose: purpose},
-		func(innerCtx context.Context, rw repo.RepositoryWriter) error {
-			return policy.SetPolicy(ctx, rw, si, p)
-		},
-	)
-
-	return errors.Wrapf(err, "updating policy for %+v", si)
+	ctx = clues.Add(ctx, "source_info", si)
+
+	writeOpts := repo.WriteSessionOptions{Purpose: purpose}
+	cb := func(innerCtx context.Context, rw repo.RepositoryWriter) error {
+		if err := policy.SetPolicy(ctx, rw, si, p); err != nil {
+			return clues.Stack(err).WithClues(innerCtx)
+		}
+
+		return nil
+	}
+
+	if err := repo.WriteSession(ctx, w.Repository, writeOpts, cb); err != nil {
+		return clues.Wrap(err, "updating policy").WithClues(ctx)
+	}
+
+	return nil
 }
 
 func checkCompressor(compressor compression.Name) error {
@@ -389,14 +390,19 @@ func checkCompressor(compressor compression.Name) error {
 		}
 	}
 
-	return errors.Errorf("unknown compressor type %s", compressor)
+	return clues.Stack(clues.New("unknown compressor type"), clues.New(string(compressor)))
 }
 
 func (w *conn) LoadSnapshots(
 	ctx context.Context,
 	ids []manifest.ID,
 ) ([]*snapshot.Manifest, error) {
-	return snapshot.LoadSnapshots(ctx, w.Repository, ids)
+	mans, err := snapshot.LoadSnapshots(ctx, w.Repository, ids)
+	if err != nil {
+		return nil, clues.Stack(err).WithClues(ctx)
+	}
+
+	return mans, nil
 }
 
 func (w *conn) SnapshotRoot(man *snapshot.Manifest) (fs.Entry, error) {

@@ -85,7 +85,7 @@ func (suite *WrapperIntegrationSuite) TestRepoExistsError() {
 
 	err := k.Initialize(ctx)
 	assert.Error(t, err)
-	assert.True(t, IsRepoAlreadyExistsError(err))
+	assert.ErrorIs(t, err, ErrorRepoAlreadyExists)
 }
 
 func (suite *WrapperIntegrationSuite) TestBadProviderErrors() {

@@ -1,20 +1,26 @@
 package kopia
 
 import (
+	"context"
 	"io"
 
+	"github.com/alcionai/clues"
+	"github.com/kopia/kopia/fs"
+
 	"github.com/alcionai/corso/src/internal/data"
 	"github.com/alcionai/corso/src/pkg/path"
 )
 
 var (
-	_ data.Collection = &kopiaDataCollection{}
+	_ data.RestoreCollection = &kopiaDataCollection{}
 	_ data.Stream = &kopiaDataStream{}
 )
 
 type kopiaDataCollection struct {
 	path    path.Path
 	streams []data.Stream
+	snapshotRoot fs.Entry
+	counter      ByteCounter
 }
 
 func (kdc *kopiaDataCollection) Items() <-chan data.Stream {
@@ -35,16 +41,23 @@ func (kdc kopiaDataCollection) FullPath() path.Path {
 	return kdc.path
 }
 
-func (kdc kopiaDataCollection) PreviousPath() path.Path {
-	return nil
-}
-
-func (kdc kopiaDataCollection) State() data.CollectionState {
-	return data.NewState
-}
-
-func (kdc kopiaDataCollection) DoNotMergeItems() bool {
-	return false
+func (kdc kopiaDataCollection) Fetch(
+	ctx context.Context,
+	name string,
+) (data.Stream, error) {
+	if kdc.snapshotRoot == nil {
+		return nil, clues.New("no snapshot root")
+	}
+
+	p, err := kdc.FullPath().Append(name, true)
+	if err != nil {
+		return nil, clues.Wrap(err, "creating item path")
+	}
+
+	// TODO(ashmrtn): We could possibly hold a reference to the folder this
+	// collection corresponds to, but that requires larger changes for the
+	// creation of these collections.
+	return getItemStream(ctx, p, kdc.snapshotRoot, kdc.counter)
}
 
 type kopiaDataStream struct {
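
Note: kopiaDataCollection now satisfies data.RestoreCollection, with Fetch resolving a single named item under the collection's path by walking the retained snapshot root. An illustrative caller follows; `readItem` and its error policy are mine, while Fetch, data.ErrNotFound, and Stream.ToReader come from this merge (see TestFetch below):

```go
func readItem(ctx context.Context, col data.RestoreCollection, name string) ([]byte, error) {
	s, err := col.Fetch(ctx, name)
	if err != nil {
		// a missing item surfaces as data.ErrNotFound
		return nil, err
	}

	return io.ReadAll(s.ToReader())
}
```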

@@ -2,14 +2,20 @@ package kopia
 
 import (
 	"bytes"
+	"context"
+	"errors"
 	"io"
 	"testing"
 
+	"github.com/kopia/kopia/fs"
+	"github.com/kopia/kopia/fs/virtualfs"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"github.com/stretchr/testify/suite"
 
+	"github.com/alcionai/corso/src/internal/connector/mockconnector"
 	"github.com/alcionai/corso/src/internal/data"
+	"github.com/alcionai/corso/src/internal/tester"
 	"github.com/alcionai/corso/src/pkg/path"
 )
 
@@ -113,3 +119,172 @@ func (suite *KopiaDataCollectionUnitSuite) TestReturnsStreams() {
 		})
 	}
 }
+
+// These types are needed because we check that a fs.File was returned.
+// Unfortunately fs.StreamingFile and fs.File have different interfaces so we
+// have to fake things.
+type mockSeeker struct{}
+
+func (s mockSeeker) Seek(offset int64, whence int) (int64, error) {
+	return 0, errors.New("not implemented")
+}
+
+type mockReader struct {
+	io.ReadCloser
+	mockSeeker
+}
+
+func (r mockReader) Entry() (fs.Entry, error) {
+	return nil, errors.New("not implemented")
+}
+
+type mockFile struct {
+	// Use for Entry interface.
+	fs.StreamingFile
+	r io.ReadCloser
+}
+
+func (f *mockFile) Open(ctx context.Context) (fs.Reader, error) {
+	return mockReader{ReadCloser: f.r}, nil
+}
+
+func (suite *KopiaDataCollectionUnitSuite) TestFetch() {
+	var (
+		tenant   = "a-tenant"
+		user     = "a-user"
+		service  = path.ExchangeService.String()
+		category = path.EmailCategory
+		folder1  = "folder1"
+		folder2  = "folder2"
+
+		noErrFileName = "noError"
+		errFileName   = "error"
+
+		noErrFileData = "foo bar baz"
+
+		errReader = &mockconnector.MockExchangeData{
+			ReadErr: assert.AnError,
+		}
+	)
+
+	// Needs to be a function so we can switch the serialization version as
+	// needed.
+	getLayout := func(serVersion uint32) fs.Entry {
+		return virtualfs.NewStaticDirectory(encodeAsPath(tenant), []fs.Entry{
+			virtualfs.NewStaticDirectory(encodeAsPath(service), []fs.Entry{
+				virtualfs.NewStaticDirectory(encodeAsPath(user), []fs.Entry{
+					virtualfs.NewStaticDirectory(encodeAsPath(category.String()), []fs.Entry{
+						virtualfs.NewStaticDirectory(encodeAsPath(folder1), []fs.Entry{
+							virtualfs.NewStaticDirectory(encodeAsPath(folder2), []fs.Entry{
+								&mockFile{
+									StreamingFile: virtualfs.StreamingFileFromReader(
+										encodeAsPath(noErrFileName),
+										nil,
+									),
+									r: newBackupStreamReader(
+										serVersion,
+										io.NopCloser(bytes.NewReader([]byte(noErrFileData))),
+									),
+								},
+								&mockFile{
+									StreamingFile: virtualfs.StreamingFileFromReader(
+										encodeAsPath(errFileName),
+										nil,
+									),
+									r: newBackupStreamReader(
+										serVersion,
+										errReader.ToReader(),
+									),
+								},
+							}),
+						}),
+					}),
+				}),
+			}),
+		})
+	}
+
+	b := path.Builder{}.Append(folder1, folder2)
+	pth, err := b.ToDataLayerExchangePathForCategory(
+		tenant,
+		user,
+		category,
+		false,
+	)
+	require.NoError(suite.T(), err)
+
+	table := []struct {
+		name                      string
+		inputName                 string
+		inputSerializationVersion uint32
+		expectedData              []byte
+		lookupErr                 assert.ErrorAssertionFunc
+		readErr                   assert.ErrorAssertionFunc
+		notFoundErr               bool
+	}{
+		{
+			name:                      "FileFound_NoError",
+			inputName:                 noErrFileName,
+			inputSerializationVersion: serializationVersion,
+			expectedData:              []byte(noErrFileData),
+			lookupErr:                 assert.NoError,
+			readErr:                   assert.NoError,
+		},
+		{
+			name:                      "FileFound_ReadError",
+			inputName:                 errFileName,
+			inputSerializationVersion: serializationVersion,
+			lookupErr:                 assert.NoError,
+			readErr:                   assert.Error,
+		},
+		{
+			name:                      "FileFound_VersionError",
+			inputName:                 noErrFileName,
+			inputSerializationVersion: serializationVersion + 1,
+			lookupErr:                 assert.NoError,
+			readErr:                   assert.Error,
+		},
+		{
+			name:                      "FileNotFound",
+			inputName:                 "foo",
+			inputSerializationVersion: serializationVersion + 1,
+			lookupErr:                 assert.Error,
+			notFoundErr:               true,
+		},
+	}
+	for _, test := range table {
+		suite.Run(test.name, func() {
+			ctx, flush := tester.NewContext()
+			defer flush()
+
+			t := suite.T()
+
+			root := getLayout(test.inputSerializationVersion)
+			c := &i64counter{}
+
+			col := &kopiaDataCollection{path: pth, snapshotRoot: root, counter: c}
+
+			s, err := col.Fetch(ctx, test.inputName)
+
+			test.lookupErr(t, err)
+
+			if err != nil {
+				if test.notFoundErr {
+					assert.ErrorIs(t, err, data.ErrNotFound)
+				}
+
+				return
+			}
+
+			fileData, err := io.ReadAll(s.ToReader())
+
+			test.readErr(t, err)
+
+			if err != nil {
+				return
+			}
+
+			assert.Equal(t, test.expectedData, fileData)
+		})
+	}
+}
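
Note: the FileFound_VersionError case passes serializationVersion + 1 to newBackupStreamReader, so the lookup succeeds but the first read fails. That implies the stream reader prefixes payloads with a version header that the collection's read path validates. A hypothetical reader-side gate; the header width and byte order here are assumptions, not taken from the diff:

```go
// Assumed format: a fixed-width version header ahead of the payload.
func checkSerializationVersion(r io.Reader, want uint32) error {
	var got uint32

	if err := binary.Read(r, binary.BigEndian, &got); err != nil {
		return clues.Wrap(err, "reading serialization version")
	}

	if got != want {
		return clues.New("unsupported serialization version")
	}

	return nil
}
```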

@@ -4,12 +4,14 @@ import (
 	"context"
 	"strconv"
 
+	"github.com/alcionai/clues"
 	"github.com/google/uuid"
 	"github.com/kopia/kopia/repo"
 	"github.com/kopia/kopia/repo/manifest"
 	"github.com/pkg/errors"
 	"golang.org/x/exp/maps"
 
+	"github.com/alcionai/corso/src/internal/data"
 	"github.com/alcionai/corso/src/internal/model"
 )
 
@@ -20,7 +22,6 @@ const (
 )
 
 var (
-	ErrNotFound       = errors.New("not found")
 	errNoModelStoreID = errors.New("model has no ModelStoreID")
 	errNoStableID     = errors.New("model has no StableID")
 	errBadTagKey      = errors.New("tag key overlaps with required key")
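
Note: ErrNotFound leaves this package for internal/data, so the model store and the restore collections report misses with one shared sentinel (the Fetch test earlier asserts the same value). Illustrative caller; `getOrNil` is mine, the signatures it uses are from this merge:

```go
// A miss from the model store is now the same sentinel a collection Fetch
// reports, so one branch covers both sources.
func getOrNil(ctx context.Context, ms *ModelStore, id model.StableID, m model.Model) error {
	err := ms.Get(ctx, model.BackupOpSchema, id, m)
	if errors.Is(err, data.ErrNotFound) {
		return nil // clean miss
	}

	return err
}
```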
@@ -59,7 +60,7 @@ func (ms *ModelStore) Close(ctx context.Context) error {
 // bad model type is given.
 func tagsForModel(s model.Schema, tags map[string]string) (map[string]string, error) {
 	if _, ok := tags[manifest.TypeLabelKey]; ok {
-		return nil, errors.WithStack(errBadTagKey)
+		return nil, clues.Stack(errBadTagKey)
 	}
 
 	res := make(map[string]string, len(tags)+1)
@@ -80,11 +81,11 @@ func tagsForModelWithID(
 	tags map[string]string,
 ) (map[string]string, error) {
 	if !s.Valid() {
-		return nil, errors.WithStack(errUnrecognizedSchema)
+		return nil, clues.Stack(errUnrecognizedSchema)
 	}
 
 	if len(id) == 0 {
-		return nil, errors.WithStack(errNoStableID)
+		return nil, clues.Stack(errNoStableID)
 	}
 
 	res, err := tagsForModel(s, tags)
@@ -93,13 +94,13 @@ func tagsForModelWithID(
 	}
 
 	if _, ok := res[stableIDKey]; ok {
-		return nil, errors.WithStack(errBadTagKey)
+		return nil, clues.Stack(errBadTagKey)
 	}
 
 	res[stableIDKey] = string(id)
 
 	if _, ok := res[modelVersionKey]; ok {
-		return nil, errors.WithStack(errBadTagKey)
+		return nil, clues.Stack(errBadTagKey)
 	}
 
 	res[modelVersionKey] = strconv.Itoa(version)
@@ -117,7 +118,7 @@ func putInner(
 	create bool,
 ) error {
 	if !s.Valid() {
-		return errors.WithStack(errUnrecognizedSchema)
+		return clues.Stack(errUnrecognizedSchema).WithClues(ctx)
 	}
 
 	base := m.Base()
@@ -128,13 +129,13 @@ func putInner(
 	tmpTags, err := tagsForModelWithID(s, base.ID, base.Version, base.Tags)
 	if err != nil {
 		// Will be wrapped at a higher layer.
-		return err
+		return clues.Stack(err).WithClues(ctx)
 	}
 
 	id, err := w.PutManifest(ctx, tmpTags, m)
 	if err != nil {
 		// Will be wrapped at a higher layer.
-		return err
+		return clues.Stack(err).WithClues(ctx)
 	}
 
 	base.ModelStoreID = id
@@ -150,7 +151,7 @@ func (ms *ModelStore) Put(
 	m model.Model,
 ) error {
 	if !s.Valid() {
-		return errors.WithStack(errUnrecognizedSchema)
+		return clues.Stack(errUnrecognizedSchema)
 	}
 
 	m.Base().Version = ms.modelVersion
@@ -162,14 +163,16 @@ func (ms *ModelStore) Put(
 		func(innerCtx context.Context, w repo.RepositoryWriter) error {
 			err := putInner(innerCtx, w, s, m, true)
 			if err != nil {
-				return err
+				return clues.Stack(err).WithClues(innerCtx)
 			}
 
 			return nil
-		},
-	)
+		})
+	if err != nil {
+		return clues.Wrap(err, "putting model").WithClues(ctx)
+	}
 
-	return errors.Wrap(err, "putting model")
+	return nil
 }
 
 func stripHiddenTags(tags map[string]string) {
@@ -184,7 +187,7 @@ func (ms ModelStore) populateBaseModelFromMetadata(
 ) error {
 	id, ok := m.Labels[stableIDKey]
 	if !ok {
-		return errors.WithStack(errNoStableID)
+		return clues.Stack(errNoStableID)
 	}
 
 	v, err := strconv.Atoi(m.Labels[modelVersionKey])
@@ -193,7 +196,7 @@ func (ms ModelStore) populateBaseModelFromMetadata(
 	}
 
 	if v != ms.modelVersion {
-		return errors.Errorf("bad model version %s", m.Labels[modelVersionKey])
+		return clues.Wrap(clues.New(m.Labels[modelVersionKey]), "bad model version")
 	}
 
 	base.ModelStoreID = m.ID
@@ -211,7 +214,7 @@ func (ms ModelStore) baseModelFromMetadata(
 ) (*model.BaseModel, error) {
 	res := &model.BaseModel{}
 	if err := ms.populateBaseModelFromMetadata(res, m); err != nil {
-		return nil, err
+		return nil, clues.Stack(err).WithAll("metadata_id", m.ID, "metadata_modtime", m.ModTime)
 	}
 
 	return res, nil
@@ -226,21 +229,21 @@ func (ms *ModelStore) GetIDsForType(
 	tags map[string]string,
 ) ([]*model.BaseModel, error) {
 	if !s.Valid() {
-		return nil, errors.WithStack(errUnrecognizedSchema)
+		return nil, clues.Stack(errUnrecognizedSchema).WithClues(ctx)
 	}
 
 	if _, ok := tags[stableIDKey]; ok {
-		return nil, errors.WithStack(errBadTagKey)
+		return nil, clues.Stack(errBadTagKey).WithClues(ctx)
 	}
 
 	tmpTags, err := tagsForModel(s, tags)
 	if err != nil {
-		return nil, errors.Wrap(err, "getting model metadata")
+		return nil, clues.Wrap(err, "getting model metadata").WithClues(ctx)
 	}
 
 	metadata, err := ms.c.FindManifests(ctx, tmpTags)
 	if err != nil {
-		return nil, errors.Wrap(err, "getting model metadata")
+		return nil, clues.Wrap(err, "getting model metadata").WithClues(ctx)
 	}
 
 	res := make([]*model.BaseModel, 0, len(metadata))
@@ -248,7 +251,7 @@ func (ms *ModelStore) GetIDsForType(
 	for _, m := range metadata {
 		bm, err := ms.baseModelFromMetadata(m)
 		if err != nil {
-			return nil, errors.Wrap(err, "parsing model metadata")
+			return nil, clues.Wrap(err, "parsing model metadata").WithClues(ctx)
 		}
 
 		res = append(res, bm)
@@ -266,30 +269,30 @@ func (ms *ModelStore) getModelStoreID(
 	id model.StableID,
 ) (manifest.ID, error) {
 	if !s.Valid() {
-		return "", errors.WithStack(errUnrecognizedSchema)
+		return "", clues.Stack(errUnrecognizedSchema).WithClues(ctx)
 	}
 
 	if len(id) == 0 {
-		return "", errors.WithStack(errNoStableID)
+		return "", clues.Stack(errNoStableID).WithClues(ctx)
 	}
 
 	tags := map[string]string{stableIDKey: string(id)}
 
 	metadata, err := ms.c.FindManifests(ctx, tags)
 	if err != nil {
-		return "", errors.Wrap(err, "getting ModelStoreID")
+		return "", clues.Wrap(err, "getting ModelStoreID").WithClues(ctx)
 	}
 
 	if len(metadata) == 0 {
-		return "", errors.Wrap(ErrNotFound, "getting ModelStoreID")
+		return "", clues.Wrap(data.ErrNotFound, "getting ModelStoreID").WithClues(ctx)
 	}
 
 	if len(metadata) != 1 {
-		return "", errors.New("multiple models with same StableID")
+		return "", clues.New("multiple models with same StableID").WithClues(ctx)
 	}
 
 	if metadata[0].Labels[manifest.TypeLabelKey] != s.String() {
-		return "", errors.WithStack(errModelTypeMismatch)
+		return "", clues.Stack(errModelTypeMismatch).WithClues(ctx)
 	}
 
 	return metadata[0].ID, nil
@@ -302,10 +305,10 @@ func (ms *ModelStore) Get(
 	ctx context.Context,
 	s model.Schema,
 	id model.StableID,
-	data model.Model,
+	m model.Model,
 ) error {
 	if !s.Valid() {
-		return errors.WithStack(errUnrecognizedSchema)
+		return clues.Stack(errUnrecognizedSchema).WithClues(ctx)
 	}
 
 	modelID, err := ms.getModelStoreID(ctx, s, id)
@@ -313,7 +316,7 @@ func (ms *ModelStore) Get(
 		return err
 	}
 
-	return transmuteErr(ms.GetWithModelStoreID(ctx, s, modelID, data))
+	return ms.GetWithModelStoreID(ctx, s, modelID, m)
 }
 
 // GetWithModelStoreID deserializes the model with the given ModelStoreID into
@@ -323,29 +326,37 @@ func (ms *ModelStore) GetWithModelStoreID(
 	ctx context.Context,
 	s model.Schema,
 	id manifest.ID,
-	data model.Model,
+	m model.Model,
 ) error {
 	if !s.Valid() {
-		return errors.WithStack(errUnrecognizedSchema)
+		return clues.Stack(errUnrecognizedSchema).WithClues(ctx)
 	}
 
 	if len(id) == 0 {
-		return errors.WithStack(errNoModelStoreID)
+		return clues.Stack(errNoModelStoreID).WithClues(ctx)
 	}
 
-	metadata, err := ms.c.GetManifest(ctx, id, data)
+	metadata, err := ms.c.GetManifest(ctx, id, m)
 	if err != nil {
-		return errors.Wrap(transmuteErr(err), "getting model data")
+		if errors.Is(err, manifest.ErrNotFound) {
+			err = data.ErrNotFound
+		}
+
+		return clues.Wrap(err, "getting model data").WithClues(ctx)
 	}
 
-	if metadata.Labels[manifest.TypeLabelKey] != s.String() {
-		return errors.WithStack(errModelTypeMismatch)
+	mdlbl := metadata.Labels[manifest.TypeLabelKey]
+	if mdlbl != s.String() {
+		return clues.Stack(errModelTypeMismatch).
+			WithClues(ctx).
+			WithAll("expected_label", s, "got_label", mdlbl)
 	}
 
-	return errors.Wrap(
-		ms.populateBaseModelFromMetadata(data.Base(), metadata),
-		"getting model by ID",
-	)
+	if err := ms.populateBaseModelFromMetadata(m.Base(), metadata); err != nil {
+		return clues.Wrap(err, "getting model by ID").WithClues(ctx)
+	}
+
+	return nil
 }
 
 // checkPrevModelVersion compares the ModelType and ModelStoreID in this model
@@ -359,26 +370,31 @@ func (ms *ModelStore) checkPrevModelVersion(
 	b *model.BaseModel,
 ) error {
 	if !s.Valid() {
-		return errors.WithStack(errUnrecognizedSchema)
+		return clues.Stack(errUnrecognizedSchema).WithClues(ctx)
 	}
 
 	id, err := ms.getModelStoreID(ctx, s, b.ID)
 	if err != nil {
-		return err
+		return clues.Stack(err).WithClues(ctx)
 	}
 
 	// We actually got something back during our lookup.
 	meta, err := ms.c.GetManifest(ctx, id, nil)
 	if err != nil {
-		return errors.Wrap(err, "getting previous model version")
+		return clues.Wrap(err, "getting previous model version").WithClues(ctx)
 	}
 
 	if meta.ID != b.ModelStoreID {
-		return errors.New("updated model has different ModelStoreID")
+		return clues.New("updated model has different ModelStoreID").
+			WithClues(ctx).
+			WithAll("expected_id", meta.ID, "model_store_id", b.ModelStoreID)
 	}
 
-	if meta.Labels[manifest.TypeLabelKey] != s.String() {
-		return errors.New("updated model has different model type")
+	mdlbl := meta.Labels[manifest.TypeLabelKey]
+	if mdlbl != s.String() {
+		return clues.New("updated model has different model type").
+			WithClues(ctx).
+			WithAll("expected_label", s, "got_label", mdlbl)
 	}
 
 	return nil
@@ -396,12 +412,12 @@ func (ms *ModelStore) Update(
 	m model.Model,
 ) error {
 	if !s.Valid() {
-		return errors.WithStack(errUnrecognizedSchema)
+		return clues.Stack(errUnrecognizedSchema).WithClues(ctx)
 	}
 
 	base := m.Base()
 	if len(base.ModelStoreID) == 0 {
-		return errors.WithStack(errNoModelStoreID)
+		return clues.Stack(errNoModelStoreID).WithClues(ctx)
 	}
 
 	base.Version = ms.modelVersion
@@ -415,8 +431,11 @@ func (ms *ModelStore) Update(
 		ctx,
 		ms.c,
 		repo.WriteSessionOptions{Purpose: "ModelStoreUpdate"},
-		func(innerCtx context.Context, w repo.RepositoryWriter) (innerErr error) {
-			oldID := base.ModelStoreID
+		func(innerCtx context.Context, w repo.RepositoryWriter) error {
+			var (
+				innerErr error
+				oldID    = base.ModelStoreID
+			)
 
 			defer func() {
 				if innerErr != nil {
@@ -429,19 +448,26 @@ func (ms *ModelStore) Update(
 				return innerErr
 			}
 
+			// if equal, everything worked out fine.
+			// if not, we handle the cleanup below.
+			if oldID == base.ModelStoreID {
+				return nil
+			}
+
 			// If we fail at this point no changes will be made to the manifest store
 			// in kopia, making it appear like nothing ever happened. At worst some
 			// orphaned content blobs may be uploaded, but they should be garbage
 			// collected the next time kopia maintenance is run.
-			if oldID != base.ModelStoreID {
-				innerErr = w.DeleteManifest(innerCtx, oldID)
+			innerErr = w.DeleteManifest(innerCtx, oldID)
+			if innerErr != nil {
+				return clues.Stack(innerErr).WithClues(ctx)
 			}
 
-			return innerErr
+			return nil
 		},
 	)
 	if err != nil {
-		return errors.Wrap(err, "updating model")
+		return clues.Wrap(err, "updating model").WithClues(ctx)
 	}
 
 	return nil
@@ -452,12 +478,12 @@ func (ms *ModelStore) Update(
 // have the same StableID.
 func (ms *ModelStore) Delete(ctx context.Context, s model.Schema, id model.StableID) error {
 	if !s.Valid() {
-		return errors.WithStack(errUnrecognizedSchema)
+		return clues.Stack(errUnrecognizedSchema).WithClues(ctx)
 	}
 
 	latest, err := ms.getModelStoreID(ctx, s, id)
 	if err != nil {
-		if errors.Is(err, ErrNotFound) {
+		if errors.Is(err, data.ErrNotFound) {
 			return nil
 		}
 
@@ -472,26 +498,17 @@ func (ms *ModelStore) Delete(ctx context.Context, s model.Schema, id model.Stabl
 // exist.
 func (ms *ModelStore) DeleteWithModelStoreID(ctx context.Context, id manifest.ID) error {
 	if len(id) == 0 {
-		return errors.WithStack(errNoModelStoreID)
+		return clues.Stack(errNoModelStoreID).WithClues(ctx)
 	}
 
-	err := repo.WriteSession(
-		ctx,
-		ms.c,
-		repo.WriteSessionOptions{Purpose: "ModelStoreDelete"},
-		func(innerCtx context.Context, w repo.RepositoryWriter) error {
-			return w.DeleteManifest(innerCtx, id)
-		},
-	)
-
-	return errors.Wrap(err, "deleting model")
-}
-
-func transmuteErr(err error) error {
-	switch {
-	case errors.Is(err, manifest.ErrNotFound):
-		return ErrNotFound
-	default:
-		return err
-	}
+	opts := repo.WriteSessionOptions{Purpose: "ModelStoreDelete"}
+	cb := func(innerCtx context.Context, w repo.RepositoryWriter) error {
+		return w.DeleteManifest(innerCtx, id)
+	}
+
+	if err := repo.WriteSession(ctx, ms.c, opts, cb); err != nil {
+		return clues.Wrap(err, "deleting model").WithClues(ctx)
+	}
+
+	return nil
 }
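
Note: with the sentinel translation inlined in GetWithModelStoreID, the transmuteErr indirection deleted above has no remaining callers. The inline form, quoted from the hunk for reference; note that assigning err = data.ErrNotFound deliberately discards kopia's manifest.ErrNotFound chain, so callers only ever match the corso sentinel:

```go
metadata, err := ms.c.GetManifest(ctx, id, m)
if err != nil {
	if errors.Is(err, manifest.ErrNotFound) {
		// translate kopia's sentinel at the package boundary
		err = data.ErrNotFound
	}

	return clues.Wrap(err, "getting model data").WithClues(ctx)
}
```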

@@ -12,6 +12,7 @@ import (
 	"github.com/stretchr/testify/require"
 	"github.com/stretchr/testify/suite"
 
+	"github.com/alcionai/corso/src/internal/data"
 	"github.com/alcionai/corso/src/internal/model"
 	"github.com/alcionai/corso/src/internal/tester"
 	"github.com/alcionai/corso/src/pkg/backup"
@@ -360,9 +361,9 @@ func (suite *ModelStoreIntegrationSuite) TestPutGet_WithTags() {
 func (suite *ModelStoreIntegrationSuite) TestGet_NotFoundErrors() {
 	t := suite.T()
 
-	assert.ErrorIs(t, suite.m.Get(suite.ctx, model.BackupOpSchema, "baz", nil), ErrNotFound)
+	assert.ErrorIs(t, suite.m.Get(suite.ctx, model.BackupOpSchema, "baz", nil), data.ErrNotFound)
 	assert.ErrorIs(
-		t, suite.m.GetWithModelStoreID(suite.ctx, model.BackupOpSchema, "baz", nil), ErrNotFound)
+		t, suite.m.GetWithModelStoreID(suite.ctx, model.BackupOpSchema, "baz", nil), data.ErrNotFound)
 }
 
 func (suite *ModelStoreIntegrationSuite) TestPutGetOfTypeBadVersion() {
@@ -630,7 +631,7 @@ func (suite *ModelStoreIntegrationSuite) TestPutUpdate() {
 			}
 
 			err = m.GetWithModelStoreID(ctx, theModelType, oldModelID, nil)
-			assert.ErrorIs(t, err, ErrNotFound)
+			assert.ErrorIs(t, err, data.ErrNotFound)
 		})
 	}
 }
@@ -691,7 +692,7 @@ func (suite *ModelStoreIntegrationSuite) TestPutDelete() {
 
 	returned := &fooModel{}
 	err := suite.m.GetWithModelStoreID(suite.ctx, theModelType, foo.ModelStoreID, returned)
-	assert.ErrorIs(t, err, ErrNotFound)
+	assert.ErrorIs(t, err, data.ErrNotFound)
 }
 
 func (suite *ModelStoreIntegrationSuite) TestPutDelete_BadIDsNoop() {
@@ -775,7 +776,7 @@ func (suite *ModelStoreRegressionSuite) TestFailDuringWriteSessionHasNoVisibleEf
 	assert.ErrorIs(t, err, assert.AnError)
 
 	err = m.GetWithModelStoreID(ctx, theModelType, newID, nil)
-	assert.ErrorIs(t, err, ErrNotFound)
+	assert.ErrorIs(t, err, data.ErrNotFound)
 
 	returned := &fooModel{}
 	require.NoError(

@@ -3,6 +3,7 @@ package kopia
 import (
 	"context"
 
+	"github.com/alcionai/clues"
 	"github.com/kopia/kopia/repo/blob"
 	"github.com/kopia/kopia/repo/blob/s3"
 
@@ -16,7 +17,7 @@ const (
 func s3BlobStorage(ctx context.Context, s storage.Storage) (blob.Storage, error) {
 	cfg, err := s.S3Config()
 	if err != nil {
-		return nil, err
+		return nil, clues.Stack(err).WithClues(ctx)
	}
 
 	endpoint := defaultS3Endpoint
@@ -32,5 +33,10 @@ func s3BlobStorage(ctx context.Context, s storage.Storage) (blob.Storage, error)
 		DoNotVerifyTLS: cfg.DoNotVerifyTLS,
 	}
 
-	return s3.New(ctx, &opts, false)
+	store, err := s3.New(ctx, &opts, false)
+	if err != nil {
+		return nil, clues.Stack(err).WithClues(ctx)
+	}
+
+	return store, nil
 }

@@ -4,6 +4,7 @@ import (
 	"context"
 	"sort"
 
+	"github.com/alcionai/clues"
 	"github.com/kopia/kopia/repo/manifest"
 	"github.com/kopia/kopia/snapshot"
 	"github.com/pkg/errors"
@@ -218,9 +219,7 @@ func fetchPrevManifests(
 		found = append(found, man.Manifest)
 		logger.Ctx(ctx).Infow(
 			"reusing cached complete snapshot",
-			"snapshot_id",
-			man.ID,
-		)
+			"snapshot_id", man.ID)
 	}
 
 	return found, nil
@@ -251,29 +250,19 @@ func fetchPrevSnapshotManifests(
 	for _, reason := range reasons {
 		logger.Ctx(ctx).Infow(
 			"searching for previous manifests for reason",
-			"service",
-			reason.Service.String(),
-			"category",
-			reason.Category.String(),
-		)
+			"service", reason.Service.String(),
+			"category", reason.Category.String())
 
-		found, err := fetchPrevManifests(
-			ctx,
-			sm,
-			mans,
-			reason,
-			tags,
-		)
+		found, err := fetchPrevManifests(ctx, sm, mans, reason, tags)
 		if err != nil {
-			logger.Ctx(ctx).Warnw(
-				"fetching previous snapshot manifests for service/category/resource owner",
-				"error",
-				err,
-				"service",
-				reason.Service.String(),
-				"category",
-				reason.Category.String(),
-			)
+			logger.Ctx(ctx).
+				With(
+					"err", err,
+					"service", reason.Service.String(),
+					"category", reason.Category.String()).
+				Warnw(
+					"fetching previous snapshot manifests for service/category/resource owner",
+					clues.InErr(err).Slice()...)
 
 			// Snapshot can still complete fine, just not as efficient.
 			continue
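
Note: the reworked log call splits structured fields (With) from the message (Warnw) and appends clues.InErr(err).Slice(), which, per its use here, recovers key/value pairs previously attached to the error via WithClues. A sketch; the concrete type of `reason` isn't shown in this diff, so the stand-in below only assumes the two String() accessors the log line uses:

```go
// reasonInfo is a stand-in for the real reason type; illustrative only.
type reasonInfo struct{ Service, Category fmt.Stringer }

func logFetchFailure(ctx context.Context, err error, reason reasonInfo) {
	logger.Ctx(ctx).
		With(
			"err", err,
			"service", reason.Service.String(),
			"category", reason.Category.String()).
		Warnw(
			"fetching previous snapshot manifests",
			clues.InErr(err).Slice()...)
}
```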

@@ -15,7 +15,7 @@ import (
 	"time"
 	"unsafe"
 
-	"github.com/hashicorp/go-multierror"
+	"github.com/alcionai/clues"
 	"github.com/kopia/kopia/fs"
 	"github.com/kopia/kopia/fs/virtualfs"
 	"github.com/kopia/kopia/repo/manifest"
@@ -25,6 +25,7 @@ import (
 	"github.com/alcionai/corso/src/internal/data"
 	D "github.com/alcionai/corso/src/internal/diagnostics"
 	"github.com/alcionai/corso/src/pkg/backup/details"
+	"github.com/alcionai/corso/src/pkg/fault"
 	"github.com/alcionai/corso/src/pkg/logger"
 	"github.com/alcionai/corso/src/pkg/path"
 )
@@ -137,7 +138,7 @@ type corsoProgress struct {
 	toMerge    map[string]path.Path
 	mu         sync.RWMutex
 	totalBytes int64
-	errs       *multierror.Error
+	errs       *fault.Errors
 }
 
 // Kopia interface function used as a callback when kopia finishes processing a
@@ -167,11 +168,11 @@ func (cp *corsoProgress) FinishedFile(relativePath string, err error) {
 	// never had to materialize their details in-memory.
 	if d.info == nil {
 		if d.prevPath == nil {
-			cp.errs = multierror.Append(cp.errs, errors.Errorf(
-				"item sourced from previous backup with no previous path. Service: %s, Category: %s",
-				d.repoPath.Service().String(),
-				d.repoPath.Category().String(),
+			cp.errs.Add(clues.New("item sourced from previous backup with no previous path").
+				WithAll(
+					"service", d.repoPath.Service().String(),
+					"category", d.repoPath.Category().String(),
 			))
 
 			return
 		}
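
Note: corsoProgress now accumulates non-fatal problems on a *fault.Errors collector instead of re-assigning a multierror. Only Add is visible in this diff; a sketch of the calling convention under that assumption (`recordErr` is a hypothetical helper, not part of the change):

```go
func (cp *corsoProgress) recordErr(err error) {
	if err == nil {
		return
	}

	// was: cp.errs = multierror.Append(cp.errs, err)
	cp.errs.Add(err)
}
```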
@@ -254,31 +255,28 @@ func (cp *corsoProgress) get(k string) *itemDetails {
 func collectionEntries(
 	ctx context.Context,
 	cb func(context.Context, fs.Entry) error,
-	streamedEnts data.Collection,
+	streamedEnts data.BackupCollection,
 	progress *corsoProgress,
-) (map[string]struct{}, *multierror.Error) {
+) (map[string]struct{}, error) {
 	if streamedEnts == nil {
 		return nil, nil
 	}
 
 	var (
-		errs *multierror.Error
 		// Track which items have already been seen so we can skip them if we see
 		// them again in the data from the base snapshot.
 		seen  = map[string]struct{}{}
 		items = streamedEnts.Items()
-		log   = logger.Ctx(ctx)
 	)
 
 	for {
 		select {
 		case <-ctx.Done():
-			errs = multierror.Append(errs, ctx.Err())
-			return seen, errs
+			return seen, clues.Stack(ctx.Err()).WithClues(ctx)
 
 		case e, ok := <-items:
 			if !ok {
-				return seen, errs
+				return seen, nil
 			}
 
 			encodedName := encodeAsPath(e.UUID())
@@ -302,9 +300,9 @@ func collectionEntries(
 			itemPath, err := streamedEnts.FullPath().Append(e.UUID(), true)
 			if err != nil {
 				err = errors.Wrap(err, "getting full item path")
-				errs = multierror.Append(errs, err)
+				progress.errs.Add(err)
 
-				log.Error(err)
+				logger.Ctx(ctx).With("err", err).Errorw("getting full item path", clues.InErr(err).Slice()...)
 
 				continue
 			}
@@ -342,13 +340,12 @@ func collectionEntries(
 			entry := virtualfs.StreamingFileWithModTimeFromReader(
 				encodedName,
 				modTime,
-				newBackupStreamReader(serializationVersion, e.ToReader()),
-			)
+				newBackupStreamReader(serializationVersion, e.ToReader()))
 			if err := cb(ctx, entry); err != nil {
 				// Kopia's uploader swallows errors in most cases, so if we see
 				// something here it's probably a big issue and we should return.
-				errs = multierror.Append(errs, errors.Wrapf(err, "executing callback on %q", itemPath))
-				return seen, errs
+				return seen, clues.Wrap(err, "executing callback").WithClues(ctx).With("item_path", itemPath)
 			}
 		}
 	}
@@ -442,7 +439,7 @@ func getStreamItemFunc(
 	curPath path.Path,
 	prevPath path.Path,
 	staticEnts []fs.Entry,
-	streamedEnts data.Collection,
+	streamedEnts data.BackupCollection,
 	baseDir fs.Directory,
 	globalExcludeSet map[string]struct{},
 	progress *corsoProgress,
@@ -454,11 +451,14 @@ func getStreamItemFunc(
 		// Return static entries in this directory first.
 		for _, d := range staticEnts {
 			if err := cb(ctx, d); err != nil {
-				return errors.Wrap(err, "executing callback on static directory")
+				return clues.Wrap(err, "executing callback on static directory").WithClues(ctx)
 			}
 		}
 
-		seen, errs := collectionEntries(ctx, cb, streamedEnts, progress)
+		seen, err := collectionEntries(ctx, cb, streamedEnts, progress)
+		if err != nil {
+			return errors.Wrap(err, "streaming collection entries")
+		}
 
 		if err := streamBaseEntries(
 			ctx,
@@ -470,13 +470,10 @@ func getStreamItemFunc(
 			globalExcludeSet,
 			progress,
 		); err != nil {
-			errs = multierror.Append(
-				errs,
-				errors.Wrap(err, "streaming base snapshot entries"),
-			)
+			return errors.Wrap(err, "streaming base snapshot entries")
 		}
 
-		return errs.ErrorOrNil()
+		return nil
 	}
 }
 
@ -540,7 +537,7 @@ type treeMap struct {
|
|||||||
childDirs map[string]*treeMap
|
childDirs map[string]*treeMap
|
||||||
// Reference to data pulled from the external service. Contains only items in
|
// Reference to data pulled from the external service. Contains only items in
|
||||||
// this directory. Does not contain references to subdirectories.
|
// this directory. Does not contain references to subdirectories.
|
||||||
collection data.Collection
|
collection data.BackupCollection
|
||||||
// Reference to directory in base snapshot. The referenced directory itself
|
// Reference to directory in base snapshot. The referenced directory itself
|
||||||
// may contain files and subdirectories, but the subdirectories should
|
// may contain files and subdirectories, but the subdirectories should
|
||||||
// eventually be added when walking the base snapshot to build the hierarchy,
|
// eventually be added when walking the base snapshot to build the hierarchy,
|
||||||
@ -617,7 +614,7 @@ func getTreeNode(roots map[string]*treeMap, pathElements []string) *treeMap {
|
|||||||
|
|
||||||
func inflateCollectionTree(
|
func inflateCollectionTree(
|
||||||
ctx context.Context,
|
ctx context.Context,
|
||||||
collections []data.Collection,
|
collections []data.BackupCollection,
|
||||||
) (map[string]*treeMap, map[string]path.Path, error) {
|
) (map[string]*treeMap, map[string]path.Path, error) {
|
||||||
roots := make(map[string]*treeMap)
|
roots := make(map[string]*treeMap)
|
||||||
// Contains the old path for collections that have been moved or renamed.
|
// Contains the old path for collections that have been moved or renamed.
|
||||||
@ -911,13 +908,13 @@ func inflateBaseTree(
|
|||||||
// exclude from base directories when uploading the snapshot. As items in *all*
|
// exclude from base directories when uploading the snapshot. As items in *all*
|
||||||
// base directories will be checked for in every base directory, this assumes
|
// base directories will be checked for in every base directory, this assumes
|
||||||
// that items in the bases are unique. Deletions of directories or subtrees
|
// that items in the bases are unique. Deletions of directories or subtrees
|
||||||
// should be represented as changes in the status of a Collection, not an entry
|
// should be represented as changes in the status of a BackupCollection, not an
|
||||||
// in the globalExcludeSet.
|
// entry in the globalExcludeSet.
|
||||||
func inflateDirTree(
|
func inflateDirTree(
|
||||||
ctx context.Context,
|
ctx context.Context,
|
||||||
loader snapshotLoader,
|
loader snapshotLoader,
|
||||||
baseSnaps []IncrementalBase,
|
baseSnaps []IncrementalBase,
|
||||||
collections []data.Collection,
|
collections []data.BackupCollection,
|
||||||
globalExcludeSet map[string]struct{},
|
globalExcludeSet map[string]struct{},
|
||||||
progress *corsoProgress,
|
progress *corsoProgress,
|
||||||
) (fs.Directory, error) {
|
) (fs.Directory, error) {
|
||||||
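The comment above pins down a design rule: a deleted directory surfaces as a collection whose state is DeletedState, while the globalExcludeSet only carries individual item names. A small illustrative sketch of how a tree builder can branch on that state; the types here are local stand-ins for corso's data package, not its real definitions:

package main

import "fmt"

// CollectionState mirrors the states referenced in the tests below.
type CollectionState int

const (
    NewState CollectionState = iota
    NotMovedState
    MovedState
    DeletedState
)

// planMerge sketches how collection state, not an exclude-set entry,
// drives subtree handling during hierarchy inflation.
func planMerge(s CollectionState) string {
    switch s {
    case DeletedState:
        return "prune the previous subtree"
    case MovedState:
        return "re-root the previous subtree at the new path"
    default:
        return "merge items at the current path"
    }
}

func main() {
    fmt.Println(planMerge(DeletedState))
}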
@ -933,9 +930,7 @@ func inflateDirTree(

    logger.Ctx(ctx).Infow(
        "merging hierarchies from base snapshots",
-       "snapshot_ids",
-       baseIDs,
-   )
+       "snapshot_ids", baseIDs)

    for _, snap := range baseSnaps {
        if err = inflateBaseTree(ctx, loader, snap, updatedPaths, roots); err != nil {
@ -22,6 +22,7 @@ import (
    "github.com/alcionai/corso/src/internal/data"
    "github.com/alcionai/corso/src/internal/tester"
    "github.com/alcionai/corso/src/pkg/backup/details"
+   "github.com/alcionai/corso/src/pkg/fault"
    "github.com/alcionai/corso/src/pkg/path"
)

@ -456,6 +457,7 @@ func (suite *CorsoProgressUnitSuite) TestFinishedFile() {
        UploadProgress: &snapshotfs.NullUploadProgress{},
        deets: bd,
        pending: map[string]*itemDetails{},
+       errs: fault.New(true),
    }

    ci := test.cachedItems(suite.targetFileName, suite.targetFilePath)

@ -503,6 +505,7 @@ func (suite *CorsoProgressUnitSuite) TestFinishedFileCachedNoPrevPathErrors() {
        UploadProgress: &snapshotfs.NullUploadProgress{},
        deets: bd,
        pending: map[string]*itemDetails{},
+       errs: fault.New(true),
    }

    for k, v := range cachedItems {

@ -518,7 +521,7 @@ func (suite *CorsoProgressUnitSuite) TestFinishedFileCachedNoPrevPathErrors() {

    assert.Empty(t, cp.pending)
    assert.Empty(t, bd.Details().Entries)
-   assert.Error(t, cp.errs.ErrorOrNil())
+   assert.Error(t, cp.errs.Err())
}

func (suite *CorsoProgressUnitSuite) TestFinishedFileBuildsHierarchyNewItem() {

@ -533,6 +536,7 @@ func (suite *CorsoProgressUnitSuite) TestFinishedFileBuildsHierarchyNewItem() {
        deets: bd,
        pending: map[string]*itemDetails{},
        toMerge: map[string]path.Path{},
+       errs: fault.New(true),
    }

    deets := &itemDetails{info: &details.ItemInfo{}, repoPath: suite.targetFilePath}

@ -605,6 +609,7 @@ func (suite *CorsoProgressUnitSuite) TestFinishedFileBaseItemDoesntBuildHierarch
        deets: bd,
        pending: map[string]*itemDetails{},
        toMerge: map[string]path.Path{},
+       errs: fault.New(true),
    }

    deets := &itemDetails{

@ -629,6 +634,7 @@ func (suite *CorsoProgressUnitSuite) TestFinishedHashingFile() {
        UploadProgress: &snapshotfs.NullUploadProgress{},
        deets: bd,
        pending: map[string]*itemDetails{},
+       errs: fault.New(true),
    }

    ci := test.cachedItems(suite.targetFileName, suite.targetFilePath)

@ -681,9 +687,12 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree() {
        user2Encoded: 42,
    }

-   progress := &corsoProgress{pending: map[string]*itemDetails{}}
+   progress := &corsoProgress{
+       pending: map[string]*itemDetails{},
+       errs: fault.New(true),
+   }

-   collections := []data.Collection{
+   collections := []data.BackupCollection{
        mockconnector.NewMockExchangeCollection(
            suite.testPath,
            expectedFileCount[user1Encoded],

@ -759,11 +768,11 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_MixedDirectory()
    // - 42 separate files
    table := []struct {
        name string
-       layout []data.Collection
+       layout []data.BackupCollection
    }{
        {
            name: "SubdirFirst",
-           layout: []data.Collection{
+           layout: []data.BackupCollection{
                mockconnector.NewMockExchangeCollection(
                    p2,
                    5,

@ -776,7 +785,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_MixedDirectory()
        },
        {
            name: "SubdirLast",
-           layout: []data.Collection{
+           layout: []data.BackupCollection{
                mockconnector.NewMockExchangeCollection(
                    suite.testPath,
                    42,

@ -791,7 +800,10 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_MixedDirectory()

    for _, test := range table {
        suite.T().Run(test.name, func(t *testing.T) {
-           progress := &corsoProgress{pending: map[string]*itemDetails{}}
+           progress := &corsoProgress{
+               pending: map[string]*itemDetails{},
+               errs: fault.New(true),
+           }

            dirTree, err := inflateDirTree(ctx, nil, nil, test.layout, nil, progress)
            require.NoError(t, err)

@ -845,7 +857,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_Fails() {

    table := []struct {
        name string
-       layout []data.Collection
+       layout []data.BackupCollection
    }{
        {
            "MultipleRoots",

@ -862,7 +874,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_Fails() {
            // - emails
            //   - Inbox
            //     - 42 separate files
-           []data.Collection{
+           []data.BackupCollection{
                mockconnector.NewMockExchangeCollection(
                    suite.testPath,
                    5,

@ -875,7 +887,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_Fails() {
        },
        {
            "NoCollectionPath",
-           []data.Collection{
+           []data.BackupCollection{
                mockconnector.NewMockExchangeCollection(
                    nil,
                    5,

@ -971,9 +983,12 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeErrors() {
            ctx, flush := tester.NewContext()
            defer flush()

-           progress := &corsoProgress{pending: map[string]*itemDetails{}}
+           progress := &corsoProgress{
+               pending: map[string]*itemDetails{},
+               errs: fault.New(true),
+           }

-           cols := []data.Collection{}
+           cols := []data.BackupCollection{}
            for _, s := range test.states {
                prevPath := dirPath
                nowPath := dirPath

@ -1037,17 +1052,17 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSingleSubtree() {

    table := []struct {
        name string
-       inputCollections func() []data.Collection
+       inputCollections func() []data.BackupCollection
        expected *expectedNode
    }{
        {
            name: "SkipsDeletedItems",
-           inputCollections: func() []data.Collection {
+           inputCollections: func() []data.BackupCollection {
                mc := mockconnector.NewMockExchangeCollection(dirPath, 1)
                mc.Names[0] = testFileName
                mc.DeletedItems[0] = true

-               return []data.Collection{mc}
+               return []data.BackupCollection{mc}
            },
            expected: expectedTreeWithChildren(
                []string{

@ -1066,13 +1081,13 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSingleSubtree() {
        },
        {
            name: "AddsNewItems",
-           inputCollections: func() []data.Collection {
+           inputCollections: func() []data.BackupCollection {
                mc := mockconnector.NewMockExchangeCollection(dirPath, 1)
                mc.Names[0] = testFileName2
                mc.Data[0] = testFileData2
                mc.ColState = data.NotMovedState

-               return []data.Collection{mc}
+               return []data.BackupCollection{mc}
            },
            expected: expectedTreeWithChildren(
                []string{

@ -1101,13 +1116,13 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSingleSubtree() {
        },
        {
            name: "SkipsUpdatedItems",
-           inputCollections: func() []data.Collection {
+           inputCollections: func() []data.BackupCollection {
                mc := mockconnector.NewMockExchangeCollection(dirPath, 1)
                mc.Names[0] = testFileName
                mc.Data[0] = testFileData2
                mc.ColState = data.NotMovedState

-               return []data.Collection{mc}
+               return []data.BackupCollection{mc}
            },
            expected: expectedTreeWithChildren(
                []string{

@ -1132,7 +1147,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSingleSubtree() {
        },
        {
            name: "DeleteAndNew",
-           inputCollections: func() []data.Collection {
+           inputCollections: func() []data.BackupCollection {
                mc1 := mockconnector.NewMockExchangeCollection(dirPath, 0)
                mc1.ColState = data.DeletedState
                mc1.PrevPath = dirPath

@ -1142,7 +1157,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSingleSubtree() {
                mc2.Names[0] = testFileName2
                mc2.Data[0] = testFileData2

-               return []data.Collection{mc1, mc2}
+               return []data.BackupCollection{mc1, mc2}
            },
            expected: expectedTreeWithChildren(
                []string{

@ -1167,7 +1182,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSingleSubtree() {
        },
        {
            name: "MovedAndNew",
-           inputCollections: func() []data.Collection {
+           inputCollections: func() []data.BackupCollection {
                mc1 := mockconnector.NewMockExchangeCollection(dirPath2, 0)
                mc1.ColState = data.MovedState
                mc1.PrevPath = dirPath

@ -1177,7 +1192,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSingleSubtree() {
                mc2.Names[0] = testFileName2
                mc2.Data[0] = testFileData2

-               return []data.Collection{mc1, mc2}
+               return []data.BackupCollection{mc1, mc2}
            },
            expected: expectedTreeWithChildren(
                []string{

@ -1211,13 +1226,13 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSingleSubtree() {
        },
        {
            name: "NewDoesntMerge",
-           inputCollections: func() []data.Collection {
+           inputCollections: func() []data.BackupCollection {
                mc1 := mockconnector.NewMockExchangeCollection(dirPath, 1)
                mc1.ColState = data.NewState
                mc1.Names[0] = testFileName2
                mc1.Data[0] = testFileData2

-               return []data.Collection{mc1}
+               return []data.BackupCollection{mc1}
            },
            expected: expectedTreeWithChildren(
                []string{

@ -1249,7 +1264,10 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSingleSubtree() {
            ctx, flush := tester.NewContext()
            defer flush()

-           progress := &corsoProgress{pending: map[string]*itemDetails{}}
+           progress := &corsoProgress{
+               pending: map[string]*itemDetails{},
+               errs: fault.New(true),
+           }
            msw := &mockSnapshotWalker{
                snapshotRoot: getBaseSnapshot(),
            }
@ -1369,13 +1387,13 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto

    table := []struct {
        name string
-       inputCollections func(t *testing.T) []data.Collection
+       inputCollections func(t *testing.T) []data.BackupCollection
        inputExcludes map[string]struct{}
        expected *expectedNode
    }{
        {
            name: "GlobalExcludeSet",
-           inputCollections: func(t *testing.T) []data.Collection {
+           inputCollections: func(t *testing.T) []data.BackupCollection {
                return nil
            },
            inputExcludes: map[string]struct{}{

@ -1417,7 +1435,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto
        },
        {
            name: "MovesSubtree",
-           inputCollections: func(t *testing.T) []data.Collection {
+           inputCollections: func(t *testing.T) []data.BackupCollection {
                newPath := makePath(
                    t,
                    []string{testTenant, service, testUser, category, testInboxDir + "2"},

@ -1428,7 +1446,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto
                mc.PrevPath = inboxPath
                mc.ColState = data.MovedState

-               return []data.Collection{mc}
+               return []data.BackupCollection{mc}
            },
            expected: expectedTreeWithChildren(
                []string{

@ -1474,7 +1492,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto
        },
        {
            name: "MovesChildAfterAncestorMove",
-           inputCollections: func(t *testing.T) []data.Collection {
+           inputCollections: func(t *testing.T) []data.BackupCollection {
                newInboxPath := makePath(
                    t,
                    []string{testTenant, service, testUser, category, testInboxDir + "2"},

@ -1494,7 +1512,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto
                work.PrevPath = workPath
                work.ColState = data.MovedState

-               return []data.Collection{inbox, work}
+               return []data.BackupCollection{inbox, work}
            },
            expected: expectedTreeWithChildren(
                []string{

@ -1540,7 +1558,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto
        },
        {
            name: "MovesChildAfterAncestorDelete",
-           inputCollections: func(t *testing.T) []data.Collection {
+           inputCollections: func(t *testing.T) []data.BackupCollection {
                newWorkPath := makePath(
                    t,
                    []string{testTenant, service, testUser, category, workDir},

@ -1555,7 +1573,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto
                work.PrevPath = workPath
                work.ColState = data.MovedState

-               return []data.Collection{inbox, work}
+               return []data.BackupCollection{inbox, work}
            },
            expected: expectedTreeWithChildren(
                []string{

@ -1579,7 +1597,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto
        },
        {
            name: "ReplaceDeletedDirectory",
-           inputCollections: func(t *testing.T) []data.Collection {
+           inputCollections: func(t *testing.T) []data.BackupCollection {
                personal := mockconnector.NewMockExchangeCollection(personalPath, 0)
                personal.PrevPath = personalPath
                personal.ColState = data.DeletedState

@ -1588,7 +1606,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto
                work.PrevPath = workPath
                work.ColState = data.MovedState

-               return []data.Collection{personal, work}
+               return []data.BackupCollection{personal, work}
            },
            expected: expectedTreeWithChildren(
                []string{

@ -1620,7 +1638,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto
        },
        {
            name: "ReplaceDeletedDirectoryWithNew",
-           inputCollections: func(t *testing.T) []data.Collection {
+           inputCollections: func(t *testing.T) []data.BackupCollection {
                personal := mockconnector.NewMockExchangeCollection(personalPath, 0)
                personal.PrevPath = personalPath
                personal.ColState = data.DeletedState

@ -1630,7 +1648,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto
                newCol.Names[0] = workFileName2
                newCol.Data[0] = workFileData2

-               return []data.Collection{personal, newCol}
+               return []data.BackupCollection{personal, newCol}
            },
            expected: expectedTreeWithChildren(
                []string{

@ -1671,7 +1689,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto
        },
        {
            name: "ReplaceMovedDirectory",
-           inputCollections: func(t *testing.T) []data.Collection {
+           inputCollections: func(t *testing.T) []data.BackupCollection {
                newPersonalPath := makePath(
                    t,
                    []string{testTenant, service, testUser, category, personalDir},

@ -1686,7 +1704,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto
                work.PrevPath = workPath
                work.ColState = data.MovedState

-               return []data.Collection{personal, work}
+               return []data.BackupCollection{personal, work}
            },
            expected: expectedTreeWithChildren(
                []string{

@ -1729,7 +1747,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto
        },
        {
            name: "MoveDirectoryAndMergeItems",
-           inputCollections: func(t *testing.T) []data.Collection {
+           inputCollections: func(t *testing.T) []data.BackupCollection {
                newPersonalPath := makePath(
                    t,
                    []string{testTenant, service, testUser, category, workDir},

@ -1744,7 +1762,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto
                personal.Names[1] = testFileName4
                personal.Data[1] = testFileData4

-               return []data.Collection{personal}
+               return []data.BackupCollection{personal}
            },
            expected: expectedTreeWithChildren(
                []string{

@ -1793,7 +1811,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto
        },
        {
            name: "MoveParentDeleteFileNoMergeSubtreeMerge",
-           inputCollections: func(t *testing.T) []data.Collection {
+           inputCollections: func(t *testing.T) []data.BackupCollection {
                newInboxPath := makePath(
                    t,
                    []string{testTenant, service, testUser, category, personalDir},

@ -1824,7 +1842,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto
                work.Names[0] = testFileName6
                work.Data[0] = testFileData6

-               return []data.Collection{inbox, work}
+               return []data.BackupCollection{inbox, work}
            },
            expected: expectedTreeWithChildren(
                []string{

@ -1876,7 +1894,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto
        },
        {
            name: "NoMoveParentDeleteFileNoMergeSubtreeMerge",
-           inputCollections: func(t *testing.T) []data.Collection {
+           inputCollections: func(t *testing.T) []data.BackupCollection {
                inbox := mockconnector.NewMockExchangeCollection(inboxPath, 1)
                inbox.PrevPath = inboxPath
                inbox.ColState = data.NotMovedState

@ -1892,7 +1910,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto
                work.Names[0] = testFileName6
                work.Data[0] = testFileData6

-               return []data.Collection{inbox, work}
+               return []data.BackupCollection{inbox, work}
            },
            expected: expectedTreeWithChildren(
                []string{

@ -1951,7 +1969,10 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto
            ctx, flush := tester.NewContext()
            defer flush()

-           progress := &corsoProgress{pending: map[string]*itemDetails{}}
+           progress := &corsoProgress{
+               pending: map[string]*itemDetails{},
+               errs: fault.New(true),
+           }
            msw := &mockSnapshotWalker{
                snapshotRoot: getBaseSnapshot(),
            }

@ -2097,7 +2118,10 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSkipsDeletedSubtre
        },
    )

-   progress := &corsoProgress{pending: map[string]*itemDetails{}}
+   progress := &corsoProgress{
+       pending: map[string]*itemDetails{},
+       errs: fault.New(true),
+   }
    mc := mockconnector.NewMockExchangeCollection(suite.testPath, 1)
    mc.PrevPath = mc.FullPath()
    mc.ColState = data.DeletedState

@ -2105,7 +2129,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSkipsDeletedSubtre
        snapshotRoot: getBaseSnapshot(),
    }

-   collections := []data.Collection{mc}
+   collections := []data.BackupCollection{mc}

    // Returned directory structure should look like:
    // - a-tenant

@ -2346,7 +2370,10 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSelectsCorrectSubt
        },
    )

-   progress := &corsoProgress{pending: map[string]*itemDetails{}}
+   progress := &corsoProgress{
+       pending: map[string]*itemDetails{},
+       errs: fault.New(true),
+   }

    mc := mockconnector.NewMockExchangeCollection(inboxPath, 1)
    mc.PrevPath = mc.FullPath()

@ -2361,7 +2388,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSelectsCorrectSubt
        },
    }

-   collections := []data.Collection{mc}
+   collections := []data.BackupCollection{mc}

    dirTree, err := inflateDirTree(
        ctx,
@ -4,7 +4,7 @@ import (
    "context"
    "strings"

-   "github.com/hashicorp/go-multierror"
+   "github.com/alcionai/clues"
    "github.com/kopia/kopia/fs"
    "github.com/kopia/kopia/repo"
    "github.com/kopia/kopia/repo/manifest"

@ -17,6 +17,7 @@ import (
    D "github.com/alcionai/corso/src/internal/diagnostics"
    "github.com/alcionai/corso/src/internal/stats"
    "github.com/alcionai/corso/src/pkg/backup/details"
+   "github.com/alcionai/corso/src/pkg/fault"
    "github.com/alcionai/corso/src/pkg/logger"
    "github.com/alcionai/corso/src/pkg/path"
)

@ -101,7 +102,11 @@ func (w *Wrapper) Close(ctx context.Context) error {
    err := w.c.Close(ctx)
    w.c = nil

-   return errors.Wrap(err, "closing Wrapper")
+   if err != nil {
+       return clues.Wrap(err, "closing Wrapper").WithClues(ctx)
+   }
+
+   return nil
}

type IncrementalBase struct {
@ -118,13 +123,14 @@ type IncrementalBase struct {
func (w Wrapper) BackupCollections(
    ctx context.Context,
    previousSnapshots []IncrementalBase,
-   collections []data.Collection,
+   collections []data.BackupCollection,
    globalExcludeSet map[string]struct{},
    tags map[string]string,
    buildTreeWithBase bool,
+   errs *fault.Errors,
) (*BackupStats, *details.Builder, map[string]path.Path, error) {
    if w.c == nil {
-       return nil, nil, nil, errNotConnected
+       return nil, nil, nil, clues.Stack(errNotConnected).WithClues(ctx)
    }

    ctx, end := D.Span(ctx, "kopia:backupCollections")
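With the new signature, callers construct the fault bus themselves and thread it through the backup. A sketch of the call shape, matching how the tests below invoke it; the helper is hypothetical, and since Wrapper lives in an internal package this only compiles inside the corso module:

package kopia

import (
    "context"

    "github.com/alcionai/corso/src/internal/data"
    "github.com/alcionai/corso/src/pkg/fault"
)

// runBackup is a hypothetical helper showing the new argument order,
// with the *fault.Errors bus passed last.
func runBackup(ctx context.Context, w Wrapper, cols []data.BackupCollection) error {
    errs := fault.New(true) // fail-fast, as the tests construct it

    _, _, _, err := w.BackupCollections(ctx, nil, cols, nil, nil, false, errs)

    return err
}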
@ -138,6 +144,7 @@ func (w Wrapper) BackupCollections(
        pending: map[string]*itemDetails{},
        deets: &details.Builder{},
        toMerge: map[string]path.Path{},
+       errs: errs,
    }

    // When running an incremental backup, we need to pass the prior

@ -165,14 +172,12 @@ func (w Wrapper) BackupCollections(
        previousSnapshots,
        dirTree,
        tags,
-       progress,
-   )
+       progress)
    if err != nil {
-       combinedErrs := multierror.Append(nil, err, progress.errs)
-       return nil, nil, nil, combinedErrs.ErrorOrNil()
+       return nil, nil, nil, err
    }

-   return s, progress.deets, progress.toMerge, progress.errs.ErrorOrNil()
+   return s, progress.deets, progress.toMerge, progress.errs.Err()
}

func (w Wrapper) makeSnapshotWithRoot(

@ -197,9 +202,7 @@ func (w Wrapper) makeSnapshotWithRoot(

    logger.Ctx(ctx).Infow(
        "using snapshots for kopia-assisted incrementals",
-       "snapshot_ids",
-       snapIDs,
-   )
+       "snapshot_ids", snapIDs)

    tags := map[string]string{}

@ -224,6 +227,8 @@ func (w Wrapper) makeSnapshotWithRoot(
            OnUpload: bc.Count,
        },
        func(innerCtx context.Context, rw repo.RepositoryWriter) error {
+           log := logger.Ctx(innerCtx)
+
            si := snapshot.SourceInfo{
                Host: corsoHost,
                UserName: corsoUser,

@ -240,8 +245,8 @@ func (w Wrapper) makeSnapshotWithRoot(
            }
            policyTree, err := policy.TreeForSourceWithOverride(innerCtx, w.c, si, errPolicy)
            if err != nil {
-               err = errors.Wrap(err, "get policy tree")
-               logger.Ctx(innerCtx).Errorw("kopia backup", err)
+               err = clues.Wrap(err, "get policy tree").WithClues(ctx)
+               log.With("err", err).Errorw("building kopia backup", clues.InErr(err).Slice()...)
                return err
            }

@ -253,16 +258,16 @@ func (w Wrapper) makeSnapshotWithRoot(

            man, err = u.Upload(innerCtx, root, policyTree, si, prevSnaps...)
            if err != nil {
-               err = errors.Wrap(err, "uploading data")
-               logger.Ctx(innerCtx).Errorw("kopia backup", err)
+               err = clues.Wrap(err, "uploading data").WithClues(ctx)
+               log.With("err", err).Errorw("uploading kopia backup", clues.InErr(err).Slice()...)
                return err
            }

            man.Tags = tags

            if _, err := snapshot.SaveSnapshot(innerCtx, rw, man); err != nil {
-               err = errors.Wrap(err, "saving snapshot")
-               logger.Ctx(innerCtx).Errorw("kopia backup", err)
+               err = clues.Wrap(err, "saving snapshot").WithClues(ctx)
+               log.With("err", err).Errorw("persisting kopia backup snapshot", clues.InErr(err).Slice()...)
                return err
            }

@ -272,7 +277,7 @@ func (w Wrapper) makeSnapshotWithRoot(
    // Telling kopia to always flush may hide other errors if it fails while
    // flushing the write session (hence logging above).
    if err != nil {
-       return nil, errors.Wrap(err, "kopia backup")
+       return nil, clues.Wrap(err, "kopia backup")
    }

    res := manifestToStats(man, progress, bc)

@ -286,12 +291,15 @@ func (w Wrapper) getSnapshotRoot(
) (fs.Entry, error) {
    man, err := snapshot.LoadSnapshot(ctx, w.c, manifest.ID(snapshotID))
    if err != nil {
-       return nil, errors.Wrap(err, "getting snapshot handle")
+       return nil, clues.Wrap(err, "getting snapshot handle").WithClues(ctx)
    }

    rootDirEntry, err := snapshotfs.SnapshotRoot(w.c, man)
+   if err != nil {
+       return nil, clues.Wrap(err, "getting root directory").WithClues(ctx)
+   }

-   return rootDirEntry, errors.Wrap(err, "getting root directory")
+   return rootDirEntry, nil
}

// getItemStream looks up the item at the given path starting from snapshotRoot.

@ -306,7 +314,7 @@ func getItemStream(
    bcounter ByteCounter,
) (data.Stream, error) {
    if itemPath == nil {
-       return nil, errors.WithStack(errNoRestorePath)
+       return nil, clues.Stack(errNoRestorePath).WithClues(ctx)
    }

    // GetNestedEntry handles nil properly.

@ -317,15 +325,15 @@ func getItemStream(
    )
    if err != nil {
        if strings.Contains(err.Error(), "entry not found") {
-           err = errors.Wrap(ErrNotFound, err.Error())
+           err = clues.Stack(data.ErrNotFound, err).WithClues(ctx)
        }

-       return nil, errors.Wrap(err, "getting nested object handle")
+       return nil, clues.Wrap(err, "getting nested object handle").WithClues(ctx)
    }

    f, ok := e.(fs.File)
    if !ok {
-       return nil, errors.New("requested object is not a file")
+       return nil, clues.New("requested object is not a file").WithClues(ctx)
    }

    if bcounter != nil {
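Stacking data.ErrNotFound over kopia's "entry not found" error keeps errors.Is matching intact, which the ReaderError test below relies on (assert.ErrorIs against data.ErrNotFound). A self-contained demonstration of that property, using a local stand-in sentinel rather than the real data.ErrNotFound:

package main

import (
    "errors"
    "fmt"

    "github.com/alcionai/clues"
)

// errNotFound stands in for data.ErrNotFound to keep the sketch runnable.
var errNotFound = errors.New("not found")

func main() {
    base := errors.New("entry not found: foo")

    // Stack layers the sentinel over the concrete error; both stay
    // visible to errors.Is, as the diff's test assertions expect.
    err := clues.Stack(errNotFound, base)

    fmt.Println(errors.Is(err, errNotFound)) // true
}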
@ -334,12 +342,12 @@ func getItemStream(

    r, err := f.Open(ctx)
    if err != nil {
-       return nil, errors.Wrap(err, "opening file")
+       return nil, clues.Wrap(err, "opening file").WithClues(ctx)
    }

    decodedName, err := decodeElement(f.Name())
    if err != nil {
-       return nil, errors.Wrap(err, "decoding file name")
+       return nil, clues.Wrap(err, "decoding file name").WithClues(ctx)
    }

    return &kopiaDataStream{

@ -368,12 +376,13 @@ func (w Wrapper) RestoreMultipleItems(
    snapshotID string,
    paths []path.Path,
    bcounter ByteCounter,
-) ([]data.Collection, error) {
+   errs *fault.Errors,
+) ([]data.RestoreCollection, error) {
    ctx, end := D.Span(ctx, "kopia:restoreMultipleItems")
    defer end()

    if len(paths) == 0 {
-       return nil, errors.WithStack(errNoRestorePath)
+       return nil, clues.Stack(errNoRestorePath).WithClues(ctx)
    }

    snapshotRoot, err := w.getSnapshotRoot(ctx, snapshotID)

@ -381,40 +390,47 @@ func (w Wrapper) RestoreMultipleItems(
        return nil, err
    }

-   var (
-       errs *multierror.Error
-       // Maps short ID of parent path to data collection for that folder.
-       cols = map[string]*kopiaDataCollection{}
-   )
+   // Maps short ID of parent path to data collection for that folder.
+   cols := map[string]*kopiaDataCollection{}

    for _, itemPath := range paths {
+       if errs.Err() != nil {
+           return nil, errs.Err()
+       }
+
        ds, err := getItemStream(ctx, itemPath, snapshotRoot, bcounter)
        if err != nil {
-           errs = multierror.Append(errs, err)
+           errs.Add(err)
            continue
        }

        parentPath, err := itemPath.Dir()
        if err != nil {
-           errs = multierror.Append(errs, errors.Wrap(err, "making directory collection"))
+           errs.Add(clues.Wrap(err, "making directory collection").WithClues(ctx))
            continue
        }

        c, ok := cols[parentPath.ShortRef()]
        if !ok {
-           cols[parentPath.ShortRef()] = &kopiaDataCollection{path: parentPath}
+           cols[parentPath.ShortRef()] = &kopiaDataCollection{
+               path: parentPath,
+               snapshotRoot: snapshotRoot,
+               counter: bcounter,
+           }
            c = cols[parentPath.ShortRef()]
        }

        c.streams = append(c.streams, ds)
    }

-   res := make([]data.Collection, 0, len(cols))
+   // Can't use the maps package to extract the values because we need to convert
+   // from *kopiaDataCollection to data.RestoreCollection too.
+   res := make([]data.RestoreCollection, 0, len(cols))
    for _, c := range cols {
        res = append(res, c)
    }

-   return res, errs.ErrorOrNil()
+   return res, errs.Err()
}

// DeleteSnapshot removes the provided manifest from kopia.
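RestoreMultipleItems now separates recoverable per-item failures (errs.Add, then continue) from a poisoned run (a non-nil errs.Err() aborts before the next item). A compact sketch of that accumulate-or-bail loop using only the fault calls visible in this diff; the item slice and failure condition are illustrative, and in fail-fast mode Add is expected to surface through Err on the next check:

package main

import (
    "errors"
    "fmt"

    "github.com/alcionai/corso/src/pkg/fault"
)

// process mimics the restore loop: per-item errors are recorded on the bus,
// and the loop bails out as soon as the bus reports a fatal condition.
func process(items []string, errs *fault.Errors) error {
    for _, item := range items {
        if errs.Err() != nil {
            return errs.Err()
        }

        if item == "bad" { // hypothetical failure condition
            errs.Add(errors.New("failed: " + item))
            continue
        }
    }

    return errs.Err()
}

func main() {
    fmt.Println(process([]string{"ok", "bad", "ok"}, fault.New(true)))
}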
@ -425,7 +441,7 @@ func (w Wrapper) DeleteSnapshot(
    mid := manifest.ID(snapshotID)

    if len(mid) == 0 {
-       return errors.New("attempt to delete unidentified snapshot")
+       return clues.New("attempt to delete unidentified snapshot").WithClues(ctx)
    }

    err := repo.WriteSession(

@ -434,7 +450,7 @@ func (w Wrapper) DeleteSnapshot(
        repo.WriteSessionOptions{Purpose: "KopiaWrapperBackupDeletion"},
        func(innerCtx context.Context, rw repo.RepositoryWriter) error {
            if err := rw.DeleteManifest(ctx, mid); err != nil {
-               return errors.Wrap(err, "deleting snapshot")
+               return clues.Wrap(err, "deleting snapshot").WithClues(ctx)
            }

            return nil

@ -443,7 +459,7 @@ func (w Wrapper) DeleteSnapshot(
    // Telling kopia to always flush may hide other errors if it fails while
    // flushing the write session (hence logging above).
    if err != nil {
-       return errors.Wrap(err, "kopia deleting backup manifest")
+       return clues.Wrap(err, "deleting backup manifest").WithClues(ctx)
    }

    return nil

@ -464,7 +480,7 @@ func (w Wrapper) FetchPrevSnapshotManifests(
    tags map[string]string,
) ([]*ManifestEntry, error) {
    if w.c == nil {
-       return nil, errors.WithStack(errNotConnected)
+       return nil, clues.Stack(errNotConnected).WithClues(ctx)
    }

    return fetchPrevSnapshotManifests(ctx, w.c, reasons, tags), nil

@ -19,6 +19,7 @@ import (
    "github.com/alcionai/corso/src/internal/connector/mockconnector"
    "github.com/alcionai/corso/src/internal/data"
    "github.com/alcionai/corso/src/internal/tester"
+   "github.com/alcionai/corso/src/pkg/fault"
    "github.com/alcionai/corso/src/pkg/logger"
    "github.com/alcionai/corso/src/pkg/path"
)

@ -52,7 +53,7 @@ var (
func testForFiles(
    t *testing.T,
    expected map[string][]byte,
-   collections []data.Collection,
+   collections []data.RestoreCollection,
) {
    t.Helper()

@ -196,7 +197,7 @@ func (suite *KopiaIntegrationSuite) TearDownTest() {
}

func (suite *KopiaIntegrationSuite) TestBackupCollections() {
-   collections := []data.Collection{
+   collections := []data.BackupCollection{
        mockconnector.NewMockExchangeCollection(
            suite.testPath1,
            5,

@ -269,7 +270,7 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections() {
            nil,
            tags,
            true,
-       )
+           fault.New(true))
        assert.NoError(t, err)

        assert.Equal(t, test.expectedUploadedFiles, stats.TotalFileCount, "total files")

@ -353,11 +354,11 @@ func (suite *KopiaIntegrationSuite) TestRestoreAfterCompressionChange() {
    stats, _, _, err := w.BackupCollections(
        ctx,
        nil,
-       []data.Collection{dc1, dc2},
+       []data.BackupCollection{dc1, dc2},
        nil,
        tags,
        true,
-   )
+       fault.New(true))
    require.NoError(t, err)

    require.NoError(t, k.Compression(ctx, "gzip"))

@ -374,14 +375,49 @@
            fp1,
            fp2,
        },
-       nil)
+       nil,
+       fault.New(true))
    require.NoError(t, err)
    assert.Equal(t, 2, len(result))

    testForFiles(t, expected, result)
}

+type mockBackupCollection struct {
+   path path.Path
+   streams []data.Stream
+}
+
+func (c *mockBackupCollection) Items() <-chan data.Stream {
+   res := make(chan data.Stream)
+
+   go func() {
+       defer close(res)
+
+       for _, s := range c.streams {
+           res <- s
+       }
+   }()
+
+   return res
+}
+
+func (c mockBackupCollection) FullPath() path.Path {
+   return c.path
+}
+
+func (c mockBackupCollection) PreviousPath() path.Path {
+   return nil
+}
+
+func (c mockBackupCollection) State() data.CollectionState {
+   return data.NewState
+}
+
+func (c mockBackupCollection) DoNotMergeItems() bool {
+   return false
+}
+
func (suite *KopiaIntegrationSuite) TestBackupCollections_ReaderError() {
    t := suite.T()

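mockBackupCollection replaces kopiaDataCollection as the test double now that the restore-side type carries snapshotRoot and counter fields. If the mock ever drifts from the interface the backup path consumes, a compile-time assertion catches it early; a one-line check that could sit next to the type, assuming data.BackupCollection is that interface, as the signatures above indicate:

var _ data.BackupCollection = (*mockBackupCollection)(nil)

Because Items uses a pointer receiver, the assertion targets the pointer type, which matches how the tests construct the mock (&mockBackupCollection{...}).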
@ -396,8 +432,8 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_ReaderError() {
|
|||||||
tags[k] = ""
|
tags[k] = ""
|
||||||
}
|
}
|
||||||
|
|
||||||
collections := []data.Collection{
|
collections := []data.BackupCollection{
|
||||||
&kopiaDataCollection{
|
&mockBackupCollection{
|
||||||
path: suite.testPath1,
|
path: suite.testPath1,
|
||||||
streams: []data.Stream{
|
streams: []data.Stream{
|
||||||
&mockconnector.MockExchangeData{
|
&mockconnector.MockExchangeData{
|
||||||
@ -410,7 +446,7 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_ReaderError() {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
&kopiaDataCollection{
|
&mockBackupCollection{
|
||||||
path: suite.testPath2,
|
path: suite.testPath2,
|
||||||
streams: []data.Stream{
|
streams: []data.Stream{
|
||||||
&mockconnector.MockExchangeData{
|
&mockconnector.MockExchangeData{
|
||||||
@ -440,7 +476,7 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_ReaderError() {
|
|||||||
nil,
|
nil,
|
||||||
tags,
|
tags,
|
||||||
true,
|
true,
|
||||||
)
|
fault.New(true))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
assert.Equal(t, 0, stats.ErrorCount)
|
assert.Equal(t, 0, stats.ErrorCount)
|
||||||
@ -461,11 +497,11 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_ReaderError() {
|
|||||||
string(stats.SnapshotID),
|
string(stats.SnapshotID),
|
||||||
[]path.Path{failedPath},
|
[]path.Path{failedPath},
|
||||||
&ic,
|
&ic,
|
||||||
)
|
fault.New(true))
|
||||||
// Files that had an error shouldn't make a dir entry in kopia. If they do we
|
// Files that had an error shouldn't make a dir entry in kopia. If they do we
|
||||||
// may run into kopia-assisted incrementals issues because only mod time and
|
// may run into kopia-assisted incrementals issues because only mod time and
|
||||||
// not file size is checked for StreamingFiles.
|
// not file size is checked for StreamingFiles.
|
||||||
assert.ErrorIs(t, err, ErrNotFound, "errored file is restorable")
|
assert.ErrorIs(t, err, data.ErrNotFound, "errored file is restorable")
|
||||||
}
|
}
|
||||||
|
|
||||||
type backedupFile struct {
|
type backedupFile struct {
|
||||||
@ -477,7 +513,7 @@ type backedupFile struct {
|
|||||||
func (suite *KopiaIntegrationSuite) TestBackupCollectionsHandlesNoCollections() {
|
func (suite *KopiaIntegrationSuite) TestBackupCollectionsHandlesNoCollections() {
|
||||||
table := []struct {
|
table := []struct {
|
||||||
name string
|
name string
|
||||||
collections []data.Collection
|
collections []data.BackupCollection
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
name: "NilCollections",
|
name: "NilCollections",
|
||||||
@ -485,7 +521,7 @@ func (suite *KopiaIntegrationSuite) TestBackupCollectionsHandlesNoCollections()
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "EmptyCollections",
|
name: "EmptyCollections",
|
||||||
collections: []data.Collection{},
|
collections: []data.BackupCollection{},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -501,7 +537,7 @@ func (suite *KopiaIntegrationSuite) TestBackupCollectionsHandlesNoCollections()
|
|||||||
nil,
|
nil,
|
||||||
nil,
|
nil,
|
||||||
true,
|
true,
|
||||||
)
|
fault.New(true))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
assert.Equal(t, BackupStats{}, *s)
|
assert.Equal(t, BackupStats{}, *s)
|
||||||
@ -624,10 +660,10 @@ func (suite *KopiaSimpleRepoIntegrationSuite) SetupTest() {
|
|||||||
|
|
||||||
suite.w = &Wrapper{c}
|
suite.w = &Wrapper{c}
|
||||||
|
|
||||||
collections := []data.Collection{}
|
collections := []data.BackupCollection{}
|
||||||
|
|
||||||
for _, parent := range []path.Path{suite.testPath1, suite.testPath2} {
|
for _, parent := range []path.Path{suite.testPath1, suite.testPath2} {
|
||||||
collection := &kopiaDataCollection{path: parent}
|
collection := &mockBackupCollection{path: parent}
|
||||||
|
|
||||||
for _, item := range suite.files[parent.String()] {
|
for _, item := range suite.files[parent.String()] {
|
||||||
collection.streams = append(
|
collection.streams = append(
|
||||||
@ -660,7 +696,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) SetupTest() {
|
|||||||
nil,
|
nil,
|
||||||
tags,
|
tags,
|
||||||
false,
|
false,
|
||||||
)
|
fault.New(true))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, stats.ErrorCount, 0)
|
require.Equal(t, stats.ErrorCount, 0)
|
||||||
require.Equal(t, stats.TotalFileCount, expectedFiles)
|
require.Equal(t, stats.TotalFileCount, expectedFiles)
|
||||||
@ -723,7 +759,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() {
|
|||||||
excludeItem bool
|
excludeItem bool
|
||||||
expectedCachedItems int
|
expectedCachedItems int
|
||||||
expectedUncachedItems int
|
expectedUncachedItems int
|
||||||
cols func() []data.Collection
|
cols func() []data.BackupCollection
|
||||||
backupIDCheck require.ValueAssertionFunc
|
backupIDCheck require.ValueAssertionFunc
|
||||||
restoreCheck assert.ErrorAssertionFunc
|
restoreCheck assert.ErrorAssertionFunc
|
||||||
}{
|
}{
|
||||||
@ -732,7 +768,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() {
|
|||||||
excludeItem: true,
|
excludeItem: true,
|
||||||
expectedCachedItems: len(suite.filesByPath) - 1,
|
expectedCachedItems: len(suite.filesByPath) - 1,
|
||||||
expectedUncachedItems: 0,
|
expectedUncachedItems: 0,
|
||||||
cols: func() []data.Collection {
|
cols: func() []data.BackupCollection {
|
||||||
return nil
|
return nil
|
||||||
},
|
},
|
||||||
backupIDCheck: require.NotEmpty,
|
backupIDCheck: require.NotEmpty,
|
||||||
@@ -743,7 +779,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() {
             // No snapshot should be made since there were no changes.
             expectedCachedItems:   0,
             expectedUncachedItems: 0,
-            cols: func() []data.Collection {
+            cols: func() []data.BackupCollection {
                 return nil
             },
             // Backup doesn't run.
@@ -753,14 +789,14 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() {
             name:                  "NoExcludeItemWithChanges",
             expectedCachedItems:   len(suite.filesByPath),
             expectedUncachedItems: 1,
-            cols: func() []data.Collection {
+            cols: func() []data.BackupCollection {
                 c := mockconnector.NewMockExchangeCollection(
                     suite.testPath1,
                     1,
                 )
                 c.ColState = data.NotMovedState
 
-                return []data.Collection{c}
+                return []data.BackupCollection{c}
             },
             backupIDCheck: require.NotEmpty,
             restoreCheck:  assert.NoError,
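A note on the arithmetic in the `NoExcludeItemWithChanges` case: the mock collection is marked `data.NotMovedState`, so kopia can treat every previously backed-up file as cached and only the single new mock item as uncached. This is a reading implied by the table values, restated below.

    // Illustrative restatement of the expected counts above.
    cachedItems := len(suite.filesByPath) // every file from the base snapshot hits the cache
    uncachedItems := 1                    // the one new item from the mock Exchange collection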
@@ -790,7 +826,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() {
                 excluded,
                 tags,
                 true,
-            )
+                fault.New(true))
             require.NoError(t, err)
             assert.Equal(t, test.expectedCachedItems, stats.CachedFileCount)
             assert.Equal(t, test.expectedUncachedItems, stats.UncachedFileCount)
@@ -810,7 +846,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() {
                     suite.files[suite.testPath1.String()][0].itemPath,
                 },
                 &ic,
-            )
+                fault.New(true))
             test.restoreCheck(t, err)
         })
     }
@@ -867,7 +903,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestRestoreMultipleItems() {
                 suite.testPath1,
                 suite.files[suite.testPath2.String()][0].itemPath,
             },
-            expectedCollections: 2,
+            expectedCollections: 0,
             expectedErr:         assert.Error,
         },
         {
@@ -877,7 +913,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestRestoreMultipleItems() {
                 doesntExist,
                 suite.files[suite.testPath2.String()][0].itemPath,
             },
-            expectedCollections: 2,
+            expectedCollections: 0,
             expectedErr:         assert.Error,
         },
     }
@@ -904,9 +940,14 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestRestoreMultipleItems() {
                 suite.ctx,
                 string(suite.snapshotID),
                 test.inputPaths,
-                &ic)
+                &ic,
+                fault.New(true))
             test.expectedErr(t, err)
 
+            if err != nil {
+                return
+            }
+
             assert.Len(t, result, test.expectedCollections)
             assert.Less(t, int64(0), ic.i)
             testForFiles(t, expected, result)
@@ -946,7 +987,8 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestRestoreMultipleItems_Errors()
                 suite.ctx,
                 test.snapshotID,
                 test.paths,
-                nil)
+                nil,
+                fault.New(true))
             assert.Error(t, err)
             assert.Empty(t, c)
         })
@@ -966,7 +1008,8 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestDeleteSnapshot() {
         suite.ctx,
         string(suite.snapshotID),
         []path.Path{itemPath},
-        &ic)
+        &ic,
+        fault.New(true))
     assert.Error(t, err, "snapshot should be deleted")
     assert.Empty(t, c)
     assert.Zero(t, ic.i)
@@ -26,6 +26,7 @@ import (
     "github.com/alcionai/corso/src/pkg/backup"
     "github.com/alcionai/corso/src/pkg/backup/details"
     "github.com/alcionai/corso/src/pkg/control"
+    "github.com/alcionai/corso/src/pkg/fault"
     "github.com/alcionai/corso/src/pkg/logger"
     "github.com/alcionai/corso/src/pkg/path"
     "github.com/alcionai/corso/src/pkg/selectors"
@@ -100,7 +101,7 @@ type backupStats struct {
 }
 
 type detailsWriter interface {
-    WriteBackupDetails(context.Context, *details.Details) (string, error)
+    WriteBackupDetails(context.Context, *details.Details, *fault.Errors) (string, error)
 }
 
 // ---------------------------------------------------------------------------
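`detailsWriter` now threads the fault collector through as well. A minimal illustrative implementation of the updated interface follows; the type, its storage, and the `uuid` helper are invented for the example, while the real writer persists to the model store.

    package operations

    import (
        "context"

        "github.com/google/uuid" // assumed helper, for the example only

        "github.com/alcionai/corso/src/pkg/backup/details"
        "github.com/alcionai/corso/src/pkg/fault"
    )

    // inMemoryDetailsWriter is a hypothetical stand-in satisfying the updated
    // detailsWriter interface.
    type inMemoryDetailsWriter struct {
        saved map[string]*details.Details
    }

    func (w *inMemoryDetailsWriter) WriteBackupDetails(
        ctx context.Context,
        d *details.Details,
        errs *fault.Errors,
    ) (string, error) {
        // A real implementation would persist d and record partial failures
        // on errs instead of holding everything in memory.
        id := uuid.NewString()
        w.saved[id] = d

        return id, nil
    }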
@@ -238,12 +239,12 @@ func (op *BackupOperation) do(
         return nil, errors.Wrap(err, "producing manifests and metadata")
     }
 
-    gc, err := connectToM365(ctx, op.Selectors, op.account)
+    gc, err := connectToM365(ctx, op.Selectors, op.account, op.Errors)
     if err != nil {
         return nil, errors.Wrap(err, "connecting to m365")
     }
 
-    cs, err := produceBackupDataCollections(ctx, gc, op.Selectors, mdColls, op.Options)
+    cs, excludes, err := produceBackupDataCollections(ctx, gc, op.Selectors, mdColls, op.Options)
     if err != nil {
         return nil, errors.Wrap(err, "producing backup data collections")
     }
@@ -257,8 +258,10 @@ func (op *BackupOperation) do(
         reasons,
         mans,
         cs,
+        excludes,
         backupID,
-        op.incremental && canUseMetaData)
+        op.incremental && canUseMetaData,
+        op.Errors)
     if err != nil {
         return nil, errors.Wrap(err, "persisting collection backups")
     }
@@ -271,7 +274,8 @@ func (op *BackupOperation) do(
         detailsStore,
         mans,
         toMerge,
-        deets)
+        deets,
+        op.Errors)
     if err != nil {
         return nil, errors.Wrap(err, "merging details")
     }
@@ -307,9 +311,9 @@ func produceBackupDataCollections(
     ctx context.Context,
     gc *connector.GraphConnector,
     sel selectors.Selector,
-    metadata []data.Collection,
+    metadata []data.RestoreCollection,
     ctrlOpts control.Options,
-) ([]data.Collection, error) {
+) ([]data.BackupCollection, map[string]struct{}, error) {
     complete, closer := observe.MessageWithCompletion(ctx, observe.Safe("Discovering items to backup"))
     defer func() {
         complete <- struct{}{}
@@ -317,11 +321,9 @@ func produceBackupDataCollections(
         closer()
     }()
 
-    // TODO(ashmrtn): When we're ready to wire up the global exclude list return
-    // all values.
-    cols, _, errs := gc.DataCollections(ctx, sel, metadata, ctrlOpts)
+    cols, excludes, errs := gc.DataCollections(ctx, sel, metadata, ctrlOpts)
 
-    return cols, errs
+    return cols, excludes, errs
 }
 
 // ---------------------------------------------------------------------------
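With that change, `produceBackupDataCollections` surfaces the connector's global exclude list instead of discarding it. The list is a plain `map[string]struct{}` used as a set, which keeps membership checks O(1) without paying for values. A minimal sketch, assuming the keys identify items deleted since the base snapshot:

    // Build the set; the key format is an assumption.
    excludes := map[string]struct{}{
        "itemRef-123": {},
    }

    // Membership test: items found here are dropped rather than merged
    // forward from the base snapshot.
    if _, skip := excludes["itemRef-123"]; skip {
        // omit this item from the new snapshot
    }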
@@ -332,10 +334,11 @@ type backuper interface {
     BackupCollections(
         ctx context.Context,
         bases []kopia.IncrementalBase,
-        cs []data.Collection,
+        cs []data.BackupCollection,
         excluded map[string]struct{},
         tags map[string]string,
         buildTreeWithBase bool,
+        errs *fault.Errors,
     ) (*kopia.BackupStats, *details.Builder, map[string]path.Path, error)
 }
 
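With the `backuper` interface updated, a caller supplies both collections and the fault collector. A sketch of the call shape follows; the helper, its import paths, and the fail-fast reading of `fault.New(true)` are assumptions, while the parameter order comes straight from the interface above.

    package operations

    import (
        "context"

        "github.com/alcionai/corso/src/internal/data"  // paths assumed
        "github.com/alcionai/corso/src/internal/kopia"
        "github.com/alcionai/corso/src/pkg/backup/details"
        "github.com/alcionai/corso/src/pkg/fault"
        "github.com/alcionai/corso/src/pkg/path"
    )

    // runBackup is a hypothetical wrapper showing the new call shape.
    func runBackup(
        ctx context.Context,
        bu backuper,
        cs []data.BackupCollection,
        tags map[string]string,
    ) (*kopia.BackupStats, *details.Builder, map[string]path.Path, error) {
        // excluded stays nil for now: the kopia layer isn't consuming the
        // global exclude list yet (see the TODO later in this diff).
        return bu.BackupCollections(ctx, nil, cs, nil, tags, false, fault.New(true))
    }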
@@ -390,9 +393,11 @@ func consumeBackupDataCollections(
     tenantID string,
     reasons []kopia.Reason,
     mans []*kopia.ManifestEntry,
-    cs []data.Collection,
+    cs []data.BackupCollection,
+    excludes map[string]struct{},
     backupID model.StableID,
     isIncremental bool,
+    errs *fault.Errors,
 ) (*kopia.BackupStats, *details.Builder, map[string]path.Path, error) {
     complete, closer := observe.MessageWithCompletion(ctx, observe.Safe("Backing up data"))
     defer func() {
@@ -456,9 +461,12 @@ func consumeBackupDataCollections(
         ctx,
         bases,
         cs,
+        // TODO(ashmrtn): When we're ready to enable incremental backups for
+        // OneDrive replace this with `excludes`.
         nil,
         tags,
-        isIncremental)
+        isIncremental,
+        errs)
     if err != nil {
         if kopiaStats == nil {
             return nil, nil, nil, err
@@ -498,6 +506,7 @@ func mergeDetails(
     mans []*kopia.ManifestEntry,
     shortRefsFromPrevBackup map[string]path.Path,
     deets *details.Builder,
+    errs *fault.Errors,
 ) error {
     // Don't bother loading any of the base details if there's nothing we need to
     // merge.
@@ -527,7 +536,8 @@ func mergeDetails(
             ctx,
             model.StableID(bID),
             ms,
-            detailsStore)
+            detailsStore,
+            errs)
         if err != nil {
             return clues.New("fetching base details for backup").WithClues(mctx)
         }
@@ -648,7 +658,7 @@ func (op *BackupOperation) createBackupModels(
         return clues.New("no backup details to record").WithClues(ctx)
     }
 
-    detailsID, err := detailsStore.WriteBackupDetails(ctx, backupDetails)
+    detailsID, err := detailsStore.WriteBackupDetails(ctx, backupDetails, op.Errors)
     if err != nil {
         return clues.Wrap(err, "creating backupDetails model").WithClues(ctx)
     }
@@ -31,6 +31,7 @@ import (
     "github.com/alcionai/corso/src/pkg/backup"
     "github.com/alcionai/corso/src/pkg/backup/details"
     "github.com/alcionai/corso/src/pkg/control"
+    "github.com/alcionai/corso/src/pkg/fault"
    "github.com/alcionai/corso/src/pkg/path"
     "github.com/alcionai/corso/src/pkg/selectors"
     "github.com/alcionai/corso/src/pkg/store"
@@ -250,7 +251,7 @@ func checkMetadataFilesExist(
             pathsByRef[dir.ShortRef()] = append(pathsByRef[dir.ShortRef()], fName)
         }
 
-        cols, err := kw.RestoreMultipleItems(ctx, bup.SnapshotID, paths, nil)
+        cols, err := kw.RestoreMultipleItems(ctx, bup.SnapshotID, paths, nil, fault.New(true))
         assert.NoError(t, err)
 
         for _, col := range cols {
@@ -346,8 +347,7 @@ func generateContainerOfItems(
         sel,
         dest,
         control.Options{RestorePermissions: true},
-        dataColls,
-    )
+        dataColls)
     require.NoError(t, err)
 
     return deets
@@ -387,10 +387,10 @@ func buildCollections(
     tenant, user string,
     dest control.RestoreDestination,
     colls []incrementalCollection,
-) []data.Collection {
+) []data.RestoreCollection {
     t.Helper()
 
-    collections := make([]data.Collection, 0, len(colls))
+    collections := make([]data.RestoreCollection, 0, len(colls))
 
     for _, c := range colls {
         pth := toDataLayerPath(
@@ -409,7 +409,7 @@ func buildCollections(
             mc.Data[i] = c.items[i].data
         }
 
-        collections = append(collections, mc)
+        collections = append(collections, data.NotFoundRestoreCollection{Collection: mc})
     }
 
     return collections
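`buildCollections` wraps each mock in `data.NotFoundRestoreCollection` to satisfy the new `[]data.RestoreCollection` return type. The wrapper's behavior isn't shown in this diff; a plausible reading, flagged entirely as an assumption, is an embedded collection whose single-item lookup always misses, which is fine for test collections that are only ever iterated.

    package data

    import (
        "context"
        "errors"
    )

    // Assumed shape: embedding supplies the streaming methods, while Fetch
    // (both the name and the signature are guesses) reports every lookup as
    // missing.
    type NotFoundRestoreCollection struct {
        Collection
    }

    func (c NotFoundRestoreCollection) Fetch(ctx context.Context, name string) (Stream, error) {
        return nil, errors.New("not found")
    }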
@@ -669,7 +669,12 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_exchangeIncrementals() {
     m365, err := acct.M365Config()
     require.NoError(t, err)
 
-    gc, err := connector.NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), acct, connector.Users)
+    gc, err := connector.NewGraphConnector(
+        ctx,
+        graph.HTTPClient(graph.NoTimeout()),
+        acct,
+        connector.Users,
+        fault.New(true))
     require.NoError(t, err)
 
     ac, err := api.NewClient(m365)