Merge branch 'main' into sharepoint-restore-selectors

Commit 8d5ee37c53

.github/workflows/ci.yml (vendored): 2 changed lines
@@ -501,7 +501,7 @@ jobs:
      # deploy the image
      - name: Build image and push to GitHub Container Registry
-       uses: docker/build-push-action@v3
+       uses: docker/build-push-action@v4
        with:
          context: .
          file: ./build/Dockerfile
CHANGELOG.md: 17 changed lines

@@ -6,6 +6,17 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [Unreleased] (alpha)

### Added

### Fixed

- Support for item.Attachment:Mail restore

### Changed

### Known Issues

- Nested attachments are currently not restored due to an [issue](https://github.com/microsoft/kiota-serialization-json-go/issues/61) discovered in the Graph APIs

## [v0.3.0] (alpha) - 2023-2-07

### Added

@@ -17,9 +28,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Add versions to backups so that we can understand/handle older backup formats

### Fixed

- Backing up a calendar that has the same name as the default calendar
- Added additional backoff-retry to all OneDrive queries.
- Users with `null` userType values are no longer excluded from user queries.
- Fix bug when backing up a calendar that has the same name as the default calendar

### Known Issues

@@ -156,7 +168,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Miscellaneous
  - Optional usage statistics reporting ([RM-35](https://github.com/alcionai/corso-roadmap/issues/35))

-[Unreleased]: https://github.com/alcionai/corso/compare/v0.2.0...HEAD
+[Unreleased]: https://github.com/alcionai/corso/compare/v0.3.0...HEAD
[v0.3.0]: https://github.com/alcionai/corso/compare/v0.2.0...v0.3.0
[v0.2.0]: https://github.com/alcionai/corso/compare/v0.1.0...v0.2.0
[v0.1.0]: https://github.com/alcionai/corso/compare/v0.0.4...v0.1.0
[v0.0.4]: https://github.com/alcionai/corso/compare/v0.0.3...v0.0.4
@@ -6,7 +6,7 @@ COPY src .
ARG CORSO_BUILD_LDFLAGS=""
RUN go build -o corso -ldflags "$CORSO_BUILD_LDFLAGS"

-FROM alpine:3.16
+FROM alpine:3.17

LABEL org.opencontainers.image.title="Corso"
LABEL org.opencontainers.image.description="Free, Secure, and Open-Source Backup for Microsoft 365"
@@ -15,6 +15,11 @@ lint: check-lint-version
	golangci-lint run
	staticcheck ./...

fmt:
	gofumpt -w .
	goimports -w .
	gci write --skip-generated -s 'standard,default,prefix(github.com/alcionai/corso)' .

check-lint-version: check-lint
	@if [ "$(LINT_VERSION)" != "$(WANTED_LINT_VERSION)" ]; then \
		echo >&2 $(BAD_LINT_MSG); \

@@ -74,4 +79,4 @@ load-test:
	-mutexprofile=mutex.prof \
	-trace=trace.out \
	-outputdir=test_results \
-	./pkg/repository/repository_load_test.go
+	./pkg/repository/loadtest/repository_load_test.go
@ -12,10 +12,11 @@ import (
|
||||
"github.com/alcionai/corso/src/cli/options"
|
||||
. "github.com/alcionai/corso/src/cli/print"
|
||||
"github.com/alcionai/corso/src/cli/utils"
|
||||
"github.com/alcionai/corso/src/internal/kopia"
|
||||
"github.com/alcionai/corso/src/internal/data"
|
||||
"github.com/alcionai/corso/src/internal/model"
|
||||
"github.com/alcionai/corso/src/pkg/backup"
|
||||
"github.com/alcionai/corso/src/pkg/backup/details"
|
||||
"github.com/alcionai/corso/src/pkg/fault"
|
||||
"github.com/alcionai/corso/src/pkg/path"
|
||||
"github.com/alcionai/corso/src/pkg/repository"
|
||||
"github.com/alcionai/corso/src/pkg/selectors"
|
||||
@ -272,20 +273,23 @@ func createExchangeCmd(cmd *cobra.Command, args []string) error {
|
||||
|
||||
sel := exchangeBackupCreateSelectors(user, exchangeData)
|
||||
|
||||
users, err := m365.UserPNs(ctx, acct)
|
||||
// TODO: log/print recoverable errors
|
||||
errs := fault.New(false)
|
||||
|
||||
users, err := m365.UserPNs(ctx, acct, errs)
|
||||
if err != nil {
|
||||
return Only(ctx, errors.Wrap(err, "Failed to retrieve M365 user(s)"))
|
||||
}
|
||||
|
||||
var (
|
||||
errs *multierror.Error
|
||||
bIDs []model.StableID
|
||||
merrs *multierror.Error
|
||||
bIDs []model.StableID
|
||||
)
|
||||
|
||||
for _, discSel := range sel.SplitByResourceOwner(users) {
|
||||
bo, err := r.NewBackup(ctx, discSel.Selector)
|
||||
if err != nil {
|
||||
errs = multierror.Append(errs, errors.Wrapf(
|
||||
merrs = multierror.Append(merrs, errors.Wrapf(
|
||||
err,
|
||||
"Failed to initialize Exchange backup for user %s",
|
||||
discSel.DiscreteOwner,
|
||||
@ -296,7 +300,7 @@ func createExchangeCmd(cmd *cobra.Command, args []string) error {
|
||||
|
||||
err = bo.Run(ctx)
|
||||
if err != nil {
|
||||
errs = multierror.Append(errs, errors.Wrapf(
|
||||
merrs = multierror.Append(merrs, errors.Wrapf(
|
||||
err,
|
||||
"Failed to run Exchange backup for user %s",
|
||||
discSel.DiscreteOwner,
|
||||
@ -308,30 +312,31 @@ func createExchangeCmd(cmd *cobra.Command, args []string) error {
|
||||
bIDs = append(bIDs, bo.Results.BackupID)
|
||||
}
|
||||
|
||||
bups, err := r.Backups(ctx, bIDs)
|
||||
if err != nil {
|
||||
return Only(ctx, errors.Wrap(err, "Unable to retrieve backup results from storage"))
|
||||
bups, ferrs := r.Backups(ctx, bIDs)
|
||||
// TODO: print/log recoverable errors
|
||||
if ferrs.Err() != nil {
|
||||
return Only(ctx, errors.Wrap(ferrs.Err(), "Unable to retrieve backup results from storage"))
|
||||
}
|
||||
|
||||
backup.PrintAll(ctx, bups)
|
||||
|
||||
if e := errs.ErrorOrNil(); e != nil {
|
||||
if e := merrs.ErrorOrNil(); e != nil {
|
||||
return Only(ctx, e)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func exchangeBackupCreateSelectors(userIDs, data []string) *selectors.ExchangeBackup {
|
||||
func exchangeBackupCreateSelectors(userIDs, cats []string) *selectors.ExchangeBackup {
|
||||
sel := selectors.NewExchangeBackup(userIDs)
|
||||
|
||||
if len(data) == 0 {
|
||||
if len(cats) == 0 {
|
||||
sel.Include(sel.ContactFolders(selectors.Any()))
|
||||
sel.Include(sel.MailFolders(selectors.Any()))
|
||||
sel.Include(sel.EventCalendars(selectors.Any()))
|
||||
}
|
||||
|
||||
for _, d := range data {
|
||||
for _, d := range cats {
|
||||
switch d {
|
||||
case dataContacts:
|
||||
sel.Include(sel.ContactFolders(selectors.Any()))
|
||||
@ -345,12 +350,12 @@ func exchangeBackupCreateSelectors(userIDs, data []string) *selectors.ExchangeBa
|
||||
return sel
|
||||
}
|
||||
|
||||
func validateExchangeBackupCreateFlags(userIDs, data []string) error {
|
||||
func validateExchangeBackupCreateFlags(userIDs, cats []string) error {
|
||||
if len(userIDs) == 0 {
|
||||
return errors.New("--user requires one or more email addresses or the wildcard '*'")
|
||||
}
|
||||
|
||||
for _, d := range data {
|
||||
for _, d := range cats {
|
||||
if d != dataContacts && d != dataEmail && d != dataEvents {
|
||||
return errors.New(
|
||||
d + " is an unrecognized data type; must be one of " + dataContacts + ", " + dataEmail + ", or " + dataEvents)
|
||||
@ -393,7 +398,7 @@ func listExchangeCmd(cmd *cobra.Command, args []string) error {
|
||||
if len(backupID) > 0 {
|
||||
b, err := r.Backup(ctx, model.StableID(backupID))
|
||||
if err != nil {
|
||||
if errors.Is(err, kopia.ErrNotFound) {
|
||||
if errors.Is(err, data.ErrNotFound) {
|
||||
return Only(ctx, errors.Errorf("No backup exists with the id %s", backupID))
|
||||
}
|
||||
|
||||
@@ -486,6 +491,8 @@ func detailsExchangeCmd(cmd *cobra.Command, args []string) error {
}

// runDetailsExchangeCmd actually performs the lookup in backup details.
// the fault.Errors return is always non-nil. Callers should check if
// errs.Err() == nil.
func runDetailsExchangeCmd(
	ctx context.Context,
	r repository.BackupGetter,
@@ -496,19 +503,20 @@ func runDetailsExchangeCmd(
		return nil, err
	}

-	d, _, err := r.BackupDetails(ctx, backupID)
-	if err != nil {
-		if errors.Is(err, kopia.ErrNotFound) {
+	d, _, errs := r.BackupDetails(ctx, backupID)
+	// TODO: log/track recoverable errors
+	if errs.Err() != nil {
+		if errors.Is(errs.Err(), data.ErrNotFound) {
			return nil, errors.Errorf("No backup exists with the id %s", backupID)
		}

-		return nil, errors.Wrap(err, "Failed to get backup details in the repository")
+		return nil, errors.Wrap(errs.Err(), "Failed to get backup details in the repository")
	}

	sel := utils.IncludeExchangeRestoreDataSelectors(opts)
	utils.FilterExchangeRestoreInfoSelectors(sel, opts)

-	return sel.Reduce(ctx, d), nil
+	return sel.Reduce(ctx, d, errs), nil
}
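The doc comment above captures the new fault.Errors contract: the returned pointer is always non-nil, errs.Err() carries the non-recoverable failure (if any), and errs.Errs() holds the recoverable errors that the TODOs say should eventually be logged. A minimal caller-side sketch of that convention follows; handleFaults is a hypothetical helper, and it assumes Errs() yields a slice of recoverable errors and that the boolean passed to fault.New toggles fail-fast collection, neither of which is spelled out in this diff.

```go
package main

import (
	"errors"
	"fmt"

	"github.com/alcionai/corso/src/pkg/fault"
)

// handleFaults shows the calling convention documented above: Err() is the
// hard failure (nil on success), Errs() lists recoverable errors that are
// logged rather than aborting the run.
func handleFaults(errs *fault.Errors) error {
	if errs.Err() != nil {
		// non-recoverable: stop and surface the failure
		return errs.Err()
	}

	for _, e := range errs.Errs() {
		// recoverable: log and continue
		fmt.Println("recoverable:", e)
	}

	return nil
}

func main() {
	// fault.New(false) mirrors the commands above; Fail marks a
	// non-recoverable error, as the MockBackupGetter in opts.go does.
	errs := fault.New(false).Fail(errors.New("example hard failure"))
	fmt.Println(handleFaults(errs))
}
```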
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
|
||||
@ -296,8 +296,9 @@ func (suite *PreparedBackupExchangeIntegrationSuite) SetupSuite() {
|
||||
b, err := suite.repo.Backup(ctx, bop.Results.BackupID)
|
||||
require.NoError(t, err, "retrieving recent backup by ID")
|
||||
require.Equal(t, bIDs, string(b.ID), "repo backup matches results id")
|
||||
_, b, err = suite.repo.BackupDetails(ctx, bIDs)
|
||||
require.NoError(t, err, "retrieving recent backup details by ID")
|
||||
_, b, errs := suite.repo.BackupDetails(ctx, bIDs)
|
||||
require.NoError(t, errs.Err(), "retrieving recent backup details by ID")
|
||||
require.Empty(t, errs.Errs(), "retrieving recent backup details by ID")
|
||||
require.Equal(t, bIDs, string(b.ID), "repo details matches results id")
|
||||
|
||||
suite.backupOps[set] = string(b.ID)
|
||||
@ -396,8 +397,9 @@ func (suite *PreparedBackupExchangeIntegrationSuite) TestExchangeDetailsCmd() {
|
||||
bID := suite.backupOps[set]
|
||||
|
||||
// fetch the details from the repo first
|
||||
deets, _, err := suite.repo.BackupDetails(ctx, string(bID))
|
||||
require.NoError(t, err)
|
||||
deets, _, errs := suite.repo.BackupDetails(ctx, string(bID))
|
||||
require.NoError(t, errs.Err())
|
||||
require.Empty(t, errs.Errs())
|
||||
|
||||
cmd := tester.StubRootCmd(
|
||||
"backup", "details", "exchange",
|
||||
|
||||
@ -223,33 +223,13 @@ func (suite *ExchangeSuite) TestExchangeBackupDetailsSelectors() {
|
||||
ctx,
|
||||
test.BackupGetter,
|
||||
"backup-ID",
|
||||
test.Opts,
|
||||
)
|
||||
assert.NoError(t, err)
|
||||
|
||||
test.Opts)
|
||||
assert.NoError(t, err, "failure")
|
||||
assert.ElementsMatch(t, test.Expected, output.Entries)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func (suite *ExchangeSuite) TestExchangeBackupDetailsSelectorsBadBackupID() {
|
||||
t := suite.T()
|
||||
ctx, flush := tester.NewContext()
|
||||
backupGetter := &testdata.MockBackupGetter{}
|
||||
|
||||
defer flush()
|
||||
|
||||
output, err := runDetailsExchangeCmd(
|
||||
ctx,
|
||||
backupGetter,
|
||||
"backup-ID",
|
||||
utils.ExchangeOpts{},
|
||||
)
|
||||
assert.Error(t, err)
|
||||
|
||||
assert.Empty(t, output)
|
||||
}
|
||||
|
||||
func (suite *ExchangeSuite) TestExchangeBackupDetailsSelectorsBadFormats() {
|
||||
ctx, flush := tester.NewContext()
|
||||
defer flush()
|
||||
@ -260,10 +240,8 @@ func (suite *ExchangeSuite) TestExchangeBackupDetailsSelectorsBadFormats() {
|
||||
ctx,
|
||||
test.BackupGetter,
|
||||
"backup-ID",
|
||||
test.Opts,
|
||||
)
|
||||
|
||||
assert.Error(t, err)
|
||||
test.Opts)
|
||||
assert.Error(t, err, "failure")
|
||||
assert.Empty(t, output)
|
||||
})
|
||||
}
|
||||
|
||||
@ -12,10 +12,11 @@ import (
|
||||
"github.com/alcionai/corso/src/cli/options"
|
||||
. "github.com/alcionai/corso/src/cli/print"
|
||||
"github.com/alcionai/corso/src/cli/utils"
|
||||
"github.com/alcionai/corso/src/internal/kopia"
|
||||
"github.com/alcionai/corso/src/internal/data"
|
||||
"github.com/alcionai/corso/src/internal/model"
|
||||
"github.com/alcionai/corso/src/pkg/backup"
|
||||
"github.com/alcionai/corso/src/pkg/backup/details"
|
||||
"github.com/alcionai/corso/src/pkg/fault"
|
||||
"github.com/alcionai/corso/src/pkg/path"
|
||||
"github.com/alcionai/corso/src/pkg/repository"
|
||||
"github.com/alcionai/corso/src/pkg/selectors"
|
||||
@ -195,20 +196,23 @@ func createOneDriveCmd(cmd *cobra.Command, args []string) error {
|
||||
|
||||
sel := oneDriveBackupCreateSelectors(user)
|
||||
|
||||
users, err := m365.UserPNs(ctx, acct)
|
||||
// TODO: log/print recoverable errors
|
||||
errs := fault.New(false)
|
||||
|
||||
users, err := m365.UserPNs(ctx, acct, errs)
|
||||
if err != nil {
|
||||
return Only(ctx, errors.Wrap(err, "Failed to retrieve M365 users"))
|
||||
}
|
||||
|
||||
var (
|
||||
errs *multierror.Error
|
||||
bIDs []model.StableID
|
||||
merrs *multierror.Error
|
||||
bIDs []model.StableID
|
||||
)
|
||||
|
||||
for _, discSel := range sel.SplitByResourceOwner(users) {
|
||||
bo, err := r.NewBackup(ctx, discSel.Selector)
|
||||
if err != nil {
|
||||
errs = multierror.Append(errs, errors.Wrapf(
|
||||
merrs = multierror.Append(merrs, errors.Wrapf(
|
||||
err,
|
||||
"Failed to initialize OneDrive backup for user %s",
|
||||
discSel.DiscreteOwner,
|
||||
@ -219,7 +223,7 @@ func createOneDriveCmd(cmd *cobra.Command, args []string) error {
|
||||
|
||||
err = bo.Run(ctx)
|
||||
if err != nil {
|
||||
errs = multierror.Append(errs, errors.Wrapf(
|
||||
merrs = multierror.Append(merrs, errors.Wrapf(
|
||||
err,
|
||||
"Failed to run OneDrive backup for user %s",
|
||||
discSel.DiscreteOwner,
|
||||
@ -231,14 +235,15 @@ func createOneDriveCmd(cmd *cobra.Command, args []string) error {
|
||||
bIDs = append(bIDs, bo.Results.BackupID)
|
||||
}
|
||||
|
||||
bups, err := r.Backups(ctx, bIDs)
|
||||
if err != nil {
|
||||
return Only(ctx, errors.Wrap(err, "Unable to retrieve backup results from storage"))
|
||||
bups, ferrs := r.Backups(ctx, bIDs)
|
||||
// TODO: print/log recoverable errors
|
||||
if ferrs.Err() != nil {
|
||||
return Only(ctx, errors.Wrap(ferrs.Err(), "Unable to retrieve backup results from storage"))
|
||||
}
|
||||
|
||||
backup.PrintAll(ctx, bups)
|
||||
|
||||
if e := errs.ErrorOrNil(); e != nil {
|
||||
if e := merrs.ErrorOrNil(); e != nil {
|
||||
return Only(ctx, e)
|
||||
}
|
||||
|
||||
@ -293,7 +298,7 @@ func listOneDriveCmd(cmd *cobra.Command, args []string) error {
|
||||
if len(backupID) > 0 {
|
||||
b, err := r.Backup(ctx, model.StableID(backupID))
|
||||
if err != nil {
|
||||
if errors.Is(err, kopia.ErrNotFound) {
|
||||
if errors.Is(err, data.ErrNotFound) {
|
||||
return Only(ctx, errors.Errorf("No backup exists with the id %s", backupID))
|
||||
}
|
||||
|
||||
@ -378,6 +383,8 @@ func detailsOneDriveCmd(cmd *cobra.Command, args []string) error {
|
||||
}
|
||||
|
||||
// runDetailsOneDriveCmd actually performs the lookup in backup details.
|
||||
// the fault.Errors return is always non-nil. Callers should check if
|
||||
// errs.Err() == nil.
|
||||
func runDetailsOneDriveCmd(
|
||||
ctx context.Context,
|
||||
r repository.BackupGetter,
|
||||
@ -388,19 +395,20 @@ func runDetailsOneDriveCmd(
|
||||
return nil, err
|
||||
}
|
||||
|
||||
d, _, err := r.BackupDetails(ctx, backupID)
|
||||
if err != nil {
|
||||
if errors.Is(err, kopia.ErrNotFound) {
|
||||
d, _, errs := r.BackupDetails(ctx, backupID)
|
||||
// TODO: log/track recoverable errors
|
||||
if errs.Err() != nil {
|
||||
if errors.Is(errs.Err(), data.ErrNotFound) {
|
||||
return nil, errors.Errorf("no backup exists with the id %s", backupID)
|
||||
}
|
||||
|
||||
return nil, errors.Wrap(err, "Failed to get backup details in the repository")
|
||||
return nil, errors.Wrap(errs.Err(), "Failed to get backup details in the repository")
|
||||
}
|
||||
|
||||
sel := utils.IncludeOneDriveRestoreDataSelectors(opts)
|
||||
utils.FilterOneDriveRestoreInfoSelectors(sel, opts)
|
||||
|
||||
return sel.Reduce(ctx, d), nil
|
||||
return sel.Reduce(ctx, d, errs), nil
|
||||
}
|
||||
|
||||
// `corso backup delete onedrive [<flag>...]`
|
||||
|
||||
@ -98,10 +98,8 @@ func (suite *OneDriveSuite) TestOneDriveBackupDetailsSelectors() {
|
||||
ctx,
|
||||
test.BackupGetter,
|
||||
"backup-ID",
|
||||
test.Opts,
|
||||
)
|
||||
test.Opts)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.ElementsMatch(t, test.Expected, output.Entries)
|
||||
})
|
||||
}
|
||||
@ -117,9 +115,7 @@ func (suite *OneDriveSuite) TestOneDriveBackupDetailsSelectorsBadFormats() {
|
||||
ctx,
|
||||
test.BackupGetter,
|
||||
"backup-ID",
|
||||
test.Opts,
|
||||
)
|
||||
|
||||
test.Opts)
|
||||
assert.Error(t, err)
|
||||
assert.Empty(t, output)
|
||||
})
|
||||
|
||||
@ -14,10 +14,11 @@ import (
|
||||
"github.com/alcionai/corso/src/cli/utils"
|
||||
"github.com/alcionai/corso/src/internal/connector"
|
||||
"github.com/alcionai/corso/src/internal/connector/graph"
|
||||
"github.com/alcionai/corso/src/internal/kopia"
|
||||
"github.com/alcionai/corso/src/internal/data"
|
||||
"github.com/alcionai/corso/src/internal/model"
|
||||
"github.com/alcionai/corso/src/pkg/backup"
|
||||
"github.com/alcionai/corso/src/pkg/backup/details"
|
||||
"github.com/alcionai/corso/src/pkg/fault"
|
||||
"github.com/alcionai/corso/src/pkg/path"
|
||||
"github.com/alcionai/corso/src/pkg/repository"
|
||||
"github.com/alcionai/corso/src/pkg/selectors"
|
||||
@ -210,7 +211,10 @@ func createSharePointCmd(cmd *cobra.Command, args []string) error {
|
||||
|
||||
defer utils.CloseRepo(ctx, r)
|
||||
|
||||
gc, err := connector.NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), acct, connector.Sites)
|
||||
// TODO: log/print recoverable errors
|
||||
errs := fault.New(false)
|
||||
|
||||
gc, err := connector.NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), acct, connector.Sites, errs)
|
||||
if err != nil {
|
||||
return Only(ctx, errors.Wrap(err, "Failed to connect to Microsoft APIs"))
|
||||
}
|
||||
@ -221,14 +225,14 @@ func createSharePointCmd(cmd *cobra.Command, args []string) error {
|
||||
}
|
||||
|
||||
var (
|
||||
errs *multierror.Error
|
||||
bIDs []model.StableID
|
||||
merrs *multierror.Error
|
||||
bIDs []model.StableID
|
||||
)
|
||||
|
||||
for _, discSel := range sel.SplitByResourceOwner(gc.GetSiteIDs()) {
|
||||
bo, err := r.NewBackup(ctx, discSel.Selector)
|
||||
if err != nil {
|
||||
errs = multierror.Append(errs, errors.Wrapf(
|
||||
merrs = multierror.Append(merrs, errors.Wrapf(
|
||||
err,
|
||||
"Failed to initialize SharePoint backup for site %s",
|
||||
discSel.DiscreteOwner,
|
||||
@ -239,7 +243,7 @@ func createSharePointCmd(cmd *cobra.Command, args []string) error {
|
||||
|
||||
err = bo.Run(ctx)
|
||||
if err != nil {
|
||||
errs = multierror.Append(errs, errors.Wrapf(
|
||||
merrs = multierror.Append(merrs, errors.Wrapf(
|
||||
err,
|
||||
"Failed to run SharePoint backup for site %s",
|
||||
discSel.DiscreteOwner,
|
||||
@ -251,21 +255,22 @@ func createSharePointCmd(cmd *cobra.Command, args []string) error {
|
||||
bIDs = append(bIDs, bo.Results.BackupID)
|
||||
}
|
||||
|
||||
bups, err := r.Backups(ctx, bIDs)
|
||||
if err != nil {
|
||||
return Only(ctx, errors.Wrap(err, "Unable to retrieve backup results from storage"))
|
||||
bups, ferrs := r.Backups(ctx, bIDs)
|
||||
// TODO: print/log recoverable errors
|
||||
if ferrs.Err() != nil {
|
||||
return Only(ctx, errors.Wrap(ferrs.Err(), "Unable to retrieve backup results from storage"))
|
||||
}
|
||||
|
||||
backup.PrintAll(ctx, bups)
|
||||
|
||||
if e := errs.ErrorOrNil(); e != nil {
|
||||
if e := merrs.ErrorOrNil(); e != nil {
|
||||
return Only(ctx, e)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func validateSharePointBackupCreateFlags(sites, weburls, data []string) error {
|
||||
func validateSharePointBackupCreateFlags(sites, weburls, cats []string) error {
|
||||
if len(sites) == 0 && len(weburls) == 0 {
|
||||
return errors.New(
|
||||
"requires one or more --" +
|
||||
@ -275,7 +280,7 @@ func validateSharePointBackupCreateFlags(sites, weburls, data []string) error {
|
||||
)
|
||||
}
|
||||
|
||||
for _, d := range data {
|
||||
for _, d := range cats {
|
||||
if d != dataLibraries && d != dataPages {
|
||||
return errors.New(
|
||||
d + " is an unrecognized data type; either " + dataLibraries + "or " + dataPages,
|
||||
@ -289,7 +294,7 @@ func validateSharePointBackupCreateFlags(sites, weburls, data []string) error {
|
||||
// TODO: users might specify a data type, this only supports AllData().
|
||||
func sharePointBackupCreateSelectors(
|
||||
ctx context.Context,
|
||||
sites, weburls, data []string,
|
||||
sites, weburls, cats []string,
|
||||
gc *connector.GraphConnector,
|
||||
) (*selectors.SharePointBackup, error) {
|
||||
if len(sites) == 0 && len(weburls) == 0 {
|
||||
@ -314,19 +319,22 @@ func sharePointBackupCreateSelectors(
|
||||
}
|
||||
}
|
||||
|
||||
union, err := gc.UnionSiteIDsAndWebURLs(ctx, sites, weburls)
|
||||
// TODO: log/print recoverable errors
|
||||
errs := fault.New(false)
|
||||
|
||||
union, err := gc.UnionSiteIDsAndWebURLs(ctx, sites, weburls, errs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
sel := selectors.NewSharePointBackup(union)
|
||||
if len(data) == 0 {
|
||||
if len(cats) == 0 {
|
||||
sel.Include(sel.AllData())
|
||||
|
||||
return sel, nil
|
||||
}
|
||||
|
||||
for _, d := range data {
|
||||
for _, d := range cats {
|
||||
switch d {
|
||||
case dataLibraries:
|
||||
sel.Include(sel.Libraries(selectors.Any()))
|
||||
@ -371,7 +379,7 @@ func listSharePointCmd(cmd *cobra.Command, args []string) error {
|
||||
if len(backupID) > 0 {
|
||||
b, err := r.Backup(ctx, model.StableID(backupID))
|
||||
if err != nil {
|
||||
if errors.Is(err, kopia.ErrNotFound) {
|
||||
if errors.Is(err, data.ErrNotFound) {
|
||||
return Only(ctx, errors.Errorf("No backup exists with the id %s", backupID))
|
||||
}
|
||||
|
||||
@ -497,6 +505,8 @@ func detailsSharePointCmd(cmd *cobra.Command, args []string) error {
|
||||
}
|
||||
|
||||
// runDetailsSharePointCmd actually performs the lookup in backup details.
|
||||
// the fault.Errors return is always non-nil. Callers should check if
|
||||
// errs.Err() == nil.
|
||||
func runDetailsSharePointCmd(
|
||||
ctx context.Context,
|
||||
r repository.BackupGetter,
|
||||
@ -507,17 +517,18 @@ func runDetailsSharePointCmd(
|
||||
return nil, err
|
||||
}
|
||||
|
||||
d, _, err := r.BackupDetails(ctx, backupID)
|
||||
if err != nil {
|
||||
if errors.Is(err, kopia.ErrNotFound) {
|
||||
d, _, errs := r.BackupDetails(ctx, backupID)
|
||||
// TODO: log/track recoverable errors
|
||||
if errs.Err() != nil {
|
||||
if errors.Is(errs.Err(), data.ErrNotFound) {
|
||||
return nil, errors.Errorf("no backup exists with the id %s", backupID)
|
||||
}
|
||||
|
||||
return nil, errors.Wrap(err, "Failed to get backup details in the repository")
|
||||
return nil, errors.Wrap(errs.Err(), "Failed to get backup details in the repository")
|
||||
}
|
||||
|
||||
sel := utils.IncludeSharePointRestoreDataSelectors(opts)
|
||||
utils.FilterSharePointRestoreInfoSelectors(sel, opts)
|
||||
|
||||
return sel.Reduce(ctx, d), nil
|
||||
return sel.Reduce(ctx, d, errs), nil
|
||||
}
|
||||
|
||||
@ -213,10 +213,8 @@ func (suite *SharePointSuite) TestSharePointBackupDetailsSelectors() {
|
||||
ctx,
|
||||
test.BackupGetter,
|
||||
"backup-ID",
|
||||
test.Opts,
|
||||
)
|
||||
test.Opts)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.ElementsMatch(t, test.Expected, output.Entries)
|
||||
})
|
||||
}
|
||||
@ -232,9 +230,7 @@ func (suite *SharePointSuite) TestSharePointBackupDetailsSelectorsBadFormats() {
|
||||
ctx,
|
||||
test.BackupGetter,
|
||||
"backup-ID",
|
||||
test.Opts,
|
||||
)
|
||||
|
||||
test.Opts)
|
||||
assert.Error(t, err)
|
||||
assert.Empty(t, output)
|
||||
})
|
||||
|
||||
@ -10,7 +10,7 @@ import (
|
||||
. "github.com/alcionai/corso/src/cli/print"
|
||||
"github.com/alcionai/corso/src/cli/utils"
|
||||
"github.com/alcionai/corso/src/internal/common"
|
||||
"github.com/alcionai/corso/src/internal/kopia"
|
||||
"github.com/alcionai/corso/src/internal/data"
|
||||
"github.com/alcionai/corso/src/pkg/control"
|
||||
"github.com/alcionai/corso/src/pkg/repository"
|
||||
)
|
||||
@ -228,7 +228,7 @@ func restoreExchangeCmd(cmd *cobra.Command, args []string) error {
|
||||
|
||||
ds, err := ro.Run(ctx)
|
||||
if err != nil {
|
||||
if errors.Is(err, kopia.ErrNotFound) {
|
||||
if errors.Is(err, data.ErrNotFound) {
|
||||
return Only(ctx, errors.Errorf("Backup or backup details missing for id %s", backupID))
|
||||
}
|
||||
|
||||
|
||||
@ -110,8 +110,10 @@ func (suite *RestoreExchangeIntegrationSuite) SetupSuite() {
|
||||
// sanity check, ensure we can find the backup and its details immediately
|
||||
_, err = suite.repo.Backup(ctx, bop.Results.BackupID)
|
||||
require.NoError(t, err, "retrieving recent backup by ID")
|
||||
_, _, err = suite.repo.BackupDetails(ctx, string(bop.Results.BackupID))
|
||||
require.NoError(t, err, "retrieving recent backup details by ID")
|
||||
|
||||
_, _, errs := suite.repo.BackupDetails(ctx, string(bop.Results.BackupID))
|
||||
require.NoError(t, errs.Err(), "retrieving recent backup details by ID")
|
||||
require.Empty(t, errs.Errs(), "retrieving recent backup details by ID")
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -10,7 +10,7 @@ import (
|
||||
. "github.com/alcionai/corso/src/cli/print"
|
||||
"github.com/alcionai/corso/src/cli/utils"
|
||||
"github.com/alcionai/corso/src/internal/common"
|
||||
"github.com/alcionai/corso/src/internal/kopia"
|
||||
"github.com/alcionai/corso/src/internal/data"
|
||||
"github.com/alcionai/corso/src/pkg/control"
|
||||
"github.com/alcionai/corso/src/pkg/repository"
|
||||
)
|
||||
@ -171,7 +171,7 @@ func restoreOneDriveCmd(cmd *cobra.Command, args []string) error {
|
||||
|
||||
ds, err := ro.Run(ctx)
|
||||
if err != nil {
|
||||
if errors.Is(err, kopia.ErrNotFound) {
|
||||
if errors.Is(err, data.ErrNotFound) {
|
||||
return Only(ctx, errors.Errorf("Backup or backup details missing for id %s", backupID))
|
||||
}
|
||||
|
||||
|
||||
@ -10,7 +10,7 @@ import (
|
||||
. "github.com/alcionai/corso/src/cli/print"
|
||||
"github.com/alcionai/corso/src/cli/utils"
|
||||
"github.com/alcionai/corso/src/internal/common"
|
||||
"github.com/alcionai/corso/src/internal/kopia"
|
||||
"github.com/alcionai/corso/src/internal/data"
|
||||
"github.com/alcionai/corso/src/pkg/control"
|
||||
"github.com/alcionai/corso/src/pkg/repository"
|
||||
)
|
||||
@ -182,7 +182,7 @@ func restoreSharePointCmd(cmd *cobra.Command, args []string) error {
|
||||
|
||||
ds, err := ro.Run(ctx)
|
||||
if err != nil {
|
||||
if errors.Is(err, kopia.ErrNotFound) {
|
||||
if errors.Is(err, data.ErrNotFound) {
|
||||
return Only(ctx, errors.Errorf("Backup or backup details missing for id %s", backupID))
|
||||
}
|
||||
|
||||
|
||||
src/cli/utils/testdata/opts.go (vendored): 14 changed lines
@ -10,6 +10,7 @@ import (
|
||||
"github.com/alcionai/corso/src/internal/model"
|
||||
"github.com/alcionai/corso/src/pkg/backup"
|
||||
"github.com/alcionai/corso/src/pkg/backup/details"
|
||||
"github.com/alcionai/corso/src/pkg/fault"
|
||||
"github.com/alcionai/corso/src/pkg/selectors"
|
||||
"github.com/alcionai/corso/src/pkg/selectors/testdata"
|
||||
"github.com/alcionai/corso/src/pkg/store"
|
||||
@ -497,8 +498,11 @@ func (MockBackupGetter) Backup(
|
||||
return nil, errors.New("unexpected call to mock")
|
||||
}
|
||||
|
||||
func (MockBackupGetter) Backups(context.Context, []model.StableID) ([]*backup.Backup, error) {
|
||||
return nil, errors.New("unexpected call to mock")
|
||||
func (MockBackupGetter) Backups(
|
||||
context.Context,
|
||||
[]model.StableID,
|
||||
) ([]*backup.Backup, *fault.Errors) {
|
||||
return nil, fault.New(false).Fail(errors.New("unexpected call to mock"))
|
||||
}
|
||||
|
||||
func (MockBackupGetter) BackupsByTag(
|
||||
@ -511,10 +515,10 @@ func (MockBackupGetter) BackupsByTag(
|
||||
func (bg *MockBackupGetter) BackupDetails(
|
||||
ctx context.Context,
|
||||
backupID string,
|
||||
) (*details.Details, *backup.Backup, error) {
|
||||
) (*details.Details, *backup.Backup, *fault.Errors) {
|
||||
if bg == nil {
|
||||
return testdata.GetDetailsSet(), nil, nil
|
||||
return testdata.GetDetailsSet(), nil, fault.New(true)
|
||||
}
|
||||
|
||||
return nil, nil, errors.New("unexpected call to mock")
|
||||
return nil, nil, fault.New(false).Fail(errors.New("unexpected call to mock"))
|
||||
}
|
||||
|
||||
@ -20,6 +20,7 @@ import (
|
||||
"github.com/alcionai/corso/src/pkg/backup/details"
|
||||
"github.com/alcionai/corso/src/pkg/control"
|
||||
"github.com/alcionai/corso/src/pkg/credentials"
|
||||
"github.com/alcionai/corso/src/pkg/fault"
|
||||
"github.com/alcionai/corso/src/pkg/path"
|
||||
"github.com/alcionai/corso/src/pkg/selectors"
|
||||
)
|
||||
@ -114,7 +115,10 @@ func getGCAndVerifyUser(ctx context.Context, userID string) (*connector.GraphCon
|
||||
}
|
||||
|
||||
// build a graph connector
|
||||
gc, err := connector.NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), acct, connector.Users)
|
||||
// TODO: log/print recoverable errors
|
||||
errs := fault.New(false)
|
||||
|
||||
gc, err := connector.NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), acct, connector.Users, errs)
|
||||
if err != nil {
|
||||
return nil, account.Account{}, errors.Wrap(err, "connecting to graph api")
|
||||
}
|
||||
@ -152,8 +156,8 @@ func buildCollections(
|
||||
tenant, user string,
|
||||
dest control.RestoreDestination,
|
||||
colls []collection,
|
||||
) ([]data.Collection, error) {
|
||||
collections := make([]data.Collection, 0, len(colls))
|
||||
) ([]data.RestoreCollection, error) {
|
||||
collections := make([]data.RestoreCollection, 0, len(colls))
|
||||
|
||||
for _, c := range colls {
|
||||
pth, err := toDataLayerPath(
|
||||
@ -175,7 +179,7 @@ func buildCollections(
|
||||
mc.Data[i] = c.items[i].data
|
||||
}
|
||||
|
||||
collections = append(collections, mc)
|
||||
collections = append(collections, data.NotFoundRestoreCollection{Collection: mc})
|
||||
}
|
||||
|
||||
return collections, nil
|
||||
|
||||
@ -23,6 +23,7 @@ import (
|
||||
"github.com/alcionai/corso/src/pkg/account"
|
||||
"github.com/alcionai/corso/src/pkg/backup/details"
|
||||
"github.com/alcionai/corso/src/pkg/credentials"
|
||||
"github.com/alcionai/corso/src/pkg/fault"
|
||||
"github.com/alcionai/corso/src/pkg/logger"
|
||||
"github.com/alcionai/corso/src/pkg/path"
|
||||
)
|
||||
@ -178,7 +179,10 @@ func getGC(ctx context.Context) (*connector.GraphConnector, account.M365Config,
|
||||
return nil, m365Cfg, Only(ctx, errors.Wrap(err, "finding m365 account details"))
|
||||
}
|
||||
|
||||
gc, err := connector.NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), acct, connector.Users)
|
||||
// TODO: log/print recoverable errors
|
||||
errs := fault.New(false)
|
||||
|
||||
gc, err := connector.NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), acct, connector.Users, errs)
|
||||
if err != nil {
|
||||
return nil, m365Cfg, Only(ctx, errors.Wrap(err, "connecting to graph API"))
|
||||
}
|
||||
|
||||
@ -17,6 +17,7 @@ import (
|
||||
"github.com/alcionai/corso/src/internal/connector/onedrive"
|
||||
"github.com/alcionai/corso/src/pkg/account"
|
||||
"github.com/alcionai/corso/src/pkg/credentials"
|
||||
"github.com/alcionai/corso/src/pkg/fault"
|
||||
"github.com/alcionai/corso/src/pkg/logger"
|
||||
)
|
||||
|
||||
@ -260,7 +261,10 @@ func getGC(ctx context.Context) (*connector.GraphConnector, error) {
|
||||
}
|
||||
|
||||
// build a graph connector
|
||||
gc, err := connector.NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), acct, connector.Users)
|
||||
// TODO: log/print recoverable errors
|
||||
errs := fault.New(false)
|
||||
|
||||
gc, err := connector.NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), acct, connector.Users, errs)
|
||||
if err != nil {
|
||||
return nil, Only(ctx, errors.Wrap(err, "connecting to graph api"))
|
||||
}
|
||||
|
||||
src/go.mod: 10 changed lines

@@ -4,8 +4,8 @@ go 1.19

require (
	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0
-	github.com/alcionai/clues v0.0.0-20230131232239-cee86233b005
-	github.com/aws/aws-sdk-go v1.44.192
+	github.com/alcionai/clues v0.0.0-20230202001016-cbda58c9de9e
+	github.com/aws/aws-sdk-go v1.44.197
	github.com/aws/aws-xray-sdk-go v1.8.0
	github.com/google/uuid v1.3.0
	github.com/hashicorp/go-multierror v1.1.1

@@ -26,7 +26,7 @@ require (
	github.com/stretchr/testify v1.8.1
	github.com/tidwall/pretty v1.2.1
	github.com/tomlazar/table v0.1.2
-	github.com/vbauerster/mpb/v8 v8.1.4
+	github.com/vbauerster/mpb/v8 v8.1.6
	go.uber.org/zap v1.24.0
	golang.org/x/exp v0.0.0-20221217163422-3c43f8badb15
	golang.org/x/tools v0.5.0

@@ -97,7 +97,7 @@ require (
	github.com/prometheus/client_model v0.3.0 // indirect
	github.com/prometheus/common v0.37.0 // indirect
	github.com/prometheus/procfs v0.8.0 // indirect
-	github.com/rivo/uniseg v0.2.0 // indirect
+	github.com/rivo/uniseg v0.4.3 // indirect
	github.com/rs/xid v1.4.0 // indirect
	github.com/segmentio/backo-go v1.0.0 // indirect
	github.com/sirupsen/logrus v1.9.0 // indirect

@@ -114,7 +114,7 @@ require (
	golang.org/x/mod v0.7.0 // indirect
	golang.org/x/net v0.5.0 // indirect
	golang.org/x/sync v0.1.0 // indirect
-	golang.org/x/sys v0.4.0 // indirect
+	golang.org/x/sys v0.5.0 // indirect
	golang.org/x/text v0.6.0 // indirect
	google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef // indirect
	google.golang.org/grpc v1.52.0 // indirect
src/go.sum: 19 changed lines
@ -52,8 +52,8 @@ github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1o
|
||||
github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
|
||||
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8=
|
||||
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo=
|
||||
github.com/alcionai/clues v0.0.0-20230131232239-cee86233b005 h1:eTgICcmcydEWG8J+hgnidf0pzujV3Gd2XqmknykZkzA=
|
||||
github.com/alcionai/clues v0.0.0-20230131232239-cee86233b005/go.mod h1:UlAs8jkWIpsOMakiC8NxPgQQVQRdvyf1hYMszlYYLb4=
|
||||
github.com/alcionai/clues v0.0.0-20230202001016-cbda58c9de9e h1:KMRGDB9lh0wC/WYVmQ28MJ07qiHszCSH2PRwkw2YElM=
|
||||
github.com/alcionai/clues v0.0.0-20230202001016-cbda58c9de9e/go.mod h1:UlAs8jkWIpsOMakiC8NxPgQQVQRdvyf1hYMszlYYLb4=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
@ -62,8 +62,8 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5
|
||||
github.com/alessio/shellescape v1.4.1 h1:V7yhSDDn8LP4lc4jS8pFkt0zCnzVJlG5JXy9BVKJUX0=
|
||||
github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY3JY=
|
||||
github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
|
||||
github.com/aws/aws-sdk-go v1.44.192 h1:KL54vCxRd5v5XBGjnF3FelzXXwl+aWHDmDTihFmRNgM=
|
||||
github.com/aws/aws-sdk-go v1.44.192/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
|
||||
github.com/aws/aws-sdk-go v1.44.197 h1:pkg/NZsov9v/CawQWy+qWVzJMIZRQypCtYjUBXFomF8=
|
||||
github.com/aws/aws-sdk-go v1.44.197/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
|
||||
github.com/aws/aws-xray-sdk-go v1.8.0 h1:0xncHZ588wB/geLjbM/esoW3FOEThWy2TJyb4VXfLFY=
|
||||
github.com/aws/aws-xray-sdk-go v1.8.0/go.mod h1:7LKe47H+j3evfvS1+q0wzpoaGXGrF3mUsfM+thqVO+A=
|
||||
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
|
||||
@ -342,8 +342,9 @@ github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1
|
||||
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||
github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
|
||||
github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
|
||||
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
|
||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
github.com/rivo/uniseg v0.4.3 h1:utMvzDsuh3suAEnhH0RdHmoPbU648o6CvXxTx4SBMOw=
|
||||
github.com/rivo/uniseg v0.4.3/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k=
|
||||
github.com/rs/xid v1.4.0 h1:qd7wPTDkN6KQx2VmMBLrpHkiyQwgFXRnkOLacUiaSNY=
|
||||
@ -402,8 +403,8 @@ github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyC
|
||||
github.com/valyala/fasthttp v1.34.0 h1:d3AAQJ2DRcxJYHm7OXNXtXt2as1vMDfxeIcFvhmGGm4=
|
||||
github.com/valyala/fasthttp v1.34.0/go.mod h1:epZA5N+7pY6ZaEKRmstzOuYJx9HI8DI1oaCGZpdH4h0=
|
||||
github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc=
|
||||
github.com/vbauerster/mpb/v8 v8.1.4 h1:MOcLTIbbAA892wVjRiuFHa1nRlNvifQMDVh12Bq/xIs=
|
||||
github.com/vbauerster/mpb/v8 v8.1.4/go.mod h1:2fRME8lCLU9gwJwghZb1bO9A3Plc8KPeQ/ayGj+Ek4I=
|
||||
github.com/vbauerster/mpb/v8 v8.1.6 h1:EswHDkAsy4OQ7QBAmU1MUPz4vHzl6KlINjlh7vJoxvY=
|
||||
github.com/vbauerster/mpb/v8 v8.1.6/go.mod h1:O9/Wl8X9dUbR63tZ41MLIAxrtNfwlpwUhGkeYugUPW8=
|
||||
github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c h1:3lbZUMbMiGUW/LMkfsEABsc5zNT9+b1CvsJx47JzJ8g=
|
||||
github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c/go.mod h1:UrdRz5enIKZ63MEE3IF9l2/ebyx59GyGgPi+tICQdmM=
|
||||
github.com/yosida95/uritemplate/v3 v3.0.2 h1:Ed3Oyj9yrmi9087+NczuL5BwkIc4wvTb5zIM+UJPGz4=
|
||||
@ -610,8 +611,8 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18=
|
||||
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
|
||||
@@ -5,6 +5,8 @@ import (
	"io"
)

+// TODO: Remove in favor of clues.Stack()
+
// Err provides boiler-plate functions that other types of errors can use
// if they wish to be compared with `errors.As()`. This struct ensures that
// stack traces are printed when requested (if present) and that Err
src/internal/common/ptr/pointer.go (new file, 14 added lines)

@@ -0,0 +1,14 @@
+package ptr
+
+// Val is a helper method for unwrapping strings.
+// Microsoft Graph saves many variables as string pointers.
+// The function safely checks whether the pointer is nil prior to
+// dereferencing it. If the pointer is nil,
+// an empty string is returned.
+func Val(ptr *string) string {
+	if ptr == nil {
+		return ""
+	}
+
+	return *ptr
+}
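A brief usage sketch for the new helper; the sample values here are illustrative only.

```go
package main

import (
	"fmt"

	"github.com/alcionai/corso/src/internal/common/ptr"
)

func main() {
	subject := "weekly report" // stand-in for a Graph SDK *string field

	// Graph models expose many values as *string; ptr.Val returns "" for a
	// nil pointer instead of panicking on a dereference.
	fmt.Println(ptr.Val(&subject)) // "weekly report"
	fmt.Println(ptr.Val(nil))      // ""
}
```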
@@ -1,5 +1,6 @@
package common

+// TODO: can be replaced with slices.Contains()
func ContainsString(super []string, sub string) bool {
	for _, s := range super {
		if s == sub {
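The TODO above points at the generic replacement; a sketch using golang.org/x/exp/slices, which go.mod already lists (Go 1.19 predates the standard-library slices package). The sample inputs are illustrative.

```go
package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	users := []string{"alice@example.com", "bob@example.com"}

	// Equivalent to common.ContainsString(users, x) for string slices.
	fmt.Println(slices.Contains(users, "bob@example.com"))   // true
	fmt.Println(slices.Contains(users, "carol@example.com")) // false
}
```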
@@ -4,6 +4,7 @@ import (
	"regexp"
	"time"

+	"github.com/alcionai/clues"
	"github.com/pkg/errors"
)

@@ -85,7 +86,10 @@ var (
	}
)

-var ErrNoTimeString = errors.New("no substring contains a known time format")
+var (
+	ErrNoTimeString        = errors.New("no substring contains a known time format")
+	errParsingStringToTime = errors.New("parsing string as time.Time")
+)

// Now produces the current time as a string in the standard format.
func Now() string {

@@ -132,7 +136,7 @@ func FormatLegacyTime(t time.Time) string {
// the provided string. Always returns a UTC timezone value.
func ParseTime(s string) (time.Time, error) {
	if len(s) == 0 {
-		return time.Time{}, errors.New("cannot interpret an empty string as time.Time")
+		return time.Time{}, clues.Stack(errParsingStringToTime, errors.New("empty string"))
	}

	for _, form := range formats {

@@ -142,14 +146,14 @@ func ParseTime(s string) (time.Time, error) {
		}
	}

-	return time.Time{}, errors.New("unable to parse time string: " + s)
+	return time.Time{}, clues.Stack(errParsingStringToTime, errors.New(s))
}

// ExtractTime greedily retrieves a timestamp substring from the provided string.
// returns ErrNoTimeString if no match is found.
func ExtractTime(s string) (time.Time, error) {
	if len(s) == 0 {
-		return time.Time{}, errors.New("cannot extract time.Time from an empty string")
+		return time.Time{}, clues.Stack(errParsingStringToTime, errors.New("empty string"))
	}

	for _, re := range regexes {

@@ -159,5 +163,5 @@ func ExtractTime(s string) (time.Time, error) {
		}
	}

-	return time.Time{}, errors.Wrap(ErrNoTimeString, s)
+	return time.Time{}, clues.Stack(ErrNoTimeString, errors.New(s))
}
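Because ErrNoTimeString stays exported and is now wrapped with clues.Stack rather than errors.Wrap, callers should still be able to match it with errors.Is. An illustrative check, assuming clues.Stack supports error unwrapping and that these helpers live in the internal common package (as the common.FormatSimpleDateTime calls elsewhere in this diff suggest):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/alcionai/corso/src/internal/common"
)

func main() {
	// "not a timestamp" is an illustrative input containing no known time format.
	_, err := common.ExtractTime("not a timestamp")
	if errors.Is(err, common.ErrNoTimeString) {
		fmt.Println("no timestamp substring found:", err)
	}
}
```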
@ -16,6 +16,8 @@ import (
|
||||
"github.com/alcionai/corso/src/internal/connector/support"
|
||||
"github.com/alcionai/corso/src/internal/data"
|
||||
D "github.com/alcionai/corso/src/internal/diagnostics"
|
||||
"github.com/alcionai/corso/src/pkg/account"
|
||||
"github.com/alcionai/corso/src/pkg/backup/details"
|
||||
"github.com/alcionai/corso/src/pkg/control"
|
||||
"github.com/alcionai/corso/src/pkg/logger"
|
||||
"github.com/alcionai/corso/src/pkg/path"
|
||||
@ -34,9 +36,9 @@ import (
|
||||
func (gc *GraphConnector) DataCollections(
|
||||
ctx context.Context,
|
||||
sels selectors.Selector,
|
||||
metadata []data.Collection,
|
||||
metadata []data.RestoreCollection,
|
||||
ctrlOpts control.Options,
|
||||
) ([]data.Collection, map[string]struct{}, error) {
|
||||
) ([]data.BackupCollection, map[string]struct{}, error) {
|
||||
ctx, end := D.Span(ctx, "gc:dataCollections", D.Index("service", sels.Service.String()))
|
||||
defer end()
|
||||
|
||||
@ -51,7 +53,7 @@ func (gc *GraphConnector) DataCollections(
|
||||
}
|
||||
|
||||
if !serviceEnabled {
|
||||
return []data.Collection{}, nil, nil
|
||||
return []data.BackupCollection{}, nil, nil
|
||||
}
|
||||
|
||||
switch sels.Service {
|
||||
@ -90,7 +92,7 @@ func (gc *GraphConnector) DataCollections(
|
||||
ctx,
|
||||
gc.itemClient,
|
||||
sels,
|
||||
gc.credentials.AzureTenantID,
|
||||
gc.credentials,
|
||||
gc.Service,
|
||||
gc,
|
||||
ctrlOpts)
|
||||
@ -182,9 +184,9 @@ func (fm odFolderMatcher) Matches(dir string) bool {
|
||||
func (gc *GraphConnector) OneDriveDataCollections(
|
||||
ctx context.Context,
|
||||
selector selectors.Selector,
|
||||
metadata []data.Collection,
|
||||
metadata []data.RestoreCollection,
|
||||
ctrlOpts control.Options,
|
||||
) ([]data.Collection, map[string]struct{}, error) {
|
||||
) ([]data.BackupCollection, map[string]struct{}, error) {
|
||||
odb, err := selector.ToOneDriveBackup()
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "oneDriveDataCollection: parsing selector")
|
||||
@ -192,7 +194,7 @@ func (gc *GraphConnector) OneDriveDataCollections(
|
||||
|
||||
var (
|
||||
user = selector.DiscreteOwner
|
||||
collections = []data.Collection{}
|
||||
collections = []data.BackupCollection{}
|
||||
allExcludes = map[string]struct{}{}
|
||||
errs error
|
||||
)
|
||||
@ -226,3 +228,46 @@ func (gc *GraphConnector) OneDriveDataCollections(
|
||||
|
||||
return collections, allExcludes, errs
|
||||
}
|
||||
|
||||
// RestoreDataCollections restores data from the specified collections
|
||||
// into M365 using the GraphAPI.
|
||||
// SideEffect: gc.status is updated at the completion of operation
|
||||
func (gc *GraphConnector) RestoreDataCollections(
|
||||
ctx context.Context,
|
||||
backupVersion int,
|
||||
acct account.Account,
|
||||
selector selectors.Selector,
|
||||
dest control.RestoreDestination,
|
||||
opts control.Options,
|
||||
dcs []data.RestoreCollection,
|
||||
) (*details.Details, error) {
|
||||
ctx, end := D.Span(ctx, "connector:restore")
|
||||
defer end()
|
||||
|
||||
var (
|
||||
status *support.ConnectorOperationStatus
|
||||
err error
|
||||
deets = &details.Builder{}
|
||||
)
|
||||
|
||||
creds, err := acct.M365Config()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "malformed azure credentials")
|
||||
}
|
||||
|
||||
switch selector.Service {
|
||||
case selectors.ServiceExchange:
|
||||
status, err = exchange.RestoreExchangeDataCollections(ctx, creds, gc.Service, dest, dcs, deets)
|
||||
case selectors.ServiceOneDrive:
|
||||
status, err = onedrive.RestoreCollections(ctx, backupVersion, gc.Service, dest, opts, dcs, deets)
|
||||
case selectors.ServiceSharePoint:
|
||||
status, err = sharepoint.RestoreCollections(ctx, backupVersion, creds, gc.Service, dest, dcs, deets)
|
||||
default:
|
||||
err = errors.Errorf("restore data from service %s not supported", selector.Service.String())
|
||||
}
|
||||
|
||||
gc.incrementAwaitingMessages()
|
||||
gc.UpdateStatus(status)
|
||||
|
||||
return deets.Details(), err
|
||||
}
|
||||
|
||||
@ -249,7 +249,7 @@ func (suite *ConnectorDataCollectionIntegrationSuite) TestSharePointDataCollecti
|
||||
ctx,
|
||||
graph.HTTPClient(graph.NoTimeout()),
|
||||
test.getSelector(),
|
||||
connector.credentials.AzureTenantID,
|
||||
connector.credentials,
|
||||
connector.Service,
|
||||
connector,
|
||||
control.Options{})
|
||||
|
||||
@ -22,7 +22,7 @@ func TestBetaUnitSuite(t *testing.T) {
|
||||
|
||||
func (suite *BetaUnitSuite) TestBetaService_Adapter() {
|
||||
t := suite.T()
|
||||
a := tester.NewM365Account(t)
|
||||
a := tester.NewMockM365Account(t)
|
||||
m365, err := a.M365Config()
|
||||
require.NoError(t, err)
|
||||
|
||||
|
||||
@ -291,6 +291,8 @@ func (c Contacts) Serialize(
|
||||
return nil, fmt.Errorf("expected Contactable, got %T", item)
|
||||
}
|
||||
|
||||
ctx = clues.Add(ctx, "item_id", *contact.GetId())
|
||||
|
||||
var (
|
||||
err error
|
||||
writer = kioser.NewJsonSerializationWriter()
|
||||
@ -299,7 +301,7 @@ func (c Contacts) Serialize(
|
||||
defer writer.Close()
|
||||
|
||||
if err = writer.WriteObjectValue("", contact); err != nil {
|
||||
return nil, support.SetNonRecoverableError(errors.Wrap(err, itemID))
|
||||
return nil, clues.Stack(err).WithClues(ctx)
|
||||
}
|
||||
|
||||
bs, err := writer.GetSerializedContent()
|
||||
|
||||
@ -340,6 +340,8 @@ func (c Events) Serialize(
|
||||
return nil, fmt.Errorf("expected Eventable, got %T", item)
|
||||
}
|
||||
|
||||
ctx = clues.Add(ctx, "item_id", *event.GetId())
|
||||
|
||||
var (
|
||||
err error
|
||||
writer = kioser.NewJsonSerializationWriter()
|
||||
@ -348,7 +350,7 @@ func (c Events) Serialize(
|
||||
defer writer.Close()
|
||||
|
||||
if err = writer.WriteObjectValue("", event); err != nil {
|
||||
return nil, support.SetNonRecoverableError(errors.Wrap(err, itemID))
|
||||
return nil, clues.Stack(err).WithClues(ctx)
|
||||
}
|
||||
|
||||
bs, err := writer.GetSerializedContent()
|
||||
|
||||
@ -321,6 +321,8 @@ func (c Mail) Serialize(
|
||||
return nil, fmt.Errorf("expected Messageable, got %T", item)
|
||||
}
|
||||
|
||||
ctx = clues.Add(ctx, "item_id", *msg.GetId())
|
||||
|
||||
var (
|
||||
err error
|
||||
writer = kioser.NewJsonSerializationWriter()
|
||||
@ -329,7 +331,7 @@ func (c Mail) Serialize(
|
||||
defer writer.Close()
|
||||
|
||||
if err = writer.WriteObjectValue("", msg); err != nil {
|
||||
return nil, support.SetNonRecoverableError(errors.Wrap(err, itemID))
|
||||
return nil, clues.Stack(err).WithClues(ctx)
|
||||
}
|
||||
|
||||
bs, err := writer.GetSerializedContent()
|
||||
|
||||
@ -8,6 +8,7 @@ import (
|
||||
"github.com/microsoftgraph/msgraph-sdk-go/models"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/alcionai/corso/src/internal/common/ptr"
|
||||
"github.com/alcionai/corso/src/internal/connector/support"
|
||||
"github.com/alcionai/corso/src/internal/connector/uploadsession"
|
||||
"github.com/alcionai/corso/src/pkg/logger"
|
||||
@ -63,18 +64,16 @@ func uploadAttachment(
|
||||
|
||||
attachment, err = support.ToItemAttachment(attachment)
|
||||
if err != nil {
|
||||
name := ""
|
||||
if prev.GetName() != nil {
|
||||
name = *prev.GetName()
|
||||
}
|
||||
name := ptr.Val(prev.GetName())
|
||||
msg := "item attachment restore not supported for this type. skipping upload."
|
||||
|
||||
// TODO: Update to support PII protection
|
||||
logger.Ctx(ctx).Infow("item attachment uploads are not supported ",
|
||||
// TODO: (rkeepers) Update to support PII protection
|
||||
logger.Ctx(ctx).Infow(msg,
|
||||
"err", err,
|
||||
"attachment_name", name,
|
||||
"attachment_type", attachmentType,
|
||||
"internal_item_type", getItemAttachmentItemType(prev),
|
||||
"attachment_id", *prev.GetId(),
|
||||
"attachment_id", ptr.Val(prev.GetId()),
|
||||
)
|
||||
|
||||
return nil
|
||||
@ -128,9 +127,6 @@ func getItemAttachmentItemType(query models.Attachmentable) string {
|
||||
}
|
||||
|
||||
item := attachment.GetItem()
|
||||
if item.GetOdataType() == nil {
|
||||
return empty
|
||||
}
|
||||
|
||||
return *item.GetOdataType()
|
||||
return ptr.Val(item.GetOdataType())
|
||||
}
|
||||
|
||||
@ -63,7 +63,7 @@ type DeltaPath struct {
|
||||
// and path lookup maps.
|
||||
func parseMetadataCollections(
|
||||
ctx context.Context,
|
||||
colls []data.Collection,
|
||||
colls []data.RestoreCollection,
|
||||
) (CatDeltaPaths, error) {
|
||||
// cdp stores metadata
|
||||
cdp := CatDeltaPaths{
|
||||
@ -163,11 +163,11 @@ func parseMetadataCollections(
|
||||
func DataCollections(
|
||||
ctx context.Context,
|
||||
selector selectors.Selector,
|
||||
metadata []data.Collection,
|
||||
metadata []data.RestoreCollection,
|
||||
acct account.M365Config,
|
||||
su support.StatusUpdater,
|
||||
ctrlOpts control.Options,
|
||||
) ([]data.Collection, map[string]struct{}, error) {
|
||||
) ([]data.BackupCollection, map[string]struct{}, error) {
|
||||
eb, err := selector.ToExchangeBackup()
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "exchangeDataCollection: parsing selector")
|
||||
@ -175,7 +175,7 @@ func DataCollections(
|
||||
|
||||
var (
|
||||
user = selector.DiscreteOwner
|
||||
collections = []data.Collection{}
|
||||
collections = []data.BackupCollection{}
|
||||
errs error
|
||||
)
|
||||
|
||||
@ -231,10 +231,10 @@ func createCollections(
|
||||
dps DeltaPaths,
|
||||
ctrlOpts control.Options,
|
||||
su support.StatusUpdater,
|
||||
) ([]data.Collection, error) {
|
||||
) ([]data.BackupCollection, error) {
|
||||
var (
|
||||
errs *multierror.Error
|
||||
allCollections = make([]data.Collection, 0)
|
||||
allCollections = make([]data.BackupCollection, 0)
|
||||
ac = api.Client{Credentials: creds}
|
||||
category = scope.Category().PathType()
|
||||
)
|
||||
@ -245,7 +245,7 @@ func createCollections(
|
||||
}
|
||||
|
||||
// Create collection of ExchangeDataCollection
|
||||
collections := make(map[string]data.Collection)
|
||||
collections := make(map[string]data.BackupCollection)
|
||||
|
||||
qp := graph.QueryParams{
|
||||
Category: category,
|
||||
|
||||
@ -174,7 +174,9 @@ func (suite *DataCollectionsUnitSuite) TestParseMetadataCollections() {
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
cdps, err := parseMetadataCollections(ctx, []data.Collection{coll})
|
||||
cdps, err := parseMetadataCollections(ctx, []data.RestoreCollection{
|
||||
data.NotFoundRestoreCollection{Collection: coll},
|
||||
})
|
||||
test.expectError(t, err)
|
||||
|
||||
emails := cdps[path.EmailCategory]
|
||||
@ -335,7 +337,7 @@ func (suite *DataCollectionsIntegrationSuite) TestDelta() {
|
||||
require.NoError(t, err)
|
||||
assert.Less(t, 1, len(collections), "retrieved metadata and data collections")
|
||||
|
||||
var metadata data.Collection
|
||||
var metadata data.BackupCollection
|
||||
|
||||
for _, coll := range collections {
|
||||
if coll.FullPath().Service() == path.ExchangeMetadataService {
|
||||
@ -345,7 +347,9 @@ func (suite *DataCollectionsIntegrationSuite) TestDelta() {
|
||||
|
||||
require.NotNil(t, metadata, "collections contains a metadata collection")
|
||||
|
||||
cdps, err := parseMetadataCollections(ctx, []data.Collection{metadata})
|
||||
cdps, err := parseMetadataCollections(ctx, []data.RestoreCollection{
|
||||
data.NotFoundRestoreCollection{Collection: metadata},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
dps := cdps[test.scope.Category().PathType()]
|
||||
|
||||
@ -24,10 +24,10 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
_ data.Collection = &Collection{}
|
||||
_ data.Stream = &Stream{}
|
||||
_ data.StreamInfo = &Stream{}
|
||||
_ data.StreamModTime = &Stream{}
|
||||
_ data.BackupCollection = &Collection{}
|
||||
_ data.Stream = &Stream{}
|
||||
_ data.StreamInfo = &Stream{}
|
||||
_ data.StreamModTime = &Stream{}
|
||||
)
|
||||
|
||||
const (
|
||||
@ -107,7 +107,7 @@ func NewCollection(
|
||||
added: make(map[string]struct{}, 0),
|
||||
removed: make(map[string]struct{}, 0),
|
||||
prevPath: prev,
|
||||
state: stateOf(prev, curr),
|
||||
state: data.StateOf(prev, curr),
|
||||
statusUpdater: statusUpdater,
|
||||
user: user,
|
||||
items: items,
|
||||
@ -116,22 +116,6 @@ func NewCollection(
|
||||
return collection
|
||||
}
|
||||
|
||||
func stateOf(prev, curr path.Path) data.CollectionState {
|
||||
if curr == nil || len(curr.String()) == 0 {
|
||||
return data.DeletedState
|
||||
}
|
||||
|
||||
if prev == nil || len(prev.String()) == 0 {
|
||||
return data.NewState
|
||||
}
|
||||
|
||||
if curr.Folder() != prev.Folder() {
|
||||
return data.MovedState
|
||||
}
|
||||
|
||||
return data.NotMovedState
|
||||
}
|
||||
|
||||
// Items utility function to asynchronously execute process to fill data channel with
|
||||
// M365 exchange objects and returns the data channel
|
||||
func (col *Collection) Items() <-chan data.Stream {
|
||||
|
||||
@ -12,10 +12,8 @@ import (
|
||||
|
||||
"github.com/alcionai/corso/src/internal/common"
|
||||
"github.com/alcionai/corso/src/internal/connector/graph"
|
||||
"github.com/alcionai/corso/src/internal/data"
|
||||
"github.com/alcionai/corso/src/internal/tester"
|
||||
"github.com/alcionai/corso/src/pkg/backup/details"
|
||||
"github.com/alcionai/corso/src/pkg/control"
|
||||
"github.com/alcionai/corso/src/pkg/path"
|
||||
)
|
||||
|
||||
@ -118,59 +116,6 @@ func (suite *ExchangeDataCollectionSuite) TestExchangeDataCollection_NewExchange
|
||||
suite.Equal(fullPath, edc.FullPath())
|
||||
}
|
||||
|
||||
func (suite *ExchangeDataCollectionSuite) TestNewCollection_state() {
|
||||
fooP, err := path.Builder{}.
|
||||
Append("foo").
|
||||
ToDataLayerExchangePathForCategory("t", "u", path.EmailCategory, false)
|
||||
require.NoError(suite.T(), err)
|
||||
barP, err := path.Builder{}.
|
||||
Append("bar").
|
||||
ToDataLayerExchangePathForCategory("t", "u", path.EmailCategory, false)
|
||||
require.NoError(suite.T(), err)
|
||||
|
||||
table := []struct {
|
||||
name string
|
||||
prev path.Path
|
||||
curr path.Path
|
||||
expect data.CollectionState
|
||||
}{
|
||||
{
|
||||
name: "new",
|
||||
curr: fooP,
|
||||
expect: data.NewState,
|
||||
},
|
||||
{
|
||||
name: "not moved",
|
||||
prev: fooP,
|
||||
curr: fooP,
|
||||
expect: data.NotMovedState,
|
||||
},
|
||||
{
|
||||
name: "moved",
|
||||
prev: fooP,
|
||||
curr: barP,
|
||||
expect: data.MovedState,
|
||||
},
|
||||
{
|
||||
name: "deleted",
|
||||
prev: fooP,
|
||||
expect: data.DeletedState,
|
||||
},
|
||||
}
|
||||
for _, test := range table {
|
||||
suite.T().Run(test.name, func(t *testing.T) {
|
||||
c := NewCollection(
|
||||
"u",
|
||||
test.curr, test.prev,
|
||||
0,
|
||||
&mockItemer{}, nil,
|
||||
control.Options{},
|
||||
false)
|
||||
assert.Equal(t, test.expect, c.State())
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func (suite *ExchangeDataCollectionSuite) TestGetItemWithRetries() {
|
||||
table := []struct {
|
||||
name string
|
||||
|
||||
@ -130,12 +130,13 @@ type containerDeleter interface {
|
||||
|
||||
// TestRestoreExchangeObject verifies path.Category usage for restored objects
|
||||
func (suite *ExchangeRestoreSuite) TestRestoreExchangeObject() {
|
||||
a := tester.NewM365Account(suite.T())
|
||||
t := suite.T()
|
||||
a := tester.NewM365Account(t)
|
||||
m365, err := a.M365Config()
|
||||
require.NoError(suite.T(), err)
|
||||
require.NoError(t, err)
|
||||
|
||||
service, err := createService(m365)
|
||||
require.NoError(suite.T(), err)
|
||||
require.NoError(t, err)
|
||||
|
||||
deleters := map[path.CategoryType]containerDeleter{
|
||||
path.EmailCategory: suite.ac.Mail(),
|
||||
@ -187,6 +188,63 @@ func (suite *ExchangeRestoreSuite) TestRestoreExchangeObject() {
|
||||
return *folder.GetId()
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Test Mail: Item Attachment_Mail",
|
||||
bytes: mockconnector.GetMockMessageWithItemAttachmentMail("Mail Item Attachment"),
|
||||
category: path.EmailCategory,
|
||||
destination: func(t *testing.T, ctx context.Context) string {
|
||||
folderName := "TestRestoreMailItemAttachment: " + common.FormatSimpleDateTime(now)
|
||||
folder, err := suite.ac.Mail().CreateMailFolder(ctx, userID, folderName)
|
||||
require.NoError(t, err)
|
||||
|
||||
return *folder.GetId()
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Test Mail: Hydrated Item Attachment Mail",
|
||||
bytes: mockconnector.GetMockMessageWithNestedItemAttachmentMail(t,
|
||||
mockconnector.GetMockMessageBytes("Basic Item Attachment"),
|
||||
"Mail Item Attachment",
|
||||
),
|
||||
category: path.EmailCategory,
|
||||
destination: func(t *testing.T, ctx context.Context) string {
|
||||
folderName := "TestRestoreMailBasicItemAttachment: " + common.FormatSimpleDateTime(now)
|
||||
folder, err := suite.ac.Mail().CreateMailFolder(ctx, userID, folderName)
|
||||
require.NoError(t, err)
|
||||
|
||||
return *folder.GetId()
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Test Mail: Hydrated Item Attachment Mail One Attach",
|
||||
bytes: mockconnector.GetMockMessageWithNestedItemAttachmentMail(t,
|
||||
mockconnector.GetMockMessageWithDirectAttachment("Item Attachment Included"),
|
||||
"Mail Item Attachment",
|
||||
),
|
||||
category: path.EmailCategory,
|
||||
destination: func(t *testing.T, ctx context.Context) string {
|
||||
folderName := "ItemMailAttachmentwAttachment " + common.FormatSimpleDateTime(now)
|
||||
folder, err := suite.ac.Mail().CreateMailFolder(ctx, userID, folderName)
|
||||
require.NoError(t, err)
|
||||
|
||||
return *folder.GetId()
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Test Mail: Item Attachment_Contact",
|
||||
bytes: mockconnector.GetMockMessageWithNestedItemAttachmentContact(t,
|
||||
mockconnector.GetMockContactBytes("Victor"),
|
||||
"Contact Item Attachment",
|
||||
),
|
||||
category: path.EmailCategory,
|
||||
destination: func(t *testing.T, ctx context.Context) string {
|
||||
folderName := "ItemMailAttachment_Contact " + common.FormatSimpleDateTime(now)
|
||||
folder, err := suite.ac.Mail().CreateMailFolder(ctx, userID, folderName)
|
||||
require.NoError(t, err)
|
||||
|
||||
return *folder.GetId()
|
||||
},
|
||||
},
|
||||
{ // Restore will upload the Message without uploading the attachment
|
||||
name: "Test Mail: Item Attachment_NestedEvent",
|
||||
bytes: mockconnector.GetMockMessageWithNestedItemAttachmentEvent("Nested Item Attachment"),
|
||||
@ -291,6 +349,7 @@ func (suite *ExchangeRestoreSuite) TestRestoreExchangeObject() {
|
||||
)
|
||||
assert.NoError(t, err, support.ConnectorStackErrorTrace(err))
|
||||
assert.NotNil(t, info, "item info was not populated")
|
||||
assert.NotNil(t, deleters)
|
||||
assert.NoError(t, deleters[test.category].DeleteContainer(ctx, userID, destination))
|
||||
})
|
||||
}
|
||||
|
||||
@ -25,14 +25,14 @@ type addedAndRemovedItemIDsGetter interface {
|
||||
|
||||
// filterContainersAndFillCollections is a utility function
|
||||
// that places the M365 object ids belonging to specific directories
|
||||
// into a Collection. Messages outside of those directories are omitted.
|
||||
// into a BackupCollection. Messages outside of those directories are omitted.
|
||||
// @param collections is filled during this function.
|
||||
// Supports all exchange applications: Contacts, Events, and Mail
|
||||
func filterContainersAndFillCollections(
|
||||
ctx context.Context,
|
||||
qp graph.QueryParams,
|
||||
getter addedAndRemovedItemIDsGetter,
|
||||
collections map[string]data.Collection,
|
||||
collections map[string]data.BackupCollection,
|
||||
statusUpdater support.StatusUpdater,
|
||||
resolver graph.ContainerResolver,
|
||||
scope selectors.ExchangeScope,
|
||||
|
||||
@ -280,7 +280,7 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections() {
|
||||
ctx, flush := tester.NewContext()
|
||||
defer flush()
|
||||
|
||||
collections := map[string]data.Collection{}
|
||||
collections := map[string]data.BackupCollection{}
|
||||
|
||||
err := filterContainersAndFillCollections(
|
||||
ctx,
|
||||
@ -433,7 +433,7 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections_repea
|
||||
resolver = newMockResolver(container1)
|
||||
)
|
||||
|
||||
collections := map[string]data.Collection{}
|
||||
collections := map[string]data.BackupCollection{}
|
||||
|
||||
err := filterContainersAndFillCollections(
|
||||
ctx,
|
||||
@ -785,7 +785,7 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections_incre
|
||||
ctx, flush := tester.NewContext()
|
||||
defer flush()
|
||||
|
||||
collections := map[string]data.Collection{}
|
||||
collections := map[string]data.BackupCollection{}
|
||||
|
||||
err := filterContainersAndFillCollections(
|
||||
ctx,
|
||||
|
||||
@ -283,6 +283,20 @@ func SendMailToBackStore(
|
||||
|
||||
for _, attachment := range attached {
|
||||
if err := uploadAttachment(ctx, uploader, attachment); err != nil {
|
||||
if attachment.GetOdataType() != nil &&
|
||||
*attachment.GetOdataType() == "#microsoft.graph.itemAttachment" {
|
||||
var name string
|
||||
if attachment.GetName() != nil {
|
||||
name = *attachment.GetName()
|
||||
}
|
||||
|
||||
logger.Ctx(ctx).Infow(
|
||||
"item attachment upload not successful. content not accepted by M365 server",
|
||||
"Attachment Name", name)
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
errs = support.WrapAndAppend(
|
||||
fmt.Sprintf("uploading attachment for message %s: %s",
|
||||
id, support.ConnectorStackErrorTrace(err)),
|
||||
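The new branch above decides whether to skip a failed upload by inspecting the attachment's OData discriminator. As a sketch (not code from this repository), that check could be factored into a small predicate:

```go
// Sketch: not code from this diff; isolates the discriminator check.
package exchange

import "github.com/microsoftgraph/msgraph-sdk-go/models"

const itemAttachmentOdataType = "#microsoft.graph.itemAttachment"

// isItemAttachment reports whether the attachment is an item attachment,
// i.e. a nested message, event, or contact rather than a plain file.
func isItemAttachment(a models.Attachmentable) bool {
	t := a.GetOdataType()
	return t != nil && *t == itemAttachmentOdataType
}
```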
@ -297,7 +311,7 @@ func SendMailToBackStore(
|
||||
return errs
|
||||
}
|
||||
|
||||
// RestoreExchangeDataCollections restores M365 objects in data.Collection to MSFT
|
||||
// RestoreExchangeDataCollections restores M365 objects in data.RestoreCollection to MSFT
|
||||
// store through GraphAPI.
|
||||
// @param dest: container destination to M365
|
||||
func RestoreExchangeDataCollections(
|
||||
@ -305,7 +319,7 @@ func RestoreExchangeDataCollections(
|
||||
creds account.M365Config,
|
||||
gs graph.Servicer,
|
||||
dest control.RestoreDestination,
|
||||
dcs []data.Collection,
|
||||
dcs []data.RestoreCollection,
|
||||
deets *details.Builder,
|
||||
) (*support.ConnectorOperationStatus, error) {
|
||||
var (
|
||||
@ -364,7 +378,7 @@ func RestoreExchangeDataCollections(
|
||||
func restoreCollection(
|
||||
ctx context.Context,
|
||||
gs graph.Servicer,
|
||||
dc data.Collection,
|
||||
dc data.RestoreCollection,
|
||||
folderID string,
|
||||
policy control.CollisionPolicy,
|
||||
deets *details.Builder,
|
||||
|
||||
@ -18,6 +18,12 @@ type BetaClientSuite struct {
|
||||
}
|
||||
|
||||
func TestBetaClientSuite(t *testing.T) {
|
||||
tester.RunOnAny(
|
||||
t,
|
||||
tester.CorsoCITests,
|
||||
tester.CorsoGraphConnectorTests,
|
||||
)
|
||||
|
||||
suite.Run(t, new(BetaClientSuite))
|
||||
}
|
||||
|
||||
|
||||
@ -21,6 +21,7 @@ const (
|
||||
func (i HorizontalSectionLayoutType) String() string {
|
||||
return []string{"none", "oneColumn", "twoColumns", "threeColumns", "oneThirdLeftColumn", "oneThirdRightColumn", "fullWidth", "unknownFutureValue"}[i]
|
||||
}
|
||||
|
||||
func ParseHorizontalSectionLayoutType(v string) (interface{}, error) {
|
||||
result := NONE_HORIZONTALSECTIONLAYOUTTYPE
|
||||
switch v {
|
||||
@ -45,6 +46,7 @@ func ParseHorizontalSectionLayoutType(v string) (interface{}, error) {
|
||||
}
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
func SerializeHorizontalSectionLayoutType(values []HorizontalSectionLayoutType) []string {
|
||||
result := make([]string, len(values))
|
||||
for i, v := range values {
|
||||
|
||||
@ -17,6 +17,7 @@ const (
|
||||
func (i PageLayoutType) String() string {
|
||||
return []string{"microsoftReserved", "article", "home", "unknownFutureValue"}[i]
|
||||
}
|
||||
|
||||
func ParsePageLayoutType(v string) (interface{}, error) {
|
||||
result := MICROSOFTRESERVED_PAGELAYOUTTYPE
|
||||
switch v {
|
||||
@ -33,6 +34,7 @@ func ParsePageLayoutType(v string) (interface{}, error) {
|
||||
}
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
func SerializePageLayoutType(values []PageLayoutType) []string {
|
||||
result := make([]string, len(values))
|
||||
for i, v := range values {
|
||||
|
||||
@ -17,6 +17,7 @@ const (
|
||||
func (i PagePromotionType) String() string {
|
||||
return []string{"microsoftReserved", "page", "newsPost", "unknownFutureValue"}[i]
|
||||
}
|
||||
|
||||
func ParsePagePromotionType(v string) (interface{}, error) {
|
||||
result := MICROSOFTRESERVED_PAGEPROMOTIONTYPE
|
||||
switch v {
|
||||
@ -33,6 +34,7 @@ func ParsePagePromotionType(v string) (interface{}, error) {
|
||||
}
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
func SerializePagePromotionType(values []PagePromotionType) []string {
|
||||
result := make([]string, len(values))
|
||||
for i, v := range values {
|
||||
|
||||
@ -18,6 +18,7 @@ const (
|
||||
func (i SectionEmphasisType) String() string {
|
||||
return []string{"none", "neutral", "soft", "strong", "unknownFutureValue"}[i]
|
||||
}
|
||||
|
||||
func ParseSectionEmphasisType(v string) (interface{}, error) {
|
||||
result := NONE_SECTIONEMPHASISTYPE
|
||||
switch v {
|
||||
@ -36,6 +37,7 @@ func ParseSectionEmphasisType(v string) (interface{}, error) {
|
||||
}
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
func SerializeSectionEmphasisType(values []SectionEmphasisType) []string {
|
||||
result := make([]string, len(values))
|
||||
for i, v := range values {
|
||||
|
||||
@ -16,6 +16,7 @@ const (
|
||||
func (i SiteAccessType) String() string {
|
||||
return []string{"block", "full", "limited"}[i]
|
||||
}
|
||||
|
||||
func ParseSiteAccessType(v string) (interface{}, error) {
|
||||
result := BLOCK_SITEACCESSTYPE
|
||||
switch v {
|
||||
@ -30,6 +31,7 @@ func ParseSiteAccessType(v string) (interface{}, error) {
|
||||
}
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
func SerializeSiteAccessType(values []SiteAccessType) []string {
|
||||
result := make([]string, len(values))
|
||||
for i, v := range values {
|
||||
|
||||
@ -25,6 +25,7 @@ const (
|
||||
func (i SiteSecurityLevel) String() string {
|
||||
return []string{"userDefined", "low", "mediumLow", "medium", "mediumHigh", "high"}[i]
|
||||
}
|
||||
|
||||
func ParseSiteSecurityLevel(v string) (interface{}, error) {
|
||||
result := USERDEFINED_SITESECURITYLEVEL
|
||||
switch v {
|
||||
@ -45,6 +46,7 @@ func ParseSiteSecurityLevel(v string) (interface{}, error) {
|
||||
}
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
func SerializeSiteSecurityLevel(values []SiteSecurityLevel) []string {
|
||||
result := make([]string, len(values))
|
||||
for i, v := range values {
|
||||
|
||||
@ -18,6 +18,7 @@ const (
|
||||
func (i TitleAreaLayoutType) String() string {
|
||||
return []string{"imageAndTitle", "plain", "colorBlock", "overlap", "unknownFutureValue"}[i]
|
||||
}
|
||||
|
||||
func ParseTitleAreaLayoutType(v string) (interface{}, error) {
|
||||
result := IMAGEANDTITLE_TITLEAREALAYOUTTYPE
|
||||
switch v {
|
||||
@ -36,6 +37,7 @@ func ParseTitleAreaLayoutType(v string) (interface{}, error) {
|
||||
}
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
func SerializeTitleAreaLayoutType(values []TitleAreaLayoutType) []string {
|
||||
result := make([]string, len(values))
|
||||
for i, v := range values {
|
||||
|
||||
@ -16,6 +16,7 @@ const (
|
||||
func (i TitleAreaTextAlignmentType) String() string {
|
||||
return []string{"left", "center", "unknownFutureValue"}[i]
|
||||
}
|
||||
|
||||
func ParseTitleAreaTextAlignmentType(v string) (interface{}, error) {
|
||||
result := LEFT_TITLEAREATEXTALIGNMENTTYPE
|
||||
switch v {
|
||||
@ -30,6 +31,7 @@ func ParseTitleAreaTextAlignmentType(v string) (interface{}, error) {
|
||||
}
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
func SerializeTitleAreaTextAlignmentType(values []TitleAreaTextAlignmentType) []string {
|
||||
result := make([]string, len(values))
|
||||
for i, v := range values {
|
||||
|
||||
@ -2,6 +2,7 @@ package graph
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"os"
|
||||
|
||||
@ -176,3 +177,50 @@ func hasErrorCode(err error, codes ...string) bool {
|
||||
|
||||
return slices.Contains(codes, *oDataError.GetError().GetCode())
|
||||
}
|
||||
|
||||
// ErrData is a helper function that extracts ODataError metadata from
|
||||
// the error. If the error is not an ODataError type, returns an empty
|
||||
// slice. The returned value is guaranteed to be an even-length pairing
|
||||
// of key, value tuples.
|
||||
func ErrData(e error) []any {
|
||||
result := make([]any, 0)
|
||||
|
||||
if e == nil {
|
||||
return result
|
||||
}
|
||||
|
||||
odErr, ok := e.(odataerrors.ODataErrorable)
|
||||
if !ok {
|
||||
return result
|
||||
}
|
||||
|
||||
// Get MainError
|
||||
mainErr := odErr.GetError()
|
||||
|
||||
result = appendIf(result, "odataerror_code", mainErr.GetCode())
|
||||
result = appendIf(result, "odataerror_message", mainErr.GetMessage())
|
||||
result = appendIf(result, "odataerror_target", mainErr.GetTarget())
|
||||
|
||||
for i, d := range mainErr.GetDetails() {
|
||||
pfx := fmt.Sprintf("odataerror_details_%d_", i)
|
||||
result = appendIf(result, pfx+"code", d.GetCode())
|
||||
result = appendIf(result, pfx+"message", d.GetMessage())
|
||||
result = appendIf(result, pfx+"target", d.GetTarget())
|
||||
}
|
||||
|
||||
inner := mainErr.GetInnererror()
|
||||
if inner != nil {
|
||||
result = appendIf(result, "odataerror_inner_cli_req_id", inner.GetClientRequestId())
|
||||
result = appendIf(result, "odataerror_inner_req_id", inner.GetRequestId())
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
func appendIf(a []any, k string, v *string) []any {
|
||||
if v == nil {
|
||||
return a
|
||||
}
|
||||
|
||||
return append(a, k, *v)
|
||||
}
|
||||
|
||||
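Since `ErrData` guarantees an even-length key/value slice, its result can be expanded directly into variadic structured-logging or `clues` calls, as `getResources` does later in this diff with `WithAll`. A hedged example using the sugared logger style already shown above (`Errorw` assumed to share the `Infow` signature):

```go
// Sketch: a caller-side illustration, not code from this diff.
package example

import (
	"context"

	"github.com/alcionai/corso/src/internal/connector/graph"
	"github.com/alcionai/corso/src/pkg/logger"
)

// logGraphFailure splices ErrData's even-length key/value pairs straight
// into a variadic keysAndValues parameter.
func logGraphFailure(ctx context.Context, err error) {
	if err == nil {
		return
	}

	// Errorw is assumed to mirror the Infow call shown earlier in this diff.
	logger.Ctx(ctx).Errorw("graph request failed", graph.ErrData(err)...)
}
```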
@ -14,8 +14,8 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
_ data.Collection = &MetadataCollection{}
|
||||
_ data.Stream = &MetadataItem{}
|
||||
_ data.BackupCollection = &MetadataCollection{}
|
||||
_ data.Stream = &MetadataItem{}
|
||||
)
|
||||
|
||||
// MetadataCollection is a simple collection that assumes all items to be
|
||||
@ -67,7 +67,7 @@ func MakeMetadataCollection(
|
||||
cat path.CategoryType,
|
||||
metadata []MetadataCollectionEntry,
|
||||
statusUpdater support.StatusUpdater,
|
||||
) (data.Collection, error) {
|
||||
) (data.BackupCollection, error) {
|
||||
if len(metadata) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
@ -8,7 +8,6 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
|
||||
"github.com/alcionai/corso/src/internal/connector/support"
|
||||
"github.com/microsoft/kiota-abstractions-go/serialization"
|
||||
ka "github.com/microsoft/kiota-authentication-azure-go"
|
||||
khttp "github.com/microsoft/kiota-http-go"
|
||||
@ -16,6 +15,7 @@ import (
|
||||
msgraphgocore "github.com/microsoftgraph/msgraph-sdk-go-core"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/alcionai/corso/src/internal/connector/support"
|
||||
"github.com/alcionai/corso/src/pkg/account"
|
||||
"github.com/alcionai/corso/src/pkg/logger"
|
||||
"github.com/alcionai/corso/src/pkg/path"
|
||||
|
||||
@ -4,11 +4,13 @@ package connector
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"runtime/trace"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/alcionai/clues"
|
||||
"github.com/microsoft/kiota-abstractions-go/serialization"
|
||||
msgraphgocore "github.com/microsoftgraph/msgraph-sdk-go-core"
|
||||
"github.com/microsoftgraph/msgraph-sdk-go/models"
|
||||
@ -17,18 +19,13 @@ import (
|
||||
|
||||
"github.com/alcionai/corso/src/internal/connector/discovery"
|
||||
"github.com/alcionai/corso/src/internal/connector/discovery/api"
|
||||
"github.com/alcionai/corso/src/internal/connector/exchange"
|
||||
"github.com/alcionai/corso/src/internal/connector/graph"
|
||||
"github.com/alcionai/corso/src/internal/connector/onedrive"
|
||||
"github.com/alcionai/corso/src/internal/connector/sharepoint"
|
||||
"github.com/alcionai/corso/src/internal/connector/support"
|
||||
"github.com/alcionai/corso/src/internal/data"
|
||||
D "github.com/alcionai/corso/src/internal/diagnostics"
|
||||
"github.com/alcionai/corso/src/pkg/account"
|
||||
"github.com/alcionai/corso/src/pkg/backup/details"
|
||||
"github.com/alcionai/corso/src/pkg/control"
|
||||
"github.com/alcionai/corso/src/pkg/fault"
|
||||
"github.com/alcionai/corso/src/pkg/filters"
|
||||
"github.com/alcionai/corso/src/pkg/selectors"
|
||||
)
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
@ -71,10 +68,11 @@ func NewGraphConnector(
|
||||
itemClient *http.Client,
|
||||
acct account.Account,
|
||||
r resource,
|
||||
errs *fault.Errors,
|
||||
) (*GraphConnector, error) {
|
||||
m365, err := acct.M365Config()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "retrieving m365 account configuration")
|
||||
return nil, clues.Wrap(err, "retrieving m365 account configuration").WithClues(ctx)
|
||||
}
|
||||
|
||||
gc := GraphConnector{
|
||||
@ -87,12 +85,12 @@ func NewGraphConnector(
|
||||
|
||||
gc.Service, err = gc.createService()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "creating service connection")
|
||||
return nil, clues.Wrap(err, "creating service connection").WithClues(ctx)
|
||||
}
|
||||
|
||||
gc.Owners, err = api.NewClient(m365)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "creating api client")
|
||||
return nil, clues.Wrap(err, "creating api client").WithClues(ctx)
|
||||
}
|
||||
|
||||
// TODO(ashmrtn): When selectors only encapsulate a single resource owner that
|
||||
@ -106,7 +104,7 @@ func NewGraphConnector(
|
||||
}
|
||||
|
||||
if r == AllResources || r == Sites {
|
||||
if err = gc.setTenantSites(ctx); err != nil {
|
||||
if err = gc.setTenantSites(ctx, errs); err != nil {
|
||||
return nil, errors.Wrap(err, "retrieveing tenant site list")
|
||||
}
|
||||
}
|
||||
@ -162,7 +160,7 @@ func (gc *GraphConnector) GetUsersIds() []string {
|
||||
// setTenantSites queries the M365 to identify the sites in the
|
||||
// workspace. The sites field is updated during this method
|
||||
// iff the returned error is nil.
|
||||
func (gc *GraphConnector) setTenantSites(ctx context.Context) error {
|
||||
func (gc *GraphConnector) setTenantSites(ctx context.Context, errs *fault.Errors) error {
|
||||
gc.Sites = map[string]string{}
|
||||
|
||||
ctx, end := D.Span(ctx, "gc:setTenantSites")
|
||||
@ -175,7 +173,7 @@ func (gc *GraphConnector) setTenantSites(ctx context.Context) error {
|
||||
sharepoint.GetAllSitesForTenant,
|
||||
models.CreateSiteCollectionResponseFromDiscriminatorValue,
|
||||
identifySite,
|
||||
)
|
||||
errs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -194,22 +192,23 @@ const personalSitePath = "sharepoint.com/personal/"
|
||||
func identifySite(item any) (string, string, error) {
|
||||
m, ok := item.(models.Siteable)
|
||||
if !ok {
|
||||
return "", "", errors.New("iteration retrieved non-Site item")
|
||||
return "", "", clues.New("iteration retrieved non-Site item").With("item_type", fmt.Sprintf("%T", item))
|
||||
}
|
||||
|
||||
if m.GetName() == nil {
|
||||
// the built-in site at "https://{tenant-domain}/search" never has a name.
|
||||
if m.GetWebUrl() != nil && strings.HasSuffix(*m.GetWebUrl(), "/search") {
|
||||
return "", "", errKnownSkippableCase
|
||||
// TODO: pii siteID, on this and all following cases
|
||||
return "", "", clues.Stack(errKnownSkippableCase).With("site_id", *m.GetId())
|
||||
}
|
||||
|
||||
return "", "", errors.Errorf("no name for Site: %s", *m.GetId())
|
||||
return "", "", clues.New("site has no name").With("site_id", *m.GetId())
|
||||
}
|
||||
|
||||
// personal (ie: oneDrive) sites have to be filtered out server-side.
|
||||
url := m.GetWebUrl()
|
||||
if url != nil && strings.Contains(*url, personalSitePath) {
|
||||
return "", "", errKnownSkippableCase
|
||||
return "", "", clues.Stack(errKnownSkippableCase).With("site_id", *m.GetId())
|
||||
}
|
||||
|
||||
return *m.GetWebUrl(), *m.GetId(), nil
|
||||
@ -230,9 +229,13 @@ func (gc *GraphConnector) GetSiteIDs() []string {
|
||||
// each element in the url must fully match. Ex: the webURL value "foo" will match "www.ex.com/foo",
|
||||
// but not match "www.ex.com/foobar".
|
||||
// The returned IDs are reduced to a set of unique values.
|
||||
func (gc *GraphConnector) UnionSiteIDsAndWebURLs(ctx context.Context, ids, urls []string) ([]string, error) {
|
||||
func (gc *GraphConnector) UnionSiteIDsAndWebURLs(
|
||||
ctx context.Context,
|
||||
ids, urls []string,
|
||||
errs *fault.Errors,
|
||||
) ([]string, error) {
|
||||
if len(gc.Sites) == 0 {
|
||||
if err := gc.setTenantSites(ctx); err != nil {
|
||||
if err := gc.setTenantSites(ctx, errs); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
@ -261,49 +264,6 @@ func (gc *GraphConnector) UnionSiteIDsAndWebURLs(ctx context.Context, ids, urls
|
||||
return idsl, nil
|
||||
}
|
||||
|
||||
// RestoreDataCollections restores data from the specified collections
|
||||
// into M365 using the GraphAPI.
|
||||
// SideEffect: gc.status is updated at the completion of operation
|
||||
func (gc *GraphConnector) RestoreDataCollections(
|
||||
ctx context.Context,
|
||||
backupVersion int,
|
||||
acct account.Account,
|
||||
selector selectors.Selector,
|
||||
dest control.RestoreDestination,
|
||||
opts control.Options,
|
||||
dcs []data.Collection,
|
||||
) (*details.Details, error) {
|
||||
ctx, end := D.Span(ctx, "connector:restore")
|
||||
defer end()
|
||||
|
||||
var (
|
||||
status *support.ConnectorOperationStatus
|
||||
err error
|
||||
deets = &details.Builder{}
|
||||
)
|
||||
|
||||
creds, err := acct.M365Config()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "malformed azure credentials")
|
||||
}
|
||||
|
||||
switch selector.Service {
|
||||
case selectors.ServiceExchange:
|
||||
status, err = exchange.RestoreExchangeDataCollections(ctx, creds, gc.Service, dest, dcs, deets)
|
||||
case selectors.ServiceOneDrive:
|
||||
status, err = onedrive.RestoreCollections(ctx, backupVersion, gc.Service, dest, opts, dcs, deets)
|
||||
case selectors.ServiceSharePoint:
|
||||
status, err = sharepoint.RestoreCollections(ctx, backupVersion, gc.Service, dest, dcs, deets)
|
||||
default:
|
||||
err = errors.Errorf("restore data from service %s not supported", selector.Service.String())
|
||||
}
|
||||
|
||||
gc.incrementAwaitingMessages()
|
||||
gc.UpdateStatus(status)
|
||||
|
||||
return deets.Details(), err
|
||||
}
|
||||
|
||||
// AwaitStatus waits for all gc tasks to complete and then returns status
|
||||
func (gc *GraphConnector) AwaitStatus() *support.ConnectorOperationStatus {
|
||||
defer func() {
|
||||
@ -354,35 +314,35 @@ func getResources(
|
||||
query func(context.Context, graph.Servicer) (serialization.Parsable, error),
|
||||
parser func(parseNode serialization.ParseNode) (serialization.Parsable, error),
|
||||
identify func(any) (string, string, error),
|
||||
errs *fault.Errors,
|
||||
) (map[string]string, error) {
|
||||
resources := map[string]string{}
|
||||
|
||||
response, err := query(ctx, gs)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(
|
||||
err,
|
||||
"retrieving resources for tenant %s: %s",
|
||||
tenantID,
|
||||
support.ConnectorStackErrorTrace(err),
|
||||
)
|
||||
return nil, clues.Wrap(err, "retrieving tenant's resources").
|
||||
WithClues(ctx).
|
||||
WithAll(graph.ErrData(err)...)
|
||||
}
|
||||
|
||||
iter, err := msgraphgocore.NewPageIterator(response, gs.Adapter(), parser)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, support.ConnectorStackErrorTrace(err))
|
||||
return nil, clues.Stack(err).WithClues(ctx).WithAll(graph.ErrData(err)...)
|
||||
}
|
||||
|
||||
var iterErrs error
|
||||
|
||||
callbackFunc := func(item any) bool {
|
||||
if errs.Failed() {
|
||||
return false
|
||||
}
|
||||
|
||||
k, v, err := identify(item)
|
||||
if err != nil {
|
||||
if errors.Is(err, errKnownSkippableCase) {
|
||||
return true
|
||||
if !errors.Is(err, errKnownSkippableCase) {
|
||||
errs.Add(clues.Stack(err).
|
||||
WithClues(ctx).
|
||||
With("query_url", gs.Adapter().GetBaseUrl()))
|
||||
}
|
||||
|
||||
iterErrs = support.WrapAndAppend(gs.Adapter().GetBaseUrl(), err, iterErrs)
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
@ -392,20 +352,8 @@ func getResources(
|
||||
}
|
||||
|
||||
if err := iter.Iterate(ctx, callbackFunc); err != nil {
|
||||
return nil, errors.Wrap(err, support.ConnectorStackErrorTrace(err))
|
||||
return nil, clues.Stack(err).WithClues(ctx).WithAll(graph.ErrData(err)...)
|
||||
}
|
||||
|
||||
return resources, iterErrs
|
||||
}
|
||||
|
||||
// IsRecoverableError returns true iff error is a RecoverableGCEerror
|
||||
func IsRecoverableError(e error) bool {
|
||||
var recoverable support.RecoverableGCError
|
||||
return errors.As(e, &recoverable)
|
||||
}
|
||||
|
||||
// IsNonRecoverableError returns true iff error is a NonRecoverableGCEerror
|
||||
func IsNonRecoverableError(e error) bool {
|
||||
var nonRecoverable support.NonRecoverableGCError
|
||||
return errors.As(e, &nonRecoverable)
|
||||
return resources, errs.Err()
|
||||
}
|
||||
|
||||
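Site enumeration now reports problems through a shared `*fault.Errors` bucket rather than the removed recoverable/non-recoverable wrappers. Based only on the calls visible in this diff (`fault.New(true)`, `Add`, `Failed`, `Err`, `Errs`), a hedged usage sketch looks like this:

```go
// Sketch: only the fault.Errors calls mirror what this diff exercises;
// the enumeration body is illustrative.
package example

import (
	"errors"

	"github.com/alcionai/corso/src/pkg/fault"
)

func enumerate(items []string) ([]string, error) {
	// the bool presumably toggles fail-fast behavior, matching the
	// fault.New(true) construction in the tests below
	errs := fault.New(true)
	out := make([]string, 0, len(items))

	for _, it := range items {
		// stop early once the bucket reports a hard failure
		if errs.Failed() {
			break
		}

		if it == "" {
			errs.Add(errors.New("empty item"))
			continue
		}

		out = append(out, it)
	}

	// Err() is the primary failure (nil when everything succeeded);
	// Errs() would list any accumulated recoverable errors.
	return out, errs.Err()
}
```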
@ -14,6 +14,7 @@ import (
|
||||
"github.com/alcionai/corso/src/internal/tester"
|
||||
"github.com/alcionai/corso/src/pkg/account"
|
||||
"github.com/alcionai/corso/src/pkg/credentials"
|
||||
"github.com/alcionai/corso/src/pkg/fault"
|
||||
"github.com/alcionai/corso/src/pkg/selectors"
|
||||
)
|
||||
|
||||
@ -66,9 +67,9 @@ func (suite *DisconnectedGraphConnectorSuite) TestBadConnection() {
|
||||
|
||||
for _, test := range table {
|
||||
suite.T().Run(test.name, func(t *testing.T) {
|
||||
gc, err := NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), test.acct(t), Users)
|
||||
gc, err := NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), test.acct(t), Users, fault.New(true))
|
||||
assert.Nil(t, gc, test.name+" failed")
|
||||
assert.NotNil(t, err, test.name+"failed")
|
||||
assert.NotNil(t, err, test.name+" failed")
|
||||
})
|
||||
}
|
||||
}
|
||||
@ -116,58 +117,6 @@ func (suite *DisconnectedGraphConnectorSuite) TestGraphConnector_Status() {
|
||||
suite.Equal(2, gc.Status().FolderCount)
|
||||
}
|
||||
|
||||
func (suite *DisconnectedGraphConnectorSuite) TestGraphConnector_ErrorChecking() {
|
||||
tests := []struct {
|
||||
name string
|
||||
err error
|
||||
returnRecoverable assert.BoolAssertionFunc
|
||||
returnNonRecoverable assert.BoolAssertionFunc
|
||||
}{
|
||||
{
|
||||
name: "Neither Option",
|
||||
err: errors.New("regular error"),
|
||||
returnRecoverable: assert.False,
|
||||
returnNonRecoverable: assert.False,
|
||||
},
|
||||
{
|
||||
name: "Validate Recoverable",
|
||||
err: support.SetRecoverableError(errors.New("Recoverable")),
|
||||
returnRecoverable: assert.True,
|
||||
returnNonRecoverable: assert.False,
|
||||
},
|
||||
{
|
||||
name: "Validate NonRecoverable",
|
||||
err: support.SetNonRecoverableError(errors.New("Non-recoverable")),
|
||||
returnRecoverable: assert.False,
|
||||
returnNonRecoverable: assert.True,
|
||||
},
|
||||
{
|
||||
name: "Wrapped Recoverable",
|
||||
err: support.WrapAndAppend(
|
||||
"Wrapped Recoverable",
|
||||
support.SetRecoverableError(errors.New("Recoverable")),
|
||||
nil),
|
||||
returnRecoverable: assert.True,
|
||||
returnNonRecoverable: assert.False,
|
||||
},
|
||||
{
|
||||
name: "On Nil",
|
||||
err: nil,
|
||||
returnRecoverable: assert.False,
|
||||
returnNonRecoverable: assert.False,
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
suite.T().Run(test.name, func(t *testing.T) {
|
||||
recoverable := IsRecoverableError(test.err)
|
||||
nonRecoverable := IsNonRecoverableError(test.err)
|
||||
test.returnRecoverable(suite.T(), recoverable, "Test: %s Recoverable-received %v", test.name, recoverable)
|
||||
test.returnNonRecoverable(suite.T(), nonRecoverable, "Test: %s non-recoverable: %v", test.name, nonRecoverable)
|
||||
t.Logf("Is nil: %v", test.err == nil)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func (suite *DisconnectedGraphConnectorSuite) TestVerifyBackupInputs() {
|
||||
users := []string{
|
||||
"elliotReid@someHospital.org",
|
||||
|
||||
@ -1,6 +1,7 @@
|
||||
package connector
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"io"
|
||||
@ -14,6 +15,7 @@ import (
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"golang.org/x/exp/maps"
|
||||
"golang.org/x/exp/slices"
|
||||
|
||||
"github.com/alcionai/corso/src/internal/connector/mockconnector"
|
||||
"github.com/alcionai/corso/src/internal/connector/onedrive"
|
||||
@ -21,6 +23,7 @@ import (
|
||||
"github.com/alcionai/corso/src/internal/data"
|
||||
"github.com/alcionai/corso/src/internal/tester"
|
||||
"github.com/alcionai/corso/src/pkg/control"
|
||||
"github.com/alcionai/corso/src/pkg/fault"
|
||||
"github.com/alcionai/corso/src/pkg/path"
|
||||
"github.com/alcionai/corso/src/pkg/selectors"
|
||||
)
|
||||
@ -163,6 +166,10 @@ type colInfo struct {
|
||||
pathElements []string
|
||||
category path.CategoryType
|
||||
items []itemInfo
|
||||
// auxItems are items that can be retrieved with Fetch but won't be returned
|
||||
// by Items(). These files do not directly participate in comparisons at the
|
||||
// end of a test.
|
||||
auxItems []itemInfo
|
||||
}
|
||||
|
||||
type restoreBackupInfo struct {
|
||||
@ -652,6 +659,35 @@ func compareExchangeEvent(
|
||||
checkEvent(t, expectedEvent, itemEvent)
|
||||
}
|
||||
|
||||
func permissionEqual(expected onedrive.UserPermission, got onedrive.UserPermission) bool {
|
||||
if !strings.EqualFold(expected.Email, got.Email) {
|
||||
return false
|
||||
}
|
||||
|
||||
if (expected.Expiration == nil && got.Expiration != nil) ||
|
||||
(expected.Expiration != nil && got.Expiration == nil) {
|
||||
return false
|
||||
}
|
||||
|
||||
if expected.Expiration != nil &&
|
||||
got.Expiration != nil &&
|
||||
!expected.Expiration.Equal(*got.Expiration) {
|
||||
return false
|
||||
}
|
||||
|
||||
if len(expected.Roles) != len(got.Roles) {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, r := range got.Roles {
|
||||
if !slices.Contains(expected.Roles, r) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func compareOneDriveItem(
|
||||
t *testing.T,
|
||||
expected map[string][]byte,
|
||||
@ -695,13 +731,7 @@ func compareOneDriveItem(
|
||||
}
|
||||
|
||||
assert.Equal(t, len(expectedMeta.Permissions), len(itemMeta.Permissions), "number of permissions after restore")
|
||||
|
||||
// FIXME(meain): The permissions before and after might not be in the same order.
|
||||
for i, p := range expectedMeta.Permissions {
|
||||
assert.Equal(t, p.Email, itemMeta.Permissions[i].Email)
|
||||
assert.Equal(t, p.Roles, itemMeta.Permissions[i].Roles)
|
||||
assert.Equal(t, p.Expiration, itemMeta.Permissions[i].Expiration)
|
||||
}
|
||||
testElementsMatch(t, expectedMeta.Permissions, itemMeta.Permissions, permissionEqual)
|
||||
}
|
||||
|
||||
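`compareOneDriveItem` now checks permissions order-independently through `testElementsMatch` with `permissionEqual` as the comparator, which resolves the removed FIXME. The helper itself isn't in this hunk; a minimal matcher with that shape might look like the sketch below (generic for illustration; the real helper may be specialized to permission slices):

```go
// Sketch: an order-insensitive matcher of the shape used above; the real
// testElementsMatch lives elsewhere in the connector test code.
package example

import "testing"

func testElementsMatch[T any](
	t *testing.T,
	expected, got []T,
	equal func(expected, got T) bool,
) {
	t.Helper()

	if len(expected) != len(got) {
		t.Errorf("expected %d elements, got %d", len(expected), len(got))
		return
	}

	matched := make([]bool, len(got))

	for _, e := range expected {
		found := false

		for i, g := range got {
			if matched[i] || !equal(e, g) {
				continue
			}

			matched[i], found = true, true

			break
		}

		if !found {
			t.Errorf("no match for expected element %+v", e)
		}
	}
}
```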
func compareItem(
|
||||
@ -740,7 +770,7 @@ func compareItem(
|
||||
func checkHasCollections(
|
||||
t *testing.T,
|
||||
expected map[string]map[string][]byte,
|
||||
got []data.Collection,
|
||||
got []data.BackupCollection,
|
||||
) {
|
||||
t.Helper()
|
||||
|
||||
@ -762,10 +792,10 @@ func checkCollections(
|
||||
t *testing.T,
|
||||
expectedItems int,
|
||||
expected map[string]map[string][]byte,
|
||||
got []data.Collection,
|
||||
got []data.BackupCollection,
|
||||
restorePermissions bool,
|
||||
) int {
|
||||
collectionsWithItems := []data.Collection{}
|
||||
collectionsWithItems := []data.BackupCollection{}
|
||||
|
||||
skipped := 0
|
||||
gotItems := 0
|
||||
@ -944,14 +974,33 @@ func backupOutputPathFromRestore(
|
||||
)
|
||||
}
|
||||
|
||||
// TODO(ashmrtn): Make this an actual mock class that can be used in other
|
||||
// packages.
|
||||
type mockRestoreCollection struct {
|
||||
data.Collection
|
||||
auxItems map[string]data.Stream
|
||||
}
|
||||
|
||||
func (rc mockRestoreCollection) Fetch(
|
||||
ctx context.Context,
|
||||
name string,
|
||||
) (data.Stream, error) {
|
||||
res := rc.auxItems[name]
|
||||
if res == nil {
|
||||
return nil, data.ErrNotFound
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
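`mockRestoreCollection` serves registered aux items through `Fetch` and reports `data.ErrNotFound` for anything else. A brief illustrative test of that behavior, assuming the imports already declared in this file (`bytes`, `io`, `tester`, `mockconnector`, testify):

```go
// Sketch: illustrative test alongside mockRestoreCollection; names are
// placeholders and the imports are those already present in this file.
func TestMockRestoreCollectionFetch(t *testing.T) {
	ctx, flush := tester.NewContext()
	defer flush()

	mc := mockconnector.NewMockExchangeCollection(nil, 0)

	rc := mockRestoreCollection{
		Collection: mc,
		auxItems: map[string]data.Stream{
			"meta.json": &mockconnector.MockExchangeData{
				ID:     "meta.json",
				Reader: io.NopCloser(bytes.NewReader([]byte("{}"))),
			},
		},
	}

	// registered aux items come back from Fetch...
	got, err := rc.Fetch(ctx, "meta.json")
	require.NoError(t, err)
	assert.NotNil(t, got)

	// ...and unknown names surface data.ErrNotFound.
	_, err = rc.Fetch(ctx, "missing.json")
	assert.ErrorIs(t, err, data.ErrNotFound)
}
```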
func collectionsForInfo(
|
||||
t *testing.T,
|
||||
service path.ServiceType,
|
||||
tenant, user string,
|
||||
dest control.RestoreDestination,
|
||||
allInfo []colInfo,
|
||||
) (int, int, []data.Collection, map[string]map[string][]byte) {
|
||||
collections := make([]data.Collection, 0, len(allInfo))
|
||||
) (int, int, []data.RestoreCollection, map[string]map[string][]byte) {
|
||||
collections := make([]data.RestoreCollection, 0, len(allInfo))
|
||||
expectedData := make(map[string]map[string][]byte, len(allInfo))
|
||||
totalItems := 0
|
||||
kopiaEntries := 0
|
||||
@ -966,7 +1015,7 @@ func collectionsForInfo(
|
||||
info.pathElements,
|
||||
false,
|
||||
)
|
||||
c := mockconnector.NewMockExchangeCollection(pth, len(info.items))
|
||||
mc := mockconnector.NewMockExchangeCollection(pth, len(info.items))
|
||||
baseDestPath := backupOutputPathFromRestore(t, dest, pth)
|
||||
|
||||
baseExpected := expectedData[baseDestPath.String()]
|
||||
@ -976,8 +1025,8 @@ func collectionsForInfo(
|
||||
}
|
||||
|
||||
for i := 0; i < len(info.items); i++ {
|
||||
c.Names[i] = info.items[i].name
|
||||
c.Data[i] = info.items[i].data
|
||||
mc.Names[i] = info.items[i].name
|
||||
mc.Data[i] = info.items[i].data
|
||||
|
||||
baseExpected[info.items[i].lookupKey] = info.items[i].data
|
||||
|
||||
@ -989,6 +1038,15 @@ func collectionsForInfo(
|
||||
}
|
||||
}
|
||||
|
||||
c := mockRestoreCollection{Collection: mc, auxItems: map[string]data.Stream{}}
|
||||
|
||||
for _, aux := range info.auxItems {
|
||||
c.auxItems[aux.name] = &mockconnector.MockExchangeData{
|
||||
ID: aux.name,
|
||||
Reader: io.NopCloser(bytes.NewReader(aux.data)),
|
||||
}
|
||||
}
|
||||
|
||||
collections = append(collections, c)
|
||||
kopiaEntries += len(info.items)
|
||||
}
|
||||
@ -1002,8 +1060,8 @@ func collectionsForInfoVersion0(
|
||||
tenant, user string,
|
||||
dest control.RestoreDestination,
|
||||
allInfo []colInfo,
|
||||
) (int, int, []data.Collection, map[string]map[string][]byte) {
|
||||
collections := make([]data.Collection, 0, len(allInfo))
|
||||
) (int, int, []data.RestoreCollection, map[string]map[string][]byte) {
|
||||
collections := make([]data.RestoreCollection, 0, len(allInfo))
|
||||
expectedData := make(map[string]map[string][]byte, len(allInfo))
|
||||
totalItems := 0
|
||||
kopiaEntries := 0
|
||||
@ -1034,7 +1092,9 @@ func collectionsForInfoVersion0(
|
||||
baseExpected[info.items[i].lookupKey] = info.items[i].data
|
||||
}
|
||||
|
||||
collections = append(collections, c)
|
||||
collections = append(collections, data.NotFoundRestoreCollection{
|
||||
Collection: c,
|
||||
})
|
||||
totalItems += len(info.items)
|
||||
kopiaEntries += len(info.items)
|
||||
}
|
||||
@ -1079,7 +1139,8 @@ func getSelectorWith(
|
||||
|
||||
func loadConnector(ctx context.Context, t *testing.T, itemClient *http.Client, r resource) *GraphConnector {
|
||||
a := tester.NewM365Account(t)
|
||||
connector, err := NewGraphConnector(ctx, itemClient, a, r)
|
||||
|
||||
connector, err := NewGraphConnector(ctx, itemClient, a, r, fault.New(true))
|
||||
require.NoError(t, err)
|
||||
|
||||
return connector
|
||||
|
||||
@ -24,6 +24,7 @@ import (
|
||||
"github.com/alcionai/corso/src/pkg/account"
|
||||
"github.com/alcionai/corso/src/pkg/backup"
|
||||
"github.com/alcionai/corso/src/pkg/control"
|
||||
"github.com/alcionai/corso/src/pkg/fault"
|
||||
"github.com/alcionai/corso/src/pkg/path"
|
||||
"github.com/alcionai/corso/src/pkg/selectors"
|
||||
)
|
||||
@ -125,9 +126,15 @@ func (suite *GraphConnectorUnitSuite) TestUnionSiteIDsAndWebURLs() {
|
||||
}
|
||||
for _, test := range table {
|
||||
suite.T().Run(test.name, func(t *testing.T) {
|
||||
//nolint
|
||||
result, err := gc.UnionSiteIDsAndWebURLs(context.Background(), test.ids, test.urls)
|
||||
ctx, flush := tester.NewContext()
|
||||
defer flush()
|
||||
|
||||
errs := fault.New(true)
|
||||
|
||||
result, err := gc.UnionSiteIDsAndWebURLs(ctx, test.ids, test.urls, errs)
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, errs.Err())
|
||||
assert.Empty(t, errs.Errs())
|
||||
assert.ElementsMatch(t, test.expect, result)
|
||||
})
|
||||
}
|
||||
@ -204,18 +211,24 @@ func (suite *GraphConnectorIntegrationSuite) TestSetTenantSites() {
|
||||
ctx, flush := tester.NewContext()
|
||||
defer flush()
|
||||
|
||||
t := suite.T()
|
||||
|
||||
service, err := newConnector.createService()
|
||||
require.NoError(suite.T(), err)
|
||||
require.NoError(t, err)
|
||||
|
||||
newConnector.Service = service
|
||||
assert.Equal(t, 0, len(newConnector.Sites))
|
||||
|
||||
suite.Equal(0, len(newConnector.Sites))
|
||||
err = newConnector.setTenantSites(ctx)
|
||||
suite.NoError(err)
|
||||
suite.Less(0, len(newConnector.Sites))
|
||||
errs := fault.New(true)
|
||||
|
||||
err = newConnector.setTenantSites(ctx, errs)
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, errs.Err())
|
||||
assert.Empty(t, errs.Errs())
|
||||
assert.Less(t, 0, len(newConnector.Sites))
|
||||
|
||||
for _, site := range newConnector.Sites {
|
||||
suite.NotContains("sharepoint.com/personal/", site)
|
||||
assert.NotContains(t, "sharepoint.com/personal/", site)
|
||||
}
|
||||
}
|
||||
|
||||
@ -257,7 +270,7 @@ func (suite *GraphConnectorIntegrationSuite) TestEmptyCollections() {
|
||||
dest := tester.DefaultTestRestoreDestination()
|
||||
table := []struct {
|
||||
name string
|
||||
col []data.Collection
|
||||
col []data.RestoreCollection
|
||||
sel selectors.Selector
|
||||
}{
|
||||
{
|
||||
@ -269,7 +282,7 @@ func (suite *GraphConnectorIntegrationSuite) TestEmptyCollections() {
|
||||
},
|
||||
{
|
||||
name: "ExchangeEmpty",
|
||||
col: []data.Collection{},
|
||||
col: []data.RestoreCollection{},
|
||||
sel: selectors.Selector{
|
||||
Service: selectors.ServiceExchange,
|
||||
},
|
||||
@ -283,7 +296,7 @@ func (suite *GraphConnectorIntegrationSuite) TestEmptyCollections() {
|
||||
},
|
||||
{
|
||||
name: "OneDriveEmpty",
|
||||
col: []data.Collection{},
|
||||
col: []data.RestoreCollection{},
|
||||
sel: selectors.Selector{
|
||||
Service: selectors.ServiceOneDrive,
|
||||
},
|
||||
@ -297,7 +310,7 @@ func (suite *GraphConnectorIntegrationSuite) TestEmptyCollections() {
|
||||
},
|
||||
{
|
||||
name: "SharePointEmpty",
|
||||
col: []data.Collection{},
|
||||
col: []data.RestoreCollection{},
|
||||
sel: selectors.Selector{
|
||||
Service: selectors.ServiceSharePoint,
|
||||
},
|
||||
@ -370,7 +383,7 @@ func runRestoreBackupTest(
|
||||
opts control.Options,
|
||||
) {
|
||||
var (
|
||||
collections []data.Collection
|
||||
collections []data.RestoreCollection
|
||||
expectedData = map[string]map[string][]byte{}
|
||||
totalItems = 0
|
||||
totalKopiaItems = 0
|
||||
@ -495,7 +508,7 @@ func runRestoreBackupTestVersion0(
|
||||
opts control.Options,
|
||||
) {
|
||||
var (
|
||||
collections []data.Collection
|
||||
collections []data.RestoreCollection
|
||||
expectedData = map[string]map[string][]byte{}
|
||||
totalItems = 0
|
||||
totalKopiaItems = 0
|
||||
@ -885,6 +898,13 @@ func (suite *GraphConnectorIntegrationSuite) TestRestoreAndBackup() {
|
||||
lookupKey: "b" + onedrive.DirMetaFileSuffix,
|
||||
},
|
||||
},
|
||||
auxItems: []itemInfo{
|
||||
{
|
||||
name: "test-file.txt" + onedrive.MetaFileSuffix,
|
||||
data: []byte("{}"),
|
||||
lookupKey: "test-file.txt" + onedrive.MetaFileSuffix,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
pathElements: []string{
|
||||
@ -911,6 +931,13 @@ func (suite *GraphConnectorIntegrationSuite) TestRestoreAndBackup() {
|
||||
lookupKey: "b" + onedrive.DirMetaFileSuffix,
|
||||
},
|
||||
},
|
||||
auxItems: []itemInfo{
|
||||
{
|
||||
name: "test-file.txt" + onedrive.MetaFileSuffix,
|
||||
data: []byte("{}"),
|
||||
lookupKey: "test-file.txt" + onedrive.MetaFileSuffix,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
pathElements: []string{
|
||||
@ -938,6 +965,13 @@ func (suite *GraphConnectorIntegrationSuite) TestRestoreAndBackup() {
|
||||
lookupKey: "folder-a" + onedrive.DirMetaFileSuffix,
|
||||
},
|
||||
},
|
||||
auxItems: []itemInfo{
|
||||
{
|
||||
name: "test-file.txt" + onedrive.MetaFileSuffix,
|
||||
data: []byte("{}"),
|
||||
lookupKey: "test-file.txt" + onedrive.MetaFileSuffix,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
pathElements: []string{
|
||||
@ -961,6 +995,13 @@ func (suite *GraphConnectorIntegrationSuite) TestRestoreAndBackup() {
|
||||
lookupKey: "test-file.txt" + onedrive.MetaFileSuffix,
|
||||
},
|
||||
},
|
||||
auxItems: []itemInfo{
|
||||
{
|
||||
name: "test-file.txt" + onedrive.MetaFileSuffix,
|
||||
data: []byte("{}"),
|
||||
lookupKey: "test-file.txt" + onedrive.MetaFileSuffix,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
pathElements: []string{
|
||||
@ -982,6 +1023,13 @@ func (suite *GraphConnectorIntegrationSuite) TestRestoreAndBackup() {
|
||||
lookupKey: "test-file.txt" + onedrive.MetaFileSuffix,
|
||||
},
|
||||
},
|
||||
auxItems: []itemInfo{
|
||||
{
|
||||
name: "test-file.txt" + onedrive.MetaFileSuffix,
|
||||
data: []byte("{}"),
|
||||
lookupKey: "test-file.txt" + onedrive.MetaFileSuffix,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -1014,6 +1062,13 @@ func (suite *GraphConnectorIntegrationSuite) TestRestoreAndBackup() {
|
||||
lookupKey: "b" + onedrive.DirMetaFileSuffix,
|
||||
},
|
||||
},
|
||||
auxItems: []itemInfo{
|
||||
{
|
||||
name: "test-file.txt" + onedrive.MetaFileSuffix,
|
||||
data: getTestMetaJSON(suite.T(), suite.secondaryUser, []string{"write"}),
|
||||
lookupKey: "test-file.txt" + onedrive.MetaFileSuffix,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
pathElements: []string{
|
||||
@ -1035,6 +1090,13 @@ func (suite *GraphConnectorIntegrationSuite) TestRestoreAndBackup() {
|
||||
lookupKey: "test-file.txt" + onedrive.MetaFileSuffix,
|
||||
},
|
||||
},
|
||||
auxItems: []itemInfo{
|
||||
{
|
||||
name: "test-file.txt" + onedrive.MetaFileSuffix,
|
||||
data: getTestMetaJSON(suite.T(), suite.secondaryUser, []string{"read"}),
|
||||
lookupKey: "test-file.txt" + onedrive.MetaFileSuffix,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -1190,6 +1252,13 @@ func (suite *GraphConnectorIntegrationSuite) TestRestoreAndBackupVersion0() {
|
||||
lookupKey: "b" + onedrive.DirMetaFileSuffix,
|
||||
},
|
||||
},
|
||||
auxItems: []itemInfo{
|
||||
{
|
||||
name: "test-file.txt" + onedrive.MetaFileSuffix,
|
||||
data: []byte("{}"),
|
||||
lookupKey: "test-file.txt" + onedrive.MetaFileSuffix,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
pathElements: []string{
|
||||
@ -1216,6 +1285,13 @@ func (suite *GraphConnectorIntegrationSuite) TestRestoreAndBackupVersion0() {
|
||||
lookupKey: "b" + onedrive.DirMetaFileSuffix,
|
||||
},
|
||||
},
|
||||
auxItems: []itemInfo{
|
||||
{
|
||||
name: "test-file.txt" + onedrive.MetaFileSuffix,
|
||||
data: []byte("{}"),
|
||||
lookupKey: "test-file.txt" + onedrive.MetaFileSuffix,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
pathElements: []string{
|
||||
@ -1243,6 +1319,13 @@ func (suite *GraphConnectorIntegrationSuite) TestRestoreAndBackupVersion0() {
|
||||
lookupKey: "folder-a" + onedrive.DirMetaFileSuffix,
|
||||
},
|
||||
},
|
||||
auxItems: []itemInfo{
|
||||
{
|
||||
name: "test-file.txt" + onedrive.MetaFileSuffix,
|
||||
data: []byte("{}"),
|
||||
lookupKey: "test-file.txt" + onedrive.MetaFileSuffix,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
pathElements: []string{
|
||||
@ -1266,6 +1349,13 @@ func (suite *GraphConnectorIntegrationSuite) TestRestoreAndBackupVersion0() {
|
||||
lookupKey: "test-file.txt" + onedrive.MetaFileSuffix,
|
||||
},
|
||||
},
|
||||
auxItems: []itemInfo{
|
||||
{
|
||||
name: "test-file.txt" + onedrive.MetaFileSuffix,
|
||||
data: []byte("{}"),
|
||||
lookupKey: "test-file.txt" + onedrive.MetaFileSuffix,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
pathElements: []string{
|
||||
@ -1287,6 +1377,13 @@ func (suite *GraphConnectorIntegrationSuite) TestRestoreAndBackupVersion0() {
|
||||
lookupKey: "test-file.txt" + onedrive.MetaFileSuffix,
|
||||
},
|
||||
},
|
||||
auxItems: []itemInfo{
|
||||
{
|
||||
name: "test-file.txt" + onedrive.MetaFileSuffix,
|
||||
data: []byte("{}"),
|
||||
lookupKey: "test-file.txt" + onedrive.MetaFileSuffix,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -1508,6 +1605,13 @@ func (suite *GraphConnectorIntegrationSuite) TestPermissionsRestoreAndBackup() {
|
||||
lookupKey: "test-file.txt" + onedrive.MetaFileSuffix,
|
||||
},
|
||||
},
|
||||
auxItems: []itemInfo{
|
||||
{
|
||||
name: "test-file.txt" + onedrive.MetaFileSuffix,
|
||||
data: getTestMetaJSON(suite.T(), suite.secondaryUser, []string{"write"}),
|
||||
lookupKey: "test-file.txt" + onedrive.MetaFileSuffix,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -1541,6 +1645,13 @@ func (suite *GraphConnectorIntegrationSuite) TestPermissionsRestoreAndBackup() {
|
||||
lookupKey: "b" + onedrive.DirMetaFileSuffix,
|
||||
},
|
||||
},
|
||||
auxItems: []itemInfo{
|
||||
{
|
||||
name: "test-file.txt" + onedrive.MetaFileSuffix,
|
||||
data: []byte("{}"),
|
||||
lookupKey: "test-file.txt" + onedrive.MetaFileSuffix,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
pathElements: []string{
|
||||
@ -1562,6 +1673,13 @@ func (suite *GraphConnectorIntegrationSuite) TestPermissionsRestoreAndBackup() {
|
||||
lookupKey: "test-file.txt" + onedrive.MetaFileSuffix,
|
||||
},
|
||||
},
|
||||
auxItems: []itemInfo{
|
||||
{
|
||||
name: "test-file.txt" + onedrive.MetaFileSuffix,
|
||||
data: getTestMetaJSON(suite.T(), suite.secondaryUser, []string{"read"}),
|
||||
lookupKey: "test-file.txt" + onedrive.MetaFileSuffix,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -1595,6 +1713,13 @@ func (suite *GraphConnectorIntegrationSuite) TestPermissionsRestoreAndBackup() {
|
||||
lookupKey: "b" + onedrive.DirMetaFileSuffix,
|
||||
},
|
||||
},
|
||||
auxItems: []itemInfo{
|
||||
{
|
||||
name: "test-file.txt" + onedrive.MetaFileSuffix,
|
||||
data: getTestMetaJSON(suite.T(), suite.secondaryUser, []string{"write"}),
|
||||
lookupKey: "test-file.txt" + onedrive.MetaFileSuffix,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
pathElements: []string{
|
||||
@ -1616,6 +1741,13 @@ func (suite *GraphConnectorIntegrationSuite) TestPermissionsRestoreAndBackup() {
|
||||
lookupKey: "test-file.txt" + onedrive.MetaFileSuffix,
|
||||
},
|
||||
},
|
||||
auxItems: []itemInfo{
|
||||
{
|
||||
name: "test-file.txt" + onedrive.MetaFileSuffix,
|
||||
data: getTestMetaJSON(suite.T(), suite.secondaryUser, []string{"read"}),
|
||||
lookupKey: "test-file.txt" + onedrive.MetaFileSuffix,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -1660,6 +1792,13 @@ func (suite *GraphConnectorIntegrationSuite) TestPermissionsRestoreAndBackup() {
|
||||
lookupKey: "test-file.txt" + onedrive.MetaFileSuffix,
|
||||
},
|
||||
},
|
||||
auxItems: []itemInfo{
|
||||
{
|
||||
name: "test-file.txt" + onedrive.MetaFileSuffix,
|
||||
data: getTestMetaJSON(suite.T(), suite.secondaryUser, []string{"write"}),
|
||||
lookupKey: "test-file.txt" + onedrive.MetaFileSuffix,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -1704,6 +1843,13 @@ func (suite *GraphConnectorIntegrationSuite) TestPermissionsRestoreAndBackup() {
|
||||
lookupKey: "test-file.txt" + onedrive.MetaFileSuffix,
|
||||
},
|
||||
},
|
||||
auxItems: []itemInfo{
|
||||
{
|
||||
name: "test-file.txt" + onedrive.MetaFileSuffix,
|
||||
data: []byte("{}"),
|
||||
lookupKey: "test-file.txt" + onedrive.MetaFileSuffix,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -1762,6 +1908,13 @@ func (suite *GraphConnectorIntegrationSuite) TestPermissionsBackupAndNoRestore()
|
||||
lookupKey: "test-file.txt" + onedrive.MetaFileSuffix,
|
||||
},
|
||||
},
|
||||
auxItems: []itemInfo{
|
||||
{
|
||||
name: "test-file.txt" + onedrive.MetaFileSuffix,
|
||||
data: getTestMetaJSON(suite.T(), suite.secondaryUser, []string{"write"}),
|
||||
lookupKey: "test-file.txt" + onedrive.MetaFileSuffix,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
@ -27,10 +27,10 @@ type MockExchangeDataCollection struct {
|
||||
}
|
||||
|
||||
var (
|
||||
_ data.Collection = &MockExchangeDataCollection{}
|
||||
_ data.Stream = &MockExchangeData{}
|
||||
_ data.StreamInfo = &MockExchangeData{}
|
||||
_ data.StreamSize = &MockExchangeData{}
|
||||
_ data.BackupCollection = &MockExchangeDataCollection{}
|
||||
_ data.Stream = &MockExchangeData{}
|
||||
_ data.StreamInfo = &MockExchangeData{}
|
||||
_ data.StreamSize = &MockExchangeData{}
|
||||
)
|
||||
|
||||
// NewMockExchangeDataCollection creates a data collection that will return the specified number of
|
||||
|
||||
@ -14,8 +14,8 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
_ data.Stream = &MockListData{}
|
||||
_ data.Collection = &MockListCollection{}
|
||||
_ data.Stream = &MockListData{}
|
||||
_ data.BackupCollection = &MockListCollection{}
|
||||
)
|
||||
|
||||
type MockListCollection struct {
|
||||
|
||||
@ -3,6 +3,13 @@ package mockconnector
|
||||
import (
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
absser "github.com/microsoft/kiota-abstractions-go/serialization"
|
||||
js "github.com/microsoft/kiota-serialization-json-go"
|
||||
"github.com/microsoftgraph/msgraph-sdk-go/models"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/alcionai/corso/src/internal/common"
|
||||
)
|
||||
@ -360,6 +367,143 @@ func GetMockMessageWithItemAttachmentEvent(subject string) []byte {
|
||||
return []byte(message)
|
||||
}
|
||||
|
||||
func GetMockMessageWithItemAttachmentMail(subject string) []byte {
|
||||
//nolint:lll
|
||||
// Order of fields:
|
||||
// 1. subject
|
||||
// 2. alias
|
||||
// 3. sender address
|
||||
// 4. from address
|
||||
// 5. toRecipients email address
|
||||
template := `{
|
||||
"@odata.context": "https://graph.microsoft.com/v1.0/$metadata#users('f435c656-f8b2-4d71-93c3-6e092f52a167')/messages(attachments())/$entity",
|
||||
"@odata.etag": "W/\"CQAAABYAAAB8wYc0thTTTYl3RpEYIUq+AADKTqr3\"",
|
||||
"id": "AAMkAGQ1NzViZTdhLTEwMTMtNGJjNi05YWI2LTg4NWRlZDA2Y2UxOABGAAAAAAAPvVwUramXT7jlSGpVU8_7BwB8wYc0thTTTYl3RpEYIUq_AAAAAAEMAAB8wYc0thTTTYl3RpEYIUq_AADKo35SAAA=",
|
||||
"createdDateTime": "2023-02-06T20:03:40Z",
|
||||
"lastModifiedDateTime": "2023-02-06T20:03:42Z",
|
||||
"changeKey": "CQAAABYAAAB8wYc0thTTTYl3RpEYIUq+AADKTqr3",
|
||||
"categories": [],
|
||||
"receivedDateTime": "2023-02-06T20:03:40Z",
|
||||
"sentDateTime": "2023-02-06T20:03:37Z",
|
||||
"hasAttachments": true,
|
||||
"internetMessageId": "<SJ0PR17MB5622C17321AE356F5202A857C3DA9@SJ0PR17MB5622.namprd17.prod.outlook.com>",
|
||||
"subject": "%[1]s",
|
||||
"bodyPreview": "Nested Items are not encapsulated in a trivial manner. Review the findings.\r\n\r\nBest,\r\n\r\nYour Test Case",
|
||||
"importance": "normal",
|
||||
"parentFolderId": "AQMkAGQ1NzViZTdhLTEwMTMtNGJjNi05YWI2LTg4ADVkZWQwNmNlMTgALgAAAw_9XBStqZdPuOVIalVTz7sBAHzBhzS2FNNNiXdGkRghSr4AAAIBDAAAAA==",
|
||||
"conversationId": "AAQkAGQ1NzViZTdhLTEwMTMtNGJjNi05YWI2LTg4NWRlZDA2Y2UxOAAQAPe8pEQOrBxLvFNhfDtMyEI=",
|
||||
"conversationIndex": "AQHZOmYA97ykRA6sHEu8U2F8O0zIQg==",
|
||||
"isDeliveryReceiptRequested": false,
|
||||
"isReadReceiptRequested": false,
|
||||
"isRead": false,
|
||||
"isDraft": false,
|
||||
"webLink": "https://outlook.office365.com/owal=ReadMessageItem",
|
||||
"inferenceClassification": "focused",
|
||||
"body": {
|
||||
"contentType": "html",
|
||||
"content": "<html><head>\r\n<meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\"><style type=\"text/css\" style=\"display:none\">\r\n<!--\r\np\r\n\t{margin-top:0;\r\n\tmargin-bottom:0}\r\n-->\r\n</style></head><body dir=\"ltr\"><div class=\"elementToProof\" style=\"font-family:Calibri,Arial,Helvetica,sans-serif; font-size:12pt; color:rgb(0,0,0); background-color:rgb(255,255,255)\">Nested Items are not encapsulated in a trivial manner. Review the findings.</div><div class=\"elementToProof\" style=\"font-family:Calibri,Arial,Helvetica,sans-serif; font-size:12pt; color:rgb(0,0,0); background-color:rgb(255,255,255)\"><br></div><div class=\"elementToProof\" style=\"font-family:Calibri,Arial,Helvetica,sans-serif; font-size:12pt; color:rgb(0,0,0); background-color:rgb(255,255,255)\">Best, </div><div class=\"elementToProof\" style=\"font-family:Calibri,Arial,Helvetica,sans-serif; font-size:12pt; color:rgb(0,0,0); background-color:rgb(255,255,255)\"><br></div><div class=\"elementToProof\" style=\"font-family:Calibri,Arial,Helvetica,sans-serif; font-size:12pt; color:rgb(0,0,0); background-color:rgb(255,255,255)\">Your Test Case</div></body></html>"
|
||||
},
|
||||
"sender": {
|
||||
"emailAddress": {
|
||||
"name": "%[2]s",
|
||||
"address": "%[3]s"
|
||||
}
|
||||
},
|
||||
"from": {
|
||||
"emailAddress": {
|
||||
"name": "%[2]s",
|
||||
"address": "%[4]s"
|
||||
}
|
||||
},
|
||||
"toRecipients": [
|
||||
{
|
||||
"emailAddress": {
|
||||
"name": "%[2]s",
|
||||
"address": "%[5]s"
|
||||
}
|
||||
}
|
||||
],
|
||||
"ccRecipients": [],
|
||||
"bccRecipients": [],
|
||||
"replyTo": [],
|
||||
"flag": {
|
||||
"flagStatus": "notFlagged"
|
||||
},
|
||||
"attachments": [
|
||||
{
|
||||
"@odata.context": "https://graph.microsoft.com/v1.0/$metadata#/attachments(microsoft.graph.itemAttachment/item())/$entity",
|
||||
"@odata.type": "#microsoft.graph.itemAttachment",
|
||||
"id": "AAMkAGQ1NzViZTdhLTEwMTMtNGJjNi05YWI2LTg4NWRlZDA2Y2UxOABGAAAAAAAPvVwUramXT7jlSGpVU8_7BwB8wYc0thTTTYl3RpEYIUq_AAAAAAEMAAB8wYc0thTTTYl3RpEYIUq_AADKo35SAAABEgAQABv3spWM8g5IriSvYJe5kO8=",
|
||||
"lastModifiedDateTime": "2023-02-06T20:03:40Z",
|
||||
"name": "Not Something Small. 28-Jul-2022_20:53:33 Different",
|
||||
"contentType": null,
|
||||
"size": 10959,
|
||||
"isInline": false,
|
||||
"item@odata.associationLink": "https://graph.microsoft.com/v1.0/users('f435c656-f8b2-4d71-93c3-6e092f52a167')/messages('')/$ref",
|
||||
"item@odata.navigationLink": "https://graph.microsoft.com/v1.0/users('f435c656-f8b2-4d71-93c3-6e092f52a167')/messages('')",
|
||||
"item": {
|
||||
"@odata.type": "#microsoft.graph.message",
|
||||
"id": "",
|
||||
"createdDateTime": "2023-02-06T20:03:40Z",
|
||||
"lastModifiedDateTime": "2023-02-06T20:03:40Z",
|
||||
"receivedDateTime": "2022-07-28T20:53:33Z",
|
||||
"sentDateTime": "2022-07-28T20:53:33Z",
|
||||
"hasAttachments": false,
|
||||
"internetMessageId": "<MWHPR1401MB1952C46D4A46B6398F562B0FA6E99@MWHPR1401MB1952.namprd14.prod.outlook.com>",
|
||||
"subject": "Not Something Small. 28-Jul-2022_20:53:33 Different",
|
||||
"bodyPreview": "I've been going through with the changing of messages. It shouldn't have the same calls, right? Call Me?\r\n\r\nWe want to be able to send multiple messages and we want to be able to respond and do other things that make sense for our users. In this case. Let",
|
||||
"importance": "normal",
|
||||
"conversationId": "AAQkAGQ1NzViZTdhLTEwMTMtNGJjNi05YWI2LTg4NWRlZDA2Y2UxOAAQAOlAM0OrVQlHkhUZeZMPxgg=",
|
||||
"conversationIndex": "AQHYosQZ6UAzQ6tVCUeSFRl5kw/GCA==",
|
||||
"isDeliveryReceiptRequested": false,
|
||||
"isReadReceiptRequested": false,
|
||||
"isRead": true,
|
||||
"isDraft": false,
|
||||
"webLink": "https://outlook.office365.com/owa/?AttachmentItemID=Aviewmodel=ItemAttachment",
|
||||
"body": {
|
||||
"contentType": "html",
|
||||
"content": "<html><head>\r\n<meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\"><meta name=\"Generator\" content=\"Microsoft Word 15 (filtered medium)\"><style><!--@font-face{font-family:\"Cambria Math\"}@font-face{font-family:Calibri}p.MsoNormal, li.MsoNormal, div.MsoNormal{margin:0in;font-size:11.0pt;font-family:\"Calibri\",sans-serif}span.EmailStyle17{font-family:\"Calibri\",sans-serif;color:windowtext}.MsoChpDefault{font-family:\"Calibri\",sans-serif}@page WordSection1{margin:1.0in 1.0in 1.0in 1.0in}div.WordSection1{}--></style></head><body lang=\"EN-US\" link=\"#0563C1\" vlink=\"#954F72\" style=\"word-wrap:break-word\"><div class=\"WordSection1\"><p class=\"MsoNormal\">I've been going through with the changing of messages. It shouldn't have the same calls, right? Call Me? </p><p class=\"MsoNormal\"> </p><p class=\"MsoNormal\">We want to be able to send multiple messages and we want to be able to respond and do other things that make sense for our users. In this case. Let’s consider a Mailbox</p></div></body></html>"
|
||||
},
|
||||
"sender": {
|
||||
"emailAddress": {
|
||||
"name": "%[2]s",
|
||||
"address": "%[3]s"
|
||||
}
|
||||
},
|
||||
"from": {
|
||||
"emailAddress": {
|
||||
"name": "%[2]s",
|
||||
"address": "%[4]s"
|
||||
}
|
||||
},
|
||||
"toRecipients": [
|
||||
{
|
||||
"emailAddress": {
|
||||
"name": "Direct Report",
|
||||
"address": "notAvailable@8qzvrj.onmicrosoft.com"
|
||||
}
|
||||
}
|
||||
],
|
||||
"flag": {
|
||||
"flagStatus": "notFlagged"
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}`
|
||||
|
||||
message := fmt.Sprintf(
|
||||
template,
|
||||
subject,
|
||||
defaultAlias,
|
||||
defaultMessageSender,
|
||||
defaultMessageFrom,
|
||||
defaultMessageTo,
|
||||
)
|
||||
|
||||
return []byte(message)
|
||||
}
|
||||
|
||||
func GetMockMessageWithNestedItemAttachmentEvent(subject string) []byte {
|
||||
//nolint:lll
|
||||
// Order of fields:
|
||||
@ -545,3 +689,73 @@ func GetMockMessageWithNestedItemAttachmentEvent(subject string) []byte {
|
||||
|
||||
return []byte(message)
|
||||
}
|
||||
|
||||
func GetMockMessageWithNestedItemAttachmentMail(t *testing.T, nested []byte, subject string) []byte {
|
||||
base := GetMockMessageBytes(subject)
|
||||
message, err := hydrateMessage(base)
|
||||
require.NoError(t, err)
|
||||
|
||||
nestedMessage, err := hydrateMessage(nested)
|
||||
require.NoError(t, err)
|
||||
|
||||
iaNode := models.NewItemAttachment()
|
||||
attachmentSize := int32(len(nested))
|
||||
iaNode.SetSize(&attachmentSize)
|
||||
|
||||
internalName := "Nested Message"
|
||||
iaNode.SetName(&internalName)
|
||||
iaNode.SetItem(nestedMessage)
|
||||
message.SetAttachments([]models.Attachmentable{iaNode})
|
||||
|
||||
return serialize(t, message)
|
||||
}
|
||||
|
||||
func GetMockMessageWithNestedItemAttachmentContact(t *testing.T, nested []byte, subject string) []byte {
|
||||
base := GetMockMessageBytes(subject)
|
||||
message, err := hydrateMessage(base)
|
||||
require.NoError(t, err)
|
||||
|
||||
parseNode, err := js.NewJsonParseNodeFactory().GetRootParseNode("application/json", nested)
|
||||
require.NoError(t, err)
|
||||
|
||||
anObject, err := parseNode.GetObjectValue(models.CreateContactFromDiscriminatorValue)
|
||||
require.NoError(t, err)
|
||||
|
||||
contact := anObject.(models.Contactable)
|
||||
internalName := "Nested Contact"
|
||||
iaNode := models.NewItemAttachment()
|
||||
attachmentSize := int32(len(nested))
|
||||
iaNode.SetSize(&attachmentSize)
|
||||
iaNode.SetName(&internalName)
|
||||
iaNode.SetItem(contact)
|
||||
message.SetAttachments([]models.Attachmentable{iaNode})
|
||||
|
||||
return serialize(t, message)
|
||||
}
|
||||
|
||||
func serialize(t *testing.T, item absser.Parsable) []byte {
|
||||
wtr := js.NewJsonSerializationWriter()
|
||||
err := wtr.WriteObjectValue("", item)
|
||||
require.NoError(t, err)
|
||||
|
||||
byteArray, err := wtr.GetSerializedContent()
|
||||
require.NoError(t, err)
|
||||
|
||||
return byteArray
|
||||
}
|
||||
|
||||
func hydrateMessage(byteArray []byte) (models.Messageable, error) {
|
||||
parseNode, err := js.NewJsonParseNodeFactory().GetRootParseNode("application/json", byteArray)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "deserializing bytes into base m365 object")
|
||||
}
|
||||
|
||||
anObject, err := parseNode.GetObjectValue(models.CreateMessageFromDiscriminatorValue)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "parsing m365 object factory")
|
||||
}
|
||||
|
||||
message := anObject.(models.Messageable)
|
||||
|
||||
return message, nil
|
||||
}
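// Illustrative sketch (not part of this change): how the helpers above can be
// combined in a test. GetMockMessageBytes, the nested-attachment builder, and
// hydrateMessage are defined in this file; the test name itself is hypothetical.
func TestNestedMailAttachmentRoundTrip(t *testing.T) {
	// Build an outer message whose attachment is itself a complete message.
	inner := GetMockMessageBytes("inner subject")
	outer := GetMockMessageWithNestedItemAttachmentMail(t, inner, "outer subject")

	// Hydrating the serialized bytes verifies they remain parsable by the
	// kiota JSON factory and that the single item attachment survived.
	msg, err := hydrateMessage(outer)
	require.NoError(t, err)
	require.Len(t, msg.GetAttachments(), 1)
}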
|
||||
|
||||
@ -30,6 +30,7 @@ const pageSize = int32(999)
|
||||
|
||||
type driveItemPager struct {
|
||||
gs graph.Servicer
|
||||
driveID string
|
||||
builder *msdrives.ItemRootDeltaRequestBuilder
|
||||
options *msdrives.ItemRootDeltaRequestBuilderGetRequestConfiguration
|
||||
}
|
||||
@ -49,6 +50,7 @@ func NewItemPager(
|
||||
|
||||
res := &driveItemPager{
|
||||
gs: gs,
|
||||
driveID: driveID,
|
||||
options: requestConfig,
|
||||
builder: gs.Client().DrivesById(driveID).Root().Delta(),
|
||||
}
|
||||
@ -78,6 +80,10 @@ func (p *driveItemPager) SetNext(link string) {
|
||||
p.builder = msdrives.NewItemRootDeltaRequestBuilder(link, p.gs.Adapter())
|
||||
}
|
||||
|
||||
func (p *driveItemPager) Reset() {
|
||||
p.builder = p.gs.Client().DrivesById(p.driveID).Root().Delta()
|
||||
}
|
||||
|
||||
func (p *driveItemPager) ValuesIn(l api.DeltaPageLinker) ([]models.DriveItemable, error) {
|
||||
return getValues[models.DriveItemable](l)
|
||||
}
|
||||
|
||||
@ -42,10 +42,10 @@ const (
|
||||
)
|
||||
|
||||
var (
|
||||
_ data.Collection = &Collection{}
|
||||
_ data.Stream = &Item{}
|
||||
_ data.StreamInfo = &Item{}
|
||||
_ data.StreamModTime = &Item{}
|
||||
_ data.BackupCollection = &Collection{}
|
||||
_ data.Stream = &Item{}
|
||||
_ data.StreamInfo = &Item{}
|
||||
_ data.StreamModTime = &Item{}
|
||||
)
|
||||
|
||||
// Collection represents a set of OneDrive objects retrieved from M365
|
||||
@ -97,17 +97,19 @@ func NewCollection(
|
||||
statusUpdater support.StatusUpdater,
|
||||
source driveSource,
|
||||
ctrlOpts control.Options,
|
||||
doNotMergeItems bool,
|
||||
) *Collection {
|
||||
c := &Collection{
|
||||
itemClient: itemClient,
|
||||
folderPath: folderPath,
|
||||
driveItems: map[string]models.DriveItemable{},
|
||||
driveID: driveID,
|
||||
source: source,
|
||||
service: service,
|
||||
data: make(chan data.Stream, collectionChannelBufferSize),
|
||||
statusUpdater: statusUpdater,
|
||||
ctrl: ctrlOpts,
|
||||
itemClient: itemClient,
|
||||
folderPath: folderPath,
|
||||
driveItems: map[string]models.DriveItemable{},
|
||||
driveID: driveID,
|
||||
source: source,
|
||||
service: service,
|
||||
data: make(chan data.Stream, collectionChannelBufferSize),
|
||||
statusUpdater: statusUpdater,
|
||||
ctrl: ctrlOpts,
|
||||
doNotMergeItems: doNotMergeItems,
|
||||
}
|
||||
|
||||
// Allows tests to set a mock populator
|
||||
@ -278,35 +280,23 @@ func (oc *Collection) populateItems(ctx context.Context) {
|
||||
|
||||
if oc.source == OneDriveSource {
|
||||
// Fetch metadata for the file
|
||||
for i := 1; i <= maxRetries; i++ {
|
||||
if !oc.ctrl.ToggleFeatures.EnablePermissionsBackup {
|
||||
// We are still writing the metadata file but with
|
||||
// empty permissions as we don't have a way to
|
||||
// signify that the permissions were explicitly
|
||||
// not added.
|
||||
itemMeta = io.NopCloser(strings.NewReader("{}"))
|
||||
itemMetaSize = 2
|
||||
if !oc.ctrl.ToggleFeatures.EnablePermissionsBackup {
|
||||
// We are still writing the metadata file but with
|
||||
// empty permissions as we don't have a way to
|
||||
// signify that the permissions were explicitly
|
||||
// not added.
|
||||
itemMeta = io.NopCloser(strings.NewReader("{}"))
|
||||
itemMetaSize = 2
|
||||
} else {
|
||||
err = graph.RunWithRetry(func() error {
|
||||
itemMeta, itemMetaSize, err = oc.itemMetaReader(ctx, oc.service, oc.driveID, item)
|
||||
return err
|
||||
})
|
||||
|
||||
break
|
||||
if err != nil {
|
||||
errUpdater(*item.GetId(), errors.Wrap(err, "failed to get item permissions"))
|
||||
return
|
||||
}
|
||||
|
||||
itemMeta, itemMetaSize, err = oc.itemMetaReader(ctx, oc.service, oc.driveID, item)
|
||||
|
||||
// retry on Timeout type errors, break otherwise.
|
||||
if err == nil ||
|
||||
!graph.IsErrTimeout(err) ||
|
||||
!graph.IsInternalServerError(err) {
|
||||
break
|
||||
}
|
||||
|
||||
if i < maxRetries {
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
errUpdater(*item.GetId(), errors.Wrap(err, "failed to get item permissions"))
|
||||
return
|
||||
}
|
||||
}
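// Illustrative sketch (not part of this change): the toggle-plus-retry pattern
// used above, in isolation. fetchMeta is a hypothetical stand-in for
// oc.itemMetaReader; graph.RunWithRetry is the helper introduced by this change.
func readItemMeta(
	ctx context.Context,
	permissionsBackup bool,
	fetchMeta func(context.Context) (io.ReadCloser, int, error),
) (io.ReadCloser, int, error) {
	if !permissionsBackup {
		// Still write a metadata file, but with empty permissions, since there
		// is no way to mark permissions as intentionally omitted.
		return io.NopCloser(strings.NewReader("{}")), 2, nil
	}

	var (
		meta io.ReadCloser
		size int
		err  error
	)

	// Retry transient Graph failures instead of hand-rolling a backoff loop.
	err = graph.RunWithRetry(func() error {
		meta, size, err = fetchMeta(ctx)
		return err
	})

	return meta, size, err
}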
|
||||
|
||||
|
||||
@ -168,7 +168,8 @@ func (suite *CollectionUnitTestSuite) TestCollection() {
|
||||
suite,
|
||||
suite.testStatusUpdater(&wg, &collStatus),
|
||||
test.source,
|
||||
control.Options{ToggleFeatures: control.Toggles{EnablePermissionsBackup: true}})
|
||||
control.Options{ToggleFeatures: control.Toggles{EnablePermissionsBackup: true}},
|
||||
true)
|
||||
require.NotNil(t, coll)
|
||||
assert.Equal(t, folderPath, coll.FullPath())
|
||||
|
||||
@ -301,7 +302,8 @@ func (suite *CollectionUnitTestSuite) TestCollectionReadError() {
|
||||
suite,
|
||||
suite.testStatusUpdater(&wg, &collStatus),
|
||||
test.source,
|
||||
control.Options{ToggleFeatures: control.Toggles{EnablePermissionsBackup: true}})
|
||||
control.Options{ToggleFeatures: control.Toggles{EnablePermissionsBackup: true}},
|
||||
true)
|
||||
|
||||
mockItem := models.NewDriveItem()
|
||||
mockItem.SetId(&testItemID)
|
||||
@ -372,7 +374,8 @@ func (suite *CollectionUnitTestSuite) TestCollectionDisablePermissionsBackup() {
|
||||
suite,
|
||||
suite.testStatusUpdater(&wg, &collStatus),
|
||||
test.source,
|
||||
control.Options{ToggleFeatures: control.Toggles{}})
|
||||
control.Options{ToggleFeatures: control.Toggles{}},
|
||||
true)
|
||||
|
||||
now := time.Now()
|
||||
mockItem := models.NewDriveItem()
|
||||
|
||||
@ -61,9 +61,9 @@ type Collections struct {
|
||||
|
||||
ctrl control.Options
|
||||
|
||||
// collectionMap allows lookup of the data.Collection
|
||||
// collectionMap allows lookup of the data.BackupCollection
|
||||
// for a OneDrive folder
|
||||
CollectionMap map[string]data.Collection
|
||||
CollectionMap map[string]data.BackupCollection
|
||||
|
||||
// Not the most ideal, but allows us to change the pager function for testing
|
||||
// as needed. This will allow us to mock out some scenarios during testing.
|
||||
@ -100,7 +100,7 @@ func NewCollections(
|
||||
resourceOwner: resourceOwner,
|
||||
source: source,
|
||||
matcher: matcher,
|
||||
CollectionMap: map[string]data.Collection{},
|
||||
CollectionMap: map[string]data.BackupCollection{},
|
||||
drivePagerFunc: PagerForSource,
|
||||
itemPagerFunc: defaultItemPager,
|
||||
service: service,
|
||||
@ -111,7 +111,7 @@ func NewCollections(
|
||||
|
||||
func deserializeMetadata(
|
||||
ctx context.Context,
|
||||
cols []data.Collection,
|
||||
cols []data.RestoreCollection,
|
||||
) (map[string]string, map[string]map[string]string, error) {
|
||||
logger.Ctx(ctx).Infow(
|
||||
"deserialzing previous backup metadata",
|
||||
@ -249,9 +249,9 @@ func deserializeMap[T any](reader io.ReadCloser, alreadyFound map[string]T) erro
|
||||
// be excluded from the upcoming backup.
|
||||
func (c *Collections) Get(
|
||||
ctx context.Context,
|
||||
prevMetadata []data.Collection,
|
||||
) ([]data.Collection, map[string]struct{}, error) {
|
||||
_, _, err := deserializeMetadata(ctx, prevMetadata)
|
||||
prevMetadata []data.RestoreCollection,
|
||||
) ([]data.BackupCollection, map[string]struct{}, error) {
|
||||
prevDeltas, _, err := deserializeMetadata(ctx, prevMetadata)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
@ -287,6 +287,8 @@ func (c *Collections) Get(
|
||||
driveID := *d.GetId()
|
||||
driveName := *d.GetName()
|
||||
|
||||
prevDelta := prevDeltas[driveID]
|
||||
|
||||
delta, paths, excluded, err := collectItems(
|
||||
ctx,
|
||||
c.itemPagerFunc(
|
||||
@ -297,6 +299,7 @@ func (c *Collections) Get(
|
||||
driveID,
|
||||
driveName,
|
||||
c.UpdateCollections,
|
||||
prevDelta,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
@ -307,8 +310,8 @@ func (c *Collections) Get(
|
||||
// remove entries for which there is no corresponding delta token/folder. If
|
||||
// we leave empty delta tokens then we may end up setting the State field
|
||||
// for collections when not actually getting delta results.
|
||||
if len(delta) > 0 {
|
||||
deltaURLs[driveID] = delta
|
||||
if len(delta.URL) > 0 {
|
||||
deltaURLs[driveID] = delta.URL
|
||||
}
|
||||
|
||||
// Avoid the edge case where there's no paths but we do have a valid delta
|
||||
@ -324,7 +327,7 @@ func (c *Collections) Get(
|
||||
observe.Message(ctx, observe.Safe(fmt.Sprintf("Discovered %d items to backup", c.NumItems)))
|
||||
|
||||
// Add an extra for the metadata collection.
|
||||
collections := make([]data.Collection, 0, len(c.CollectionMap)+1)
|
||||
collections := make([]data.BackupCollection, 0, len(c.CollectionMap)+1)
|
||||
for _, coll := range c.CollectionMap {
|
||||
collections = append(collections, coll)
|
||||
}
|
||||
@ -356,7 +359,7 @@ func (c *Collections) Get(
|
||||
}
|
||||
|
||||
// TODO(ashmrtn): Track and return the set of items to exclude.
|
||||
return collections, nil, nil
|
||||
return collections, excludedItems, nil
|
||||
}
|
||||
|
||||
// UpdateCollections initializes and adds the provided drive items to Collections
|
||||
@ -371,6 +374,7 @@ func (c *Collections) UpdateCollections(
|
||||
oldPaths map[string]string,
|
||||
newPaths map[string]string,
|
||||
excluded map[string]struct{},
|
||||
invalidPrevDelta bool,
|
||||
) error {
|
||||
for _, item := range items {
|
||||
if item.GetRoot() != nil {
|
||||
@ -462,7 +466,9 @@ func (c *Collections) UpdateCollections(
|
||||
c.service,
|
||||
c.statusUpdater,
|
||||
c.source,
|
||||
c.ctrl)
|
||||
c.ctrl,
|
||||
invalidPrevDelta,
|
||||
)
|
||||
|
||||
c.CollectionMap[collectionPath.String()] = col
|
||||
c.NumContainers++
|
||||
|
||||
@ -7,6 +7,7 @@ import (
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/microsoftgraph/msgraph-sdk-go/models"
|
||||
"github.com/microsoftgraph/msgraph-sdk-go/models/odataerrors"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/stretchr/testify/suite"
|
||||
@ -645,6 +646,7 @@ func (suite *OneDriveCollectionsSuite) TestUpdateCollections() {
|
||||
tt.inputFolderMap,
|
||||
outputFolderMap,
|
||||
excludes,
|
||||
false,
|
||||
)
|
||||
tt.expect(t, err)
|
||||
assert.Equal(t, len(tt.expectedCollectionPaths), len(c.CollectionMap), "collection paths")
|
||||
@ -981,7 +983,7 @@ func (suite *OneDriveCollectionsSuite) TestDeserializeMetadata() {
|
||||
ctx, flush := tester.NewContext()
|
||||
defer flush()
|
||||
|
||||
cols := []data.Collection{}
|
||||
cols := []data.RestoreCollection{}
|
||||
|
||||
for _, c := range test.cols {
|
||||
mc, err := graph.MakeMetadataCollection(
|
||||
@ -994,7 +996,7 @@ func (suite *OneDriveCollectionsSuite) TestDeserializeMetadata() {
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
cols = append(cols, mc)
|
||||
cols = append(cols, data.NotFoundRestoreCollection{Collection: mc})
|
||||
}
|
||||
|
||||
deltas, paths, err := deserializeMetadata(ctx, cols)
|
||||
@ -1047,6 +1049,7 @@ func (p *mockItemPager) GetPage(context.Context) (gapi.DeltaPageLinker, error) {
|
||||
}
|
||||
|
||||
func (p *mockItemPager) SetNext(string) {}
|
||||
func (p *mockItemPager) Reset() {}
|
||||
|
||||
func (p *mockItemPager) ValuesIn(gapi.DeltaPageLinker) ([]models.DriveItemable, error) {
|
||||
idx := p.getIdx
|
||||
@ -1131,6 +1134,7 @@ func (suite *OneDriveCollectionsSuite) TestGet() {
|
||||
expectedDeltaURLs map[string]string
|
||||
expectedFolderPaths map[string]map[string]string
|
||||
expectedDelList map[string]struct{}
|
||||
doNotMergeItems bool
|
||||
}{
|
||||
{
|
||||
name: "OneDrive_OneItemPage_DelFileOnly_NoFolders_NoErrors",
|
||||
@ -1342,6 +1346,135 @@ func (suite *OneDriveCollectionsSuite) TestGet() {
|
||||
expectedFolderPaths: nil,
|
||||
expectedDelList: nil,
|
||||
},
|
||||
{
|
||||
name: "OneDrive_OneItemPage_DeltaError",
|
||||
drives: []models.Driveable{drive1},
|
||||
items: map[string][]deltaPagerResult{
|
||||
driveID1: {
|
||||
{
|
||||
err: getDeltaError(),
|
||||
},
|
||||
{
|
||||
items: []models.DriveItemable{
|
||||
driveItem("file", "file", testBaseDrivePath, true, false, false),
|
||||
},
|
||||
deltaLink: &delta,
|
||||
},
|
||||
},
|
||||
},
|
||||
errCheck: assert.NoError,
|
||||
expectedCollections: map[string][]string{
|
||||
expectedPathAsSlice(
|
||||
suite.T(),
|
||||
tenant,
|
||||
user,
|
||||
testBaseDrivePath,
|
||||
)[0]: {"file"},
|
||||
},
|
||||
expectedDeltaURLs: map[string]string{
|
||||
driveID1: delta,
|
||||
},
|
||||
expectedFolderPaths: map[string]map[string]string{
|
||||
// We need an empty map here so deserializing metadata knows the delta
|
||||
// token for this drive is valid.
|
||||
driveID1: {},
|
||||
},
|
||||
expectedDelList: map[string]struct{}{},
|
||||
doNotMergeItems: true,
|
||||
},
|
||||
{
|
||||
name: "OneDrive_MultipleCollections_DeltaError",
|
||||
drives: []models.Driveable{drive1},
|
||||
items: map[string][]deltaPagerResult{
|
||||
driveID1: {
|
||||
{
|
||||
err: getDeltaError(),
|
||||
},
|
||||
{
|
||||
items: []models.DriveItemable{
|
||||
driveItem("file", "file", testBaseDrivePath, true, false, false),
|
||||
},
|
||||
nextLink: &next,
|
||||
},
|
||||
{
|
||||
items: []models.DriveItemable{
|
||||
driveItem("file", "file", testBaseDrivePath+"/folder", true, false, false),
|
||||
},
|
||||
deltaLink: &delta,
|
||||
},
|
||||
},
|
||||
},
|
||||
errCheck: assert.NoError,
|
||||
expectedCollections: map[string][]string{
|
||||
expectedPathAsSlice(
|
||||
suite.T(),
|
||||
tenant,
|
||||
user,
|
||||
testBaseDrivePath,
|
||||
)[0]: {"file"},
|
||||
expectedPathAsSlice(
|
||||
suite.T(),
|
||||
tenant,
|
||||
user,
|
||||
testBaseDrivePath+"/folder",
|
||||
)[0]: {"file"},
|
||||
},
|
||||
expectedDeltaURLs: map[string]string{
|
||||
driveID1: delta,
|
||||
},
|
||||
expectedFolderPaths: map[string]map[string]string{
|
||||
// We need an empty map here so deserializing metadata knows the delta
|
||||
// token for this drive is valid.
|
||||
driveID1: {},
|
||||
},
|
||||
expectedDelList: map[string]struct{}{},
|
||||
doNotMergeItems: true,
|
||||
},
|
||||
{
|
||||
name: "OneDrive_MultipleCollections_NoDeltaError",
|
||||
drives: []models.Driveable{drive1},
|
||||
items: map[string][]deltaPagerResult{
|
||||
driveID1: {
|
||||
{
|
||||
items: []models.DriveItemable{
|
||||
driveItem("file", "file", testBaseDrivePath, true, false, false),
|
||||
},
|
||||
nextLink: &next,
|
||||
},
|
||||
{
|
||||
items: []models.DriveItemable{
|
||||
driveItem("file", "file", testBaseDrivePath+"/folder", true, false, false),
|
||||
},
|
||||
deltaLink: &delta,
|
||||
},
|
||||
},
|
||||
},
|
||||
errCheck: assert.NoError,
|
||||
expectedCollections: map[string][]string{
|
||||
expectedPathAsSlice(
|
||||
suite.T(),
|
||||
tenant,
|
||||
user,
|
||||
testBaseDrivePath,
|
||||
)[0]: {"file"},
|
||||
expectedPathAsSlice(
|
||||
suite.T(),
|
||||
tenant,
|
||||
user,
|
||||
testBaseDrivePath+"/folder",
|
||||
)[0]: {"file"},
|
||||
},
|
||||
expectedDeltaURLs: map[string]string{
|
||||
driveID1: delta,
|
||||
},
|
||||
expectedFolderPaths: map[string]map[string]string{
|
||||
// We need an empty map here so deserializing metadata knows the delta
|
||||
// token for this drive is valid.
|
||||
driveID1: {},
|
||||
},
|
||||
expectedDelList: map[string]struct{}{},
|
||||
doNotMergeItems: false,
|
||||
},
|
||||
}
|
||||
for _, test := range table {
|
||||
suite.T().Run(test.name, func(t *testing.T) {
|
||||
@ -1386,7 +1519,7 @@ func (suite *OneDriveCollectionsSuite) TestGet() {
|
||||
c.itemPagerFunc = itemPagerFunc
|
||||
|
||||
// TODO(ashmrtn): Allow passing previous metadata.
|
||||
cols, _, err := c.Get(ctx, nil)
|
||||
cols, delList, err := c.Get(ctx, nil)
|
||||
test.errCheck(t, err)
|
||||
|
||||
if err != nil {
|
||||
@ -1396,7 +1529,9 @@ func (suite *OneDriveCollectionsSuite) TestGet() {
|
||||
for _, baseCol := range cols {
|
||||
folderPath := baseCol.FullPath().String()
|
||||
if folderPath == metadataPath.String() {
|
||||
deltas, paths, err := deserializeMetadata(ctx, []data.Collection{baseCol})
|
||||
deltas, paths, err := deserializeMetadata(ctx, []data.RestoreCollection{
|
||||
data.NotFoundRestoreCollection{Collection: baseCol},
|
||||
})
|
||||
if !assert.NoError(t, err, "deserializing metadata") {
|
||||
continue
|
||||
}
|
||||
@ -1421,11 +1556,10 @@ func (suite *OneDriveCollectionsSuite) TestGet() {
|
||||
}
|
||||
|
||||
assert.ElementsMatch(t, test.expectedCollections[folderPath], itemIDs)
|
||||
assert.Equal(t, test.doNotMergeItems, baseCol.DoNotMergeItems(), "DoNotMergeItems")
|
||||
}
|
||||
|
||||
// TODO(ashmrtn): Uncomment this when we begin return the set of items to
|
||||
// remove from the upcoming backup.
|
||||
// assert.Equal(t, test.expectedDelList, delList)
|
||||
assert.Equal(t, test.expectedDelList, delList)
|
||||
})
|
||||
}
|
||||
}
|
||||
@ -1482,3 +1616,98 @@ func delItem(
|
||||
|
||||
return item
|
||||
}
|
||||
|
||||
func getDeltaError() error {
|
||||
syncStateNotFound := "SyncStateNotFound" // TODO(meain): export graph.errCodeSyncStateNotFound
|
||||
me := odataerrors.NewMainError()
|
||||
me.SetCode(&syncStateNotFound)
|
||||
|
||||
deltaError := odataerrors.NewODataError()
|
||||
deltaError.SetError(me)
|
||||
|
||||
return deltaError
|
||||
}
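// Illustrative sketch (not part of this change): how a pager loop recovers from
// the SyncStateNotFound error fabricated by getDeltaError above. itemPager and
// graph.IsErrInvalidDelta are the production types/helpers used by collectItems;
// the wrapper function itself is hypothetical.
func getPageWithReset(ctx context.Context, pager itemPager) (gapi.DeltaPageLinker, error) {
	page, err := pager.GetPage(ctx)
	if graph.IsErrInvalidDelta(err) {
		// The stored delta token is no longer valid; drop it and re-enumerate
		// the drive from the beginning.
		pager.Reset()

		return pager.GetPage(ctx)
	}

	return page, err
}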
|
||||
|
||||
func (suite *OneDriveCollectionsSuite) TestCollectItems() {
|
||||
next := "next"
|
||||
delta := "delta"
|
||||
|
||||
table := []struct {
|
||||
name string
|
||||
items []deltaPagerResult
|
||||
deltaURL string
|
||||
prevDeltaSuccess bool
|
||||
err error
|
||||
}{
|
||||
{
|
||||
name: "delta on first run",
|
||||
deltaURL: delta,
|
||||
items: []deltaPagerResult{
|
||||
{deltaLink: &delta},
|
||||
},
|
||||
prevDeltaSuccess: true,
|
||||
},
|
||||
{
|
||||
name: "next then delta",
|
||||
deltaURL: delta,
|
||||
items: []deltaPagerResult{
|
||||
{nextLink: &next},
|
||||
{deltaLink: &delta},
|
||||
},
|
||||
prevDeltaSuccess: true,
|
||||
},
|
||||
{
|
||||
name: "invalid prev delta",
|
||||
deltaURL: delta,
|
||||
items: []deltaPagerResult{
|
||||
{err: getDeltaError()},
|
||||
{deltaLink: &delta}, // works on retry
|
||||
},
|
||||
prevDeltaSuccess: false,
|
||||
},
|
||||
{
|
||||
name: "fail a normal delta query",
|
||||
items: []deltaPagerResult{
|
||||
{nextLink: &next},
|
||||
{err: assert.AnError},
|
||||
},
|
||||
prevDeltaSuccess: true,
|
||||
err: assert.AnError,
|
||||
},
|
||||
}
|
||||
for _, test := range table {
|
||||
suite.T().Run(test.name, func(t *testing.T) {
|
||||
ctx, flush := tester.NewContext()
|
||||
defer flush()
|
||||
|
||||
itemPager := &mockItemPager{
|
||||
toReturn: test.items,
|
||||
}
|
||||
|
||||
collectorFunc := func(
|
||||
ctx context.Context,
|
||||
driveID, driveName string,
|
||||
driveItems []models.DriveItemable,
|
||||
oldPaths map[string]string,
|
||||
newPaths map[string]string,
|
||||
excluded map[string]struct{},
|
||||
doNotMergeItems bool,
|
||||
) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
delta, _, _, err := collectItems(
|
||||
ctx,
|
||||
itemPager,
|
||||
"",
|
||||
"General",
|
||||
collectorFunc,
|
||||
"",
|
||||
)
|
||||
|
||||
require.ErrorIs(suite.T(), err, test.err, "delta fetch err")
|
||||
require.Equal(suite.T(), test.deltaURL, delta.URL, "delta url")
|
||||
require.Equal(suite.T(), !test.prevDeltaSuccess, delta.Reset, "delta reset")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@ -35,6 +35,17 @@ const (
|
||||
contextDeadlineExceeded = "context deadline exceeded"
|
||||
)
|
||||
|
||||
// DeltaUpdate holds the results of a current delta token. It normally
|
||||
// gets produced when aggregating the addition and removal of items in
|
||||
// a delta-queriable folder.
|
||||
// FIXME: This is the same as exchange.api.DeltaUpdate
|
||||
type DeltaUpdate struct {
|
||||
// the deltaLink itself
|
||||
URL string
|
||||
// true if the old delta was marked as invalid
|
||||
Reset bool
|
||||
}
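// Illustrative sketch (not part of this change): how a caller can consume a
// DeltaUpdate. recordDelta and its arguments are hypothetical; the logic mirrors
// the handling in Collections.Get.
func recordDelta(deltaURLs map[string]string, driveID string, du DeltaUpdate) bool {
	// Only record non-empty delta links so that drives without delta results
	// don't end up with misleading entries.
	if len(du.URL) > 0 {
		deltaURLs[driveID] = du.URL
	}

	// Reset reports that the previous token was rejected and the drive was
	// re-enumerated, so previously merged items cannot be trusted.
	return du.Reset
}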
|
||||
|
||||
type drivePager interface {
|
||||
GetPage(context.Context) (gapi.PageLinker, error)
|
||||
SetNext(nextLink string)
|
||||
@ -132,11 +143,13 @@ type itemCollector func(
|
||||
oldPaths map[string]string,
|
||||
newPaths map[string]string,
|
||||
excluded map[string]struct{},
|
||||
validPrevDelta bool,
|
||||
) error
|
||||
|
||||
type itemPager interface {
|
||||
GetPage(context.Context) (gapi.DeltaPageLinker, error)
|
||||
SetNext(nextLink string)
|
||||
Reset()
|
||||
ValuesIn(gapi.DeltaPageLinker) ([]models.DriveItemable, error)
|
||||
}
|
||||
|
||||
@ -172,22 +185,39 @@ func collectItems(
|
||||
pager itemPager,
|
||||
driveID, driveName string,
|
||||
collector itemCollector,
|
||||
) (string, map[string]string, map[string]struct{}, error) {
|
||||
prevDelta string,
|
||||
) (DeltaUpdate, map[string]string, map[string]struct{}, error) {
|
||||
var (
|
||||
newDeltaURL = ""
|
||||
// TODO(ashmrtn): Eventually this should probably be a parameter so we can
|
||||
// take in previous paths.
|
||||
oldPaths = map[string]string{}
|
||||
newPaths = map[string]string{}
|
||||
excluded = map[string]struct{}{}
|
||||
oldPaths = map[string]string{}
|
||||
newPaths = map[string]string{}
|
||||
excluded = map[string]struct{}{}
|
||||
invalidPrevDelta = false
|
||||
)
|
||||
|
||||
maps.Copy(newPaths, oldPaths)
|
||||
|
||||
if len(prevDelta) != 0 {
|
||||
pager.SetNext(prevDelta)
|
||||
}
|
||||
|
||||
for {
|
||||
page, err := pager.GetPage(ctx)
|
||||
|
||||
if graph.IsErrInvalidDelta(err) {
|
||||
logger.Ctx(ctx).Infow("Invalid previous delta link", "link", prevDelta)
|
||||
|
||||
invalidPrevDelta = true
|
||||
|
||||
pager.Reset()
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return "", nil, nil, errors.Wrapf(
|
||||
return DeltaUpdate{}, nil, nil, errors.Wrapf(
|
||||
err,
|
||||
"failed to query drive items. details: %s",
|
||||
support.ConnectorStackErrorTrace(err),
|
||||
@ -196,12 +226,12 @@ func collectItems(
|
||||
|
||||
vals, err := pager.ValuesIn(page)
|
||||
if err != nil {
|
||||
return "", nil, nil, errors.Wrap(err, "extracting items from response")
|
||||
return DeltaUpdate{}, nil, nil, errors.Wrap(err, "extracting items from response")
|
||||
}
|
||||
|
||||
err = collector(ctx, driveID, driveName, vals, oldPaths, newPaths, excluded)
|
||||
err = collector(ctx, driveID, driveName, vals, oldPaths, newPaths, excluded, invalidPrevDelta)
|
||||
if err != nil {
|
||||
return "", nil, nil, err
|
||||
return DeltaUpdate{}, nil, nil, err
|
||||
}
|
||||
|
||||
nextLink, deltaLink := gapi.NextAndDeltaLink(page)
|
||||
@ -219,7 +249,7 @@ func collectItems(
|
||||
pager.SetNext(nextLink)
|
||||
}
|
||||
|
||||
return newDeltaURL, newPaths, excluded, nil
|
||||
return DeltaUpdate{URL: newDeltaURL, Reset: invalidPrevDelta}, newPaths, excluded, nil
|
||||
}
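// Illustrative sketch (not part of this change): threading a previous delta
// token through collectItems and reacting to the returned DeltaUpdate. The
// wrapper and its pager/collector arguments are hypothetical.
func collectDrive(
	ctx context.Context,
	pager itemPager,
	driveID, driveName, prevDelta string,
	collector itemCollector,
) (DeltaUpdate, error) {
	du, _, _, err := collectItems(ctx, pager, driveID, driveName, collector, prevDelta)
	if err != nil {
		return DeltaUpdate{}, err
	}

	// du.Reset signals that prevDelta was invalid and the drive was walked from
	// scratch; callers should then skip merging with prior backup state.
	return du, nil
}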
|
||||
|
||||
// getFolder will lookup the specified folder name under `parentFolderID`
|
||||
@ -351,6 +381,7 @@ func GetAllFolders(
|
||||
oldPaths map[string]string,
|
||||
newPaths map[string]string,
|
||||
excluded map[string]struct{},
|
||||
doNotMergeItems bool,
|
||||
) error {
|
||||
for _, item := range items {
|
||||
// Skip the root item.
|
||||
@ -379,6 +410,7 @@ func GetAllFolders(
|
||||
|
||||
return nil
|
||||
},
|
||||
"",
|
||||
)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "getting items for drive %s", *d.GetName())
|
||||
|
||||
@ -106,6 +106,7 @@ func (suite *ItemIntegrationSuite) TestItemReader_oneDrive() {
|
||||
oldPaths map[string]string,
|
||||
newPaths map[string]string,
|
||||
excluded map[string]struct{},
|
||||
doNotMergeItems bool,
|
||||
) error {
|
||||
for _, item := range items {
|
||||
if item.GetFile() != nil {
|
||||
@ -126,6 +127,7 @@ func (suite *ItemIntegrationSuite) TestItemReader_oneDrive() {
|
||||
suite.userDriveID,
|
||||
"General",
|
||||
itemCollector,
|
||||
"",
|
||||
)
|
||||
require.NoError(suite.T(), err)
|
||||
|
||||
|
||||
@ -9,6 +9,7 @@ import (
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/alcionai/clues"
|
||||
msdrive "github.com/microsoftgraph/msgraph-sdk-go/drive"
|
||||
"github.com/microsoftgraph/msgraph-sdk-go/models"
|
||||
"github.com/pkg/errors"
|
||||
@ -64,7 +65,7 @@ func RestoreCollections(
|
||||
service graph.Servicer,
|
||||
dest control.RestoreDestination,
|
||||
opts control.Options,
|
||||
dcs []data.Collection,
|
||||
dcs []data.RestoreCollection,
|
||||
deets *details.Builder,
|
||||
) (*support.ConnectorOperationStatus, error) {
|
||||
var (
|
||||
@ -148,7 +149,7 @@ func RestoreCollection(
|
||||
ctx context.Context,
|
||||
backupVersion int,
|
||||
service graph.Servicer,
|
||||
dc data.Collection,
|
||||
dc data.RestoreCollection,
|
||||
parentPerms []UserPermission,
|
||||
source driveSource,
|
||||
restoreContainerName string,
|
||||
@ -164,7 +165,6 @@ func RestoreCollection(
|
||||
metrics = support.CollectionMetrics{}
|
||||
copyBuffer = make([]byte, copyBufferSize)
|
||||
directory = dc.FullPath()
|
||||
restoredIDs = map[string]string{}
|
||||
itemInfo details.ItemInfo
|
||||
itemID string
|
||||
folderPerms = map[string][]UserPermission{}
|
||||
@ -226,37 +226,44 @@ func RestoreCollection(
|
||||
metrics.TotalBytes += int64(len(copyBuffer))
|
||||
trimmedName := strings.TrimSuffix(name, DataFileSuffix)
|
||||
|
||||
itemID, itemInfo, err = restoreData(ctx, service, trimmedName, itemData,
|
||||
drivePath.DriveID, restoreFolderID, copyBuffer, source)
|
||||
itemID, itemInfo, err = restoreData(
|
||||
ctx,
|
||||
service,
|
||||
trimmedName,
|
||||
itemData,
|
||||
drivePath.DriveID,
|
||||
restoreFolderID,
|
||||
copyBuffer,
|
||||
source)
|
||||
if err != nil {
|
||||
errUpdater(itemData.UUID(), err)
|
||||
continue
|
||||
}
|
||||
|
||||
restoredIDs[trimmedName] = itemID
|
||||
|
||||
deets.Add(itemPath.String(), itemPath.ShortRef(), "", true, itemInfo)
|
||||
|
||||
// Mark it as success without processing .meta
|
||||
// file if we are not restoring permissions
|
||||
if !restorePerms {
|
||||
metrics.Successes++
|
||||
}
|
||||
} else if strings.HasSuffix(name, MetaFileSuffix) {
|
||||
if !restorePerms {
|
||||
continue
|
||||
}
|
||||
|
||||
meta, err := getMetadata(itemData.ToReader())
|
||||
// Fetch item permissions from the collection and restore them.
|
||||
metaName := trimmedName + MetaFileSuffix
|
||||
|
||||
permsFile, err := dc.Fetch(ctx, metaName)
|
||||
if err != nil {
|
||||
errUpdater(itemData.UUID(), err)
|
||||
errUpdater(metaName, clues.Wrap(err, "getting item metadata"))
|
||||
continue
|
||||
}
|
||||
|
||||
trimmedName := strings.TrimSuffix(name, MetaFileSuffix)
|
||||
restoreID, ok := restoredIDs[trimmedName]
|
||||
if !ok {
|
||||
errUpdater(itemData.UUID(), fmt.Errorf("item not available to restore permissions"))
|
||||
metaReader := permsFile.ToReader()
|
||||
meta, err := getMetadata(metaReader)
|
||||
metaReader.Close()
|
||||
|
||||
if err != nil {
|
||||
errUpdater(metaName, clues.Wrap(err, "deserializing item metadata"))
|
||||
continue
|
||||
}
|
||||
|
||||
@ -264,21 +271,22 @@ func RestoreCollection(
|
||||
ctx,
|
||||
service,
|
||||
drivePath.DriveID,
|
||||
restoreID,
|
||||
itemID,
|
||||
parentPerms,
|
||||
meta.Permissions,
|
||||
permissionIDMappings,
|
||||
)
|
||||
if err != nil {
|
||||
errUpdater(itemData.UUID(), err)
|
||||
errUpdater(trimmedName, clues.Wrap(err, "restoring item permissions"))
|
||||
continue
|
||||
}
|
||||
|
||||
// Objects count is incremented when we restore a
|
||||
// data file and success count is incremented when
|
||||
// we restore a meta file as every data file
|
||||
// should have an associated meta file
|
||||
metrics.Successes++
|
||||
} else if strings.HasSuffix(name, MetaFileSuffix) {
|
||||
// Just skip this for the moment since we moved the code to the above
|
||||
// item restore path. We haven't yet stopped fetching these items in
|
||||
// RestoreOp, so we still need to handle them in some way.
|
||||
continue
|
||||
} else if strings.HasSuffix(name, DirMetaFileSuffix) {
|
||||
trimmedName := strings.TrimSuffix(name, DirMetaFileSuffix)
|
||||
folderID, err := createRestoreFolder(
|
||||
|
||||
@ -4,3 +4,5 @@ type Tuple struct {
|
||||
Name string
|
||||
ID string
|
||||
}
|
||||
|
||||
const fetchChannelSize = 5
|
||||
|
||||
@ -1,15 +1,16 @@
|
||||
package api
|
||||
package api_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/alcionai/corso/src/internal/connector/discovery/api"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
discover "github.com/alcionai/corso/src/internal/connector/discovery/api"
|
||||
"github.com/alcionai/corso/src/internal/connector/graph"
|
||||
"github.com/alcionai/corso/src/pkg/account"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func createTestBetaService(t *testing.T, credentials account.M365Config) *api.BetaService {
|
||||
func createTestBetaService(t *testing.T, credentials account.M365Config) *discover.BetaService {
|
||||
adapter, err := graph.CreateAdapter(
|
||||
credentials.AzureTenantID,
|
||||
credentials.AzureClientID,
|
||||
@ -17,5 +18,5 @@ func createTestBetaService(t *testing.T, credentials account.M365Config) *api.Be
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
return api.NewBetaService(adapter)
|
||||
return discover.NewBetaService(adapter)
|
||||
}
|
||||
|
||||
@ -2,46 +2,100 @@ package api
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/alcionai/corso/src/internal/connector/discovery/api"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
discover "github.com/alcionai/corso/src/internal/connector/discovery/api"
|
||||
"github.com/alcionai/corso/src/internal/connector/graph"
|
||||
"github.com/alcionai/corso/src/internal/connector/graph/betasdk/models"
|
||||
"github.com/alcionai/corso/src/internal/connector/graph/betasdk/sites"
|
||||
"github.com/alcionai/corso/src/internal/connector/support"
|
||||
"github.com/alcionai/corso/src/internal/data"
|
||||
D "github.com/alcionai/corso/src/internal/diagnostics"
|
||||
"github.com/alcionai/corso/src/pkg/backup/details"
|
||||
)
|
||||
|
||||
// GetSitePages retrieves a collection of Pages related to the given Site.
|
||||
// Returns an error if one is encountered during the call.
|
||||
func GetSitePage(
|
||||
func GetSitePages(
|
||||
ctx context.Context,
|
||||
serv *api.BetaService,
|
||||
serv *discover.BetaService,
|
||||
siteID string,
|
||||
pages []string,
|
||||
) ([]models.SitePageable, error) {
|
||||
col := make([]models.SitePageable, 0)
|
||||
opts := retrieveSitePageOptions()
|
||||
var (
|
||||
col = make([]models.SitePageable, 0)
|
||||
semaphoreCh = make(chan struct{}, fetchChannelSize)
|
||||
opts = retrieveSitePageOptions()
|
||||
err, errs error
|
||||
wg sync.WaitGroup
|
||||
m sync.Mutex
|
||||
)
|
||||
|
||||
defer close(semaphoreCh)
|
||||
|
||||
errUpdater := func(id string, err error) {
|
||||
m.Lock()
|
||||
errs = support.WrapAndAppend(id, err, errs)
|
||||
m.Unlock()
|
||||
}
|
||||
updatePages := func(page models.SitePageable) {
|
||||
m.Lock()
|
||||
col = append(col, page)
|
||||
m.Unlock()
|
||||
}
|
||||
|
||||
for _, entry := range pages {
|
||||
page, err := serv.Client().SitesById(siteID).PagesById(entry).Get(ctx, opts)
|
||||
if err != nil {
|
||||
return nil, support.ConnectorStackErrorTraceWrap(err, "fetching page: "+entry)
|
||||
}
|
||||
semaphoreCh <- struct{}{}
|
||||
|
||||
col = append(col, page)
|
||||
wg.Add(1)
|
||||
|
||||
go func(pageID string) {
|
||||
defer wg.Done()
|
||||
defer func() { <-semaphoreCh }()
|
||||
|
||||
var page models.SitePageable
|
||||
|
||||
err = graph.RunWithRetry(func() error {
|
||||
page, err = serv.Client().SitesById(siteID).PagesById(pageID).Get(ctx, opts)
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
errUpdater(pageID, errors.Wrap(err, support.ConnectorStackErrorTrace(err)+" fetching page"))
|
||||
} else {
|
||||
updatePages(page)
|
||||
}
|
||||
}(entry)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
if errs != nil {
|
||||
return nil, errs
|
||||
}
|
||||
|
||||
return col, nil
|
||||
}
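// Illustrative sketch (not part of this change): the bounded fan-out pattern
// used by GetSitePages, in isolation. fetchOne is a hypothetical stand-in for
// the per-page Graph call; fetchChannelSize and support.WrapAndAppend are the
// same values used above.
func fetchConcurrently(ctx context.Context, ids []string, fetchOne func(context.Context, string) error) error {
	var (
		wg          sync.WaitGroup
		mu          sync.Mutex
		errs        error
		semaphoreCh = make(chan struct{}, fetchChannelSize)
	)

	defer close(semaphoreCh)

	for _, id := range ids {
		// Block here once fetchChannelSize goroutines are already in flight.
		semaphoreCh <- struct{}{}

		wg.Add(1)

		go func(id string) {
			defer wg.Done()
			defer func() { <-semaphoreCh }()

			if err := fetchOne(ctx, id); err != nil {
				mu.Lock()
				errs = support.WrapAndAppend(id, err, errs)
				mu.Unlock()
			}
		}(id)
	}

	wg.Wait()

	return errs
}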
|
||||
|
||||
// FetchPages is a utility function that returns the name/ID Tuple for each page on the site.
|
||||
func FetchPages(ctx context.Context, bs *api.BetaService, siteID string) ([]Tuple, error) {
|
||||
func FetchPages(ctx context.Context, bs *discover.BetaService, siteID string) ([]Tuple, error) {
|
||||
var (
|
||||
builder = bs.Client().SitesById(siteID).Pages()
|
||||
opts = fetchPageOptions()
|
||||
pageTuples = make([]Tuple, 0)
|
||||
resp models.SitePageCollectionResponseable
|
||||
err error
|
||||
)
|
||||
|
||||
for {
|
||||
resp, err := builder.Get(ctx, opts)
|
||||
err = graph.RunWithRetry(func() error {
|
||||
resp, err = builder.Get(ctx, opts)
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
return nil, support.ConnectorStackErrorTraceWrap(err, "failed fetching site page")
|
||||
}
|
||||
@ -80,6 +134,21 @@ func fetchPageOptions() *sites.ItemPagesRequestBuilderGetRequestConfiguration {
|
||||
return options
|
||||
}
|
||||
|
||||
// DeleteSitePage removes the selected page from the SharePoint Site
|
||||
// https://learn.microsoft.com/en-us/graph/api/sitepage-delete?view=graph-rest-beta
|
||||
func DeleteSitePage(
|
||||
ctx context.Context,
|
||||
serv *discover.BetaService,
|
||||
siteID, pageID string,
|
||||
) error {
|
||||
err := serv.Client().SitesById(siteID).PagesById(pageID).Delete(ctx, nil)
|
||||
if err != nil {
|
||||
return support.ConnectorStackErrorTraceWrap(err, "deleting page: "+pageID)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// retrieveSitePageOptions returns options that expand the page's canvasLayout
|
||||
func retrieveSitePageOptions() *sites.ItemPagesSitePageItemRequestBuilderGetRequestConfiguration {
|
||||
fields := []string{"canvasLayout"}
|
||||
@ -91,3 +160,113 @@ func retrieveSitePageOptions() *sites.ItemPagesSitePageItemRequestBuilderGetRequ
|
||||
|
||||
return options
|
||||
}
|
||||
|
||||
func RestoreSitePage(
|
||||
ctx context.Context,
|
||||
service *discover.BetaService,
|
||||
itemData data.Stream,
|
||||
siteID, destName string,
|
||||
) (details.ItemInfo, error) {
|
||||
ctx, end := D.Span(ctx, "gc:sharepoint:restorePage", D.Label("item_uuid", itemData.UUID()))
|
||||
defer end()
|
||||
|
||||
var (
|
||||
dii = details.ItemInfo{}
|
||||
pageID = itemData.UUID()
|
||||
pageName = pageID
|
||||
)
|
||||
|
||||
byteArray, err := io.ReadAll(itemData.ToReader())
|
||||
if err != nil {
|
||||
return dii, errors.Wrap(err, "reading sharepoint page bytes from stream")
|
||||
}
|
||||
|
||||
// Hydrate Page
|
||||
page, err := support.CreatePageFromBytes(byteArray)
|
||||
if err != nil {
|
||||
return dii, errors.Wrapf(err, "creating Page object %s", pageID)
|
||||
}
|
||||
|
||||
pageNamePtr := page.GetName()
|
||||
if pageNamePtr != nil {
|
||||
pageName = *pageNamePtr
|
||||
}
|
||||
|
||||
newName := fmt.Sprintf("%s_%s", destName, pageName)
|
||||
page.SetName(&newName)
|
||||
|
||||
// Restore is a 2-Step Process in Graph API
|
||||
// 1. Create the Page on the site
|
||||
// 2. Publish the site
|
||||
// See: https://learn.microsoft.com/en-us/graph/api/sitepage-create?view=graph-rest-beta
|
||||
restoredPage, err := service.Client().SitesById(siteID).Pages().Post(ctx, page, nil)
|
||||
if err != nil {
|
||||
sendErr := support.ConnectorStackErrorTraceWrap(
|
||||
err,
|
||||
"creating page from ID: %s"+pageName+" API Error Details",
|
||||
)
|
||||
|
||||
return dii, sendErr
|
||||
}
|
||||
|
||||
pageID = *restoredPage.GetId()
|
||||
// Publish page to make visible
|
||||
// See https://learn.microsoft.com/en-us/graph/api/sitepage-publish?view=graph-rest-beta
|
||||
if restoredPage.GetWebUrl() == nil {
|
||||
return dii, fmt.Errorf("creating page %s incomplete. Field `webURL` not populated", pageID)
|
||||
}
|
||||
|
||||
err = service.Client().
|
||||
SitesById(siteID).
|
||||
PagesById(pageID).
|
||||
Publish().
|
||||
Post(ctx, nil)
|
||||
if err != nil {
|
||||
return dii, support.ConnectorStackErrorTraceWrap(
|
||||
err,
|
||||
"publishing page ID: "+*restoredPage.GetId()+" API Error Details",
|
||||
)
|
||||
}
|
||||
|
||||
dii.SharePoint = PageInfo(restoredPage, int64(len(byteArray)))
|
||||
// Storing new pageID in unused field.
|
||||
dii.SharePoint.ParentPath = pageID
|
||||
|
||||
return dii, nil
|
||||
}
|
||||
|
||||
// ==============================
|
||||
// Helpers
|
||||
// ==============================
|
||||
// PageInfo extracts useful metadata into struct for book keeping
|
||||
func PageInfo(page models.SitePageable, size int64) *details.SharePointInfo {
|
||||
var (
|
||||
name, webURL string
|
||||
created, modified time.Time
|
||||
)
|
||||
|
||||
if page.GetTitle() != nil {
|
||||
name = *page.GetTitle()
|
||||
}
|
||||
|
||||
if page.GetWebUrl() != nil {
|
||||
webURL = *page.GetWebUrl()
|
||||
}
|
||||
|
||||
if page.GetCreatedDateTime() != nil {
|
||||
created = *page.GetCreatedDateTime()
|
||||
}
|
||||
|
||||
if page.GetLastModifiedDateTime() != nil {
|
||||
modified = *page.GetLastModifiedDateTime()
|
||||
}
|
||||
|
||||
return &details.SharePointInfo{
|
||||
ItemType: details.SharePointItem,
|
||||
ItemName: name,
|
||||
Created: created,
|
||||
Modified: modified,
|
||||
WebURL: webURL,
|
||||
Size: size,
|
||||
}
|
||||
}
|
||||
|
||||
@ -1,20 +1,28 @@
|
||||
package api
|
||||
package api_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/stretchr/testify/suite"
|
||||
|
||||
"github.com/alcionai/corso/src/internal/common"
|
||||
discover "github.com/alcionai/corso/src/internal/connector/discovery/api"
|
||||
"github.com/alcionai/corso/src/internal/connector/mockconnector"
|
||||
"github.com/alcionai/corso/src/internal/connector/sharepoint"
|
||||
"github.com/alcionai/corso/src/internal/connector/sharepoint/api"
|
||||
"github.com/alcionai/corso/src/internal/tester"
|
||||
"github.com/alcionai/corso/src/pkg/account"
|
||||
)
|
||||
|
||||
type SharePointPageSuite struct {
|
||||
suite.Suite
|
||||
siteID string
|
||||
creds account.M365Config
|
||||
siteID string
|
||||
creds account.M365Config
|
||||
service *discover.BetaService
|
||||
}
|
||||
|
||||
func (suite *SharePointPageSuite) SetupSuite() {
|
||||
@ -27,6 +35,7 @@ func (suite *SharePointPageSuite) SetupSuite() {
|
||||
require.NoError(t, err)
|
||||
|
||||
suite.creds = m365
|
||||
suite.service = createTestBetaService(t, suite.creds)
|
||||
}
|
||||
|
||||
func TestSharePointPageSuite(t *testing.T) {
|
||||
@ -42,9 +51,7 @@ func (suite *SharePointPageSuite) TestFetchPages() {
|
||||
defer flush()
|
||||
|
||||
t := suite.T()
|
||||
service := createTestBetaService(t, suite.creds)
|
||||
|
||||
pgs, err := FetchPages(ctx, service, suite.siteID)
|
||||
pgs, err := api.FetchPages(ctx, suite.service, suite.siteID)
|
||||
assert.NoError(t, err)
|
||||
require.NotNil(t, pgs)
|
||||
assert.NotZero(t, len(pgs))
|
||||
@ -54,18 +61,52 @@ func (suite *SharePointPageSuite) TestFetchPages() {
|
||||
}
|
||||
}
|
||||
|
||||
func (suite *SharePointPageSuite) TestGetSitePage() {
|
||||
func (suite *SharePointPageSuite) TestGetSitePages() {
|
||||
ctx, flush := tester.NewContext()
|
||||
defer flush()
|
||||
|
||||
t := suite.T()
|
||||
service := createTestBetaService(t, suite.creds)
|
||||
tuples, err := FetchPages(ctx, service, suite.siteID)
|
||||
tuples, err := api.FetchPages(ctx, suite.service, suite.siteID)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, tuples)
|
||||
|
||||
jobs := []string{tuples[0].ID}
|
||||
pages, err := GetSitePage(ctx, service, suite.siteID, jobs)
|
||||
pages, err := api.GetSitePages(ctx, suite.service, suite.siteID, jobs)
|
||||
assert.NoError(t, err)
|
||||
assert.NotEmpty(t, pages)
|
||||
}
|
||||
|
||||
func (suite *SharePointPageSuite) TestRestoreSinglePage() {
|
||||
ctx, flush := tester.NewContext()
|
||||
defer flush()
|
||||
|
||||
t := suite.T()
|
||||
|
||||
destName := "Corso_Restore_" + common.FormatNow(common.SimpleTimeTesting)
|
||||
testName := "MockPage"
|
||||
|
||||
// Create Test Page
|
||||
//nolint:lll
|
||||
byteArray := mockconnector.GetMockPage("Byte Test")
|
||||
|
||||
pageData := sharepoint.NewItem(
|
||||
testName,
|
||||
io.NopCloser(bytes.NewReader(byteArray)),
|
||||
)
|
||||
|
||||
info, err := api.RestoreSitePage(
|
||||
ctx,
|
||||
suite.service,
|
||||
pageData,
|
||||
suite.siteID,
|
||||
destName,
|
||||
)
|
||||
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, info)
|
||||
|
||||
// Clean Up
|
||||
pageID := info.SharePoint.ParentPath
|
||||
err = api.DeleteSitePage(ctx, suite.service, suite.siteID, pageID)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
@ -3,18 +3,22 @@ package sharepoint
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"time"
|
||||
|
||||
absser "github.com/microsoft/kiota-abstractions-go/serialization"
|
||||
kw "github.com/microsoft/kiota-serialization-json-go"
|
||||
"github.com/microsoftgraph/msgraph-sdk-go/models"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/alcionai/corso/src/internal/connector/discovery/api"
|
||||
"github.com/alcionai/corso/src/internal/connector/graph"
|
||||
sapi "github.com/alcionai/corso/src/internal/connector/sharepoint/api"
|
||||
"github.com/alcionai/corso/src/internal/connector/support"
|
||||
"github.com/alcionai/corso/src/internal/data"
|
||||
"github.com/alcionai/corso/src/internal/observe"
|
||||
"github.com/alcionai/corso/src/pkg/backup/details"
|
||||
"github.com/alcionai/corso/src/pkg/control"
|
||||
"github.com/alcionai/corso/src/pkg/logger"
|
||||
"github.com/alcionai/corso/src/pkg/path"
|
||||
)
|
||||
@ -24,18 +28,26 @@ type DataCategory int
|
||||
//go:generate stringer -type=DataCategory
|
||||
const (
|
||||
collectionChannelBufferSize = 50
|
||||
fetchChannelSize = 5
|
||||
Unknown DataCategory = iota
|
||||
List
|
||||
Drive
|
||||
Pages
|
||||
)
|
||||
|
||||
var (
|
||||
_ data.Collection = &Collection{}
|
||||
_ data.Stream = &Item{}
|
||||
_ data.StreamInfo = &Item{}
|
||||
_ data.StreamModTime = &Item{}
|
||||
_ data.BackupCollection = &Collection{}
|
||||
_ data.Stream = &Item{}
|
||||
_ data.StreamInfo = &Item{}
|
||||
_ data.StreamModTime = &Item{}
|
||||
)
|
||||
|
||||
type numMetrics struct {
|
||||
attempts int
|
||||
success int
|
||||
totalBytes int64
|
||||
}
|
||||
|
||||
// Collection is the SharePoint.List implementation of data.Collection. SharePoint.Libraries collections are supported
|
||||
// by the oneDrive.Collection as the calls are identical for populating the Collection
|
||||
type Collection struct {
|
||||
@ -46,7 +58,9 @@ type Collection struct {
|
||||
// jobs contain the SharePoint.Site.ListIDs for the associated list(s).
|
||||
jobs []string
|
||||
// M365 IDs of the items of this collection
|
||||
category DataCategory
|
||||
service graph.Servicer
|
||||
ctrl control.Options
|
||||
betaService *api.BetaService
|
||||
statusUpdater support.StatusUpdater
|
||||
}
|
||||
@ -55,7 +69,9 @@ type Collection struct {
|
||||
func NewCollection(
|
||||
folderPath path.Path,
|
||||
service graph.Servicer,
|
||||
category DataCategory,
|
||||
statusUpdater support.StatusUpdater,
|
||||
ctrlOpts control.Options,
|
||||
) *Collection {
|
||||
c := &Collection{
|
||||
fullPath: folderPath,
|
||||
@ -63,6 +79,8 @@ func NewCollection(
|
||||
data: make(chan data.Stream, collectionChannelBufferSize),
|
||||
service: service,
|
||||
statusUpdater: statusUpdater,
|
||||
category: category,
|
||||
ctrl: ctrlOpts,
|
||||
}
|
||||
|
||||
return c
|
||||
@ -106,6 +124,15 @@ type Item struct {
|
||||
deleted bool
|
||||
}
|
||||
|
||||
func NewItem(name string, d io.ReadCloser) *Item {
|
||||
item := &Item{
|
||||
id: name,
|
||||
data: d,
|
||||
}
|
||||
|
||||
return item
|
||||
}
|
||||
|
||||
func (sd *Item) UUID() string {
|
||||
return sd.id
|
||||
}
|
||||
@ -133,7 +160,7 @@ func (sc *Collection) finishPopulation(ctx context.Context, attempts, success in
|
||||
status := support.CreateStatus(
|
||||
ctx,
|
||||
support.Backup,
|
||||
len(sc.jobs),
|
||||
1, // 1 folder
|
||||
support.CollectionMetrics{
|
||||
Objects: attempted,
|
||||
Successes: success,
|
||||
@ -151,12 +178,14 @@ func (sc *Collection) finishPopulation(ctx context.Context, attempts, success in
|
||||
// populate is a utility function to retrieve data from the backing store for a given collection
|
||||
func (sc *Collection) populate(ctx context.Context) {
|
||||
var (
|
||||
objects, success int
|
||||
totalBytes, arrayLength int64
|
||||
errs error
|
||||
writer = kw.NewJsonSerializationWriter()
|
||||
metrics numMetrics
|
||||
errs error
|
||||
writer = kw.NewJsonSerializationWriter()
|
||||
)
|
||||
|
||||
defer func() {
|
||||
sc.finishPopulation(ctx, metrics.attempts, metrics.success, int64(metrics.totalBytes), errs)
|
||||
}()
|
||||
// TODO: Insert correct ID for CollectionProgress
|
||||
colProgress, closer := observe.CollectionProgress(
|
||||
ctx,
|
||||
@ -167,25 +196,49 @@ func (sc *Collection) populate(ctx context.Context) {
|
||||
|
||||
defer func() {
|
||||
close(colProgress)
|
||||
sc.finishPopulation(ctx, objects, success, totalBytes, errs)
|
||||
}()
|
||||
|
||||
// Retrieve list data from M365
|
||||
// Switch retrieval function based on category
|
||||
switch sc.category {
|
||||
case List:
|
||||
metrics, errs = sc.retrieveLists(ctx, writer, colProgress)
|
||||
case Pages:
|
||||
metrics, errs = sc.retrievePages(ctx, writer, colProgress)
|
||||
}
|
||||
}
|
||||
|
||||
// retrieveLists is a utility function for the collection that downloads and serializes
|
||||
// models.Listable objects based on M365 IDs from the jobs field.
|
||||
func (sc *Collection) retrieveLists(
|
||||
ctx context.Context,
|
||||
wtr *kw.JsonSerializationWriter,
|
||||
progress chan<- struct{},
|
||||
) (numMetrics, error) {
|
||||
var (
|
||||
errs error
|
||||
metrics numMetrics
|
||||
)
|
||||
|
||||
lists, err := loadSiteLists(ctx, sc.service, sc.fullPath.ResourceOwner(), sc.jobs)
|
||||
if err != nil {
|
||||
errs = support.WrapAndAppend(sc.fullPath.ResourceOwner(), err, errs)
|
||||
return metrics, errors.Wrap(err, sc.fullPath.ResourceOwner())
|
||||
}
|
||||
|
||||
objects += len(lists)
|
||||
// Write Data and Send
|
||||
metrics.attempts += len(lists)
|
||||
// For each models.Listable, object is serialized and the metrics are collected.
|
||||
// The progress is reported via the passed-in channel.
|
||||
for _, lst := range lists {
|
||||
byteArray, err := serializeListContent(writer, lst)
|
||||
byteArray, err := serializeContent(wtr, lst)
|
||||
if err != nil {
|
||||
errs = support.WrapAndAppend(*lst.GetId(), err, errs)
|
||||
if sc.ctrl.FailFast {
|
||||
return metrics, errs
|
||||
}
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
arrayLength = int64(len(byteArray))
|
||||
arrayLength := int64(len(byteArray))
|
||||
|
||||
if arrayLength > 0 {
|
||||
t := time.Now()
|
||||
@ -193,9 +246,9 @@ func (sc *Collection) populate(ctx context.Context) {
|
||||
t = *t1
|
||||
}
|
||||
|
||||
totalBytes += arrayLength
|
||||
metrics.totalBytes += arrayLength
|
||||
|
||||
success++
|
||||
metrics.success++
|
||||
sc.data <- &Item{
|
||||
id: *lst.GetId(),
|
||||
data: io.NopCloser(bytes.NewReader(byteArray)),
|
||||
@ -203,15 +256,76 @@ func (sc *Collection) populate(ctx context.Context) {
|
||||
modTime: t,
|
||||
}
|
||||
|
||||
colProgress <- struct{}{}
|
||||
progress <- struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
return metrics, nil
|
||||
}
|
||||
|
||||
func serializeListContent(writer *kw.JsonSerializationWriter, lst models.Listable) ([]byte, error) {
|
||||
func (sc *Collection) retrievePages(
|
||||
ctx context.Context,
|
||||
wtr *kw.JsonSerializationWriter,
|
||||
progress chan<- struct{},
|
||||
) (numMetrics, error) {
|
||||
var (
|
||||
errs error
|
||||
metrics numMetrics
|
||||
)
|
||||
|
||||
betaService := sc.betaService
|
||||
if betaService == nil {
|
||||
return metrics, fmt.Errorf("beta service not found in collection")
|
||||
}
|
||||
|
||||
pages, err := sapi.GetSitePages(ctx, betaService, sc.fullPath.ResourceOwner(), sc.jobs)
|
||||
if err != nil {
|
||||
return metrics, errors.Wrap(err, sc.fullPath.ResourceOwner())
|
||||
}
|
||||
|
||||
metrics.attempts = len(pages)
|
||||
// For each models.Pageable, the object is serialized and the metrics are collected and returned.
|
||||
// Pageable objects are not supported in v1.0 of msgraph at this time.
|
||||
// TODO: Verify Parsable interface supported with modified-Pageable
|
||||
for _, pg := range pages {
|
||||
byteArray, err := serializeContent(wtr, pg)
|
||||
if err != nil {
|
||||
errs = support.WrapAndAppend(*pg.GetId(), err, errs)
|
||||
if sc.ctrl.FailFast {
|
||||
return metrics, errs
|
||||
}
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
arrayLength := int64(len(byteArray))
|
||||
|
||||
if arrayLength > 0 {
|
||||
t := time.Now()
|
||||
if t1 := pg.GetLastModifiedDateTime(); t1 != nil {
|
||||
t = *t1
|
||||
}
|
||||
|
||||
metrics.totalBytes += arrayLength
|
||||
metrics.success++
|
||||
sc.data <- &Item{
|
||||
id: *pg.GetId(),
|
||||
data: io.NopCloser(bytes.NewReader(byteArray)),
|
||||
info: sharePointPageInfo(pg, arrayLength),
|
||||
modTime: t,
|
||||
}
|
||||
|
||||
progress <- struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
return metrics, nil
|
||||
}
|
||||
|
||||
func serializeContent(writer *kw.JsonSerializationWriter, obj absser.Parsable) ([]byte, error) {
|
||||
defer writer.Close()
|
||||
|
||||
err := writer.WriteObjectValue("", lst)
|
||||
err := writer.WriteObjectValue("", obj)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@ -14,6 +14,7 @@ import (
|
||||
"github.com/alcionai/corso/src/internal/common"
|
||||
"github.com/alcionai/corso/src/internal/connector/mockconnector"
|
||||
"github.com/alcionai/corso/src/internal/connector/onedrive"
|
||||
"github.com/alcionai/corso/src/internal/connector/sharepoint/api"
|
||||
"github.com/alcionai/corso/src/internal/connector/support"
|
||||
"github.com/alcionai/corso/src/internal/data"
|
||||
"github.com/alcionai/corso/src/internal/tester"
|
||||
@ -50,7 +51,7 @@ func TestSharePointCollectionSuite(t *testing.T) {
|
||||
suite.Run(t, new(SharePointCollectionSuite))
|
||||
}
|
||||
|
||||
func (suite *SharePointCollectionSuite) TestSharePointDataReader_Valid() {
|
||||
func (suite *SharePointCollectionSuite) TestCollection_Item_Read() {
|
||||
t := suite.T()
|
||||
m := []byte("test message")
|
||||
name := "aFile"
|
||||
@ -65,73 +66,109 @@ func (suite *SharePointCollectionSuite) TestSharePointDataReader_Valid() {
|
||||
assert.Equal(t, readData, m)
|
||||
}
|
||||
|
||||
// TestSharePointListCollection tests basic functionality to create
|
||||
// TestListCollection tests basic functionality to create
|
||||
// SharePoint collection and to use the data stream channel.
|
||||
func (suite *SharePointCollectionSuite) TestSharePointListCollection() {
|
||||
func (suite *SharePointCollectionSuite) TestCollection_Items() {
|
||||
t := suite.T()
|
||||
tenant := "some"
|
||||
user := "user"
|
||||
dirRoot := "directory"
|
||||
tables := []struct {
|
||||
name, itemName string
|
||||
category DataCategory
|
||||
getDir func(t *testing.T) path.Path
|
||||
getItem func(t *testing.T, itemName string) *Item
|
||||
}{
|
||||
{
|
||||
name: "List",
|
||||
itemName: "MockListing",
|
||||
category: List,
|
||||
getDir: func(t *testing.T) path.Path {
|
||||
dir, err := path.Builder{}.Append(dirRoot).
|
||||
ToDataLayerSharePointPath(
|
||||
tenant,
|
||||
user,
|
||||
path.ListsCategory,
|
||||
false)
|
||||
require.NoError(t, err)
|
||||
|
||||
ow := kioser.NewJsonSerializationWriter()
|
||||
listing := mockconnector.GetMockListDefault("Mock List")
|
||||
testName := "MockListing"
|
||||
listing.SetDisplayName(&testName)
|
||||
return dir
|
||||
},
|
||||
getItem: func(t *testing.T, name string) *Item {
|
||||
ow := kioser.NewJsonSerializationWriter()
|
||||
listing := mockconnector.GetMockListDefault(name)
|
||||
listing.SetDisplayName(&name)
|
||||
|
||||
err := ow.WriteObjectValue("", listing)
|
||||
require.NoError(t, err)
|
||||
err := ow.WriteObjectValue("", listing)
|
||||
require.NoError(t, err)
|
||||
|
||||
byteArray, err := ow.GetSerializedContent()
|
||||
require.NoError(t, err)
|
||||
byteArray, err := ow.GetSerializedContent()
|
||||
require.NoError(t, err)
|
||||
|
||||
dir, err := path.Builder{}.Append("directory").
|
||||
ToDataLayerSharePointPath(
|
||||
"some",
|
||||
"user",
|
||||
path.ListsCategory,
|
||||
false)
|
||||
require.NoError(t, err)
|
||||
data := &Item{
|
||||
id: name,
|
||||
data: io.NopCloser(bytes.NewReader(byteArray)),
|
||||
info: sharePointListInfo(listing, int64(len(byteArray))),
|
||||
}
|
||||
|
||||
col := NewCollection(dir, nil, nil)
|
||||
col.data <- &Item{
|
||||
id: testName,
|
||||
data: io.NopCloser(bytes.NewReader(byteArray)),
|
||||
info: sharePointListInfo(listing, int64(len(byteArray))),
|
||||
return data
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Pages",
|
||||
itemName: "MockPages",
|
||||
category: Pages,
|
||||
getDir: func(t *testing.T) path.Path {
|
||||
dir, err := path.Builder{}.Append(dirRoot).
|
||||
ToDataLayerSharePointPath(
|
||||
tenant,
|
||||
user,
|
||||
path.PagesCategory,
|
||||
false)
|
||||
require.NoError(t, err)
|
||||
|
||||
return dir
|
||||
},
|
||||
getItem: func(t *testing.T, itemName string) *Item {
|
||||
byteArray := mockconnector.GetMockPage(itemName)
|
||||
page, err := support.CreatePageFromBytes(byteArray)
|
||||
require.NoError(t, err)
|
||||
|
||||
data := &Item{
|
||||
id: itemName,
|
||||
data: io.NopCloser(bytes.NewReader(byteArray)),
|
||||
info: api.PageInfo(page, int64(len(byteArray))),
|
||||
}
|
||||
|
||||
return data
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
readItems := []data.Stream{}
|
||||
for _, test := range tables {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
col := NewCollection(test.getDir(t), nil, test.category, nil, control.Defaults())
|
||||
col.data <- test.getItem(t, test.itemName)
|
||||
|
||||
for item := range col.Items() {
|
||||
readItems = append(readItems, item)
|
||||
readItems := []data.Stream{}
|
||||
|
||||
for item := range col.Items() {
|
||||
readItems = append(readItems, item)
|
||||
}
|
||||
|
||||
require.Equal(t, len(readItems), 1)
|
||||
item := readItems[0]
|
||||
shareInfo, ok := item.(data.StreamInfo)
|
||||
require.True(t, ok)
|
||||
require.NotNil(t, shareInfo.Info())
|
||||
require.NotNil(t, shareInfo.Info().SharePoint)
|
||||
assert.Equal(t, test.itemName, shareInfo.Info().SharePoint.ItemName)
|
||||
})
|
||||
}
|
||||
|
||||
require.Equal(t, len(readItems), 1)
|
||||
item := readItems[0]
|
||||
shareInfo, ok := item.(data.StreamInfo)
|
||||
require.True(t, ok)
|
||||
require.NotNil(t, shareInfo.Info())
|
||||
require.NotNil(t, shareInfo.Info().SharePoint)
|
||||
assert.Equal(t, testName, shareInfo.Info().SharePoint.ItemName)
|
||||
}
|
||||
|
||||
func (suite *SharePointCollectionSuite) TestCollectPages() {
|
||||
ctx, flush := tester.NewContext()
|
||||
defer flush()
|
||||
|
||||
t := suite.T()
|
||||
col, err := collectPages(
|
||||
ctx,
|
||||
suite.creds,
|
||||
nil,
|
||||
account.AzureTenantID,
|
||||
suite.siteID,
|
||||
nil,
|
||||
&MockGraphService{},
|
||||
control.Defaults(),
|
||||
)
|
||||
assert.NoError(t, err)
|
||||
assert.NotEmpty(t, col)
|
||||
}
|
||||
|
||||
// TestRestoreListCollection verifies Graph Restore API for the List Collection
|
||||
func (suite *SharePointCollectionSuite) TestRestoreListCollection() {
|
||||
func (suite *SharePointCollectionSuite) TestListCollection_Restore() {
|
||||
ctx, flush := tester.NewContext()
|
||||
defer flush()
|
||||
|
||||
|
||||
@ -30,11 +30,11 @@ func DataCollections(
|
||||
ctx context.Context,
|
||||
itemClient *http.Client,
|
||||
selector selectors.Selector,
|
||||
tenantID string,
|
||||
creds account.M365Config,
|
||||
serv graph.Servicer,
|
||||
su statusUpdater,
|
||||
ctrlOpts control.Options,
|
||||
) ([]data.Collection, map[string]struct{}, error) {
|
||||
) ([]data.BackupCollection, map[string]struct{}, error) {
|
||||
b, err := selector.ToSharePointBackup()
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "sharePointDataCollection: parsing selector")
|
||||
@ -42,7 +42,7 @@ func DataCollections(
|
||||
|
||||
var (
|
||||
site = b.DiscreteOwner
|
||||
collections = []data.Collection{}
|
||||
collections = []data.BackupCollection{}
|
||||
errs error
|
||||
)
|
||||
|
||||
@ -54,14 +54,14 @@ func DataCollections(
|
||||
defer closer()
|
||||
defer close(foldersComplete)
|
||||
|
||||
var spcs []data.Collection
|
||||
var spcs []data.BackupCollection
|
||||
|
||||
switch scope.Category().PathType() {
|
||||
case path.ListsCategory:
|
||||
spcs, err = collectLists(
|
||||
ctx,
|
||||
serv,
|
||||
tenantID,
|
||||
creds.AzureTenantID,
|
||||
site,
|
||||
su,
|
||||
ctrlOpts)
|
||||
@ -74,7 +74,7 @@ func DataCollections(
|
||||
ctx,
|
||||
itemClient,
|
||||
serv,
|
||||
tenantID,
|
||||
creds.AzureTenantID,
|
||||
site,
|
||||
scope,
|
||||
su,
|
||||
@ -82,6 +82,17 @@ func DataCollections(
|
||||
if err != nil {
|
||||
return nil, nil, support.WrapAndAppend(site, err, errs)
|
||||
}
|
||||
case path.PagesCategory:
|
||||
spcs, err = collectPages(
|
||||
ctx,
|
||||
creds,
|
||||
serv,
|
||||
site,
|
||||
su,
|
||||
ctrlOpts)
|
||||
if err != nil {
|
||||
return nil, nil, support.WrapAndAppend(site, err, errs)
|
||||
}
|
||||
}
|
||||
|
||||
collections = append(collections, spcs...)
|
||||
@ -97,10 +108,10 @@ func collectLists(
|
||||
tenantID, siteID string,
|
||||
updater statusUpdater,
|
||||
ctrlOpts control.Options,
|
||||
) ([]data.Collection, error) {
|
||||
) ([]data.BackupCollection, error) {
|
||||
logger.Ctx(ctx).With("site", siteID).Debug("Creating SharePoint List Collections")
|
||||
|
||||
spcs := make([]data.Collection, 0)
|
||||
spcs := make([]data.BackupCollection, 0)
|
||||
|
||||
tuples, err := preFetchLists(ctx, serv, siteID)
|
||||
if err != nil {
|
||||
@ -118,7 +129,7 @@ func collectLists(
|
||||
return nil, errors.Wrapf(err, "failed to create collection path for site: %s", siteID)
|
||||
}
|
||||
|
||||
collection := NewCollection(dir, serv, updater.UpdateStatus)
|
||||
collection := NewCollection(dir, serv, List, updater.UpdateStatus, ctrlOpts)
|
||||
collection.AddJob(tuple.id)
|
||||
|
||||
spcs = append(spcs, collection)
|
||||
@ -137,9 +148,9 @@ func collectLibraries(
|
||||
scope selectors.SharePointScope,
|
||||
updater statusUpdater,
|
||||
ctrlOpts control.Options,
|
||||
) ([]data.Collection, map[string]struct{}, error) {
|
||||
) ([]data.BackupCollection, map[string]struct{}, error) {
|
||||
var (
|
||||
collections = []data.Collection{}
|
||||
collections = []data.BackupCollection{}
|
||||
errs error
|
||||
)
|
||||
|
||||
@ -166,24 +177,24 @@ func collectLibraries(
|
||||
}
|
||||
|
||||
// collectPages constructs a sharepoint Collections struct and Get()s the associated
|
||||
// M365 IDs for the associated Pages
|
||||
// M365 IDs for the associated Pages.
|
||||
func collectPages(
|
||||
ctx context.Context,
|
||||
creds account.M365Config,
|
||||
serv graph.Servicer,
|
||||
tenantID, siteID string,
|
||||
scope selectors.SharePointScope,
|
||||
siteID string,
|
||||
updater statusUpdater,
|
||||
ctrlOpts control.Options,
|
||||
) ([]data.Collection, error) {
|
||||
) ([]data.BackupCollection, error) {
|
||||
logger.Ctx(ctx).With("site", siteID).Debug("Creating SharePoint Pages collections")
|
||||
|
||||
spcs := make([]data.Collection, 0)
|
||||
spcs := make([]data.BackupCollection, 0)
|
||||
|
||||
// make the betaClient
// Needs to be received from the DataCollections call
|
||||
adpt, err := graph.CreateAdapter(creds.AzureTenantID, creds.AzureClientID, creds.AzureClientSecret)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "adapter for betaservice not created")
|
||||
return nil, errors.New("unable to create adapter w/ env credentials")
|
||||
}
|
||||
|
||||
betaService := api.NewBetaService(adpt)
|
||||
@ -196,7 +207,7 @@ func collectPages(
|
||||
for _, tuple := range tuples {
|
||||
dir, err := path.Builder{}.Append(tuple.Name).
|
||||
ToDataLayerSharePointPath(
|
||||
tenantID,
|
||||
creds.AzureTenantID,
|
||||
siteID,
|
||||
path.PagesCategory,
|
||||
false)
|
||||
@ -204,7 +215,7 @@ func collectPages(
|
||||
return nil, errors.Wrapf(err, "failed to create collection path for site: %s", siteID)
|
||||
}
|
||||
|
||||
collection := NewCollection(dir, serv, updater.UpdateStatus)
|
||||
collection := NewCollection(dir, serv, Pages, updater.UpdateStatus, ctrlOpts)
|
||||
collection.betaService = betaService
|
||||
collection.AddJob(tuple.ID)
|
||||
|
||||
|
||||
@ -5,6 +5,7 @@ import (
|
||||
|
||||
"github.com/microsoftgraph/msgraph-sdk-go/models"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/stretchr/testify/suite"
|
||||
|
||||
"github.com/alcionai/corso/src/internal/connector/graph"
|
||||
@ -100,7 +101,7 @@ func (suite *SharePointLibrariesSuite) TestUpdateCollections() {
|
||||
&MockGraphService{},
|
||||
nil,
|
||||
control.Options{})
|
||||
err := c.UpdateCollections(ctx, "driveID", "General", test.items, paths, newPaths, excluded)
|
||||
err := c.UpdateCollections(ctx, "driveID", "General", test.items, paths, newPaths, excluded, true)
|
||||
test.expect(t, err)
|
||||
assert.Equal(t, len(test.expectedCollectionPaths), len(c.CollectionMap), "collection paths")
|
||||
assert.Equal(t, test.expectedItemCount, c.NumItems, "item count")
|
||||
@ -128,3 +129,38 @@ func driveItem(name string, path string, isFile bool) models.DriveItemable {
|
||||
|
||||
return item
|
||||
}
|
||||
|
||||
type SharePointPagesSuite struct {
|
||||
suite.Suite
|
||||
}
|
||||
|
||||
func TestSharePointPagesSuite(t *testing.T) {
|
||||
tester.RunOnAny(
|
||||
t,
|
||||
tester.CorsoCITests,
|
||||
tester.CorsoGraphConnectorTests,
|
||||
tester.CorsoGraphConnectorSharePointTests)
|
||||
suite.Run(t, new(SharePointPagesSuite))
|
||||
}
|
||||
|
||||
func (suite *SharePointPagesSuite) TestCollectPages() {
|
||||
ctx, flush := tester.NewContext()
|
||||
defer flush()
|
||||
|
||||
t := suite.T()
|
||||
siteID := tester.M365SiteID(t)
|
||||
a := tester.NewM365Account(t)
|
||||
account, err := a.M365Config()
|
||||
require.NoError(t, err)
|
||||
|
||||
col, err := collectPages(
|
||||
ctx,
|
||||
account,
|
||||
nil,
|
||||
siteID,
|
||||
&MockGraphService{},
|
||||
control.Defaults(),
|
||||
)
|
||||
assert.NoError(t, err)
|
||||
assert.NotEmpty(t, col)
|
||||
}
|
||||
|
||||
@ -8,19 +8,20 @@ func _() {
|
||||
// An "invalid array index" compiler error signifies that the constant values have changed.
|
||||
// Re-run the stringer command to generate them again.
|
||||
var x [1]struct{}
|
||||
_ = x[Unknown-1]
|
||||
_ = x[List-2]
|
||||
_ = x[Drive-3]
|
||||
_ = x[Unknown-2]
|
||||
_ = x[List-3]
|
||||
_ = x[Drive-4]
|
||||
_ = x[Pages-5]
|
||||
}
|
||||
|
||||
const _DataCategory_name = "UnknownListDrive"
|
||||
const _DataCategory_name = "UnknownListDrivePages"
|
||||
|
||||
var _DataCategory_index = [...]uint8{0, 7, 11, 16}
|
||||
var _DataCategory_index = [...]uint8{0, 7, 11, 16, 21}
|
||||
|
||||
func (i DataCategory) String() string {
|
||||
i -= 1
|
||||
i -= 2
|
||||
if i < 0 || i >= DataCategory(len(_DataCategory_index)-1) {
|
||||
return "DataCategory(" + strconv.FormatInt(int64(i+1), 10) + ")"
|
||||
return "DataCategory(" + strconv.FormatInt(int64(i+2), 10) + ")"
|
||||
}
|
||||
return _DataCategory_name[_DataCategory_index[i]:_DataCategory_index[i+1]]
|
||||
}
|
||||
|
||||
@ -17,6 +17,16 @@ import (
|
||||
// ---------------------------------------------------------------------------
|
||||
type MockGraphService struct{}
|
||||
|
||||
type MockUpdater struct {
|
||||
UpdateState func(*support.ConnectorOperationStatus)
|
||||
}
|
||||
|
||||
func (mu *MockUpdater) UpdateStatus(input *support.ConnectorOperationStatus) {
|
||||
if mu.UpdateState != nil {
|
||||
mu.UpdateState(input)
|
||||
}
|
||||
}
|
||||
|
||||
//------------------------------------------------------------
|
||||
// Interface Functions: @See graph.Service
|
||||
//------------------------------------------------------------
|
||||
|
||||
@ -3,6 +3,7 @@ package sharepoint
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/microsoftgraph/msgraph-sdk-go/models"
|
||||
mssite "github.com/microsoftgraph/msgraph-sdk-go/sites"
|
||||
@ -91,33 +92,65 @@ func loadSiteLists(
|
||||
listIDs []string,
|
||||
) ([]models.Listable, error) {
|
||||
var (
|
||||
results = make([]models.Listable, 0)
|
||||
errs error
|
||||
results = make([]models.Listable, 0)
|
||||
semaphoreCh = make(chan struct{}, fetchChannelSize)
|
||||
errs error
|
||||
wg sync.WaitGroup
|
||||
m sync.Mutex
|
||||
)
|
||||
|
||||
for _, listID := range listIDs {
|
||||
entry, err := gs.Client().SitesById(siteID).ListsById(listID).Get(ctx, nil)
|
||||
if err != nil {
|
||||
errs = support.WrapAndAppend(
|
||||
listID,
|
||||
errors.Wrap(err, support.ConnectorStackErrorTrace(err)),
|
||||
errs,
|
||||
)
|
||||
}
|
||||
defer close(semaphoreCh)
|
||||
|
||||
errUpdater := func(id string, err error) {
|
||||
m.Lock()
|
||||
errs = support.WrapAndAppend(id, err, errs)
|
||||
m.Unlock()
|
||||
}
|
||||
|
||||
updateLists := func(list models.Listable) {
|
||||
m.Lock()
|
||||
results = append(results, list)
|
||||
m.Unlock()
|
||||
}
|
||||
|
||||
for _, listID := range listIDs {
|
||||
semaphoreCh <- struct{}{}
|
||||
|
||||
wg.Add(1)
|
||||
|
||||
go func(id string) {
|
||||
defer wg.Done()
|
||||
defer func() { <-semaphoreCh }()
|
||||
|
||||
var (
|
||||
entry models.Listable
|
||||
err error
|
||||
)
|
||||
|
||||
err = graph.RunWithRetry(func() error {
|
||||
entry, err = gs.Client().SitesById(siteID).ListsById(id).Get(ctx, nil)
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
errUpdater(id, support.ConnectorStackErrorTraceWrap(err, ""))
|
||||
return
|
||||
}
|
||||
|
||||
cols, cTypes, lItems, err := fetchListContents(ctx, gs, siteID, id)
|
||||
if err != nil {
|
||||
errUpdater(id, errors.Wrap(err, "unable to fetchRelationships during loadSiteLists"))
|
||||
return
|
||||
}
|
||||
|
||||
cols, cTypes, lItems, err := fetchListContents(ctx, gs, siteID, listID)
|
||||
if err == nil {
|
||||
entry.SetColumns(cols)
|
||||
entry.SetContentTypes(cTypes)
|
||||
entry.SetItems(lItems)
|
||||
} else {
|
||||
errs = support.WrapAndAppend("unable to fetchRelationships during loadSiteLists", err, errs)
|
||||
continue
|
||||
}
|
||||
|
||||
results = append(results, entry)
|
||||
updateLists(entry)
|
||||
}(listID)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
if errs != nil {
|
||||
return nil, errs
|
||||
}
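// Illustrative sketch of the bounded-concurrency pattern adopted above: a buffered
// channel acts as a semaphore so at most `limit` goroutines query Graph at once,
// while a mutex guards the shared error accumulator. This helper is hypothetical,
// not part of the change; it assumes the sync and support imports already present.
func boundedFanOut(ids []string, limit int, work func(id string) error) error {
	var (
		wg   sync.WaitGroup
		mu   sync.Mutex
		errs error
		sem  = make(chan struct{}, limit)
	)

	defer close(sem)

	for _, id := range ids {
		sem <- struct{}{}

		wg.Add(1)

		go func(id string) {
			defer wg.Done()
			defer func() { <-sem }()

			if err := work(id); err != nil {
				mu.Lock()
				errs = support.WrapAndAppend(id, err, errs)
				mu.Unlock()
			}
		}(id)
	}

	wg.Wait()

	return errs
}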
@ -9,11 +9,14 @@ import (
|
||||
"github.com/microsoftgraph/msgraph-sdk-go/models"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
discover "github.com/alcionai/corso/src/internal/connector/discovery/api"
|
||||
"github.com/alcionai/corso/src/internal/connector/graph"
|
||||
"github.com/alcionai/corso/src/internal/connector/onedrive"
|
||||
"github.com/alcionai/corso/src/internal/connector/sharepoint/api"
|
||||
"github.com/alcionai/corso/src/internal/connector/support"
|
||||
"github.com/alcionai/corso/src/internal/data"
|
||||
D "github.com/alcionai/corso/src/internal/diagnostics"
|
||||
"github.com/alcionai/corso/src/pkg/account"
|
||||
"github.com/alcionai/corso/src/pkg/backup/details"
|
||||
"github.com/alcionai/corso/src/pkg/control"
|
||||
"github.com/alcionai/corso/src/pkg/logger"
|
||||
@ -27,7 +30,7 @@ import (
|
||||
// -- Switch:
|
||||
// ---- Libraries restored via the same workflow as oneDrive
|
||||
// ---- Lists call RestoreCollection()
|
||||
// ----> for each data.Stream within Collection.Items()
|
||||
// ----> for each data.Stream within RestoreCollection.Items()
|
||||
// ----> restoreListItems() is called
|
||||
// Restored List can be found in the Site's `Site content` page
|
||||
// Restored Libraries can be found within the Site's `Pages` page
|
||||
@ -37,9 +40,10 @@ import (
|
||||
func RestoreCollections(
|
||||
ctx context.Context,
|
||||
backupVersion int,
|
||||
creds account.M365Config,
|
||||
service graph.Servicer,
|
||||
dest control.RestoreDestination,
|
||||
dcs []data.Collection,
|
||||
dcs []data.RestoreCollection,
|
||||
deets *details.Builder,
|
||||
) (*support.ConnectorOperationStatus, error) {
|
||||
var (
|
||||
@ -74,7 +78,7 @@ func RestoreCollections(
|
||||
false,
|
||||
)
|
||||
case path.ListsCategory:
|
||||
metrics, canceled = RestoreCollection(
|
||||
metrics, canceled = RestoreListCollection(
|
||||
ctx,
|
||||
service,
|
||||
dc,
|
||||
@ -83,11 +87,14 @@ func RestoreCollections(
|
||||
errUpdater,
|
||||
)
|
||||
case path.PagesCategory:
|
||||
errorMessage := fmt.Sprintf("restore of %s not supported", dc.FullPath().Category())
|
||||
logger.Ctx(ctx).Error(errorMessage)
|
||||
|
||||
return nil, errors.New(errorMessage)
|
||||
|
||||
metrics, canceled = RestorePageCollection(
|
||||
ctx,
|
||||
creds,
|
||||
dc,
|
||||
dest.ContainerName,
|
||||
deets,
|
||||
errUpdater,
|
||||
)
|
||||
default:
|
||||
return nil, errors.Errorf("category %s not supported", dc.FullPath().Category())
|
||||
}
|
||||
@ -209,15 +216,15 @@ func restoreListItem(
|
||||
return dii, nil
|
||||
}
|
||||
|
||||
func RestoreCollection(
|
||||
func RestoreListCollection(
|
||||
ctx context.Context,
|
||||
service graph.Servicer,
|
||||
dc data.Collection,
|
||||
dc data.RestoreCollection,
|
||||
restoreContainerName string,
|
||||
deets *details.Builder,
|
||||
errUpdater func(string, error),
|
||||
) (support.CollectionMetrics, bool) {
|
||||
ctx, end := D.Span(ctx, "gc:sharepoint:restoreCollection", D.Label("path", dc.FullPath()))
|
||||
ctx, end := D.Span(ctx, "gc:sharepoint:restoreListCollection", D.Label("path", dc.FullPath()))
|
||||
defer end()
|
||||
|
||||
var (
|
||||
@ -225,7 +232,7 @@ func RestoreCollection(
|
||||
directory = dc.FullPath()
|
||||
)
|
||||
|
||||
trace.Log(ctx, "gc:sharepoint:restoreCollection", directory.String())
|
||||
trace.Log(ctx, "gc:sharepoint:restoreListCollection", directory.String())
|
||||
siteID := directory.ResourceOwner()
|
||||
|
||||
// Restore items from the collection
|
||||
@ -276,3 +283,83 @@ func RestoreCollection(
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// RestorePageCollection handles restoration of an individual site page collection.
// returns:
// - the collection's item and byte count metrics
// - the context cancellation status. True iff the context is canceled.
func RestorePageCollection(
|
||||
ctx context.Context,
|
||||
creds account.M365Config,
|
||||
dc data.RestoreCollection,
|
||||
restoreContainerName string,
|
||||
deets *details.Builder,
|
||||
errUpdater func(string, error),
|
||||
) (support.CollectionMetrics, bool) {
|
||||
ctx, end := D.Span(ctx, "gc:sharepoint:restorePageCollection", D.Label("path", dc.FullPath()))
|
||||
defer end()
|
||||
|
||||
var (
|
||||
metrics = support.CollectionMetrics{}
|
||||
directory = dc.FullPath()
|
||||
)
|
||||
|
||||
adpt, err := graph.CreateAdapter(creds.AzureTenantID, creds.AzureClientID, creds.AzureClientSecret)
|
||||
if err != nil {
|
||||
return metrics, false
|
||||
}
|
||||
|
||||
service := discover.NewBetaService(adpt)
|
||||
|
||||
trace.Log(ctx, "gc:sharepoint:restorePageCollection", directory.String())
|
||||
siteID := directory.ResourceOwner()
|
||||
|
||||
// Restore items from collection
|
||||
items := dc.Items()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
errUpdater("context canceled", ctx.Err())
|
||||
return metrics, true
|
||||
|
||||
case itemData, ok := <-items:
|
||||
if !ok {
|
||||
return metrics, false
|
||||
}
|
||||
metrics.Objects++
|
||||
|
||||
itemInfo, err := api.RestoreSitePage(
|
||||
ctx,
|
||||
service,
|
||||
itemData,
|
||||
siteID,
|
||||
restoreContainerName,
|
||||
)
|
||||
if err != nil {
|
||||
errUpdater(itemData.UUID(), err)
|
||||
continue
|
||||
}
|
||||
|
||||
metrics.TotalBytes += itemInfo.SharePoint.Size
|
||||
|
||||
itemPath, err := dc.FullPath().Append(itemData.UUID(), true)
|
||||
if err != nil {
|
||||
logger.Ctx(ctx).Errorw("transforming item to full path", "error", err)
|
||||
errUpdater(itemData.UUID(), err)
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
deets.Add(
|
||||
itemPath.String(),
|
||||
itemPath.ShortRef(),
|
||||
"",
|
||||
true,
|
||||
itemInfo,
|
||||
)
|
||||
|
||||
metrics.Successes++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -8,29 +8,8 @@ import (
|
||||
multierror "github.com/hashicorp/go-multierror"
|
||||
msgraph_errors "github.com/microsoftgraph/msgraph-sdk-go/models/odataerrors"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/alcionai/corso/src/internal/common"
|
||||
)
|
||||
|
||||
// GraphConnector has two types of errors that are exported
|
||||
// RecoverableGCError is a query error that can be overcome with time
|
||||
type RecoverableGCError struct {
|
||||
common.Err
|
||||
}
|
||||
|
||||
func SetRecoverableError(e error) error {
|
||||
return RecoverableGCError{*common.EncapsulateError(e)}
|
||||
}
|
||||
|
||||
// NonRecoverableGCError is a permanent query error
|
||||
type NonRecoverableGCError struct {
|
||||
common.Err
|
||||
}
|
||||
|
||||
func SetNonRecoverableError(e error) error {
|
||||
return NonRecoverableGCError{*common.EncapsulateError(e)}
|
||||
}
|
||||
|
||||
// WrapAndAppend is a helper function used to attach identifying information to an error
// and return it as a multierror
func WrapAndAppend(identifier string, e, previous error) error {
|
||||
@ -101,7 +80,7 @@ func ConnectorStackErrorTraceWrap(e error, prefix string) error {
|
||||
return errors.Wrap(e, prefix)
|
||||
}
|
||||
|
||||
// ConnectorStackErrorTracew is a helper function that extracts
|
||||
// ConnectorStackErrorTrace is a helper function that extracts
|
||||
// the stack trace for oDataErrors, if the error has one.
|
||||
func ConnectorStackErrorTrace(e error) string {
|
||||
eMessage := ""
|
||||
|
||||
@ -41,26 +41,6 @@ func (suite *GraphConnectorErrorSuite) TestWrapAndAppend_OnVar() {
|
||||
suite.True(strings.Contains(received.Error(), id))
|
||||
}
|
||||
|
||||
func (suite *GraphConnectorErrorSuite) TestAsRecoverableError() {
|
||||
err := assert.AnError
|
||||
|
||||
rcv := RecoverableGCError{}
|
||||
suite.False(errors.As(err, &rcv))
|
||||
|
||||
aRecoverable := SetRecoverableError(err)
|
||||
suite.True(errors.As(aRecoverable, &rcv))
|
||||
}
|
||||
|
||||
func (suite *GraphConnectorErrorSuite) TestAsNonRecoverableError() {
|
||||
err := assert.AnError
|
||||
|
||||
noRecover := NonRecoverableGCError{}
|
||||
suite.False(errors.As(err, &noRecover))
|
||||
|
||||
nonRecoverable := SetNonRecoverableError(err)
|
||||
suite.True(errors.As(nonRecoverable, &noRecover))
|
||||
}
|
||||
|
||||
func (suite *GraphConnectorErrorSuite) TestWrapAndAppend_Add3() {
|
||||
errOneTwo := WrapAndAppend("user1", assert.AnError, assert.AnError)
|
||||
combined := WrapAndAppend("unix36", assert.AnError, errOneTwo)
|
||||
|
||||
@ -3,11 +3,12 @@ package support
|
||||
import (
|
||||
"strings"
|
||||
|
||||
bmodels "github.com/alcionai/corso/src/internal/connector/graph/betasdk/models"
|
||||
absser "github.com/microsoft/kiota-abstractions-go/serialization"
|
||||
js "github.com/microsoft/kiota-serialization-json-go"
|
||||
"github.com/microsoftgraph/msgraph-sdk-go/models"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
bmodels "github.com/alcionai/corso/src/internal/connector/graph/betasdk/models"
|
||||
)
|
||||
|
||||
// CreateFromBytes is a helper function to initialize an M365 object from bytes.
@ -7,7 +7,11 @@ import (
|
||||
"github.com/microsoftgraph/msgraph-sdk-go/models"
|
||||
)
|
||||
|
||||
const itemAttachment = "#microsoft.graph.itemAttachment"
|
||||
//==========================================================
// m365Transform.go contains utility functions that
// either add, modify, or remove fields from M365
// objects for interaction with M365 services
//==========================================================
// CloneMessageableFields places data from original data into new message object.
|
||||
// SingleLegacyValueProperty is not populated during this operation
|
||||
@ -282,14 +286,36 @@ func cloneColumnDefinitionable(orig models.ColumnDefinitionable) models.ColumnDe
|
||||
return newColumn
|
||||
}
|
||||
|
||||
// ===============================================================================================
// Sanitization section
// Set of functions that support ItemAttachmentable object restoration.
// These attachments can be nested as well as possess one of the other
// reference types. To ensure proper upload, each interior `item` requires
// that certain fields be modified.
// ItemAttachment:
// https://learn.microsoft.com/en-us/graph/api/resources/itemattachment?view=graph-rest-1.0
// https://learn.microsoft.com/en-us/exchange/client-developer/exchange-web-services/attachments-and-ews-in-exchange
// https://learn.microsoft.com/en-us/exchange/client-developer/exchange-web-services/folders-and-items-in-ews-in-exchange
// ===============================================================================================

// M365 models possess a field, OData.Type, which indicates
// the intended model in string format.
// The constants listed here identify the itemAttachment
// ODataType values currently supported for Restore operations.
//
//nolint:lll
const (
	itemAttachment  = "#microsoft.graph.itemAttachment"
	eventItemType   = "#microsoft.graph.event"
	mailItemType    = "#microsoft.graph.message"
	contactItemType = "#microsoft.graph.contact"
)
|
||||
|
||||
// ToItemAttachment transforms internal items (OutlookItemables) into
// objects that are able to be uploaded into M365.
// Supported internal items:
// - Events, Messages, Contacts
|
||||
func ToItemAttachment(orig models.Attachmentable) (models.Attachmentable, error) {
|
||||
transform, ok := orig.(models.ItemAttachmentable)
|
||||
supported := "#microsoft.graph.event"
|
||||
|
||||
if !ok { // Shouldn't ever happen
|
||||
return nil, fmt.Errorf("transforming attachment to item attachment")
|
||||
}
|
||||
@ -298,7 +324,14 @@ func ToItemAttachment(orig models.Attachmentable) (models.Attachmentable, error)
|
||||
itemType := item.GetOdataType()
|
||||
|
||||
switch *itemType {
|
||||
case supported:
|
||||
case contactItemType:
|
||||
contact := item.(models.Contactable)
|
||||
revised := sanitizeContact(contact)
|
||||
|
||||
transform.SetItem(revised)
|
||||
|
||||
return transform, nil
|
||||
case eventItemType:
|
||||
event := item.(models.Eventable)
|
||||
|
||||
newEvent, err := sanitizeEvent(event)
|
||||
@ -308,12 +341,54 @@ func ToItemAttachment(orig models.Attachmentable) (models.Attachmentable, error)
|
||||
|
||||
transform.SetItem(newEvent)
|
||||
|
||||
return transform, nil
|
||||
case mailItemType:
|
||||
message := item.(models.Messageable)
|
||||
|
||||
newMessage, err := sanitizeMessage(message)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
transform.SetItem(newMessage)
|
||||
|
||||
return transform, nil
|
||||
default:
|
||||
return nil, fmt.Errorf("exiting ToItemAttachment: %s not supported", *itemType)
|
||||
}
|
||||
}
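// Hypothetical usage sketch (not part of this change): before uploading a restored
// item, convert any itemAttachment entries so the embedded object passes the Graph
// guard checks. The helper name and its caller are placeholders for illustration.
func prepareAttachments(attachments []models.Attachmentable) ([]models.Attachmentable, error) {
	out := make([]models.Attachmentable, 0, len(attachments))

	for _, a := range attachments {
		if a.GetOdataType() != nil && *a.GetOdataType() == itemAttachment {
			converted, err := ToItemAttachment(a)
			if err != nil {
				return nil, err
			}

			out = append(out, converted)

			continue
		}

		out = append(out, a)
	}

	return out, nil
}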
// TODO #2428 (dadam39): re-apply nested attachments for itemAttachments
|
||||
// func sanitizeAttachments(attached []models.Attachmentable) ([]models.Attachmentable, error) {
|
||||
// attachments := make([]models.Attachmentable, len(attached))
|
||||
|
||||
// for _, ax := range attached {
|
||||
// if *ax.GetOdataType() == itemAttachment {
|
||||
// newAttachment, err := ToItemAttachment(ax)
|
||||
// if err != nil {
|
||||
// return nil, err
|
||||
// }
|
||||
|
||||
// attachments = append(attachments, newAttachment)
|
||||
|
||||
// continue
|
||||
// }
|
||||
|
||||
// attachments = append(attachments, ax)
|
||||
// }
|
||||
|
||||
// return attachments, nil
|
||||
// }
|
||||
|
||||
// sanitizeContact removes fields which prevent a Contact from
|
||||
// being uploaded as an attachment.
|
||||
func sanitizeContact(orig models.Contactable) models.Contactable {
|
||||
orig.SetParentFolderId(nil)
|
||||
orig.SetAdditionalData(nil)
|
||||
|
||||
return orig
|
||||
}
|
||||
|
||||
// sanitizeEvent transfers data into event object and
|
||||
// removes unique IDs from the M365 object
|
||||
func sanitizeEvent(orig models.Eventable) (models.Eventable, error) {
|
||||
@ -324,7 +399,9 @@ func sanitizeEvent(orig models.Eventable) (models.Eventable, error) {
|
||||
newEvent.SetCalendar(orig.GetCalendar())
|
||||
newEvent.SetCreatedDateTime(orig.GetCreatedDateTime())
|
||||
newEvent.SetEnd(orig.GetEnd())
|
||||
newEvent.SetHasAttachments(orig.GetHasAttachments())
|
||||
// TODO: dadams39 Nested attachments not supported
|
||||
// Upstream: https://github.com/microsoft/kiota-serialization-json-go/issues/61
|
||||
newEvent.SetHasAttachments(nil)
|
||||
newEvent.SetHideAttendees(orig.GetHideAttendees())
|
||||
newEvent.SetImportance(orig.GetImportance())
|
||||
newEvent.SetIsAllDay(orig.GetIsAllDay())
|
||||
@ -337,7 +414,7 @@ func sanitizeEvent(orig models.Eventable) (models.Eventable, error) {
|
||||
newEvent.SetSubject(orig.GetSubject())
|
||||
newEvent.SetType(orig.GetType())
|
||||
|
||||
// Sanitation
|
||||
// Sanitation NOTE
|
||||
// isDraft and isOrganizer *bool ptr's have to be removed completely
|
||||
// from JSON in order for POST method to succeed.
|
||||
// Current as of 2/2/2023
|
||||
@ -346,25 +423,34 @@ func sanitizeEvent(orig models.Eventable) (models.Eventable, error) {
|
||||
newEvent.SetIsDraft(nil)
|
||||
newEvent.SetAdditionalData(orig.GetAdditionalData())
|
||||
|
||||
attached := orig.GetAttachments()
|
||||
attachments := make([]models.Attachmentable, len(attached))
|
||||
|
||||
for _, ax := range attached {
|
||||
if *ax.GetOdataType() == itemAttachment {
|
||||
newAttachment, err := ToItemAttachment(ax)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
attachments = append(attachments, newAttachment)
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
attachments = append(attachments, ax)
|
||||
}
|
||||
|
||||
newEvent.SetAttachments(attachments)
|
||||
// TODO #2428 (dadam39): re-apply nested attachments for itemAttachments
|
||||
// Upstream: https://github.com/microsoft/kiota-serialization-json-go/issues/61
|
||||
// attachments, err := sanitizeAttachments(message.GetAttachments())
|
||||
// if err != nil {
|
||||
// return nil, err
|
||||
// }
|
||||
newEvent.SetAttachments(nil)
|
||||
|
||||
return newEvent, nil
|
||||
}
|
||||
|
||||
func sanitizeMessage(orig models.Messageable) (models.Messageable, error) {
|
||||
message := ToMessage(orig)
|
||||
|
||||
// TODO #2428 (dadam39): re-apply nested attachments for itemAttachments
|
||||
// Upstream: https://github.com/microsoft/kiota-serialization-json-go/issues/61
|
||||
// attachments, err := sanitizeAttachments(message.GetAttachments())
|
||||
// if err != nil {
|
||||
// return nil, err
|
||||
// }
|
||||
message.SetAttachments(nil)
|
||||
|
||||
// The following fields are set to nil to
|
||||
// not interfere with M365 guard checks.
|
||||
message.SetHasAttachments(nil)
|
||||
message.SetParentFolderId(nil)
|
||||
message.SetInternetMessageHeaders(nil)
|
||||
message.SetIsDraft(nil)
|
||||
|
||||
return message, nil
|
||||
}
|
||||
|
||||
@ -1,6 +1,8 @@
|
||||
package data
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"io"
|
||||
"time"
|
||||
|
||||
@ -12,6 +14,8 @@ import (
|
||||
// standard ifaces
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
|
||||
var ErrNotFound = errors.New("not found")
|
||||
|
||||
type CollectionState int
|
||||
|
||||
const (
|
||||
@ -21,8 +25,8 @@ const (
|
||||
DeletedState
|
||||
)
|
||||
|
||||
// A Collection represents a compilation of data from the
|
||||
// same type application (e.g. mail)
|
||||
// A Collection represents the set of data within a single logical location
|
||||
// denoted by FullPath.
|
||||
type Collection interface {
|
||||
// Items returns a channel from which items in the collection can be read.
|
||||
// Each returned struct contains the next item in the collection
|
||||
@ -30,10 +34,13 @@ type Collection interface {
|
||||
// an unrecoverable error caused an early termination in the sender.
|
||||
Items() <-chan Stream
|
||||
// FullPath returns a path struct that acts as a metadata tag for this
|
||||
// DataCollection. Returned items should be ordered from most generic to least
|
||||
// generic. For example, a DataCollection for emails from a specific user
|
||||
// would be {"<tenant id>", "exchange", "<user ID>", "emails"}.
|
||||
// Collection.
|
||||
FullPath() path.Path
|
||||
}
|
||||
|
||||
// BackupCollection is an extension of Collection that is used during backups.
|
||||
type BackupCollection interface {
|
||||
Collection
|
||||
// PreviousPath returns the path.Path this collection used to reside at
|
||||
// (according to the M365 ID for the container) if the collection was moved or
|
||||
// renamed. Returns nil if the collection is new.
|
||||
@ -58,6 +65,25 @@ type Collection interface {
|
||||
DoNotMergeItems() bool
|
||||
}
|
||||
|
||||
// RestoreCollection is an extension of Collection that is used during restores.
type RestoreCollection interface {
	Collection
	// Fetch retrieves an item with the given name from the Collection if it
	// exists. Items retrieved with Fetch may still appear in the channel returned
	// by Items().
	Fetch(ctx context.Context, name string) (Stream, error)
}

// NotFoundRestoreCollection is a wrapper for a Collection that returns
// ErrNotFound for all Fetch calls.
type NotFoundRestoreCollection struct {
	Collection
}

func (c NotFoundRestoreCollection) Fetch(context.Context, string) (Stream, error) {
	return nil, ErrNotFound
}
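// Sketch of how a restore consumer might use Fetch, assuming the item name is known
// from backup details; the ErrNotFound sentinel above signals a fallback to Items().
// This function is illustrative only and not part of the change.
func fetchOrStream(ctx context.Context, rc RestoreCollection, name string) (Stream, error) {
	item, err := rc.Fetch(ctx, name)
	if errors.Is(err, ErrNotFound) {
		// fall back to scanning the full item stream
		for s := range rc.Items() {
			if s.UUID() == name {
				return s, nil
			}
		}

		return nil, ErrNotFound
	}

	return item, err
}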
// Stream represents a single item within a Collection
|
||||
// that can be consumed as a stream (it embeds io.Reader)
|
||||
type Stream interface {
|
||||
@ -87,37 +113,20 @@ type StreamModTime interface {
|
||||
ModTime() time.Time
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
// functionality
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
|
||||
// ResourceOwnerSet extracts the set of unique resource owners from the
|
||||
// slice of Collections.
|
||||
func ResourceOwnerSet(cs []Collection) []string {
|
||||
rs := map[string]struct{}{}
|
||||
|
||||
for _, c := range cs {
|
||||
fp := c.FullPath()
|
||||
if fp == nil {
|
||||
// Deleted collections have their full path set to nil but the previous
|
||||
// path will be populated.
|
||||
fp = c.PreviousPath()
|
||||
}
|
||||
|
||||
if fp == nil {
|
||||
// This should not happen, but keep us from hitting a nil pointer
|
||||
// exception if it does somehow occur. Statistics will be off though.
|
||||
continue
|
||||
}
|
||||
|
||||
rs[fp.ResourceOwner()] = struct{}{}
|
||||
// StateOf lets us figure out the state of the collection from the
|
||||
// previous and current path
|
||||
func StateOf(prev, curr path.Path) CollectionState {
|
||||
if curr == nil || len(curr.String()) == 0 {
|
||||
return DeletedState
|
||||
}
|
||||
|
||||
rss := make([]string, 0, len(rs))
|
||||
|
||||
for k := range rs {
|
||||
rss = append(rss, k)
|
||||
if prev == nil || len(prev.String()) == 0 {
|
||||
return NewState
|
||||
}
|
||||
|
||||
return rss
|
||||
if curr.Folder() != prev.Folder() {
|
||||
return MovedState
|
||||
}
|
||||
|
||||
return NotMovedState
|
||||
}
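// Illustrative only (not part of this change): how a caller might branch on the
// state returned by StateOf, where prev is the container's path in the previous
// backup (nil when brand new) and curr is its current path (nil when deleted).
func describeState(prev, curr path.Path) string {
	switch StateOf(prev, curr) {
	case NewState:
		return "new: nothing to merge; enumerate every item under curr"
	case MovedState:
		return "moved: same container, new location; merge items found at prev"
	case DeletedState:
		return "deleted: container is gone; tombstone the items recorded under prev"
	default:
		return "not moved: standard incremental merge"
	}
}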
@ -10,89 +10,57 @@ import (
|
||||
"github.com/alcionai/corso/src/pkg/path"
|
||||
)
|
||||
|
||||
type mockColl struct {
|
||||
p path.Path
|
||||
prevP path.Path
|
||||
}
|
||||
|
||||
func (mc mockColl) Items() <-chan Stream {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mc mockColl) FullPath() path.Path {
|
||||
return mc.p
|
||||
}
|
||||
|
||||
func (mc mockColl) PreviousPath() path.Path {
|
||||
return mc.prevP
|
||||
}
|
||||
|
||||
func (mc mockColl) State() CollectionState {
|
||||
return NewState
|
||||
}
|
||||
|
||||
func (mc mockColl) DoNotMergeItems() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
type CollectionSuite struct {
|
||||
type DataCollectionSuite struct {
|
||||
suite.Suite
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
// tests
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
|
||||
func TestCollectionSuite(t *testing.T) {
|
||||
suite.Run(t, new(CollectionSuite))
|
||||
func TestDataCollectionSuite(t *testing.T) {
|
||||
suite.Run(t, new(DataCollectionSuite))
|
||||
}
|
||||
|
||||
func (suite *CollectionSuite) TestResourceOwnerSet() {
|
||||
t := suite.T()
|
||||
toColl := func(t *testing.T, resource string) Collection {
|
||||
p, err := path.Builder{}.
|
||||
Append("foo").
|
||||
ToDataLayerExchangePathForCategory("tid", resource, path.EventsCategory, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
return mockColl{p, nil}
|
||||
}
|
||||
func (suite *DataCollectionSuite) TestStateOf() {
|
||||
fooP, err := path.Builder{}.
|
||||
Append("foo").
|
||||
ToDataLayerExchangePathForCategory("t", "u", path.EmailCategory, false)
|
||||
require.NoError(suite.T(), err)
|
||||
barP, err := path.Builder{}.
|
||||
Append("bar").
|
||||
ToDataLayerExchangePathForCategory("t", "u", path.EmailCategory, false)
|
||||
require.NoError(suite.T(), err)
|
||||
|
||||
table := []struct {
|
||||
name string
|
||||
input []Collection
|
||||
expect []string
|
||||
prev path.Path
|
||||
curr path.Path
|
||||
expect CollectionState
|
||||
}{
|
||||
{
|
||||
name: "empty",
|
||||
input: []Collection{},
|
||||
expect: []string{},
|
||||
name: "new",
|
||||
curr: fooP,
|
||||
expect: NewState,
|
||||
},
|
||||
{
|
||||
name: "nil",
|
||||
input: nil,
|
||||
expect: []string{},
|
||||
name: "not moved",
|
||||
prev: fooP,
|
||||
curr: fooP,
|
||||
expect: NotMovedState,
|
||||
},
|
||||
{
|
||||
name: "single resource",
|
||||
input: []Collection{toColl(t, "fnords")},
|
||||
expect: []string{"fnords"},
|
||||
name: "moved",
|
||||
prev: fooP,
|
||||
curr: barP,
|
||||
expect: MovedState,
|
||||
},
|
||||
{
|
||||
name: "multiple resource",
|
||||
input: []Collection{toColl(t, "fnords"), toColl(t, "smarfs")},
|
||||
expect: []string{"fnords", "smarfs"},
|
||||
},
|
||||
{
|
||||
name: "duplciate resources",
|
||||
input: []Collection{toColl(t, "fnords"), toColl(t, "smarfs"), toColl(t, "fnords")},
|
||||
expect: []string{"fnords", "smarfs"},
|
||||
name: "deleted",
|
||||
prev: fooP,
|
||||
expect: DeletedState,
|
||||
},
|
||||
}
|
||||
for _, test := range table {
|
||||
suite.T().Run(test.name, func(t *testing.T) {
|
||||
rs := ResourceOwnerSet(test.input)
|
||||
assert.ElementsMatch(t, test.expect, rs)
|
||||
state := StateOf(test.prev, test.curr)
|
||||
assert.Equal(t, test.expect, state)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@ -7,7 +7,7 @@ import (
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/alcionai/clues"
|
||||
analytics "github.com/rudderlabs/analytics-go"
|
||||
|
||||
"github.com/alcionai/corso/src/internal/version"
|
||||
@ -93,7 +93,7 @@ func NewBus(ctx context.Context, s storage.Storage, tenID string, opts control.O
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return Bus{}, errors.Wrap(err, "configuring event bus")
|
||||
return Bus{}, clues.Wrap(err, "configuring event bus").WithClues(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -6,6 +6,7 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/alcionai/clues"
|
||||
"github.com/kopia/kopia/fs"
|
||||
"github.com/kopia/kopia/repo"
|
||||
"github.com/kopia/kopia/repo/blob"
|
||||
@ -17,7 +18,6 @@ import (
|
||||
"github.com/kopia/kopia/snapshot/snapshotfs"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/alcionai/corso/src/internal/common"
|
||||
"github.com/alcionai/corso/src/pkg/storage"
|
||||
)
|
||||
|
||||
@ -29,11 +29,9 @@ const (
|
||||
defaultSchedulingInterval = time.Second * 0
|
||||
)
|
||||
|
||||
const defaultConfigErrTmpl = "setting default repo config values"
|
||||
|
||||
var (
|
||||
errInit = errors.New("initializing repo")
|
||||
errConnect = errors.New("connecting repo")
|
||||
ErrSettingDefaultConfig = errors.New("setting default repo config values")
|
||||
ErrorRepoAlreadyExists = errors.New("repo already exists")
|
||||
)
|
||||
|
||||
// Having all fields set to 0 causes it to keep max-int versions of snapshots.
|
||||
@ -53,19 +51,6 @@ type snapshotLoader interface {
|
||||
SnapshotRoot(man *snapshot.Manifest) (fs.Entry, error)
|
||||
}
|
||||
|
||||
type ErrorRepoAlreadyExists struct {
|
||||
common.Err
|
||||
}
|
||||
|
||||
func RepoAlreadyExistsError(e error) error {
|
||||
return ErrorRepoAlreadyExists{*common.EncapsulateError(e)}
|
||||
}
|
||||
|
||||
func IsRepoAlreadyExistsError(e error) bool {
|
||||
var erae ErrorRepoAlreadyExists
|
||||
return errors.As(e, &erae)
|
||||
}
|
||||
|
||||
var (
|
||||
_ snapshotManager = &conn{}
|
||||
_ snapshotLoader = &conn{}
|
||||
@ -87,22 +72,22 @@ func NewConn(s storage.Storage) *conn {
|
||||
func (w *conn) Initialize(ctx context.Context) error {
|
||||
bst, err := blobStoreByProvider(ctx, w.storage)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, errInit.Error())
|
||||
return errors.Wrap(err, "initializing storage")
|
||||
}
|
||||
defer bst.Close(ctx)
|
||||
|
||||
cfg, err := w.storage.CommonConfig()
|
||||
if err != nil {
|
||||
return err
|
||||
return clues.Stack(err).WithClues(ctx)
|
||||
}
|
||||
|
||||
// todo - issue #75: nil here should be a storage.NewRepoOptions()
|
||||
if err = repo.Initialize(ctx, bst, nil, cfg.CorsoPassphrase); err != nil {
|
||||
if errors.Is(err, repo.ErrAlreadyInitialized) {
|
||||
return RepoAlreadyExistsError(err)
|
||||
return clues.Stack(ErrorRepoAlreadyExists, err).WithClues(ctx)
|
||||
}
|
||||
|
||||
return errors.Wrap(err, errInit.Error())
|
||||
return clues.Wrap(err, "initialzing repo").WithClues(ctx)
|
||||
}
|
||||
|
||||
return w.commonConnect(
|
||||
@ -117,13 +102,13 @@ func (w *conn) Initialize(ctx context.Context) error {
|
||||
func (w *conn) Connect(ctx context.Context) error {
|
||||
bst, err := blobStoreByProvider(ctx, w.storage)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, errInit.Error())
|
||||
return errors.Wrap(err, "initializing storage")
|
||||
}
|
||||
defer bst.Close(ctx)
|
||||
|
||||
cfg, err := w.storage.CommonConfig()
|
||||
if err != nil {
|
||||
return err
|
||||
return clues.Stack(err).WithClues(ctx)
|
||||
}
|
||||
|
||||
return w.commonConnect(
|
||||
@ -162,14 +147,18 @@ func (w *conn) commonConnect(
|
||||
password,
|
||||
opts,
|
||||
); err != nil {
|
||||
return errors.Wrap(err, errConnect.Error())
|
||||
return clues.Wrap(err, "connecting to repo").WithClues(ctx)
|
||||
}
|
||||
|
||||
if err := w.open(ctx, cfgFile, password); err != nil {
|
||||
return err
|
||||
return clues.Stack(err).WithClues(ctx)
|
||||
}
|
||||
|
||||
return w.setDefaultConfigValues(ctx)
|
||||
if err := w.setDefaultConfigValues(ctx); err != nil {
|
||||
return clues.Stack(err).WithClues(ctx)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
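// Hedged illustration (not in this diff) of the clues idiom being adopted here:
// values are attached to the context once, and error wrapping pulls them back in
// with WithClues, so call sites don't restate the identifiers. repoID and the
// callback are hypothetical values used only for this example.
func annotateAndWrap(ctx context.Context, repoID string, do func(context.Context) error) error {
	ctx = clues.Add(ctx, "repo_id", repoID)

	if err := do(ctx); err != nil {
		return clues.Wrap(err, "running repo operation").WithClues(ctx)
	}

	return nil
}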
func blobStoreByProvider(ctx context.Context, s storage.Storage) (blob.Storage, error) {
|
||||
@ -177,7 +166,7 @@ func blobStoreByProvider(ctx context.Context, s storage.Storage) (blob.Storage,
|
||||
case storage.ProviderS3:
|
||||
return s3BlobStorage(ctx, s)
|
||||
default:
|
||||
return nil, errors.New("storage provider details are required")
|
||||
return nil, clues.New("storage provider details are required").WithClues(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
@ -204,7 +193,11 @@ func (w *conn) close(ctx context.Context) error {
|
||||
err := w.Repository.Close(ctx)
|
||||
w.Repository = nil
|
||||
|
||||
return errors.Wrap(err, "closing repository connection")
|
||||
if err != nil {
|
||||
return clues.Wrap(err, "closing repository connection").WithClues(ctx)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *conn) open(ctx context.Context, configPath, password string) error {
|
||||
@ -216,7 +209,7 @@ func (w *conn) open(ctx context.Context, configPath, password string) error {
|
||||
// TODO(ashmrtnz): issue #75: nil here should be storage.ConnectionOptions().
|
||||
rep, err := repo.Open(ctx, configPath, password, nil)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "opening repository connection")
|
||||
return clues.Wrap(err, "opening repository connection").WithClues(ctx)
|
||||
}
|
||||
|
||||
w.Repository = rep
|
||||
@ -229,7 +222,7 @@ func (w *conn) wrap() error {
|
||||
defer w.mu.Unlock()
|
||||
|
||||
if w.refCount == 0 {
|
||||
return errors.New("conn already closed")
|
||||
return clues.New("conn already closed")
|
||||
}
|
||||
|
||||
w.refCount++
|
||||
@ -240,12 +233,12 @@ func (w *conn) wrap() error {
|
||||
func (w *conn) setDefaultConfigValues(ctx context.Context) error {
|
||||
p, err := w.getGlobalPolicyOrEmpty(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, defaultConfigErrTmpl)
|
||||
return clues.Stack(ErrSettingDefaultConfig, err)
|
||||
}
|
||||
|
||||
changed, err := updateCompressionOnPolicy(defaultCompressor, p)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, defaultConfigErrTmpl)
|
||||
return clues.Stack(ErrSettingDefaultConfig, err)
|
||||
}
|
||||
|
||||
if updateRetentionOnPolicy(defaultRetention, p) {
|
||||
@ -260,10 +253,11 @@ func (w *conn) setDefaultConfigValues(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
return errors.Wrap(
|
||||
w.writeGlobalPolicy(ctx, "UpdateGlobalPolicyWithDefaults", p),
|
||||
"updating global policy with defaults",
|
||||
)
|
||||
if err := w.writeGlobalPolicy(ctx, "UpdateGlobalPolicyWithDefaults", p); err != nil {
|
||||
return clues.Wrap(err, "updating global policy with defaults")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Compression attempts to set the global compression policy for the kopia repo
|
||||
@ -273,7 +267,7 @@ func (w *conn) Compression(ctx context.Context, compressor string) error {
|
||||
// compressor was given.
|
||||
comp := compression.Name(compressor)
|
||||
if err := checkCompressor(comp); err != nil {
|
||||
return err
|
||||
return clues.Stack(err).WithClues(ctx)
|
||||
}
|
||||
|
||||
p, err := w.getGlobalPolicyOrEmpty(ctx)
|
||||
@ -283,17 +277,18 @@ func (w *conn) Compression(ctx context.Context, compressor string) error {
|
||||
|
||||
changed, err := updateCompressionOnPolicy(compressor, p)
|
||||
if err != nil {
|
||||
return err
|
||||
return clues.Stack(err).WithClues(ctx)
|
||||
}
|
||||
|
||||
if !changed {
|
||||
return nil
|
||||
}
|
||||
|
||||
return errors.Wrap(
|
||||
w.writeGlobalPolicy(ctx, "UpdateGlobalCompressionPolicy", p),
|
||||
"updating global compression policy",
|
||||
)
|
||||
if err := w.writeGlobalPolicy(ctx, "UpdateGlobalCompressionPolicy", p); err != nil {
|
||||
return clues.Wrap(err, "updating global compression policy")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func updateCompressionOnPolicy(compressor string, p *policy.Policy) (bool, error) {
|
||||
@ -349,7 +344,7 @@ func (w *conn) getPolicyOrEmpty(ctx context.Context, si snapshot.SourceInfo) (*p
|
||||
return &policy.Policy{}, nil
|
||||
}
|
||||
|
||||
return nil, errors.Wrapf(err, "getting backup policy for %+v", si)
|
||||
return nil, clues.Wrap(err, "getting backup policy").With("source_info", si).WithClues(ctx)
|
||||
}
|
||||
|
||||
return p, nil
|
||||
@ -370,16 +365,22 @@ func (w *conn) writePolicy(
|
||||
si snapshot.SourceInfo,
|
||||
p *policy.Policy,
|
||||
) error {
|
||||
err := repo.WriteSession(
|
||||
ctx,
|
||||
w.Repository,
|
||||
repo.WriteSessionOptions{Purpose: purpose},
|
||||
func(innerCtx context.Context, rw repo.RepositoryWriter) error {
|
||||
return policy.SetPolicy(ctx, rw, si, p)
|
||||
},
|
||||
)
|
||||
ctx = clues.Add(ctx, "source_info", si)
|
||||
|
||||
return errors.Wrapf(err, "updating policy for %+v", si)
|
||||
writeOpts := repo.WriteSessionOptions{Purpose: purpose}
|
||||
cb := func(innerCtx context.Context, rw repo.RepositoryWriter) error {
|
||||
if err := policy.SetPolicy(ctx, rw, si, p); err != nil {
|
||||
return clues.Stack(err).WithClues(innerCtx)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := repo.WriteSession(ctx, w.Repository, writeOpts, cb); err != nil {
|
||||
return clues.Wrap(err, "updating policy").WithClues(ctx)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkCompressor(compressor compression.Name) error {
|
||||
@ -389,14 +390,19 @@ func checkCompressor(compressor compression.Name) error {
|
||||
}
|
||||
}
|
||||
|
||||
return errors.Errorf("unknown compressor type %s", compressor)
|
||||
return clues.Stack(clues.New("unknown compressor type"), clues.New(string(compressor)))
|
||||
}
|
||||
|
||||
func (w *conn) LoadSnapshots(
|
||||
ctx context.Context,
|
||||
ids []manifest.ID,
|
||||
) ([]*snapshot.Manifest, error) {
|
||||
return snapshot.LoadSnapshots(ctx, w.Repository, ids)
|
||||
mans, err := snapshot.LoadSnapshots(ctx, w.Repository, ids)
|
||||
if err != nil {
|
||||
return nil, clues.Stack(err).WithClues(ctx)
|
||||
}
|
||||
|
||||
return mans, nil
|
||||
}
|
||||
|
||||
func (w *conn) SnapshotRoot(man *snapshot.Manifest) (fs.Entry, error) {
|
||||
|
||||
@ -85,7 +85,7 @@ func (suite *WrapperIntegrationSuite) TestRepoExistsError() {
|
||||
|
||||
err := k.Initialize(ctx)
|
||||
assert.Error(t, err)
|
||||
assert.True(t, IsRepoAlreadyExistsError(err))
|
||||
assert.ErrorIs(t, err, ErrorRepoAlreadyExists)
|
||||
}
|
||||
|
||||
func (suite *WrapperIntegrationSuite) TestBadProviderErrors() {
|
||||
|
||||
@ -1,20 +1,26 @@
|
||||
package kopia
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
|
||||
"github.com/alcionai/clues"
|
||||
"github.com/kopia/kopia/fs"
|
||||
|
||||
"github.com/alcionai/corso/src/internal/data"
|
||||
"github.com/alcionai/corso/src/pkg/path"
|
||||
)
|
||||
|
||||
var (
|
||||
_ data.Collection = &kopiaDataCollection{}
|
||||
_ data.Stream = &kopiaDataStream{}
|
||||
_ data.RestoreCollection = &kopiaDataCollection{}
|
||||
_ data.Stream = &kopiaDataStream{}
|
||||
)
|
||||
|
||||
type kopiaDataCollection struct {
|
||||
path path.Path
|
||||
streams []data.Stream
|
||||
path path.Path
|
||||
streams []data.Stream
|
||||
snapshotRoot fs.Entry
|
||||
counter ByteCounter
|
||||
}
|
||||
|
||||
func (kdc *kopiaDataCollection) Items() <-chan data.Stream {
|
||||
@ -35,16 +41,23 @@ func (kdc kopiaDataCollection) FullPath() path.Path {
|
||||
return kdc.path
|
||||
}
|
||||
|
||||
func (kdc kopiaDataCollection) PreviousPath() path.Path {
|
||||
return nil
|
||||
}
|
||||
func (kdc kopiaDataCollection) Fetch(
|
||||
ctx context.Context,
|
||||
name string,
|
||||
) (data.Stream, error) {
|
||||
if kdc.snapshotRoot == nil {
|
||||
return nil, clues.New("no snapshot root")
|
||||
}
|
||||
|
||||
func (kdc kopiaDataCollection) State() data.CollectionState {
|
||||
return data.NewState
|
||||
}
|
||||
p, err := kdc.FullPath().Append(name, true)
|
||||
if err != nil {
|
||||
return nil, clues.Wrap(err, "creating item path")
|
||||
}
|
||||
|
||||
func (kdc kopiaDataCollection) DoNotMergeItems() bool {
|
||||
return false
|
||||
// TODO(ashmrtn): We could possibly hold a reference to the folder this
|
||||
// collection corresponds to, but that requires larger changes for the
|
||||
// creation of these collections.
|
||||
return getItemStream(ctx, p, kdc.snapshotRoot, kdc.counter)
|
||||
}
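// Hedged usage sketch: restoring a single known item from a kopia-backed collection
// via the Fetch method above. The caller, context, and item name are placeholders
// for the example, not part of this change.
func restoreOne(ctx context.Context, kdc *kopiaDataCollection, name string) ([]byte, error) {
	s, err := kdc.Fetch(ctx, name)
	if err != nil {
		return nil, err
	}

	return io.ReadAll(s.ToReader())
}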
type kopiaDataStream struct {
|
||||
|
||||
@ -2,14 +2,20 @@ package kopia
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"io"
|
||||
"testing"
|
||||
|
||||
"github.com/kopia/kopia/fs"
|
||||
"github.com/kopia/kopia/fs/virtualfs"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/stretchr/testify/suite"
|
||||
|
||||
"github.com/alcionai/corso/src/internal/connector/mockconnector"
|
||||
"github.com/alcionai/corso/src/internal/data"
|
||||
"github.com/alcionai/corso/src/internal/tester"
|
||||
"github.com/alcionai/corso/src/pkg/path"
|
||||
)
|
||||
|
||||
@ -113,3 +119,172 @@ func (suite *KopiaDataCollectionUnitSuite) TestReturnsStreams() {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// These types are needed because we check that a fs.File was returned.
|
||||
// Unfortunately fs.StreamingFile and fs.File have different interfaces so we
|
||||
// have to fake things.
|
||||
type mockSeeker struct{}
|
||||
|
||||
func (s mockSeeker) Seek(offset int64, whence int) (int64, error) {
|
||||
return 0, errors.New("not implemented")
|
||||
}
|
||||
|
||||
type mockReader struct {
|
||||
io.ReadCloser
|
||||
mockSeeker
|
||||
}
|
||||
|
||||
func (r mockReader) Entry() (fs.Entry, error) {
|
||||
return nil, errors.New("not implemented")
|
||||
}
|
||||
|
||||
type mockFile struct {
|
||||
// Use for Entry interface.
|
||||
fs.StreamingFile
|
||||
r io.ReadCloser
|
||||
}
|
||||
|
||||
func (f *mockFile) Open(ctx context.Context) (fs.Reader, error) {
|
||||
return mockReader{ReadCloser: f.r}, nil
|
||||
}
|
||||
|
||||
func (suite *KopiaDataCollectionUnitSuite) TestFetch() {
	var (
		tenant   = "a-tenant"
		user     = "a-user"
		service  = path.ExchangeService.String()
		category = path.EmailCategory
		folder1  = "folder1"
		folder2  = "folder2"

		noErrFileName = "noError"
		errFileName   = "error"

		noErrFileData = "foo bar baz"

		errReader = &mockconnector.MockExchangeData{
			ReadErr: assert.AnError,
		}
	)

	// Needs to be a function so we can switch the serialization version as
	// needed.
	getLayout := func(serVersion uint32) fs.Entry {
		return virtualfs.NewStaticDirectory(encodeAsPath(tenant), []fs.Entry{
			virtualfs.NewStaticDirectory(encodeAsPath(service), []fs.Entry{
				virtualfs.NewStaticDirectory(encodeAsPath(user), []fs.Entry{
					virtualfs.NewStaticDirectory(encodeAsPath(category.String()), []fs.Entry{
						virtualfs.NewStaticDirectory(encodeAsPath(folder1), []fs.Entry{
							virtualfs.NewStaticDirectory(encodeAsPath(folder2), []fs.Entry{
								&mockFile{
									StreamingFile: virtualfs.StreamingFileFromReader(
										encodeAsPath(noErrFileName),
										nil,
									),
									r: newBackupStreamReader(
										serVersion,
										io.NopCloser(bytes.NewReader([]byte(noErrFileData))),
									),
								},
								&mockFile{
									StreamingFile: virtualfs.StreamingFileFromReader(
										encodeAsPath(errFileName),
										nil,
									),
									r: newBackupStreamReader(
										serVersion,
										errReader.ToReader(),
									),
								},
							}),
						}),
					}),
				}),
			}),
		})
	}

	b := path.Builder{}.Append(folder1, folder2)
	pth, err := b.ToDataLayerExchangePathForCategory(
		tenant,
		user,
		category,
		false,
	)
	require.NoError(suite.T(), err)

	table := []struct {
		name                      string
		inputName                 string
		inputSerializationVersion uint32
		expectedData              []byte
		lookupErr                 assert.ErrorAssertionFunc
		readErr                   assert.ErrorAssertionFunc
		notFoundErr               bool
	}{
		{
			name:                      "FileFound_NoError",
			inputName:                 noErrFileName,
			inputSerializationVersion: serializationVersion,
			expectedData:              []byte(noErrFileData),
			lookupErr:                 assert.NoError,
			readErr:                   assert.NoError,
		},
		{
			name:                      "FileFound_ReadError",
			inputName:                 errFileName,
			inputSerializationVersion: serializationVersion,
			lookupErr:                 assert.NoError,
			readErr:                   assert.Error,
		},
		{
			name:                      "FileFound_VersionError",
			inputName:                 noErrFileName,
			inputSerializationVersion: serializationVersion + 1,
			lookupErr:                 assert.NoError,
			readErr:                   assert.Error,
		},
		{
			name:                      "FileNotFound",
			inputName:                 "foo",
			inputSerializationVersion: serializationVersion + 1,
			lookupErr:                 assert.Error,
			notFoundErr:               true,
		},
	}
	for _, test := range table {
		suite.Run(test.name, func() {
			ctx, flush := tester.NewContext()
			defer flush()

			t := suite.T()

			root := getLayout(test.inputSerializationVersion)
			c := &i64counter{}

			col := &kopiaDataCollection{path: pth, snapshotRoot: root, counter: c}

			s, err := col.Fetch(ctx, test.inputName)

			test.lookupErr(t, err)

			if err != nil {
				if test.notFoundErr {
					assert.ErrorIs(t, err, data.ErrNotFound)
				}

				return
			}

			fileData, err := io.ReadAll(s.ToReader())

			test.readErr(t, err)

			if err != nil {
				return
			}

			assert.Equal(t, test.expectedData, fileData)
		})
	}
}
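The FileFound_VersionError case above passes serializationVersion + 1 into getLayout and expects the lookup to succeed but the read to fail. That only makes sense if newBackupStreamReader prefixes each item's payload with its serialization version and the collection's reader rejects a mismatched header. The helper itself isn't shown in this diff, so the following is only a rough, self-contained sketch of that version-header idea; the names and the 4-byte big-endian layout are assumptions, not Corso's actual wire format.

package sketch

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// newVersionedReader prefixes payload with a 4-byte big-endian version header.
func newVersionedReader(version uint32, payload io.Reader) io.Reader {
	hdr := make([]byte, 4)
	binary.BigEndian.PutUint32(hdr, version)

	return io.MultiReader(bytes.NewReader(hdr), payload)
}

// readVersioned checks the header before returning the payload, so a stream
// written with a different version fails at read time, not at lookup time.
func readVersioned(expected uint32, r io.Reader) ([]byte, error) {
	hdr := make([]byte, 4)
	if _, err := io.ReadFull(r, hdr); err != nil {
		return nil, err
	}

	if got := binary.BigEndian.Uint32(hdr); got != expected {
		return nil, fmt.Errorf("unexpected serialization version %d, want %d", got, expected)
	}

	return io.ReadAll(r)
}
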
@ -4,12 +4,14 @@ import (
|
||||
"context"
|
||||
"strconv"
|
||||
|
||||
"github.com/alcionai/clues"
|
||||
"github.com/google/uuid"
|
||||
"github.com/kopia/kopia/repo"
|
||||
"github.com/kopia/kopia/repo/manifest"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/exp/maps"
|
||||
|
||||
"github.com/alcionai/corso/src/internal/data"
|
||||
"github.com/alcionai/corso/src/internal/model"
|
||||
)
|
||||
|
||||
@ -20,7 +22,6 @@ const (
|
||||
)
|
||||
|
||||
var (
|
||||
ErrNotFound = errors.New("not found")
|
||||
errNoModelStoreID = errors.New("model has no ModelStoreID")
|
||||
errNoStableID = errors.New("model has no StableID")
|
||||
errBadTagKey = errors.New("tag key overlaps with required key")
|
||||
@ -59,7 +60,7 @@ func (ms *ModelStore) Close(ctx context.Context) error {
|
||||
// bad model type is given.
|
||||
func tagsForModel(s model.Schema, tags map[string]string) (map[string]string, error) {
|
||||
if _, ok := tags[manifest.TypeLabelKey]; ok {
|
||||
return nil, errors.WithStack(errBadTagKey)
|
||||
return nil, clues.Stack(errBadTagKey)
|
||||
}
|
||||
|
||||
res := make(map[string]string, len(tags)+1)
|
||||
@ -80,11 +81,11 @@ func tagsForModelWithID(
|
||||
tags map[string]string,
|
||||
) (map[string]string, error) {
|
||||
if !s.Valid() {
|
||||
return nil, errors.WithStack(errUnrecognizedSchema)
|
||||
return nil, clues.Stack(errUnrecognizedSchema)
|
||||
}
|
||||
|
||||
if len(id) == 0 {
|
||||
return nil, errors.WithStack(errNoStableID)
|
||||
return nil, clues.Stack(errNoStableID)
|
||||
}
|
||||
|
||||
res, err := tagsForModel(s, tags)
|
||||
@ -93,13 +94,13 @@ func tagsForModelWithID(
|
||||
}
|
||||
|
||||
if _, ok := res[stableIDKey]; ok {
|
||||
return nil, errors.WithStack(errBadTagKey)
|
||||
return nil, clues.Stack(errBadTagKey)
|
||||
}
|
||||
|
||||
res[stableIDKey] = string(id)
|
||||
|
||||
if _, ok := res[modelVersionKey]; ok {
|
||||
return nil, errors.WithStack(errBadTagKey)
|
||||
return nil, clues.Stack(errBadTagKey)
|
||||
}
|
||||
|
||||
res[modelVersionKey] = strconv.Itoa(version)
|
||||
@ -117,7 +118,7 @@ func putInner(
|
||||
create bool,
|
||||
) error {
|
||||
if !s.Valid() {
|
||||
return errors.WithStack(errUnrecognizedSchema)
|
||||
return clues.Stack(errUnrecognizedSchema).WithClues(ctx)
|
||||
}
|
||||
|
||||
base := m.Base()
|
||||
@ -128,13 +129,13 @@ func putInner(
|
||||
tmpTags, err := tagsForModelWithID(s, base.ID, base.Version, base.Tags)
|
||||
if err != nil {
|
||||
// Will be wrapped at a higher layer.
|
||||
return err
|
||||
return clues.Stack(err).WithClues(ctx)
|
||||
}
|
||||
|
||||
id, err := w.PutManifest(ctx, tmpTags, m)
|
||||
if err != nil {
|
||||
// Will be wrapped at a higher layer.
|
||||
return err
|
||||
return clues.Stack(err).WithClues(ctx)
|
||||
}
|
||||
|
||||
base.ModelStoreID = id
|
||||
@ -150,7 +151,7 @@ func (ms *ModelStore) Put(
|
||||
m model.Model,
|
||||
) error {
|
||||
if !s.Valid() {
|
||||
return errors.WithStack(errUnrecognizedSchema)
|
||||
return clues.Stack(errUnrecognizedSchema)
|
||||
}
|
||||
|
||||
m.Base().Version = ms.modelVersion
|
||||
@ -162,14 +163,16 @@ func (ms *ModelStore) Put(
|
||||
func(innerCtx context.Context, w repo.RepositoryWriter) error {
|
||||
err := putInner(innerCtx, w, s, m, true)
|
||||
if err != nil {
|
||||
return err
|
||||
return clues.Stack(err).WithClues(innerCtx)
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
)
|
||||
})
|
||||
if err != nil {
|
||||
return clues.Wrap(err, "putting model").WithClues(ctx)
|
||||
}
|
||||
|
||||
return errors.Wrap(err, "putting model")
|
||||
return nil
|
||||
}
|
||||
|
||||
func stripHiddenTags(tags map[string]string) {
|
||||
@ -184,7 +187,7 @@ func (ms ModelStore) populateBaseModelFromMetadata(
|
||||
) error {
|
||||
id, ok := m.Labels[stableIDKey]
|
||||
if !ok {
|
||||
return errors.WithStack(errNoStableID)
|
||||
return clues.Stack(errNoStableID)
|
||||
}
|
||||
|
||||
v, err := strconv.Atoi(m.Labels[modelVersionKey])
|
||||
@ -193,7 +196,7 @@ func (ms ModelStore) populateBaseModelFromMetadata(
|
||||
}
|
||||
|
||||
if v != ms.modelVersion {
|
||||
return errors.Errorf("bad model version %s", m.Labels[modelVersionKey])
|
||||
return clues.Wrap(clues.New(m.Labels[modelVersionKey]), "bad model version")
|
||||
}
|
||||
|
||||
base.ModelStoreID = m.ID
|
||||
@ -211,7 +214,7 @@ func (ms ModelStore) baseModelFromMetadata(
|
||||
) (*model.BaseModel, error) {
|
||||
res := &model.BaseModel{}
|
||||
if err := ms.populateBaseModelFromMetadata(res, m); err != nil {
|
||||
return nil, err
|
||||
return nil, clues.Stack(err).WithAll("metadata_id", m.ID, "metadata_modtime", m.ModTime)
|
||||
}
|
||||
|
||||
return res, nil
|
||||
@ -226,21 +229,21 @@ func (ms *ModelStore) GetIDsForType(
|
||||
tags map[string]string,
|
||||
) ([]*model.BaseModel, error) {
|
||||
if !s.Valid() {
|
||||
return nil, errors.WithStack(errUnrecognizedSchema)
|
||||
return nil, clues.Stack(errUnrecognizedSchema).WithClues(ctx)
|
||||
}
|
||||
|
||||
if _, ok := tags[stableIDKey]; ok {
|
||||
return nil, errors.WithStack(errBadTagKey)
|
||||
return nil, clues.Stack(errBadTagKey).WithClues(ctx)
|
||||
}
|
||||
|
||||
tmpTags, err := tagsForModel(s, tags)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "getting model metadata")
|
||||
return nil, clues.Wrap(err, "getting model metadata").WithClues(ctx)
|
||||
}
|
||||
|
||||
metadata, err := ms.c.FindManifests(ctx, tmpTags)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "getting model metadata")
|
||||
return nil, clues.Wrap(err, "getting model metadata").WithClues(ctx)
|
||||
}
|
||||
|
||||
res := make([]*model.BaseModel, 0, len(metadata))
|
||||
@ -248,7 +251,7 @@ func (ms *ModelStore) GetIDsForType(
|
||||
for _, m := range metadata {
|
||||
bm, err := ms.baseModelFromMetadata(m)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "parsing model metadata")
|
||||
return nil, clues.Wrap(err, "parsing model metadata").WithClues(ctx)
|
||||
}
|
||||
|
||||
res = append(res, bm)
|
||||
@ -266,30 +269,30 @@ func (ms *ModelStore) getModelStoreID(
|
||||
id model.StableID,
|
||||
) (manifest.ID, error) {
|
||||
if !s.Valid() {
|
||||
return "", errors.WithStack(errUnrecognizedSchema)
|
||||
return "", clues.Stack(errUnrecognizedSchema).WithClues(ctx)
|
||||
}
|
||||
|
||||
if len(id) == 0 {
|
||||
return "", errors.WithStack(errNoStableID)
|
||||
return "", clues.Stack(errNoStableID).WithClues(ctx)
|
||||
}
|
||||
|
||||
tags := map[string]string{stableIDKey: string(id)}
|
||||
|
||||
metadata, err := ms.c.FindManifests(ctx, tags)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "getting ModelStoreID")
|
||||
return "", clues.Wrap(err, "getting ModelStoreID").WithClues(ctx)
|
||||
}
|
||||
|
||||
if len(metadata) == 0 {
|
||||
return "", errors.Wrap(ErrNotFound, "getting ModelStoreID")
|
||||
return "", clues.Wrap(data.ErrNotFound, "getting ModelStoreID").WithClues(ctx)
|
||||
}
|
||||
|
||||
if len(metadata) != 1 {
|
||||
return "", errors.New("multiple models with same StableID")
|
||||
return "", clues.New("multiple models with same StableID").WithClues(ctx)
|
||||
}
|
||||
|
||||
if metadata[0].Labels[manifest.TypeLabelKey] != s.String() {
|
||||
return "", errors.WithStack(errModelTypeMismatch)
|
||||
return "", clues.Stack(errModelTypeMismatch).WithClues(ctx)
|
||||
}
|
||||
|
||||
return metadata[0].ID, nil
|
||||
@ -302,10 +305,10 @@ func (ms *ModelStore) Get(
|
||||
ctx context.Context,
|
||||
s model.Schema,
|
||||
id model.StableID,
|
||||
data model.Model,
|
||||
m model.Model,
|
||||
) error {
|
||||
if !s.Valid() {
|
||||
return errors.WithStack(errUnrecognizedSchema)
|
||||
return clues.Stack(errUnrecognizedSchema).WithClues(ctx)
|
||||
}
|
||||
|
||||
modelID, err := ms.getModelStoreID(ctx, s, id)
|
||||
@ -313,7 +316,7 @@ func (ms *ModelStore) Get(
|
||||
return err
|
||||
}
|
||||
|
||||
return transmuteErr(ms.GetWithModelStoreID(ctx, s, modelID, data))
|
||||
return ms.GetWithModelStoreID(ctx, s, modelID, m)
|
||||
}
|
||||
|
||||
// GetWithModelStoreID deserializes the model with the given ModelStoreID into
|
||||
@ -323,29 +326,37 @@ func (ms *ModelStore) GetWithModelStoreID(
|
||||
ctx context.Context,
|
||||
s model.Schema,
|
||||
id manifest.ID,
|
||||
data model.Model,
|
||||
m model.Model,
|
||||
) error {
|
||||
if !s.Valid() {
|
||||
return errors.WithStack(errUnrecognizedSchema)
|
||||
return clues.Stack(errUnrecognizedSchema).WithClues(ctx)
|
||||
}
|
||||
|
||||
if len(id) == 0 {
|
||||
return errors.WithStack(errNoModelStoreID)
|
||||
return clues.Stack(errNoModelStoreID).WithClues(ctx)
|
||||
}
|
||||
|
||||
metadata, err := ms.c.GetManifest(ctx, id, data)
|
||||
metadata, err := ms.c.GetManifest(ctx, id, m)
|
||||
if err != nil {
|
||||
return errors.Wrap(transmuteErr(err), "getting model data")
|
||||
if errors.Is(err, manifest.ErrNotFound) {
|
||||
err = data.ErrNotFound
|
||||
}
|
||||
|
||||
return clues.Wrap(err, "getting model data").WithClues(ctx)
|
||||
}
|
||||
|
||||
if metadata.Labels[manifest.TypeLabelKey] != s.String() {
|
||||
return errors.WithStack(errModelTypeMismatch)
|
||||
mdlbl := metadata.Labels[manifest.TypeLabelKey]
|
||||
if mdlbl != s.String() {
|
||||
return clues.Stack(errModelTypeMismatch).
|
||||
WithClues(ctx).
|
||||
WithAll("expected_label", s, "got_label", mdlbl)
|
||||
}
|
||||
|
||||
return errors.Wrap(
|
||||
ms.populateBaseModelFromMetadata(data.Base(), metadata),
|
||||
"getting model by ID",
|
||||
)
|
||||
if err := ms.populateBaseModelFromMetadata(m.Base(), metadata); err != nil {
|
||||
return clues.Wrap(err, "getting model by ID").WithClues(ctx)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// checkPrevModelVersion compares the ModelType and ModelStoreID in this model
|
||||
@ -359,26 +370,31 @@ func (ms *ModelStore) checkPrevModelVersion(
|
||||
b *model.BaseModel,
|
||||
) error {
|
||||
if !s.Valid() {
|
||||
return errors.WithStack(errUnrecognizedSchema)
|
||||
return clues.Stack(errUnrecognizedSchema).WithClues(ctx)
|
||||
}
|
||||
|
||||
id, err := ms.getModelStoreID(ctx, s, b.ID)
|
||||
if err != nil {
|
||||
return err
|
||||
return clues.Stack(err).WithClues(ctx)
|
||||
}
|
||||
|
||||
// We actually got something back during our lookup.
|
||||
meta, err := ms.c.GetManifest(ctx, id, nil)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "getting previous model version")
|
||||
return clues.Wrap(err, "getting previous model version").WithClues(ctx)
|
||||
}
|
||||
|
||||
if meta.ID != b.ModelStoreID {
|
||||
return errors.New("updated model has different ModelStoreID")
|
||||
return clues.New("updated model has different ModelStoreID").
|
||||
WithClues(ctx).
|
||||
WithAll("expected_id", meta.ID, "model_store_id", b.ModelStoreID)
|
||||
}
|
||||
|
||||
if meta.Labels[manifest.TypeLabelKey] != s.String() {
|
||||
return errors.New("updated model has different model type")
|
||||
mdlbl := meta.Labels[manifest.TypeLabelKey]
|
||||
if mdlbl != s.String() {
|
||||
return clues.New("updated model has different model type").
|
||||
WithClues(ctx).
|
||||
WithAll("expected_label", s, "got_label", mdlbl)
|
||||
}
|
||||
|
||||
return nil
|
||||
@ -396,12 +412,12 @@ func (ms *ModelStore) Update(
|
||||
m model.Model,
|
||||
) error {
|
||||
if !s.Valid() {
|
||||
return errors.WithStack(errUnrecognizedSchema)
|
||||
return clues.Stack(errUnrecognizedSchema).WithClues(ctx)
|
||||
}
|
||||
|
||||
base := m.Base()
|
||||
if len(base.ModelStoreID) == 0 {
|
||||
return errors.WithStack(errNoModelStoreID)
|
||||
return clues.Stack(errNoModelStoreID).WithClues(ctx)
|
||||
}
|
||||
|
||||
base.Version = ms.modelVersion
|
||||
@ -415,8 +431,11 @@ func (ms *ModelStore) Update(
|
||||
ctx,
|
||||
ms.c,
|
||||
repo.WriteSessionOptions{Purpose: "ModelStoreUpdate"},
|
||||
func(innerCtx context.Context, w repo.RepositoryWriter) (innerErr error) {
|
||||
oldID := base.ModelStoreID
|
||||
func(innerCtx context.Context, w repo.RepositoryWriter) error {
|
||||
var (
|
||||
innerErr error
|
||||
oldID = base.ModelStoreID
|
||||
)
|
||||
|
||||
defer func() {
|
||||
if innerErr != nil {
|
||||
@ -429,19 +448,26 @@ func (ms *ModelStore) Update(
|
||||
return innerErr
|
||||
}
|
||||
|
||||
// if equal, everything worked out fine.
|
||||
// if not, we handle the cleanup below.
|
||||
if oldID == base.ModelStoreID {
|
||||
return nil
|
||||
}
|
||||
|
||||
// If we fail at this point no changes will be made to the manifest store
|
||||
// in kopia, making it appear like nothing ever happened. At worst some
|
||||
// orphaned content blobs may be uploaded, but they should be garbage
|
||||
// collected the next time kopia maintenance is run.
|
||||
if oldID != base.ModelStoreID {
|
||||
innerErr = w.DeleteManifest(innerCtx, oldID)
|
||||
innerErr = w.DeleteManifest(innerCtx, oldID)
|
||||
if innerErr != nil {
|
||||
return clues.Stack(innerErr).WithClues(ctx)
|
||||
}
|
||||
|
||||
return innerErr
|
||||
return nil
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "updating model")
|
||||
return clues.Wrap(err, "updating model").WithClues(ctx)
|
||||
}
|
||||
|
||||
return nil
|
||||
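The reworked write-session callback above follows a put-then-clean-up shape: write the new manifest, record its ID on the in-memory model, and delete the prior manifest only when the ID actually changed, with a deferred rollback of the model if any step fails. A condensed sketch of that pattern follows; manifestWriter and baseModel are invented placeholders for the kopia repository writer and model.BaseModel, not real Corso types.

package sketch

import "context"

type manifestWriter interface {
	PutManifest(ctx context.Context, labels map[string]string, payload any) (string, error)
	DeleteManifest(ctx context.Context, id string) error
}

type baseModel struct {
	ModelStoreID string
	Tags         map[string]string
}

// updateManifest writes the new manifest, then removes the old one only if a
// new ID was issued; on any failure the in-memory ID is rolled back.
func updateManifest(ctx context.Context, w manifestWriter, b *baseModel, body any) (err error) {
	oldID := b.ModelStoreID

	defer func() {
		if err != nil {
			b.ModelStoreID = oldID
		}
	}()

	newID, err := w.PutManifest(ctx, b.Tags, body)
	if err != nil {
		return err
	}

	b.ModelStoreID = newID

	if oldID == newID {
		// Same manifest ID reissued; nothing to clean up.
		return nil
	}

	// If this delete fails the write session is abandoned; any orphaned
	// content is left for kopia maintenance, as the diff's comment notes.
	return w.DeleteManifest(ctx, oldID)
}
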
@ -452,12 +478,12 @@ func (ms *ModelStore) Update(
|
||||
// have the same StableID.
|
||||
func (ms *ModelStore) Delete(ctx context.Context, s model.Schema, id model.StableID) error {
|
||||
if !s.Valid() {
|
||||
return errors.WithStack(errUnrecognizedSchema)
|
||||
return clues.Stack(errUnrecognizedSchema).WithClues(ctx)
|
||||
}
|
||||
|
||||
latest, err := ms.getModelStoreID(ctx, s, id)
|
||||
if err != nil {
|
||||
if errors.Is(err, ErrNotFound) {
|
||||
if errors.Is(err, data.ErrNotFound) {
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -472,26 +498,17 @@ func (ms *ModelStore) Delete(ctx context.Context, s model.Schema, id model.Stabl
|
||||
// exist.
|
||||
func (ms *ModelStore) DeleteWithModelStoreID(ctx context.Context, id manifest.ID) error {
|
||||
if len(id) == 0 {
|
||||
return errors.WithStack(errNoModelStoreID)
|
||||
return clues.Stack(errNoModelStoreID).WithClues(ctx)
|
||||
}
|
||||
|
||||
err := repo.WriteSession(
|
||||
ctx,
|
||||
ms.c,
|
||||
repo.WriteSessionOptions{Purpose: "ModelStoreDelete"},
|
||||
func(innerCtx context.Context, w repo.RepositoryWriter) error {
|
||||
return w.DeleteManifest(innerCtx, id)
|
||||
},
|
||||
)
|
||||
|
||||
return errors.Wrap(err, "deleting model")
|
||||
}
|
||||
|
||||
func transmuteErr(err error) error {
|
||||
switch {
|
||||
case errors.Is(err, manifest.ErrNotFound):
|
||||
return ErrNotFound
|
||||
default:
|
||||
return err
|
||||
opts := repo.WriteSessionOptions{Purpose: "ModelStoreDelete"}
|
||||
cb := func(innerCtx context.Context, w repo.RepositoryWriter) error {
|
||||
return w.DeleteManifest(innerCtx, id)
|
||||
}
|
||||
|
||||
if err := repo.WriteSession(ctx, ms.c, opts, cb); err != nil {
|
||||
return clues.Wrap(err, "deleting model").WithClues(ctx)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
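Most of the model_store.go changes above are a mechanical migration from github.com/pkg/errors to github.com/alcionai/clues. The practical difference is that clues-built errors can carry the key/value annotations attached to the context (WithClues) plus ad-hoc pairs (WithAll) while still satisfying errors.Is against the original sentinel. Below is a minimal sketch of the calling pattern, using only the clues calls that appear in this diff; findByID and example are made-up functions for illustration.

package sketch

import (
	"context"
	"errors"

	"github.com/alcionai/clues"
)

var errNotFound = errors.New("not found")

// findByID exists only to show the wrapping style the diff adopts.
func findByID(ctx context.Context, id string) error {
	if len(id) == 0 {
		// Stack preserves the sentinel for errors.Is checks; WithClues copies
		// the context's annotations onto the error; WithAll adds extra pairs.
		return clues.Stack(errNotFound).
			WithClues(ctx).
			WithAll("lookup_id", id)
	}

	return nil
}

func example(ctx context.Context) error {
	if err := findByID(ctx, ""); err != nil {
		// Wrap adds a message the same way errors.Wrap did.
		return clues.Wrap(err, "getting model metadata").WithClues(ctx)
	}

	return nil
}
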
@ -12,6 +12,7 @@ import (
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/stretchr/testify/suite"
|
||||
|
||||
"github.com/alcionai/corso/src/internal/data"
|
||||
"github.com/alcionai/corso/src/internal/model"
|
||||
"github.com/alcionai/corso/src/internal/tester"
|
||||
"github.com/alcionai/corso/src/pkg/backup"
|
||||
@ -360,9 +361,9 @@ func (suite *ModelStoreIntegrationSuite) TestPutGet_WithTags() {
|
||||
func (suite *ModelStoreIntegrationSuite) TestGet_NotFoundErrors() {
|
||||
t := suite.T()
|
||||
|
||||
assert.ErrorIs(t, suite.m.Get(suite.ctx, model.BackupOpSchema, "baz", nil), ErrNotFound)
|
||||
assert.ErrorIs(t, suite.m.Get(suite.ctx, model.BackupOpSchema, "baz", nil), data.ErrNotFound)
|
||||
assert.ErrorIs(
|
||||
t, suite.m.GetWithModelStoreID(suite.ctx, model.BackupOpSchema, "baz", nil), ErrNotFound)
|
||||
t, suite.m.GetWithModelStoreID(suite.ctx, model.BackupOpSchema, "baz", nil), data.ErrNotFound)
|
||||
}
|
||||
|
||||
func (suite *ModelStoreIntegrationSuite) TestPutGetOfTypeBadVersion() {
|
||||
@ -630,7 +631,7 @@ func (suite *ModelStoreIntegrationSuite) TestPutUpdate() {
|
||||
}
|
||||
|
||||
err = m.GetWithModelStoreID(ctx, theModelType, oldModelID, nil)
|
||||
assert.ErrorIs(t, err, ErrNotFound)
|
||||
assert.ErrorIs(t, err, data.ErrNotFound)
|
||||
})
|
||||
}
|
||||
}
|
||||
@ -691,7 +692,7 @@ func (suite *ModelStoreIntegrationSuite) TestPutDelete() {
|
||||
|
||||
returned := &fooModel{}
|
||||
err := suite.m.GetWithModelStoreID(suite.ctx, theModelType, foo.ModelStoreID, returned)
|
||||
assert.ErrorIs(t, err, ErrNotFound)
|
||||
assert.ErrorIs(t, err, data.ErrNotFound)
|
||||
}
|
||||
|
||||
func (suite *ModelStoreIntegrationSuite) TestPutDelete_BadIDsNoop() {
|
||||
@ -775,7 +776,7 @@ func (suite *ModelStoreRegressionSuite) TestFailDuringWriteSessionHasNoVisibleEf
|
||||
assert.ErrorIs(t, err, assert.AnError)
|
||||
|
||||
err = m.GetWithModelStoreID(ctx, theModelType, newID, nil)
|
||||
assert.ErrorIs(t, err, ErrNotFound)
|
||||
assert.ErrorIs(t, err, data.ErrNotFound)
|
||||
|
||||
returned := &fooModel{}
|
||||
require.NoError(
|
||||
|
||||
@ -3,6 +3,7 @@ package kopia
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/alcionai/clues"
|
||||
"github.com/kopia/kopia/repo/blob"
|
||||
"github.com/kopia/kopia/repo/blob/s3"
|
||||
|
||||
@ -16,7 +17,7 @@ const (
|
||||
func s3BlobStorage(ctx context.Context, s storage.Storage) (blob.Storage, error) {
|
||||
cfg, err := s.S3Config()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, clues.Stack(err).WithClues(ctx)
|
||||
}
|
||||
|
||||
endpoint := defaultS3Endpoint
|
||||
@ -32,5 +33,10 @@ func s3BlobStorage(ctx context.Context, s storage.Storage) (blob.Storage, error)
|
||||
DoNotVerifyTLS: cfg.DoNotVerifyTLS,
|
||||
}
|
||||
|
||||
return s3.New(ctx, &opts, false)
|
||||
store, err := s3.New(ctx, &opts, false)
|
||||
if err != nil {
|
||||
return nil, clues.Stack(err).WithClues(ctx)
|
||||
}
|
||||
|
||||
return store, nil
|
||||
}
|
||||
|
||||
@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"sort"
|
||||
|
||||
"github.com/alcionai/clues"
|
||||
"github.com/kopia/kopia/repo/manifest"
|
||||
"github.com/kopia/kopia/snapshot"
|
||||
"github.com/pkg/errors"
|
||||
@ -218,9 +219,7 @@ func fetchPrevManifests(
|
||||
found = append(found, man.Manifest)
|
||||
logger.Ctx(ctx).Infow(
|
||||
"reusing cached complete snapshot",
|
||||
"snapshot_id",
|
||||
man.ID,
|
||||
)
|
||||
"snapshot_id", man.ID)
|
||||
}
|
||||
|
||||
return found, nil
|
||||
@ -251,29 +250,19 @@ func fetchPrevSnapshotManifests(
|
||||
for _, reason := range reasons {
|
||||
logger.Ctx(ctx).Infow(
|
||||
"searching for previous manifests for reason",
|
||||
"service",
|
||||
reason.Service.String(),
|
||||
"category",
|
||||
reason.Category.String(),
|
||||
)
|
||||
"service", reason.Service.String(),
|
||||
"category", reason.Category.String())
|
||||
|
||||
found, err := fetchPrevManifests(
|
||||
ctx,
|
||||
sm,
|
||||
mans,
|
||||
reason,
|
||||
tags,
|
||||
)
|
||||
found, err := fetchPrevManifests(ctx, sm, mans, reason, tags)
|
||||
if err != nil {
|
||||
logger.Ctx(ctx).Warnw(
|
||||
"fetching previous snapshot manifests for service/category/resource owner",
|
||||
"error",
|
||||
err,
|
||||
"service",
|
||||
reason.Service.String(),
|
||||
"category",
|
||||
reason.Category.String(),
|
||||
)
|
||||
logger.Ctx(ctx).
|
||||
With(
|
||||
"err", err,
|
||||
"service", reason.Service.String(),
|
||||
"category", reason.Category.String()).
|
||||
Warnw(
|
||||
"fetching previous snapshot manifests for service/category/resource owner",
|
||||
clues.InErr(err).Slice()...)
|
||||
|
||||
// Snapshot can still complete fine, just not as efficient.
|
||||
continue
|
||||
|
||||
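The logging edits in this hunk only collapse the one-key-or-value-per-line argument lists into flattened variadic pairs on a single Infow call, and route error details through clues.InErr(err).Slice(). The Infow/Warnw/With surface matches zap's sugared logger; the tiny stand-alone example below uses zap directly, which is an assumption about the underlying logger based on those method names.

package sketch

import "go.uber.org/zap"

// logSnapshotReuse mirrors the flattened key/value style the diff moves to.
func logSnapshotReuse(snapshotID string) {
	log := zap.NewExample().Sugar()
	defer log.Sync() //nolint:errcheck

	log.Infow("reusing cached complete snapshot",
		"snapshot_id", snapshotID)
}
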
@ -15,7 +15,7 @@ import (
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
"github.com/hashicorp/go-multierror"
|
||||
"github.com/alcionai/clues"
|
||||
"github.com/kopia/kopia/fs"
|
||||
"github.com/kopia/kopia/fs/virtualfs"
|
||||
"github.com/kopia/kopia/repo/manifest"
|
||||
@ -25,6 +25,7 @@ import (
|
||||
"github.com/alcionai/corso/src/internal/data"
|
||||
D "github.com/alcionai/corso/src/internal/diagnostics"
|
||||
"github.com/alcionai/corso/src/pkg/backup/details"
|
||||
"github.com/alcionai/corso/src/pkg/fault"
|
||||
"github.com/alcionai/corso/src/pkg/logger"
|
||||
"github.com/alcionai/corso/src/pkg/path"
|
||||
)
|
||||
@ -137,7 +138,7 @@ type corsoProgress struct {
|
||||
toMerge map[string]path.Path
|
||||
mu sync.RWMutex
|
||||
totalBytes int64
|
||||
errs *multierror.Error
|
||||
errs *fault.Errors
|
||||
}
|
||||
|
||||
// Kopia interface function used as a callback when kopia finishes processing a
|
||||
@ -167,11 +168,11 @@ func (cp *corsoProgress) FinishedFile(relativePath string, err error) {
|
||||
// never had to materialize their details in-memory.
|
||||
if d.info == nil {
|
||||
if d.prevPath == nil {
|
||||
cp.errs = multierror.Append(cp.errs, errors.Errorf(
|
||||
"item sourced from previous backup with no previous path. Service: %s, Category: %s",
|
||||
d.repoPath.Service().String(),
|
||||
d.repoPath.Category().String(),
|
||||
))
|
||||
cp.errs.Add(clues.New("item sourced from previous backup with no previous path").
|
||||
WithAll(
|
||||
"service", d.repoPath.Service().String(),
|
||||
"category", d.repoPath.Category().String(),
|
||||
))
|
||||
|
||||
return
|
||||
}
|
||||
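corsoProgress now reports per-item problems through a *fault.Errors bus (errs.Add) instead of appending to a multierror, and callers read the final state via errs.Err(). The fault package itself isn't shown in this diff; the toy accumulator below is only a guess at the Add/Err shape implied by the call sites, with fail-fast behavior standing in for fault.New(true).

package sketch

import "sync"

// errorBus imitates the Add/Err surface used in the diff; it is not the real
// fault.Errors implementation.
type errorBus struct {
	mu        sync.Mutex
	failFast  bool
	failure   error
	recovered []error
}

func newErrorBus(failFast bool) *errorBus {
	return &errorBus{failFast: failFast}
}

// Add records a non-fatal error; with failFast set, the first error becomes
// the bus's hard failure.
func (e *errorBus) Add(err error) {
	if err == nil {
		return
	}

	e.mu.Lock()
	defer e.mu.Unlock()

	if e.failFast && e.failure == nil {
		e.failure = err
		return
	}

	e.recovered = append(e.recovered, err)
}

// Err reports the hard failure, if any, mirroring progress.errs.Err() at the
// end of BackupCollections.
func (e *errorBus) Err() error {
	e.mu.Lock()
	defer e.mu.Unlock()

	return e.failure
}
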
@ -254,31 +255,28 @@ func (cp *corsoProgress) get(k string) *itemDetails {
|
||||
func collectionEntries(
|
||||
ctx context.Context,
|
||||
cb func(context.Context, fs.Entry) error,
|
||||
streamedEnts data.Collection,
|
||||
streamedEnts data.BackupCollection,
|
||||
progress *corsoProgress,
|
||||
) (map[string]struct{}, *multierror.Error) {
|
||||
) (map[string]struct{}, error) {
|
||||
if streamedEnts == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var (
|
||||
errs *multierror.Error
|
||||
// Track which items have already been seen so we can skip them if we see
|
||||
// them again in the data from the base snapshot.
|
||||
seen = map[string]struct{}{}
|
||||
items = streamedEnts.Items()
|
||||
log = logger.Ctx(ctx)
|
||||
)
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
errs = multierror.Append(errs, ctx.Err())
|
||||
return seen, errs
|
||||
return seen, clues.Stack(ctx.Err()).WithClues(ctx)
|
||||
|
||||
case e, ok := <-items:
|
||||
if !ok {
|
||||
return seen, errs
|
||||
return seen, nil
|
||||
}
|
||||
|
||||
encodedName := encodeAsPath(e.UUID())
|
||||
@ -302,9 +300,9 @@ func collectionEntries(
|
||||
itemPath, err := streamedEnts.FullPath().Append(e.UUID(), true)
|
||||
if err != nil {
|
||||
err = errors.Wrap(err, "getting full item path")
|
||||
errs = multierror.Append(errs, err)
|
||||
progress.errs.Add(err)
|
||||
|
||||
log.Error(err)
|
||||
logger.Ctx(ctx).With("err", err).Errorw("getting full item path", clues.InErr(err).Slice()...)
|
||||
|
||||
continue
|
||||
}
|
||||
@ -342,13 +340,12 @@ func collectionEntries(
|
||||
entry := virtualfs.StreamingFileWithModTimeFromReader(
|
||||
encodedName,
|
||||
modTime,
|
||||
newBackupStreamReader(serializationVersion, e.ToReader()),
|
||||
)
|
||||
newBackupStreamReader(serializationVersion, e.ToReader()))
|
||||
|
||||
if err := cb(ctx, entry); err != nil {
|
||||
// Kopia's uploader swallows errors in most cases, so if we see
|
||||
// something here it's probably a big issue and we should return.
|
||||
errs = multierror.Append(errs, errors.Wrapf(err, "executing callback on %q", itemPath))
|
||||
return seen, errs
|
||||
return seen, clues.Wrap(err, "executing callback").WithClues(ctx).With("item_path", itemPath)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -442,7 +439,7 @@ func getStreamItemFunc(
|
||||
curPath path.Path,
|
||||
prevPath path.Path,
|
||||
staticEnts []fs.Entry,
|
||||
streamedEnts data.Collection,
|
||||
streamedEnts data.BackupCollection,
|
||||
baseDir fs.Directory,
|
||||
globalExcludeSet map[string]struct{},
|
||||
progress *corsoProgress,
|
||||
@ -454,11 +451,14 @@ func getStreamItemFunc(
|
||||
// Return static entries in this directory first.
|
||||
for _, d := range staticEnts {
|
||||
if err := cb(ctx, d); err != nil {
|
||||
return errors.Wrap(err, "executing callback on static directory")
|
||||
return clues.Wrap(err, "executing callback on static directory").WithClues(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
seen, errs := collectionEntries(ctx, cb, streamedEnts, progress)
|
||||
seen, err := collectionEntries(ctx, cb, streamedEnts, progress)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "streaming collection entries")
|
||||
}
|
||||
|
||||
if err := streamBaseEntries(
|
||||
ctx,
|
||||
@ -470,13 +470,10 @@ func getStreamItemFunc(
|
||||
globalExcludeSet,
|
||||
progress,
|
||||
); err != nil {
|
||||
errs = multierror.Append(
|
||||
errs,
|
||||
errors.Wrap(err, "streaming base snapshot entries"),
|
||||
)
|
||||
return errors.Wrap(err, "streaming base snapshot entries")
|
||||
}
|
||||
|
||||
return errs.ErrorOrNil()
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
@ -540,7 +537,7 @@ type treeMap struct {
|
||||
childDirs map[string]*treeMap
|
||||
// Reference to data pulled from the external service. Contains only items in
|
||||
// this directory. Does not contain references to subdirectories.
|
||||
collection data.Collection
|
||||
collection data.BackupCollection
|
||||
// Reference to directory in base snapshot. The referenced directory itself
|
||||
// may contain files and subdirectories, but the subdirectories should
|
||||
// eventually be added when walking the base snapshot to build the hierarchy,
|
||||
@ -617,7 +614,7 @@ func getTreeNode(roots map[string]*treeMap, pathElements []string) *treeMap {
|
||||
|
||||
func inflateCollectionTree(
|
||||
ctx context.Context,
|
||||
collections []data.Collection,
|
||||
collections []data.BackupCollection,
|
||||
) (map[string]*treeMap, map[string]path.Path, error) {
|
||||
roots := make(map[string]*treeMap)
|
||||
// Contains the old path for collections that have been moved or renamed.
|
||||
@ -911,13 +908,13 @@ func inflateBaseTree(
|
||||
// exclude from base directories when uploading the snapshot. As items in *all*
|
||||
// base directories will be checked for in every base directory, this assumes
|
||||
// that items in the bases are unique. Deletions of directories or subtrees
|
||||
// should be represented as changes in the status of a Collection, not an entry
|
||||
// in the globalExcludeSet.
|
||||
// should be represented as changes in the status of a BackupCollection, not an
|
||||
// entry in the globalExcludeSet.
|
||||
func inflateDirTree(
|
||||
ctx context.Context,
|
||||
loader snapshotLoader,
|
||||
baseSnaps []IncrementalBase,
|
||||
collections []data.Collection,
|
||||
collections []data.BackupCollection,
|
||||
globalExcludeSet map[string]struct{},
|
||||
progress *corsoProgress,
|
||||
) (fs.Directory, error) {
|
||||
@ -933,9 +930,7 @@ func inflateDirTree(
|
||||
|
||||
logger.Ctx(ctx).Infow(
|
||||
"merging hierarchies from base snapshots",
|
||||
"snapshot_ids",
|
||||
baseIDs,
|
||||
)
|
||||
"snapshot_ids", baseIDs)
|
||||
|
||||
for _, snap := range baseSnaps {
|
||||
if err = inflateBaseTree(ctx, loader, snap, updatedPaths, roots); err != nil {
|
||||
|
||||
@ -22,6 +22,7 @@ import (
|
||||
"github.com/alcionai/corso/src/internal/data"
|
||||
"github.com/alcionai/corso/src/internal/tester"
|
||||
"github.com/alcionai/corso/src/pkg/backup/details"
|
||||
"github.com/alcionai/corso/src/pkg/fault"
|
||||
"github.com/alcionai/corso/src/pkg/path"
|
||||
)
|
||||
|
||||
@ -456,6 +457,7 @@ func (suite *CorsoProgressUnitSuite) TestFinishedFile() {
|
||||
UploadProgress: &snapshotfs.NullUploadProgress{},
|
||||
deets: bd,
|
||||
pending: map[string]*itemDetails{},
|
||||
errs: fault.New(true),
|
||||
}
|
||||
|
||||
ci := test.cachedItems(suite.targetFileName, suite.targetFilePath)
|
||||
@ -503,6 +505,7 @@ func (suite *CorsoProgressUnitSuite) TestFinishedFileCachedNoPrevPathErrors() {
|
||||
UploadProgress: &snapshotfs.NullUploadProgress{},
|
||||
deets: bd,
|
||||
pending: map[string]*itemDetails{},
|
||||
errs: fault.New(true),
|
||||
}
|
||||
|
||||
for k, v := range cachedItems {
|
||||
@ -518,7 +521,7 @@ func (suite *CorsoProgressUnitSuite) TestFinishedFileCachedNoPrevPathErrors() {
|
||||
|
||||
assert.Empty(t, cp.pending)
|
||||
assert.Empty(t, bd.Details().Entries)
|
||||
assert.Error(t, cp.errs.ErrorOrNil())
|
||||
assert.Error(t, cp.errs.Err())
|
||||
}
|
||||
|
||||
func (suite *CorsoProgressUnitSuite) TestFinishedFileBuildsHierarchyNewItem() {
|
||||
@ -533,6 +536,7 @@ func (suite *CorsoProgressUnitSuite) TestFinishedFileBuildsHierarchyNewItem() {
|
||||
deets: bd,
|
||||
pending: map[string]*itemDetails{},
|
||||
toMerge: map[string]path.Path{},
|
||||
errs: fault.New(true),
|
||||
}
|
||||
|
||||
deets := &itemDetails{info: &details.ItemInfo{}, repoPath: suite.targetFilePath}
|
||||
@ -605,6 +609,7 @@ func (suite *CorsoProgressUnitSuite) TestFinishedFileBaseItemDoesntBuildHierarch
|
||||
deets: bd,
|
||||
pending: map[string]*itemDetails{},
|
||||
toMerge: map[string]path.Path{},
|
||||
errs: fault.New(true),
|
||||
}
|
||||
|
||||
deets := &itemDetails{
|
||||
@ -629,6 +634,7 @@ func (suite *CorsoProgressUnitSuite) TestFinishedHashingFile() {
|
||||
UploadProgress: &snapshotfs.NullUploadProgress{},
|
||||
deets: bd,
|
||||
pending: map[string]*itemDetails{},
|
||||
errs: fault.New(true),
|
||||
}
|
||||
|
||||
ci := test.cachedItems(suite.targetFileName, suite.targetFilePath)
|
||||
@ -681,9 +687,12 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree() {
|
||||
user2Encoded: 42,
|
||||
}
|
||||
|
||||
progress := &corsoProgress{pending: map[string]*itemDetails{}}
|
||||
progress := &corsoProgress{
|
||||
pending: map[string]*itemDetails{},
|
||||
errs: fault.New(true),
|
||||
}
|
||||
|
||||
collections := []data.Collection{
|
||||
collections := []data.BackupCollection{
|
||||
mockconnector.NewMockExchangeCollection(
|
||||
suite.testPath,
|
||||
expectedFileCount[user1Encoded],
|
||||
@ -759,11 +768,11 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_MixedDirectory()
|
||||
// - 42 separate files
|
||||
table := []struct {
|
||||
name string
|
||||
layout []data.Collection
|
||||
layout []data.BackupCollection
|
||||
}{
|
||||
{
|
||||
name: "SubdirFirst",
|
||||
layout: []data.Collection{
|
||||
layout: []data.BackupCollection{
|
||||
mockconnector.NewMockExchangeCollection(
|
||||
p2,
|
||||
5,
|
||||
@ -776,7 +785,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_MixedDirectory()
|
||||
},
|
||||
{
|
||||
name: "SubdirLast",
|
||||
layout: []data.Collection{
|
||||
layout: []data.BackupCollection{
|
||||
mockconnector.NewMockExchangeCollection(
|
||||
suite.testPath,
|
||||
42,
|
||||
@ -791,7 +800,10 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_MixedDirectory()
|
||||
|
||||
for _, test := range table {
|
||||
suite.T().Run(test.name, func(t *testing.T) {
|
||||
progress := &corsoProgress{pending: map[string]*itemDetails{}}
|
||||
progress := &corsoProgress{
|
||||
pending: map[string]*itemDetails{},
|
||||
errs: fault.New(true),
|
||||
}
|
||||
|
||||
dirTree, err := inflateDirTree(ctx, nil, nil, test.layout, nil, progress)
|
||||
require.NoError(t, err)
|
||||
@ -845,7 +857,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_Fails() {
|
||||
|
||||
table := []struct {
|
||||
name string
|
||||
layout []data.Collection
|
||||
layout []data.BackupCollection
|
||||
}{
|
||||
{
|
||||
"MultipleRoots",
|
||||
@ -862,7 +874,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_Fails() {
|
||||
// - emails
|
||||
// - Inbox
|
||||
// - 42 separate files
|
||||
[]data.Collection{
|
||||
[]data.BackupCollection{
|
||||
mockconnector.NewMockExchangeCollection(
|
||||
suite.testPath,
|
||||
5,
|
||||
@ -875,7 +887,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_Fails() {
|
||||
},
|
||||
{
|
||||
"NoCollectionPath",
|
||||
[]data.Collection{
|
||||
[]data.BackupCollection{
|
||||
mockconnector.NewMockExchangeCollection(
|
||||
nil,
|
||||
5,
|
||||
@ -971,9 +983,12 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeErrors() {
|
||||
ctx, flush := tester.NewContext()
|
||||
defer flush()
|
||||
|
||||
progress := &corsoProgress{pending: map[string]*itemDetails{}}
|
||||
progress := &corsoProgress{
|
||||
pending: map[string]*itemDetails{},
|
||||
errs: fault.New(true),
|
||||
}
|
||||
|
||||
cols := []data.Collection{}
|
||||
cols := []data.BackupCollection{}
|
||||
for _, s := range test.states {
|
||||
prevPath := dirPath
|
||||
nowPath := dirPath
|
||||
@ -1037,17 +1052,17 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSingleSubtree() {
|
||||
|
||||
table := []struct {
|
||||
name string
|
||||
inputCollections func() []data.Collection
|
||||
inputCollections func() []data.BackupCollection
|
||||
expected *expectedNode
|
||||
}{
|
||||
{
|
||||
name: "SkipsDeletedItems",
|
||||
inputCollections: func() []data.Collection {
|
||||
inputCollections: func() []data.BackupCollection {
|
||||
mc := mockconnector.NewMockExchangeCollection(dirPath, 1)
|
||||
mc.Names[0] = testFileName
|
||||
mc.DeletedItems[0] = true
|
||||
|
||||
return []data.Collection{mc}
|
||||
return []data.BackupCollection{mc}
|
||||
},
|
||||
expected: expectedTreeWithChildren(
|
||||
[]string{
|
||||
@ -1066,13 +1081,13 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSingleSubtree() {
|
||||
},
|
||||
{
|
||||
name: "AddsNewItems",
|
||||
inputCollections: func() []data.Collection {
|
||||
inputCollections: func() []data.BackupCollection {
|
||||
mc := mockconnector.NewMockExchangeCollection(dirPath, 1)
|
||||
mc.Names[0] = testFileName2
|
||||
mc.Data[0] = testFileData2
|
||||
mc.ColState = data.NotMovedState
|
||||
|
||||
return []data.Collection{mc}
|
||||
return []data.BackupCollection{mc}
|
||||
},
|
||||
expected: expectedTreeWithChildren(
|
||||
[]string{
|
||||
@ -1101,13 +1116,13 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSingleSubtree() {
|
||||
},
|
||||
{
|
||||
name: "SkipsUpdatedItems",
|
||||
inputCollections: func() []data.Collection {
|
||||
inputCollections: func() []data.BackupCollection {
|
||||
mc := mockconnector.NewMockExchangeCollection(dirPath, 1)
|
||||
mc.Names[0] = testFileName
|
||||
mc.Data[0] = testFileData2
|
||||
mc.ColState = data.NotMovedState
|
||||
|
||||
return []data.Collection{mc}
|
||||
return []data.BackupCollection{mc}
|
||||
},
|
||||
expected: expectedTreeWithChildren(
|
||||
[]string{
|
||||
@ -1132,7 +1147,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSingleSubtree() {
|
||||
},
|
||||
{
|
||||
name: "DeleteAndNew",
|
||||
inputCollections: func() []data.Collection {
|
||||
inputCollections: func() []data.BackupCollection {
|
||||
mc1 := mockconnector.NewMockExchangeCollection(dirPath, 0)
|
||||
mc1.ColState = data.DeletedState
|
||||
mc1.PrevPath = dirPath
|
||||
@ -1142,7 +1157,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSingleSubtree() {
|
||||
mc2.Names[0] = testFileName2
|
||||
mc2.Data[0] = testFileData2
|
||||
|
||||
return []data.Collection{mc1, mc2}
|
||||
return []data.BackupCollection{mc1, mc2}
|
||||
},
|
||||
expected: expectedTreeWithChildren(
|
||||
[]string{
|
||||
@ -1167,7 +1182,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSingleSubtree() {
|
||||
},
|
||||
{
|
||||
name: "MovedAndNew",
|
||||
inputCollections: func() []data.Collection {
|
||||
inputCollections: func() []data.BackupCollection {
|
||||
mc1 := mockconnector.NewMockExchangeCollection(dirPath2, 0)
|
||||
mc1.ColState = data.MovedState
|
||||
mc1.PrevPath = dirPath
|
||||
@ -1177,7 +1192,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSingleSubtree() {
|
||||
mc2.Names[0] = testFileName2
|
||||
mc2.Data[0] = testFileData2
|
||||
|
||||
return []data.Collection{mc1, mc2}
|
||||
return []data.BackupCollection{mc1, mc2}
|
||||
},
|
||||
expected: expectedTreeWithChildren(
|
||||
[]string{
|
||||
@ -1211,13 +1226,13 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSingleSubtree() {
|
||||
},
|
||||
{
|
||||
name: "NewDoesntMerge",
|
||||
inputCollections: func() []data.Collection {
|
||||
inputCollections: func() []data.BackupCollection {
|
||||
mc1 := mockconnector.NewMockExchangeCollection(dirPath, 1)
|
||||
mc1.ColState = data.NewState
|
||||
mc1.Names[0] = testFileName2
|
||||
mc1.Data[0] = testFileData2
|
||||
|
||||
return []data.Collection{mc1}
|
||||
return []data.BackupCollection{mc1}
|
||||
},
|
||||
expected: expectedTreeWithChildren(
|
||||
[]string{
|
||||
@ -1249,7 +1264,10 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSingleSubtree() {
|
||||
ctx, flush := tester.NewContext()
|
||||
defer flush()
|
||||
|
||||
progress := &corsoProgress{pending: map[string]*itemDetails{}}
|
||||
progress := &corsoProgress{
|
||||
pending: map[string]*itemDetails{},
|
||||
errs: fault.New(true),
|
||||
}
|
||||
msw := &mockSnapshotWalker{
|
||||
snapshotRoot: getBaseSnapshot(),
|
||||
}
|
||||
@ -1369,13 +1387,13 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto
|
||||
|
||||
table := []struct {
|
||||
name string
|
||||
inputCollections func(t *testing.T) []data.Collection
|
||||
inputCollections func(t *testing.T) []data.BackupCollection
|
||||
inputExcludes map[string]struct{}
|
||||
expected *expectedNode
|
||||
}{
|
||||
{
|
||||
name: "GlobalExcludeSet",
|
||||
inputCollections: func(t *testing.T) []data.Collection {
|
||||
inputCollections: func(t *testing.T) []data.BackupCollection {
|
||||
return nil
|
||||
},
|
||||
inputExcludes: map[string]struct{}{
|
||||
@ -1417,7 +1435,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto
|
||||
},
|
||||
{
|
||||
name: "MovesSubtree",
|
||||
inputCollections: func(t *testing.T) []data.Collection {
|
||||
inputCollections: func(t *testing.T) []data.BackupCollection {
|
||||
newPath := makePath(
|
||||
t,
|
||||
[]string{testTenant, service, testUser, category, testInboxDir + "2"},
|
||||
@ -1428,7 +1446,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto
|
||||
mc.PrevPath = inboxPath
|
||||
mc.ColState = data.MovedState
|
||||
|
||||
return []data.Collection{mc}
|
||||
return []data.BackupCollection{mc}
|
||||
},
|
||||
expected: expectedTreeWithChildren(
|
||||
[]string{
|
||||
@ -1474,7 +1492,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto
|
||||
},
|
||||
{
|
||||
name: "MovesChildAfterAncestorMove",
|
||||
inputCollections: func(t *testing.T) []data.Collection {
|
||||
inputCollections: func(t *testing.T) []data.BackupCollection {
|
||||
newInboxPath := makePath(
|
||||
t,
|
||||
[]string{testTenant, service, testUser, category, testInboxDir + "2"},
|
||||
@ -1494,7 +1512,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto
|
||||
work.PrevPath = workPath
|
||||
work.ColState = data.MovedState
|
||||
|
||||
return []data.Collection{inbox, work}
|
||||
return []data.BackupCollection{inbox, work}
|
||||
},
|
||||
expected: expectedTreeWithChildren(
|
||||
[]string{
|
||||
@ -1540,7 +1558,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto
|
||||
},
|
||||
{
|
||||
name: "MovesChildAfterAncestorDelete",
|
||||
inputCollections: func(t *testing.T) []data.Collection {
|
||||
inputCollections: func(t *testing.T) []data.BackupCollection {
|
||||
newWorkPath := makePath(
|
||||
t,
|
||||
[]string{testTenant, service, testUser, category, workDir},
|
||||
@ -1555,7 +1573,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto
|
||||
work.PrevPath = workPath
|
||||
work.ColState = data.MovedState
|
||||
|
||||
return []data.Collection{inbox, work}
|
||||
return []data.BackupCollection{inbox, work}
|
||||
},
|
||||
expected: expectedTreeWithChildren(
|
||||
[]string{
|
||||
@ -1579,7 +1597,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto
|
||||
},
|
||||
{
|
||||
name: "ReplaceDeletedDirectory",
|
||||
inputCollections: func(t *testing.T) []data.Collection {
|
||||
inputCollections: func(t *testing.T) []data.BackupCollection {
|
||||
personal := mockconnector.NewMockExchangeCollection(personalPath, 0)
|
||||
personal.PrevPath = personalPath
|
||||
personal.ColState = data.DeletedState
|
||||
@ -1588,7 +1606,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto
|
||||
work.PrevPath = workPath
|
||||
work.ColState = data.MovedState
|
||||
|
||||
return []data.Collection{personal, work}
|
||||
return []data.BackupCollection{personal, work}
|
||||
},
|
||||
expected: expectedTreeWithChildren(
|
||||
[]string{
|
||||
@ -1620,7 +1638,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto
|
||||
},
|
||||
{
|
||||
name: "ReplaceDeletedDirectoryWithNew",
|
||||
inputCollections: func(t *testing.T) []data.Collection {
|
||||
inputCollections: func(t *testing.T) []data.BackupCollection {
|
||||
personal := mockconnector.NewMockExchangeCollection(personalPath, 0)
|
||||
personal.PrevPath = personalPath
|
||||
personal.ColState = data.DeletedState
|
||||
@ -1630,7 +1648,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto
|
||||
newCol.Names[0] = workFileName2
|
||||
newCol.Data[0] = workFileData2
|
||||
|
||||
return []data.Collection{personal, newCol}
|
||||
return []data.BackupCollection{personal, newCol}
|
||||
},
|
||||
expected: expectedTreeWithChildren(
|
||||
[]string{
|
||||
@ -1671,7 +1689,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto
|
||||
},
|
||||
{
|
||||
name: "ReplaceMovedDirectory",
|
||||
inputCollections: func(t *testing.T) []data.Collection {
|
||||
inputCollections: func(t *testing.T) []data.BackupCollection {
|
||||
newPersonalPath := makePath(
|
||||
t,
|
||||
[]string{testTenant, service, testUser, category, personalDir},
|
||||
@ -1686,7 +1704,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto
|
||||
work.PrevPath = workPath
|
||||
work.ColState = data.MovedState
|
||||
|
||||
return []data.Collection{personal, work}
|
||||
return []data.BackupCollection{personal, work}
|
||||
},
|
||||
expected: expectedTreeWithChildren(
|
||||
[]string{
|
||||
@ -1729,7 +1747,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto
|
||||
},
|
||||
{
|
||||
name: "MoveDirectoryAndMergeItems",
|
||||
inputCollections: func(t *testing.T) []data.Collection {
|
||||
inputCollections: func(t *testing.T) []data.BackupCollection {
|
||||
newPersonalPath := makePath(
|
||||
t,
|
||||
[]string{testTenant, service, testUser, category, workDir},
|
||||
@ -1744,7 +1762,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto
|
||||
personal.Names[1] = testFileName4
|
||||
personal.Data[1] = testFileData4
|
||||
|
||||
return []data.Collection{personal}
|
||||
return []data.BackupCollection{personal}
|
||||
},
|
||||
expected: expectedTreeWithChildren(
|
||||
[]string{
|
||||
@ -1793,7 +1811,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto
|
||||
},
|
||||
{
|
||||
name: "MoveParentDeleteFileNoMergeSubtreeMerge",
|
||||
inputCollections: func(t *testing.T) []data.Collection {
|
||||
inputCollections: func(t *testing.T) []data.BackupCollection {
|
||||
newInboxPath := makePath(
|
||||
t,
|
||||
[]string{testTenant, service, testUser, category, personalDir},
|
||||
@ -1824,7 +1842,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto
|
||||
work.Names[0] = testFileName6
|
||||
work.Data[0] = testFileData6
|
||||
|
||||
return []data.Collection{inbox, work}
|
||||
return []data.BackupCollection{inbox, work}
|
||||
},
|
||||
expected: expectedTreeWithChildren(
|
||||
[]string{
|
||||
@ -1876,7 +1894,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto
|
||||
},
|
||||
{
|
||||
name: "NoMoveParentDeleteFileNoMergeSubtreeMerge",
|
||||
inputCollections: func(t *testing.T) []data.Collection {
|
||||
inputCollections: func(t *testing.T) []data.BackupCollection {
|
||||
inbox := mockconnector.NewMockExchangeCollection(inboxPath, 1)
|
||||
inbox.PrevPath = inboxPath
|
||||
inbox.ColState = data.NotMovedState
|
||||
@ -1892,7 +1910,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto
|
||||
work.Names[0] = testFileName6
|
||||
work.Data[0] = testFileData6
|
||||
|
||||
return []data.Collection{inbox, work}
|
||||
return []data.BackupCollection{inbox, work}
|
||||
},
|
||||
expected: expectedTreeWithChildren(
|
||||
[]string{
|
||||
@ -1951,7 +1969,10 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto
|
||||
ctx, flush := tester.NewContext()
|
||||
defer flush()
|
||||
|
||||
progress := &corsoProgress{pending: map[string]*itemDetails{}}
|
||||
progress := &corsoProgress{
|
||||
pending: map[string]*itemDetails{},
|
||||
errs: fault.New(true),
|
||||
}
|
||||
msw := &mockSnapshotWalker{
|
||||
snapshotRoot: getBaseSnapshot(),
|
||||
}
|
||||
@ -2097,7 +2118,10 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSkipsDeletedSubtre
|
||||
},
|
||||
)
|
||||
|
||||
progress := &corsoProgress{pending: map[string]*itemDetails{}}
|
||||
progress := &corsoProgress{
|
||||
pending: map[string]*itemDetails{},
|
||||
errs: fault.New(true),
|
||||
}
|
||||
mc := mockconnector.NewMockExchangeCollection(suite.testPath, 1)
|
||||
mc.PrevPath = mc.FullPath()
|
||||
mc.ColState = data.DeletedState
|
||||
@ -2105,7 +2129,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSkipsDeletedSubtre
|
||||
snapshotRoot: getBaseSnapshot(),
|
||||
}
|
||||
|
||||
collections := []data.Collection{mc}
|
||||
collections := []data.BackupCollection{mc}
|
||||
|
||||
// Returned directory structure should look like:
|
||||
// - a-tenant
|
||||
@ -2346,7 +2370,10 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSelectsCorrectSubt
|
||||
},
|
||||
)
|
||||
|
||||
progress := &corsoProgress{pending: map[string]*itemDetails{}}
|
||||
progress := &corsoProgress{
|
||||
pending: map[string]*itemDetails{},
|
||||
errs: fault.New(true),
|
||||
}
|
||||
|
||||
mc := mockconnector.NewMockExchangeCollection(inboxPath, 1)
|
||||
mc.PrevPath = mc.FullPath()
|
||||
@ -2361,7 +2388,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSelectsCorrectSubt
|
||||
},
|
||||
}
|
||||
|
||||
collections := []data.Collection{mc}
|
||||
collections := []data.BackupCollection{mc}
|
||||
|
||||
dirTree, err := inflateDirTree(
|
||||
ctx,
|
||||
|
||||
@ -4,7 +4,7 @@ import (
|
||||
"context"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/go-multierror"
|
||||
"github.com/alcionai/clues"
|
||||
"github.com/kopia/kopia/fs"
|
||||
"github.com/kopia/kopia/repo"
|
||||
"github.com/kopia/kopia/repo/manifest"
|
||||
@ -17,6 +17,7 @@ import (
|
||||
D "github.com/alcionai/corso/src/internal/diagnostics"
|
||||
"github.com/alcionai/corso/src/internal/stats"
|
||||
"github.com/alcionai/corso/src/pkg/backup/details"
|
||||
"github.com/alcionai/corso/src/pkg/fault"
|
||||
"github.com/alcionai/corso/src/pkg/logger"
|
||||
"github.com/alcionai/corso/src/pkg/path"
|
||||
)
|
||||
@ -101,7 +102,11 @@ func (w *Wrapper) Close(ctx context.Context) error {
|
||||
err := w.c.Close(ctx)
|
||||
w.c = nil
|
||||
|
||||
return errors.Wrap(err, "closing Wrapper")
|
||||
if err != nil {
|
||||
return clues.Wrap(err, "closing Wrapper").WithClues(ctx)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type IncrementalBase struct {
|
||||
@ -118,13 +123,14 @@ type IncrementalBase struct {
|
||||
func (w Wrapper) BackupCollections(
|
||||
ctx context.Context,
|
||||
previousSnapshots []IncrementalBase,
|
||||
collections []data.Collection,
|
||||
collections []data.BackupCollection,
|
||||
globalExcludeSet map[string]struct{},
|
||||
tags map[string]string,
|
||||
buildTreeWithBase bool,
|
||||
errs *fault.Errors,
|
||||
) (*BackupStats, *details.Builder, map[string]path.Path, error) {
|
||||
if w.c == nil {
|
||||
return nil, nil, nil, errNotConnected
|
||||
return nil, nil, nil, clues.Stack(errNotConnected).WithClues(ctx)
|
||||
}
|
||||
|
||||
ctx, end := D.Span(ctx, "kopia:backupCollections")
|
||||
@ -138,6 +144,7 @@ func (w Wrapper) BackupCollections(
|
||||
pending: map[string]*itemDetails{},
|
||||
deets: &details.Builder{},
|
||||
toMerge: map[string]path.Path{},
|
||||
errs: errs,
|
||||
}
|
||||
|
||||
// When running an incremental backup, we need to pass the prior
|
||||
@ -165,14 +172,12 @@ func (w Wrapper) BackupCollections(
|
||||
previousSnapshots,
|
||||
dirTree,
|
||||
tags,
|
||||
progress,
|
||||
)
|
||||
progress)
|
||||
if err != nil {
|
||||
combinedErrs := multierror.Append(nil, err, progress.errs)
|
||||
return nil, nil, nil, combinedErrs.ErrorOrNil()
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
|
||||
return s, progress.deets, progress.toMerge, progress.errs.ErrorOrNil()
|
||||
return s, progress.deets, progress.toMerge, progress.errs.Err()
|
||||
}
|
||||
|
||||
func (w Wrapper) makeSnapshotWithRoot(
|
||||
@ -197,9 +202,7 @@ func (w Wrapper) makeSnapshotWithRoot(
|
||||
|
||||
logger.Ctx(ctx).Infow(
|
||||
"using snapshots for kopia-assisted incrementals",
|
||||
"snapshot_ids",
|
||||
snapIDs,
|
||||
)
|
||||
"snapshot_ids", snapIDs)
|
||||
|
||||
tags := map[string]string{}
|
||||
|
||||
@ -224,6 +227,8 @@ func (w Wrapper) makeSnapshotWithRoot(
|
||||
OnUpload: bc.Count,
|
||||
},
|
||||
func(innerCtx context.Context, rw repo.RepositoryWriter) error {
|
||||
log := logger.Ctx(innerCtx)
|
||||
|
||||
si := snapshot.SourceInfo{
|
||||
Host: corsoHost,
|
||||
UserName: corsoUser,
|
||||
@ -240,8 +245,8 @@ func (w Wrapper) makeSnapshotWithRoot(
|
||||
}
|
||||
policyTree, err := policy.TreeForSourceWithOverride(innerCtx, w.c, si, errPolicy)
|
||||
if err != nil {
|
||||
err = errors.Wrap(err, "get policy tree")
|
||||
logger.Ctx(innerCtx).Errorw("kopia backup", err)
|
||||
err = clues.Wrap(err, "get policy tree").WithClues(ctx)
|
||||
log.With("err", err).Errorw("building kopia backup", clues.InErr(err).Slice()...)
|
||||
return err
|
||||
}
|
||||
|
||||
@ -253,16 +258,16 @@ func (w Wrapper) makeSnapshotWithRoot(
|
||||
|
||||
man, err = u.Upload(innerCtx, root, policyTree, si, prevSnaps...)
|
||||
if err != nil {
|
||||
err = errors.Wrap(err, "uploading data")
|
||||
logger.Ctx(innerCtx).Errorw("kopia backup", err)
|
||||
err = clues.Wrap(err, "uploading data").WithClues(ctx)
|
||||
log.With("err", err).Errorw("uploading kopia backup", clues.InErr(err).Slice()...)
|
||||
return err
|
||||
}
|
||||
|
||||
man.Tags = tags
|
||||
|
||||
if _, err := snapshot.SaveSnapshot(innerCtx, rw, man); err != nil {
|
||||
err = errors.Wrap(err, "saving snapshot")
|
||||
logger.Ctx(innerCtx).Errorw("kopia backup", err)
|
||||
err = clues.Wrap(err, "saving snapshot").WithClues(ctx)
|
||||
log.With("err", err).Errorw("persisting kopia backup snapshot", clues.InErr(err).Slice()...)
|
||||
return err
|
||||
}
|
||||
|
||||
@ -272,7 +277,7 @@ func (w Wrapper) makeSnapshotWithRoot(
|
||||
// Telling kopia to always flush may hide other errors if it fails while
|
||||
// flushing the write session (hence logging above).
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "kopia backup")
|
||||
return nil, clues.Wrap(err, "kopia backup")
|
||||
}
|
||||
|
||||
res := manifestToStats(man, progress, bc)
|
||||
@ -286,12 +291,15 @@ func (w Wrapper) getSnapshotRoot(
|
||||
) (fs.Entry, error) {
|
||||
man, err := snapshot.LoadSnapshot(ctx, w.c, manifest.ID(snapshotID))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "getting snapshot handle")
|
||||
return nil, clues.Wrap(err, "getting snapshot handle").WithClues(ctx)
|
||||
}
|
||||
|
||||
rootDirEntry, err := snapshotfs.SnapshotRoot(w.c, man)
|
||||
if err != nil {
|
||||
return nil, clues.Wrap(err, "getting root directory").WithClues(ctx)
|
||||
}
|
||||
|
||||
return rootDirEntry, errors.Wrap(err, "getting root directory")
|
||||
return rootDirEntry, nil
|
||||
}
|
||||
|
||||
// getItemStream looks up the item at the given path starting from snapshotRoot.
|
||||
@ -306,7 +314,7 @@ func getItemStream(
|
||||
bcounter ByteCounter,
|
) (data.Stream, error) {
if itemPath == nil {
return nil, errors.WithStack(errNoRestorePath)
return nil, clues.Stack(errNoRestorePath).WithClues(ctx)
}

// GetNestedEntry handles nil properly.
@ -317,15 +325,15 @@ func getItemStream(
)
if err != nil {
if strings.Contains(err.Error(), "entry not found") {
err = errors.Wrap(ErrNotFound, err.Error())
err = clues.Stack(data.ErrNotFound, err).WithClues(ctx)
}

return nil, errors.Wrap(err, "getting nested object handle")
return nil, clues.Wrap(err, "getting nested object handle").WithClues(ctx)
}

f, ok := e.(fs.File)
if !ok {
return nil, errors.New("requested object is not a file")
return nil, clues.New("requested object is not a file").WithClues(ctx)
}

if bcounter != nil {
@ -334,12 +342,12 @@ func getItemStream(

r, err := f.Open(ctx)
if err != nil {
return nil, errors.Wrap(err, "opening file")
return nil, clues.Wrap(err, "opening file").WithClues(ctx)
}

decodedName, err := decodeElement(f.Name())
if err != nil {
return nil, errors.Wrap(err, "decoding file name")
return nil, clues.Wrap(err, "decoding file name").WithClues(ctx)
}

return &kopiaDataStream{
@ -368,12 +376,13 @@ func (w Wrapper) RestoreMultipleItems(
snapshotID string,
paths []path.Path,
bcounter ByteCounter,
) ([]data.Collection, error) {
errs *fault.Errors,
) ([]data.RestoreCollection, error) {
ctx, end := D.Span(ctx, "kopia:restoreMultipleItems")
defer end()

if len(paths) == 0 {
return nil, errors.WithStack(errNoRestorePath)
return nil, clues.Stack(errNoRestorePath).WithClues(ctx)
}

snapshotRoot, err := w.getSnapshotRoot(ctx, snapshotID)
@ -381,40 +390,47 @@ func (w Wrapper) RestoreMultipleItems(
return nil, err
}

var (
errs *multierror.Error
// Maps short ID of parent path to data collection for that folder.
cols = map[string]*kopiaDataCollection{}
)
// Maps short ID of parent path to data collection for that folder.
cols := map[string]*kopiaDataCollection{}

for _, itemPath := range paths {
if errs.Err() != nil {
return nil, errs.Err()
}

ds, err := getItemStream(ctx, itemPath, snapshotRoot, bcounter)
if err != nil {
errs = multierror.Append(errs, err)
errs.Add(err)
continue
}

parentPath, err := itemPath.Dir()
if err != nil {
errs = multierror.Append(errs, errors.Wrap(err, "making directory collection"))
errs.Add(clues.Wrap(err, "making directory collection").WithClues(ctx))
continue
}

c, ok := cols[parentPath.ShortRef()]
if !ok {
cols[parentPath.ShortRef()] = &kopiaDataCollection{path: parentPath}
cols[parentPath.ShortRef()] = &kopiaDataCollection{
path: parentPath,
snapshotRoot: snapshotRoot,
counter: bcounter,
}
c = cols[parentPath.ShortRef()]
}

c.streams = append(c.streams, ds)
}

res := make([]data.Collection, 0, len(cols))
// Can't use the maps package to extract the values because we need to convert
// from *kopiaDataCollection to data.RestoreCollection too.
res := make([]data.RestoreCollection, 0, len(cols))
for _, c := range cols {
res = append(res, c)
}

return res, errs.ErrorOrNil()
return res, errs.Err()
}

// DeleteSnapshot removes the provided manifest from kopia.
@ -425,7 +441,7 @@ func (w Wrapper) DeleteSnapshot(
mid := manifest.ID(snapshotID)

if len(mid) == 0 {
return errors.New("attempt to delete unidentified snapshot")
return clues.New("attempt to delete unidentified snapshot").WithClues(ctx)
}

err := repo.WriteSession(
@ -434,7 +450,7 @@ func (w Wrapper) DeleteSnapshot(
repo.WriteSessionOptions{Purpose: "KopiaWrapperBackupDeletion"},
func(innerCtx context.Context, rw repo.RepositoryWriter) error {
if err := rw.DeleteManifest(ctx, mid); err != nil {
return errors.Wrap(err, "deleting snapshot")
return clues.Wrap(err, "deleting snapshot").WithClues(ctx)
}

return nil
@ -443,7 +459,7 @@ func (w Wrapper) DeleteSnapshot(
// Telling kopia to always flush may hide other errors if it fails while
// flushing the write session (hence logging above).
if err != nil {
return errors.Wrap(err, "kopia deleting backup manifest")
return clues.Wrap(err, "deleting backup manifest").WithClues(ctx)
}

return nil
@ -464,7 +480,7 @@ func (w Wrapper) FetchPrevSnapshotManifests(
tags map[string]string,
) ([]*ManifestEntry, error) {
if w.c == nil {
return nil, errors.WithStack(errNotConnected)
return nil, clues.Stack(errNotConnected).WithClues(ctx)
}

return fetchPrevSnapshotManifests(ctx, w.c, reasons, tags), nil
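
The changes above replace pkg/errors wrapping and the multierror accumulator with clues (context-annotated error wrapping) plus a *fault.Errors bus. Below is a minimal sketch of how the two pieces combine, using only the calls visible in this diff (clues.Wrap/Stack, WithClues, errs.Add, errs.Err); the package name, the processItem helper, and the import paths are illustrative assumptions rather than Corso source.

package example

import (
	"context"

	"github.com/alcionai/clues"
	"github.com/alcionai/corso/src/pkg/fault"
)

// processItem is a placeholder for any per-item operation that can fail.
func processItem(ctx context.Context, item string) error {
	return nil
}

// processItems records each failure in the fault accumulator instead of
// aborting the loop, annotating it with values stashed in the context.
func processItems(ctx context.Context, items []string, errs *fault.Errors) error {
	for _, item := range items {
		if err := processItem(ctx, item); err != nil {
			errs.Add(clues.Wrap(err, "processing item").WithClues(ctx))
			continue
		}
	}

	// nil when nothing was recorded, the accumulated failure otherwise.
	return errs.Err()
}

Callers elsewhere in this diff construct the accumulator with fault.New(true); the boolean presumably toggles fail-fast behavior, which is why RestoreMultipleItems checks errs.Err() before each iteration.
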
@ -19,6 +19,7 @@ import (
"github.com/alcionai/corso/src/internal/connector/mockconnector"
"github.com/alcionai/corso/src/internal/data"
"github.com/alcionai/corso/src/internal/tester"
"github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/logger"
"github.com/alcionai/corso/src/pkg/path"
)
@ -52,7 +53,7 @@ var (
func testForFiles(
t *testing.T,
expected map[string][]byte,
collections []data.Collection,
collections []data.RestoreCollection,
) {
t.Helper()

@ -196,7 +197,7 @@ func (suite *KopiaIntegrationSuite) TearDownTest() {
}

func (suite *KopiaIntegrationSuite) TestBackupCollections() {
collections := []data.Collection{
collections := []data.BackupCollection{
mockconnector.NewMockExchangeCollection(
suite.testPath1,
5,
@ -269,7 +270,7 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections() {
nil,
tags,
true,
)
fault.New(true))
assert.NoError(t, err)

assert.Equal(t, test.expectedUploadedFiles, stats.TotalFileCount, "total files")
@ -353,11 +354,11 @@ func (suite *KopiaIntegrationSuite) TestRestoreAfterCompressionChange() {
stats, _, _, err := w.BackupCollections(
ctx,
nil,
[]data.Collection{dc1, dc2},
[]data.BackupCollection{dc1, dc2},
nil,
tags,
true,
)
fault.New(true))
require.NoError(t, err)

require.NoError(t, k.Compression(ctx, "gzip"))
@ -374,14 +375,49 @@ func (suite *KopiaIntegrationSuite) TestRestoreAfterCompressionChange() {
fp1,
fp2,
},
nil)

nil,
fault.New(true))
require.NoError(t, err)
assert.Equal(t, 2, len(result))

testForFiles(t, expected, result)
}

type mockBackupCollection struct {
path path.Path
streams []data.Stream
}

func (c *mockBackupCollection) Items() <-chan data.Stream {
res := make(chan data.Stream)

go func() {
defer close(res)

for _, s := range c.streams {
res <- s
}
}()

return res
}

func (c mockBackupCollection) FullPath() path.Path {
return c.path
}

func (c mockBackupCollection) PreviousPath() path.Path {
return nil
}

func (c mockBackupCollection) State() data.CollectionState {
return data.NewState
}

func (c mockBackupCollection) DoNotMergeItems() bool {
return false
}
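
mockBackupCollection gives the tests a plain backup-side collection now that kopiaDataCollection carries restore-side fields (snapshotRoot, counter). For orientation, here is a sketch of the interface shape implied by the mock's method set; it is inferred from the code above, and the authoritative interface in the data package may declare more or slightly different methods.

package example

import (
	"github.com/alcionai/corso/src/internal/data"
	"github.com/alcionai/corso/src/pkg/path"
)

// backupCollectionShape is an inferred sketch, not the real data interface.
type backupCollectionShape interface {
	Items() <-chan data.Stream   // streams the collection's contents to the backup writer
	FullPath() path.Path         // current location of the collection
	PreviousPath() path.Path     // prior location; nil for a new collection
	State() data.CollectionState // e.g. data.NewState or data.NotMovedState
	DoNotMergeItems() bool       // name suggests it skips merging items from a base snapshot
}
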
func (suite *KopiaIntegrationSuite) TestBackupCollections_ReaderError() {
t := suite.T()

@ -396,8 +432,8 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_ReaderError() {
tags[k] = ""
}

collections := []data.Collection{
&kopiaDataCollection{
collections := []data.BackupCollection{
&mockBackupCollection{
path: suite.testPath1,
streams: []data.Stream{
&mockconnector.MockExchangeData{
@ -410,7 +446,7 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_ReaderError() {
},
},
},
&kopiaDataCollection{
&mockBackupCollection{
path: suite.testPath2,
streams: []data.Stream{
&mockconnector.MockExchangeData{
@ -440,7 +476,7 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_ReaderError() {
nil,
tags,
true,
)
fault.New(true))
require.NoError(t, err)

assert.Equal(t, 0, stats.ErrorCount)
@ -461,11 +497,11 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_ReaderError() {
string(stats.SnapshotID),
[]path.Path{failedPath},
&ic,
)
fault.New(true))
// Files that had an error shouldn't make a dir entry in kopia. If they do we
// may run into kopia-assisted incrementals issues because only mod time and
// not file size is checked for StreamingFiles.
assert.ErrorIs(t, err, ErrNotFound, "errored file is restorable")
assert.ErrorIs(t, err, data.ErrNotFound, "errored file is restorable")
}
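
The updated assertion relies on the data.ErrNotFound sentinel staying detectable after wrapping, which is why getItemStream switched from rebuilding the error text to clues.Stack(data.ErrNotFound, err). A standard-library-only sketch of that property, with illustrative names; assert.ErrorIs is the testify form of the same errors.Is check.

package example

import (
	"errors"
	"fmt"
)

// ErrNotFound is a sentinel callers compare against with errors.Is.
var ErrNotFound = errors.New("not found")

// lookup wraps the sentinel so the cause survives alongside the added context.
func lookup(key string) error {
	return fmt.Errorf("looking up %q: %w", key, ErrNotFound)
}

// isNotFound reports true no matter how many wrapping layers were added.
func isNotFound(err error) bool {
	return errors.Is(err, ErrNotFound)
}
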
type backedupFile struct {
@ -477,7 +513,7 @@ type backedupFile struct {
func (suite *KopiaIntegrationSuite) TestBackupCollectionsHandlesNoCollections() {
table := []struct {
name string
collections []data.Collection
collections []data.BackupCollection
}{
{
name: "NilCollections",
@ -485,7 +521,7 @@ func (suite *KopiaIntegrationSuite) TestBackupCollectionsHandlesNoCollections()
},
{
name: "EmptyCollections",
collections: []data.Collection{},
collections: []data.BackupCollection{},
},
}

@ -501,7 +537,7 @@ func (suite *KopiaIntegrationSuite) TestBackupCollectionsHandlesNoCollections()
nil,
nil,
true,
)
fault.New(true))
require.NoError(t, err)

assert.Equal(t, BackupStats{}, *s)
@ -624,10 +660,10 @@ func (suite *KopiaSimpleRepoIntegrationSuite) SetupTest() {

suite.w = &Wrapper{c}

collections := []data.Collection{}
collections := []data.BackupCollection{}

for _, parent := range []path.Path{suite.testPath1, suite.testPath2} {
collection := &kopiaDataCollection{path: parent}
collection := &mockBackupCollection{path: parent}

for _, item := range suite.files[parent.String()] {
collection.streams = append(
@ -660,7 +696,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) SetupTest() {
nil,
tags,
false,
)
fault.New(true))
require.NoError(t, err)
require.Equal(t, stats.ErrorCount, 0)
require.Equal(t, stats.TotalFileCount, expectedFiles)
@ -723,7 +759,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() {
excludeItem bool
expectedCachedItems int
expectedUncachedItems int
cols func() []data.Collection
cols func() []data.BackupCollection
backupIDCheck require.ValueAssertionFunc
restoreCheck assert.ErrorAssertionFunc
}{
@ -732,7 +768,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() {
excludeItem: true,
expectedCachedItems: len(suite.filesByPath) - 1,
expectedUncachedItems: 0,
cols: func() []data.Collection {
cols: func() []data.BackupCollection {
return nil
},
backupIDCheck: require.NotEmpty,
@ -743,7 +779,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() {
// No snapshot should be made since there were no changes.
expectedCachedItems: 0,
expectedUncachedItems: 0,
cols: func() []data.Collection {
cols: func() []data.BackupCollection {
return nil
},
// Backup doesn't run.
@ -753,14 +789,14 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() {
name: "NoExcludeItemWithChanges",
expectedCachedItems: len(suite.filesByPath),
expectedUncachedItems: 1,
cols: func() []data.Collection {
cols: func() []data.BackupCollection {
c := mockconnector.NewMockExchangeCollection(
suite.testPath1,
1,
)
c.ColState = data.NotMovedState

return []data.Collection{c}
return []data.BackupCollection{c}
},
backupIDCheck: require.NotEmpty,
restoreCheck: assert.NoError,
@ -790,7 +826,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() {
excluded,
tags,
true,
)
fault.New(true))
require.NoError(t, err)
assert.Equal(t, test.expectedCachedItems, stats.CachedFileCount)
assert.Equal(t, test.expectedUncachedItems, stats.UncachedFileCount)
@ -810,7 +846,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() {
suite.files[suite.testPath1.String()][0].itemPath,
},
&ic,
)
fault.New(true))
test.restoreCheck(t, err)
})
}
@ -867,7 +903,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestRestoreMultipleItems() {
suite.testPath1,
suite.files[suite.testPath2.String()][0].itemPath,
},
expectedCollections: 2,
expectedCollections: 0,
expectedErr: assert.Error,
},
{
@ -877,7 +913,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestRestoreMultipleItems() {
doesntExist,
suite.files[suite.testPath2.String()][0].itemPath,
},
expectedCollections: 2,
expectedCollections: 0,
expectedErr: assert.Error,
},
}
@ -904,9 +940,14 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestRestoreMultipleItems() {
suite.ctx,
string(suite.snapshotID),
test.inputPaths,
&ic)
&ic,
fault.New(true))
test.expectedErr(t, err)

if err != nil {
return
}

assert.Len(t, result, test.expectedCollections)
assert.Less(t, int64(0), ic.i)
testForFiles(t, expected, result)
@ -946,7 +987,8 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestRestoreMultipleItems_Errors()
suite.ctx,
test.snapshotID,
test.paths,
nil)
nil,
fault.New(true))
assert.Error(t, err)
assert.Empty(t, c)
})
@ -966,7 +1008,8 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestDeleteSnapshot() {
suite.ctx,
string(suite.snapshotID),
[]path.Path{itemPath},
&ic)
&ic,
fault.New(true))
assert.Error(t, err, "snapshot should be deleted")
assert.Empty(t, c)
assert.Zero(t, ic.i)

@ -26,6 +26,7 @@ import (
"github.com/alcionai/corso/src/pkg/backup"
"github.com/alcionai/corso/src/pkg/backup/details"
"github.com/alcionai/corso/src/pkg/control"
"github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/logger"
"github.com/alcionai/corso/src/pkg/path"
"github.com/alcionai/corso/src/pkg/selectors"
@ -100,7 +101,7 @@ type backupStats struct {
}

type detailsWriter interface {
WriteBackupDetails(context.Context, *details.Details) (string, error)
WriteBackupDetails(context.Context, *details.Details, *fault.Errors) (string, error)
}

// ---------------------------------------------------------------------------
@ -238,12 +239,12 @@ func (op *BackupOperation) do(
return nil, errors.Wrap(err, "producing manifests and metadata")
}

gc, err := connectToM365(ctx, op.Selectors, op.account)
gc, err := connectToM365(ctx, op.Selectors, op.account, op.Errors)
if err != nil {
return nil, errors.Wrap(err, "connectng to m365")
}

cs, err := produceBackupDataCollections(ctx, gc, op.Selectors, mdColls, op.Options)
cs, excludes, err := produceBackupDataCollections(ctx, gc, op.Selectors, mdColls, op.Options)
if err != nil {
return nil, errors.Wrap(err, "producing backup data collections")
}
@ -257,8 +258,10 @@ func (op *BackupOperation) do(
reasons,
mans,
cs,
excludes,
backupID,
op.incremental && canUseMetaData)
op.incremental && canUseMetaData,
op.Errors)
if err != nil {
return nil, errors.Wrap(err, "persisting collection backups")
}
@ -271,7 +274,8 @@ func (op *BackupOperation) do(
detailsStore,
mans,
toMerge,
deets)
deets,
op.Errors)
if err != nil {
return nil, errors.Wrap(err, "merging details")
}
@ -307,9 +311,9 @@ func produceBackupDataCollections(
ctx context.Context,
gc *connector.GraphConnector,
sel selectors.Selector,
metadata []data.Collection,
metadata []data.RestoreCollection,
ctrlOpts control.Options,
) ([]data.Collection, error) {
) ([]data.BackupCollection, map[string]struct{}, error) {
complete, closer := observe.MessageWithCompletion(ctx, observe.Safe("Discovering items to backup"))
defer func() {
complete <- struct{}{}
@ -317,11 +321,9 @@ func produceBackupDataCollections(
closer()
}()

// TODO(ashmrtn): When we're ready to wire up the global exclude list return
// all values.
cols, _, errs := gc.DataCollections(ctx, sel, metadata, ctrlOpts)
cols, excludes, errs := gc.DataCollections(ctx, sel, metadata, ctrlOpts)

return cols, errs
return cols, excludes, errs
}
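
The excludes value threaded through this function is a map[string]struct{}, the common Go idiom for a set whose members need no payload. A small sketch of building and querying such a set; the function names are illustrative only.

package example

// newExcludeSet collects item refs into a set; struct{} values occupy no space.
func newExcludeSet(refs ...string) map[string]struct{} {
	excludes := make(map[string]struct{}, len(refs))
	for _, r := range refs {
		excludes[r] = struct{}{}
	}

	return excludes
}

// isExcluded reports whether ref was marked for exclusion.
func isExcluded(excludes map[string]struct{}, ref string) bool {
	_, ok := excludes[ref]
	return ok
}
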
// ---------------------------------------------------------------------------
@ -332,10 +334,11 @@ type backuper interface {
BackupCollections(
ctx context.Context,
bases []kopia.IncrementalBase,
cs []data.Collection,
cs []data.BackupCollection,
excluded map[string]struct{},
tags map[string]string,
buildTreeWithBase bool,
errs *fault.Errors,
) (*kopia.BackupStats, *details.Builder, map[string]path.Path, error)
}

@ -390,9 +393,11 @@ func consumeBackupDataCollections(
tenantID string,
reasons []kopia.Reason,
mans []*kopia.ManifestEntry,
cs []data.Collection,
cs []data.BackupCollection,
excludes map[string]struct{},
backupID model.StableID,
isIncremental bool,
errs *fault.Errors,
) (*kopia.BackupStats, *details.Builder, map[string]path.Path, error) {
complete, closer := observe.MessageWithCompletion(ctx, observe.Safe("Backing up data"))
defer func() {
@ -456,9 +461,12 @@ func consumeBackupDataCollections(
ctx,
bases,
cs,
// TODO(ashmrtn): When we're ready to enable incremental backups for
// OneDrive replace this with `excludes`.
nil,
tags,
isIncremental)
isIncremental,
errs)
if err != nil {
if kopiaStats == nil {
return nil, nil, nil, err
@ -498,6 +506,7 @@ func mergeDetails(
mans []*kopia.ManifestEntry,
shortRefsFromPrevBackup map[string]path.Path,
deets *details.Builder,
errs *fault.Errors,
) error {
// Don't bother loading any of the base details if there's nothing we need to
// merge.
@ -527,7 +536,8 @@ func mergeDetails(
ctx,
model.StableID(bID),
ms,
detailsStore)
detailsStore,
errs)
if err != nil {
return clues.New("fetching base details for backup").WithClues(mctx)
}
@ -648,7 +658,7 @@ func (op *BackupOperation) createBackupModels(
return clues.New("no backup details to record").WithClues(ctx)
}

detailsID, err := detailsStore.WriteBackupDetails(ctx, backupDetails)
detailsID, err := detailsStore.WriteBackupDetails(ctx, backupDetails, op.Errors)
if err != nil {
return clues.Wrap(err, "creating backupDetails model").WithClues(ctx)
}

@ -31,6 +31,7 @@ import (
"github.com/alcionai/corso/src/pkg/backup"
"github.com/alcionai/corso/src/pkg/backup/details"
"github.com/alcionai/corso/src/pkg/control"
"github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/path"
"github.com/alcionai/corso/src/pkg/selectors"
"github.com/alcionai/corso/src/pkg/store"
@ -250,7 +251,7 @@ func checkMetadataFilesExist(
pathsByRef[dir.ShortRef()] = append(pathsByRef[dir.ShortRef()], fName)
}

cols, err := kw.RestoreMultipleItems(ctx, bup.SnapshotID, paths, nil)
cols, err := kw.RestoreMultipleItems(ctx, bup.SnapshotID, paths, nil, fault.New(true))
assert.NoError(t, err)

for _, col := range cols {
@ -346,8 +347,7 @@ func generateContainerOfItems(
sel,
dest,
control.Options{RestorePermissions: true},
dataColls,
)
dataColls)
require.NoError(t, err)

return deets
@ -387,10 +387,10 @@ func buildCollections(
tenant, user string,
dest control.RestoreDestination,
colls []incrementalCollection,
) []data.Collection {
) []data.RestoreCollection {
t.Helper()

collections := make([]data.Collection, 0, len(colls))
collections := make([]data.RestoreCollection, 0, len(colls))

for _, c := range colls {
pth := toDataLayerPath(
@ -409,7 +409,7 @@ func buildCollections(
mc.Data[i] = c.items[i].data
}

collections = append(collections, mc)
collections = append(collections, data.NotFoundRestoreCollection{Collection: mc})
}

return collections
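
Each mock is now wrapped in data.NotFoundRestoreCollection{Collection: mc}, which reads as a decorator built on struct embedding: the wrapper promotes the embedded collection's methods and, judging by its name, answers item fetches with a not-found result. A generic sketch of that embedding pattern; the types below are illustrative and are not the data package's.

package example

import (
	"errors"
	"fmt"
)

var ErrNotFound = errors.New("not found")

// Collection stands in for whatever interface the wrapped value satisfies.
type Collection interface {
	Name() string
}

// notFoundCollection embeds a Collection, so its methods are promoted, and
// layers on a fetch that always reports a miss.
type notFoundCollection struct {
	Collection
}

func (c notFoundCollection) Fetch(name string) error {
	return fmt.Errorf("%q in %s: %w", name, c.Name(), ErrNotFound)
}
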
@ -669,7 +669,12 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_exchangeIncrementals() {
m365, err := acct.M365Config()
require.NoError(t, err)

gc, err := connector.NewGraphConnector(ctx, graph.HTTPClient(graph.NoTimeout()), acct, connector.Users)
gc, err := connector.NewGraphConnector(
ctx,
graph.HTTPClient(graph.NoTimeout()),
acct,
connector.Users,
fault.New(true))
require.NoError(t, err)

ac, err := api.NewClient(m365)