commit 1ee148554d
Merge branch 'updateKopiaPassword' of https://github.com/alcionai/corso into updateKopiaPassword
@@ -12,6 +12,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Reduce backup runtime for OneDrive and SharePoint incremental backups that have no file changes.
- Increase Exchange backup performance by lazily fetching data only for items whose content changed.
- Added `--backups` flag to delete multiple backups in the `corso backup delete` command.
- Backup now includes all sites that belong to a team, not just the root site.

## Fixed

- Teams Channels that cannot support delta tokens (those without messages) fall back to non-delta enumeration and no longer fail a backup.
@@ -16,6 +16,8 @@ import (
	"github.com/alcionai/corso/src/internal/data"
	"github.com/alcionai/corso/src/internal/m365/graph"
	"github.com/alcionai/corso/src/pkg/backup"
	"github.com/alcionai/corso/src/pkg/backup/details"
	"github.com/alcionai/corso/src/pkg/control"
	"github.com/alcionai/corso/src/pkg/logger"
	"github.com/alcionai/corso/src/pkg/path"
	"github.com/alcionai/corso/src/pkg/repository"

@@ -163,7 +165,7 @@ func handleDeleteCmd(cmd *cobra.Command, args []string) error {
// standard set of selector behavior that we want used in the cli
var defaultSelectorConfig = selectors.Config{OnlyMatchItemNames: true}

func runBackups(
func genericCreateCommand(
	ctx context.Context,
	r repository.Repositoryer,
	serviceName string,

@@ -332,6 +334,65 @@ func genericListCommand(
	return nil
}

func genericDetailsCommand(
	cmd *cobra.Command,
	backupID string,
	sel selectors.Selector,
) (*details.Details, error) {
	ctx := cmd.Context()

	r, rdao, err := utils.GetAccountAndConnect(ctx, cmd, path.OneDriveService)
	if err != nil {
		return nil, clues.Stack(err)
	}

	defer utils.CloseRepo(ctx, r)

	return genericDetailsCore(
		ctx,
		r,
		backupID,
		sel,
		rdao.Opts)
}

func genericDetailsCore(
	ctx context.Context,
	bg repository.BackupGetter,
	backupID string,
	sel selectors.Selector,
	opts control.Options,
) (*details.Details, error) {
	ctx = clues.Add(ctx, "backup_id", backupID)

	sel.Configure(selectors.Config{OnlyMatchItemNames: true})

	d, _, errs := bg.GetBackupDetails(ctx, backupID)
	// TODO: log/track recoverable errors
	if errs.Failure() != nil {
		if errors.Is(errs.Failure(), data.ErrNotFound) {
			return nil, clues.New("no backup exists with the id " + backupID)
		}

		return nil, clues.Wrap(errs.Failure(), "Failed to get backup details in the repository")
	}

	if opts.SkipReduce {
		return d, nil
	}

	d, err := sel.Reduce(ctx, d, errs)
	if err != nil {
		return nil, clues.Wrap(err, "filtering backup details to selection")
	}

	return d, nil
}

// ---------------------------------------------------------------------------
// helper funcs
// ---------------------------------------------------------------------------

func ifShow(flag string) bool {
	return strings.ToLower(strings.TrimSpace(flag)) == "show"
}
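For orientation: after this refactor, each service-specific details command reduces to building its selectors and delegating to genericDetailsCommand. A minimal sketch of that call pattern follows, modeled on the Exchange variant further down in this diff; the "hypothetical..." helpers are placeholders for the real per-service functions (such as utils.MakeExchangeOpts) and are not code from this commit.

// Sketch only: mirrors the pattern used by runDetailsExchangeCmd below.
// hypotheticalMakeOpts and hypotheticalIncludeRestoreDataSelectors stand in
// for the real per-service helpers.
func runDetailsSomeServiceCmd(cmd *cobra.Command) error {
	ctx := cmd.Context()
	opts := hypotheticalMakeOpts(cmd)

	sel := hypotheticalIncludeRestoreDataSelectors(opts)
	sel.Configure(selectors.Config{OnlyMatchItemNames: true})

	ds, err := genericDetailsCommand(cmd, flags.BackupIDFV, sel.Selector)
	if err != nil {
		return Only(ctx, err)
	}

	if len(ds.Entries) > 0 {
		ds.PrintEntries(ctx)
	} else {
		Info(ctx, selectors.ErrorNoMatchingItems)
	}

	return nil
}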
src/cli/backup/backup_test.go (new file, 68 lines)
@ -0,0 +1,68 @@
|
||||
package backup
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/alcionai/clues"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/suite"
|
||||
|
||||
"github.com/alcionai/corso/src/cli/utils/testdata"
|
||||
"github.com/alcionai/corso/src/internal/tester"
|
||||
dtd "github.com/alcionai/corso/src/pkg/backup/details/testdata"
|
||||
"github.com/alcionai/corso/src/pkg/control"
|
||||
"github.com/alcionai/corso/src/pkg/path"
|
||||
"github.com/alcionai/corso/src/pkg/selectors"
|
||||
)
|
||||
|
||||
type BackupUnitSuite struct {
|
||||
tester.Suite
|
||||
}
|
||||
|
||||
func TestBackupUnitSuite(t *testing.T) {
|
||||
suite.Run(t, &BackupUnitSuite{Suite: tester.NewUnitSuite(t)})
|
||||
}
|
||||
|
||||
func (suite *BackupUnitSuite) TestGenericDetailsCore() {
|
||||
t := suite.T()
|
||||
|
||||
expected := append(
|
||||
append(
|
||||
dtd.GetItemsForVersion(
|
||||
t,
|
||||
path.ExchangeService,
|
||||
path.EmailCategory,
|
||||
0,
|
||||
-1),
|
||||
dtd.GetItemsForVersion(
|
||||
t,
|
||||
path.ExchangeService,
|
||||
path.EventsCategory,
|
||||
0,
|
||||
-1)...),
|
||||
dtd.GetItemsForVersion(
|
||||
t,
|
||||
path.ExchangeService,
|
||||
path.ContactsCategory,
|
||||
0,
|
||||
-1)...)
|
||||
|
||||
ctx, flush := tester.NewContext(t)
|
||||
defer flush()
|
||||
|
||||
bg := testdata.VersionedBackupGetter{
|
||||
Details: dtd.GetDetailsSetForVersion(t, 0),
|
||||
}
|
||||
|
||||
sel := selectors.NewExchangeBackup([]string{"user-id"})
|
||||
sel.Include(sel.AllData())
|
||||
|
||||
output, err := genericDetailsCore(
|
||||
ctx,
|
||||
bg,
|
||||
"backup-ID",
|
||||
sel.Selector,
|
||||
control.DefaultOptions())
|
||||
assert.NoError(t, err, clues.ToCore(err))
|
||||
assert.ElementsMatch(t, expected, output.Entries)
|
||||
}
|
||||
@ -1,21 +1,15 @@
|
||||
package backup
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/alcionai/clues"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/pflag"
|
||||
|
||||
"github.com/alcionai/corso/src/cli/flags"
|
||||
. "github.com/alcionai/corso/src/cli/print"
|
||||
"github.com/alcionai/corso/src/cli/utils"
|
||||
"github.com/alcionai/corso/src/internal/data"
|
||||
"github.com/alcionai/corso/src/pkg/backup/details"
|
||||
"github.com/alcionai/corso/src/pkg/fault"
|
||||
"github.com/alcionai/corso/src/pkg/path"
|
||||
"github.com/alcionai/corso/src/pkg/repository"
|
||||
"github.com/alcionai/corso/src/pkg/selectors"
|
||||
)
|
||||
|
||||
@ -182,7 +176,7 @@ func createExchangeCmd(cmd *cobra.Command, args []string) error {
|
||||
selectorSet = append(selectorSet, discSel.Selector)
|
||||
}
|
||||
|
||||
return runBackups(
|
||||
return genericCreateCommand(
|
||||
ctx,
|
||||
r,
|
||||
"Exchange",
|
||||
@ -272,74 +266,31 @@ func detailsExchangeCmd(cmd *cobra.Command, args []string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
return runDetailsExchangeCmd(cmd)
|
||||
}
|
||||
|
||||
func runDetailsExchangeCmd(cmd *cobra.Command) error {
|
||||
ctx := cmd.Context()
|
||||
opts := utils.MakeExchangeOpts(cmd)
|
||||
|
||||
r, rdao, err := utils.GetAccountAndConnect(ctx, cmd, path.ExchangeService)
|
||||
sel := utils.IncludeExchangeRestoreDataSelectors(opts)
|
||||
sel.Configure(selectors.Config{OnlyMatchItemNames: true})
|
||||
utils.FilterExchangeRestoreInfoSelectors(sel, opts)
|
||||
|
||||
ds, err := genericDetailsCommand(cmd, flags.BackupIDFV, sel.Selector)
|
||||
if err != nil {
|
||||
return Only(ctx, err)
|
||||
}
|
||||
|
||||
defer utils.CloseRepo(ctx, r)
|
||||
|
||||
ds, err := runDetailsExchangeCmd(
|
||||
ctx,
|
||||
r,
|
||||
flags.BackupIDFV,
|
||||
opts,
|
||||
rdao.Opts.SkipReduce)
|
||||
if err != nil {
|
||||
return Only(ctx, err)
|
||||
}
|
||||
|
||||
if len(ds.Entries) == 0 {
|
||||
if len(ds.Entries) > 0 {
|
||||
ds.PrintEntries(ctx)
|
||||
} else {
|
||||
Info(ctx, selectors.ErrorNoMatchingItems)
|
||||
return nil
|
||||
}
|
||||
|
||||
ds.PrintEntries(ctx)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// runDetailsExchangeCmd actually performs the lookup in backup details.
|
||||
// the fault.Errors return is always non-nil. Callers should check if
|
||||
// errs.Failure() == nil.
|
||||
func runDetailsExchangeCmd(
|
||||
ctx context.Context,
|
||||
r repository.BackupGetter,
|
||||
backupID string,
|
||||
opts utils.ExchangeOpts,
|
||||
skipReduce bool,
|
||||
) (*details.Details, error) {
|
||||
if err := utils.ValidateExchangeRestoreFlags(backupID, opts); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ctx = clues.Add(ctx, "backup_id", backupID)
|
||||
|
||||
d, _, errs := r.GetBackupDetails(ctx, backupID)
|
||||
// TODO: log/track recoverable errors
|
||||
if errs.Failure() != nil {
|
||||
if errors.Is(errs.Failure(), data.ErrNotFound) {
|
||||
return nil, clues.New("No backup exists with the id " + backupID)
|
||||
}
|
||||
|
||||
return nil, clues.Wrap(errs.Failure(), "Failed to get backup details in the repository")
|
||||
}
|
||||
|
||||
ctx = clues.Add(ctx, "details_entries", len(d.Entries))
|
||||
|
||||
if !skipReduce {
|
||||
sel := utils.IncludeExchangeRestoreDataSelectors(opts)
|
||||
sel.Configure(selectors.Config{OnlyMatchItemNames: true})
|
||||
utils.FilterExchangeRestoreInfoSelectors(sel, opts)
|
||||
d = sel.Reduce(ctx, d, errs)
|
||||
}
|
||||
|
||||
return d, nil
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
// backup delete
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
|
||||
@ -55,7 +55,7 @@ func (suite *NoBackupExchangeE2ESuite) SetupSuite() {
|
||||
defer flush()
|
||||
|
||||
suite.its = newIntegrationTesterSetup(t)
|
||||
suite.dpnd = prepM365Test(t, ctx)
|
||||
suite.dpnd = prepM365Test(t, ctx, path.ExchangeService)
|
||||
}
|
||||
|
||||
func (suite *NoBackupExchangeE2ESuite) TestExchangeBackupListCmd_noBackups() {
|
||||
@ -109,7 +109,7 @@ func (suite *BackupExchangeE2ESuite) SetupSuite() {
|
||||
defer flush()
|
||||
|
||||
suite.its = newIntegrationTesterSetup(t)
|
||||
suite.dpnd = prepM365Test(t, ctx)
|
||||
suite.dpnd = prepM365Test(t, ctx, path.ExchangeService)
|
||||
}
|
||||
|
||||
func (suite *BackupExchangeE2ESuite) TestExchangeBackupCmd_email() {
|
||||
@ -336,7 +336,7 @@ func (suite *PreparedBackupExchangeE2ESuite) SetupSuite() {
|
||||
defer flush()
|
||||
|
||||
suite.its = newIntegrationTesterSetup(t)
|
||||
suite.dpnd = prepM365Test(t, ctx)
|
||||
suite.dpnd = prepM365Test(t, ctx, path.ExchangeService)
|
||||
suite.backupOps = make(map[path.CategoryType]string)
|
||||
|
||||
var (
|
||||
@ -579,7 +579,7 @@ func (suite *BackupDeleteExchangeE2ESuite) SetupSuite() {
|
||||
ctx, flush := tester.NewContext(t)
|
||||
defer flush()
|
||||
|
||||
suite.dpnd = prepM365Test(t, ctx)
|
||||
suite.dpnd = prepM365Test(t, ctx, path.ExchangeService)
|
||||
|
||||
m365UserID := tconfig.M365UserID(t)
|
||||
users := []string{m365UserID}
|
||||
|
||||
@ -1,7 +1,6 @@
|
||||
package backup
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
@ -15,10 +14,7 @@ import (
|
||||
flagsTD "github.com/alcionai/corso/src/cli/flags/testdata"
|
||||
cliTD "github.com/alcionai/corso/src/cli/testdata"
|
||||
"github.com/alcionai/corso/src/cli/utils"
|
||||
utilsTD "github.com/alcionai/corso/src/cli/utils/testdata"
|
||||
"github.com/alcionai/corso/src/internal/tester"
|
||||
"github.com/alcionai/corso/src/internal/version"
|
||||
dtd "github.com/alcionai/corso/src/pkg/backup/details/testdata"
|
||||
"github.com/alcionai/corso/src/pkg/control"
|
||||
)
|
||||
|
||||
@ -368,51 +364,3 @@ func (suite *ExchangeUnitSuite) TestExchangeBackupCreateSelectors() {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func (suite *ExchangeUnitSuite) TestExchangeBackupDetailsSelectors() {
|
||||
for v := 0; v <= version.Backup; v++ {
|
||||
suite.Run(fmt.Sprintf("version%d", v), func() {
|
||||
for _, test := range utilsTD.ExchangeOptionDetailLookups {
|
||||
suite.Run(test.Name, func() {
|
||||
t := suite.T()
|
||||
|
||||
ctx, flush := tester.NewContext(t)
|
||||
defer flush()
|
||||
|
||||
bg := utilsTD.VersionedBackupGetter{
|
||||
Details: dtd.GetDetailsSetForVersion(t, v),
|
||||
}
|
||||
|
||||
output, err := runDetailsExchangeCmd(
|
||||
ctx,
|
||||
bg,
|
||||
"backup-ID",
|
||||
test.Opts(t, v),
|
||||
false)
|
||||
assert.NoError(t, err, clues.ToCore(err))
|
||||
assert.ElementsMatch(t, test.Expected(t, v), output.Entries)
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func (suite *ExchangeUnitSuite) TestExchangeBackupDetailsSelectorsBadFormats() {
|
||||
for _, test := range utilsTD.BadExchangeOptionsFormats {
|
||||
suite.Run(test.Name, func() {
|
||||
t := suite.T()
|
||||
|
||||
ctx, flush := tester.NewContext(t)
|
||||
defer flush()
|
||||
|
||||
output, err := runDetailsExchangeCmd(
|
||||
ctx,
|
||||
test.BackupGetter,
|
||||
"backup-ID",
|
||||
test.Opts(t, version.Backup),
|
||||
false)
|
||||
assert.Error(t, err, clues.ToCore(err))
|
||||
assert.Empty(t, output)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@ -2,7 +2,6 @@ package backup
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/alcionai/clues"
|
||||
@ -14,12 +13,9 @@ import (
|
||||
. "github.com/alcionai/corso/src/cli/print"
|
||||
"github.com/alcionai/corso/src/cli/utils"
|
||||
"github.com/alcionai/corso/src/internal/common/idname"
|
||||
"github.com/alcionai/corso/src/internal/data"
|
||||
"github.com/alcionai/corso/src/pkg/backup/details"
|
||||
"github.com/alcionai/corso/src/pkg/fault"
|
||||
"github.com/alcionai/corso/src/pkg/filters"
|
||||
"github.com/alcionai/corso/src/pkg/path"
|
||||
"github.com/alcionai/corso/src/pkg/repository"
|
||||
"github.com/alcionai/corso/src/pkg/selectors"
|
||||
"github.com/alcionai/corso/src/pkg/services/m365"
|
||||
)
|
||||
@ -174,7 +170,7 @@ func createGroupsCmd(cmd *cobra.Command, args []string) error {
|
||||
selectorSet = append(selectorSet, discSel.Selector)
|
||||
}
|
||||
|
||||
return runBackups(
|
||||
return genericCreateCommand(
|
||||
ctx,
|
||||
r,
|
||||
"Group",
|
||||
@ -225,74 +221,31 @@ func detailsGroupsCmd(cmd *cobra.Command, args []string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
return runDetailsGroupsCmd(cmd)
|
||||
}
|
||||
|
||||
func runDetailsGroupsCmd(cmd *cobra.Command) error {
|
||||
ctx := cmd.Context()
|
||||
opts := utils.MakeGroupsOpts(cmd)
|
||||
|
||||
r, rdao, err := utils.GetAccountAndConnect(ctx, cmd, path.GroupsService)
|
||||
sel := utils.IncludeGroupsRestoreDataSelectors(ctx, opts)
|
||||
sel.Configure(selectors.Config{OnlyMatchItemNames: true})
|
||||
utils.FilterGroupsRestoreInfoSelectors(sel, opts)
|
||||
|
||||
ds, err := genericDetailsCommand(cmd, flags.BackupIDFV, sel.Selector)
|
||||
if err != nil {
|
||||
return Only(ctx, err)
|
||||
}
|
||||
|
||||
defer utils.CloseRepo(ctx, r)
|
||||
|
||||
ds, err := runDetailsGroupsCmd(
|
||||
ctx,
|
||||
r,
|
||||
flags.BackupIDFV,
|
||||
opts,
|
||||
rdao.Opts.SkipReduce)
|
||||
if err != nil {
|
||||
return Only(ctx, err)
|
||||
}
|
||||
|
||||
if len(ds.Entries) == 0 {
|
||||
if len(ds.Entries) > 0 {
|
||||
ds.PrintEntries(ctx)
|
||||
} else {
|
||||
Info(ctx, selectors.ErrorNoMatchingItems)
|
||||
return nil
|
||||
}
|
||||
|
||||
ds.PrintEntries(ctx)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// runDetailsGroupsCmd actually performs the lookup in backup details.
|
||||
// the fault.Errors return is always non-nil. Callers should check if
|
||||
// errs.Failure() == nil.
|
||||
func runDetailsGroupsCmd(
|
||||
ctx context.Context,
|
||||
r repository.BackupGetter,
|
||||
backupID string,
|
||||
opts utils.GroupsOpts,
|
||||
skipReduce bool,
|
||||
) (*details.Details, error) {
|
||||
if err := utils.ValidateGroupsRestoreFlags(backupID, opts); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ctx = clues.Add(ctx, "backup_id", backupID)
|
||||
|
||||
d, _, errs := r.GetBackupDetails(ctx, backupID)
|
||||
// TODO: log/track recoverable errors
|
||||
if errs.Failure() != nil {
|
||||
if errors.Is(errs.Failure(), data.ErrNotFound) {
|
||||
return nil, clues.New("no backup exists with the id " + backupID)
|
||||
}
|
||||
|
||||
return nil, clues.Wrap(errs.Failure(), "Failed to get backup details in the repository")
|
||||
}
|
||||
|
||||
ctx = clues.Add(ctx, "details_entries", len(d.Entries))
|
||||
|
||||
if !skipReduce {
|
||||
sel := utils.IncludeGroupsRestoreDataSelectors(ctx, opts)
|
||||
sel.Configure(selectors.Config{OnlyMatchItemNames: true})
|
||||
utils.FilterGroupsRestoreInfoSelectors(sel, opts)
|
||||
d = sel.Reduce(ctx, d, errs)
|
||||
}
|
||||
|
||||
return d, nil
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
// backup delete
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
|
||||
@ -56,7 +56,7 @@ func (suite *NoBackupGroupsE2ESuite) SetupSuite() {
|
||||
defer flush()
|
||||
|
||||
suite.its = newIntegrationTesterSetup(t)
|
||||
suite.dpnd = prepM365Test(t, ctx)
|
||||
suite.dpnd = prepM365Test(t, ctx, path.GroupsService)
|
||||
}
|
||||
|
||||
func (suite *NoBackupGroupsE2ESuite) TestGroupsBackupListCmd_noBackups() {
|
||||
@ -110,7 +110,7 @@ func (suite *BackupGroupsE2ESuite) SetupSuite() {
|
||||
defer flush()
|
||||
|
||||
suite.its = newIntegrationTesterSetup(t)
|
||||
suite.dpnd = prepM365Test(t, ctx)
|
||||
suite.dpnd = prepM365Test(t, ctx, path.GroupsService)
|
||||
}
|
||||
|
||||
func (suite *BackupGroupsE2ESuite) TestGroupsBackupCmd_channelMessages() {
|
||||
@ -287,7 +287,7 @@ func (suite *PreparedBackupGroupsE2ESuite) SetupSuite() {
|
||||
defer flush()
|
||||
|
||||
suite.its = newIntegrationTesterSetup(t)
|
||||
suite.dpnd = prepM365Test(t, ctx)
|
||||
suite.dpnd = prepM365Test(t, ctx, path.GroupsService)
|
||||
suite.backupOps = make(map[path.CategoryType]string)
|
||||
|
||||
var (
|
||||
@ -515,7 +515,7 @@ func (suite *BackupDeleteGroupsE2ESuite) SetupSuite() {
|
||||
ctx, flush := tester.NewContext(t)
|
||||
defer flush()
|
||||
|
||||
suite.dpnd = prepM365Test(t, ctx)
|
||||
suite.dpnd = prepM365Test(t, ctx, path.GroupsService)
|
||||
|
||||
m365GroupID := tconfig.M365GroupID(t)
|
||||
groups := []string{m365GroupID}
|
||||
|
||||
@ -21,7 +21,7 @@ import (
|
||||
"github.com/alcionai/corso/src/internal/tester/tconfig"
|
||||
"github.com/alcionai/corso/src/pkg/account"
|
||||
"github.com/alcionai/corso/src/pkg/control"
|
||||
ctrlRepo "github.com/alcionai/corso/src/pkg/control/repository"
|
||||
"github.com/alcionai/corso/src/pkg/path"
|
||||
"github.com/alcionai/corso/src/pkg/repository"
|
||||
"github.com/alcionai/corso/src/pkg/services/m365/api"
|
||||
"github.com/alcionai/corso/src/pkg/services/m365/api/mock"
|
||||
@ -133,6 +133,7 @@ type dependencies struct {
|
||||
func prepM365Test(
|
||||
t *testing.T,
|
||||
ctx context.Context, //revive:disable-line:context-as-argument
|
||||
pst path.ServiceType,
|
||||
) dependencies {
|
||||
var (
|
||||
acct = tconfig.NewM365Account(t)
|
||||
@ -160,7 +161,9 @@ func prepM365Test(
|
||||
repository.NewRepoID)
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
err = repo.Initialize(ctx, ctrlRepo.Retention{})
|
||||
err = repo.Initialize(ctx, repository.InitConfig{
|
||||
Service: pst,
|
||||
})
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
return dependencies{
|
||||
|
||||
@ -1,21 +1,15 @@
|
||||
package backup
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/alcionai/clues"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/pflag"
|
||||
|
||||
"github.com/alcionai/corso/src/cli/flags"
|
||||
. "github.com/alcionai/corso/src/cli/print"
|
||||
"github.com/alcionai/corso/src/cli/utils"
|
||||
"github.com/alcionai/corso/src/internal/data"
|
||||
"github.com/alcionai/corso/src/pkg/backup/details"
|
||||
"github.com/alcionai/corso/src/pkg/fault"
|
||||
"github.com/alcionai/corso/src/pkg/path"
|
||||
"github.com/alcionai/corso/src/pkg/repository"
|
||||
"github.com/alcionai/corso/src/pkg/selectors"
|
||||
)
|
||||
|
||||
@ -162,7 +156,7 @@ func createOneDriveCmd(cmd *cobra.Command, args []string) error {
|
||||
selectorSet = append(selectorSet, discSel.Selector)
|
||||
}
|
||||
|
||||
return runBackups(
|
||||
return genericCreateCommand(
|
||||
ctx,
|
||||
r,
|
||||
"OneDrive",
|
||||
@ -229,74 +223,31 @@ func detailsOneDriveCmd(cmd *cobra.Command, args []string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
return runDetailsOneDriveCmd(cmd)
|
||||
}
|
||||
|
||||
func runDetailsOneDriveCmd(cmd *cobra.Command) error {
|
||||
ctx := cmd.Context()
|
||||
opts := utils.MakeOneDriveOpts(cmd)
|
||||
|
||||
r, rdao, err := utils.GetAccountAndConnect(ctx, cmd, path.OneDriveService)
|
||||
sel := utils.IncludeOneDriveRestoreDataSelectors(opts)
|
||||
sel.Configure(selectors.Config{OnlyMatchItemNames: true})
|
||||
utils.FilterOneDriveRestoreInfoSelectors(sel, opts)
|
||||
|
||||
ds, err := genericDetailsCommand(cmd, flags.BackupIDFV, sel.Selector)
|
||||
if err != nil {
|
||||
return Only(ctx, err)
|
||||
}
|
||||
|
||||
defer utils.CloseRepo(ctx, r)
|
||||
|
||||
ds, err := runDetailsOneDriveCmd(
|
||||
ctx,
|
||||
r,
|
||||
flags.BackupIDFV,
|
||||
opts,
|
||||
rdao.Opts.SkipReduce)
|
||||
if err != nil {
|
||||
return Only(ctx, err)
|
||||
}
|
||||
|
||||
if len(ds.Entries) == 0 {
|
||||
if len(ds.Entries) > 0 {
|
||||
ds.PrintEntries(ctx)
|
||||
} else {
|
||||
Info(ctx, selectors.ErrorNoMatchingItems)
|
||||
return nil
|
||||
}
|
||||
|
||||
ds.PrintEntries(ctx)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// runDetailsOneDriveCmd actually performs the lookup in backup details.
|
||||
// the fault.Errors return is always non-nil. Callers should check if
|
||||
// errs.Failure() == nil.
|
||||
func runDetailsOneDriveCmd(
|
||||
ctx context.Context,
|
||||
r repository.BackupGetter,
|
||||
backupID string,
|
||||
opts utils.OneDriveOpts,
|
||||
skipReduce bool,
|
||||
) (*details.Details, error) {
|
||||
if err := utils.ValidateOneDriveRestoreFlags(backupID, opts); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ctx = clues.Add(ctx, "backup_id", backupID)
|
||||
|
||||
d, _, errs := r.GetBackupDetails(ctx, backupID)
|
||||
// TODO: log/track recoverable errors
|
||||
if errs.Failure() != nil {
|
||||
if errors.Is(errs.Failure(), data.ErrNotFound) {
|
||||
return nil, clues.New("no backup exists with the id " + backupID)
|
||||
}
|
||||
|
||||
return nil, clues.Wrap(errs.Failure(), "Failed to get backup details in the repository")
|
||||
}
|
||||
|
||||
ctx = clues.Add(ctx, "details_entries", len(d.Entries))
|
||||
|
||||
if !skipReduce {
|
||||
sel := utils.IncludeOneDriveRestoreDataSelectors(opts)
|
||||
sel.Configure(selectors.Config{OnlyMatchItemNames: true})
|
||||
utils.FilterOneDriveRestoreInfoSelectors(sel, opts)
|
||||
d = sel.Reduce(ctx, d, errs)
|
||||
}
|
||||
|
||||
return d, nil
|
||||
}
|
||||
|
||||
// `corso backup delete onedrive [<flag>...]`
|
||||
func oneDriveDeleteCmd() *cobra.Command {
|
||||
return &cobra.Command{
|
||||
|
||||
@ -20,6 +20,7 @@ import (
|
||||
"github.com/alcionai/corso/src/internal/operations"
|
||||
"github.com/alcionai/corso/src/internal/tester"
|
||||
"github.com/alcionai/corso/src/internal/tester/tconfig"
|
||||
"github.com/alcionai/corso/src/pkg/path"
|
||||
"github.com/alcionai/corso/src/pkg/selectors"
|
||||
selTD "github.com/alcionai/corso/src/pkg/selectors/testdata"
|
||||
storeTD "github.com/alcionai/corso/src/pkg/storage/testdata"
|
||||
@ -48,7 +49,7 @@ func (suite *NoBackupOneDriveE2ESuite) SetupSuite() {
|
||||
ctx, flush := tester.NewContext(t)
|
||||
defer flush()
|
||||
|
||||
suite.dpnd = prepM365Test(t, ctx)
|
||||
suite.dpnd = prepM365Test(t, ctx, path.OneDriveService)
|
||||
}
|
||||
|
||||
func (suite *NoBackupOneDriveE2ESuite) TestOneDriveBackupListCmd_empty() {
|
||||
@ -139,7 +140,7 @@ func (suite *BackupDeleteOneDriveE2ESuite) SetupSuite() {
|
||||
ctx, flush := tester.NewContext(t)
|
||||
defer flush()
|
||||
|
||||
suite.dpnd = prepM365Test(t, ctx)
|
||||
suite.dpnd = prepM365Test(t, ctx, path.OneDriveService)
|
||||
|
||||
var (
|
||||
m365UserID = tconfig.M365UserID(t)
|
||||
|
||||
@ -1,7 +1,6 @@
|
||||
package backup
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/alcionai/clues"
|
||||
@ -14,10 +13,7 @@ import (
|
||||
flagsTD "github.com/alcionai/corso/src/cli/flags/testdata"
|
||||
cliTD "github.com/alcionai/corso/src/cli/testdata"
|
||||
"github.com/alcionai/corso/src/cli/utils"
|
||||
utilsTD "github.com/alcionai/corso/src/cli/utils/testdata"
|
||||
"github.com/alcionai/corso/src/internal/tester"
|
||||
"github.com/alcionai/corso/src/internal/version"
|
||||
dtd "github.com/alcionai/corso/src/pkg/backup/details/testdata"
|
||||
"github.com/alcionai/corso/src/pkg/control"
|
||||
)
|
||||
|
||||
@ -227,51 +223,3 @@ func (suite *OneDriveUnitSuite) TestValidateOneDriveBackupCreateFlags() {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func (suite *OneDriveUnitSuite) TestOneDriveBackupDetailsSelectors() {
|
||||
for v := 0; v <= version.Backup; v++ {
|
||||
suite.Run(fmt.Sprintf("version%d", v), func() {
|
||||
for _, test := range utilsTD.OneDriveOptionDetailLookups {
|
||||
suite.Run(test.Name, func() {
|
||||
t := suite.T()
|
||||
|
||||
ctx, flush := tester.NewContext(t)
|
||||
defer flush()
|
||||
|
||||
bg := utilsTD.VersionedBackupGetter{
|
||||
Details: dtd.GetDetailsSetForVersion(t, v),
|
||||
}
|
||||
|
||||
output, err := runDetailsOneDriveCmd(
|
||||
ctx,
|
||||
bg,
|
||||
"backup-ID",
|
||||
test.Opts(t, v),
|
||||
false)
|
||||
assert.NoError(t, err, clues.ToCore(err))
|
||||
assert.ElementsMatch(t, test.Expected(t, v), output.Entries)
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func (suite *OneDriveUnitSuite) TestOneDriveBackupDetailsSelectorsBadFormats() {
|
||||
for _, test := range utilsTD.BadOneDriveOptionsFormats {
|
||||
suite.Run(test.Name, func() {
|
||||
t := suite.T()
|
||||
|
||||
ctx, flush := tester.NewContext(t)
|
||||
defer flush()
|
||||
|
||||
output, err := runDetailsOneDriveCmd(
|
||||
ctx,
|
||||
test.BackupGetter,
|
||||
"backup-ID",
|
||||
test.Opts(t, version.Backup),
|
||||
false)
|
||||
assert.Error(t, err, clues.ToCore(err))
|
||||
assert.Empty(t, output)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@ -4,7 +4,6 @@ import (
|
||||
"context"
|
||||
|
||||
"github.com/alcionai/clues"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/pflag"
|
||||
"golang.org/x/exp/slices"
|
||||
@ -13,12 +12,9 @@ import (
|
||||
. "github.com/alcionai/corso/src/cli/print"
|
||||
"github.com/alcionai/corso/src/cli/utils"
|
||||
"github.com/alcionai/corso/src/internal/common/idname"
|
||||
"github.com/alcionai/corso/src/internal/data"
|
||||
"github.com/alcionai/corso/src/pkg/backup/details"
|
||||
"github.com/alcionai/corso/src/pkg/fault"
|
||||
"github.com/alcionai/corso/src/pkg/filters"
|
||||
"github.com/alcionai/corso/src/pkg/path"
|
||||
"github.com/alcionai/corso/src/pkg/repository"
|
||||
"github.com/alcionai/corso/src/pkg/selectors"
|
||||
"github.com/alcionai/corso/src/pkg/services/m365"
|
||||
)
|
||||
@ -179,7 +175,7 @@ func createSharePointCmd(cmd *cobra.Command, args []string) error {
|
||||
selectorSet = append(selectorSet, discSel.Selector)
|
||||
}
|
||||
|
||||
return runBackups(
|
||||
return genericCreateCommand(
|
||||
ctx,
|
||||
r,
|
||||
"SharePoint",
|
||||
@ -303,7 +299,7 @@ func deleteSharePointCmd(cmd *cobra.Command, args []string) error {
|
||||
// backup details
|
||||
// ------------------------------------------------------------------------------------------------
|
||||
|
||||
// `corso backup details onedrive [<flag>...]`
|
||||
// `corso backup details SharePoint [<flag>...]`
|
||||
func sharePointDetailsCmd() *cobra.Command {
|
||||
return &cobra.Command{
|
||||
Use: sharePointServiceCommand,
|
||||
@ -324,70 +320,27 @@ func detailsSharePointCmd(cmd *cobra.Command, args []string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
return runDetailsSharePointCmd(cmd)
|
||||
}
|
||||
|
||||
func runDetailsSharePointCmd(cmd *cobra.Command) error {
|
||||
ctx := cmd.Context()
|
||||
opts := utils.MakeSharePointOpts(cmd)
|
||||
|
||||
r, rdao, err := utils.GetAccountAndConnect(ctx, cmd, path.SharePointService)
|
||||
sel := utils.IncludeSharePointRestoreDataSelectors(ctx, opts)
|
||||
sel.Configure(selectors.Config{OnlyMatchItemNames: true})
|
||||
utils.FilterSharePointRestoreInfoSelectors(sel, opts)
|
||||
|
||||
ds, err := genericDetailsCommand(cmd, flags.BackupIDFV, sel.Selector)
|
||||
if err != nil {
|
||||
return Only(ctx, err)
|
||||
}
|
||||
|
||||
defer utils.CloseRepo(ctx, r)
|
||||
|
||||
ds, err := runDetailsSharePointCmd(
|
||||
ctx,
|
||||
r,
|
||||
flags.BackupIDFV,
|
||||
opts,
|
||||
rdao.Opts.SkipReduce)
|
||||
if err != nil {
|
||||
return Only(ctx, err)
|
||||
}
|
||||
|
||||
if len(ds.Entries) == 0 {
|
||||
if len(ds.Entries) > 0 {
|
||||
ds.PrintEntries(ctx)
|
||||
} else {
|
||||
Info(ctx, selectors.ErrorNoMatchingItems)
|
||||
return nil
|
||||
}
|
||||
|
||||
ds.PrintEntries(ctx)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// runDetailsSharePointCmd actually performs the lookup in backup details.
|
||||
// the fault.Errors return is always non-nil. Callers should check if
|
||||
// errs.Failure() == nil.
|
||||
func runDetailsSharePointCmd(
|
||||
ctx context.Context,
|
||||
r repository.BackupGetter,
|
||||
backupID string,
|
||||
opts utils.SharePointOpts,
|
||||
skipReduce bool,
|
||||
) (*details.Details, error) {
|
||||
if err := utils.ValidateSharePointRestoreFlags(backupID, opts); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ctx = clues.Add(ctx, "backup_id", backupID)
|
||||
|
||||
d, _, errs := r.GetBackupDetails(ctx, backupID)
|
||||
// TODO: log/track recoverable errors
|
||||
if errs.Failure() != nil {
|
||||
if errors.Is(errs.Failure(), data.ErrNotFound) {
|
||||
return nil, clues.New("no backup exists with the id " + backupID)
|
||||
}
|
||||
|
||||
return nil, clues.Wrap(errs.Failure(), "Failed to get backup details in the repository")
|
||||
}
|
||||
|
||||
ctx = clues.Add(ctx, "details_entries", len(d.Entries))
|
||||
|
||||
if !skipReduce {
|
||||
sel := utils.IncludeSharePointRestoreDataSelectors(ctx, opts)
|
||||
sel.Configure(selectors.Config{OnlyMatchItemNames: true})
|
||||
utils.FilterSharePointRestoreInfoSelectors(sel, opts)
|
||||
d = sel.Reduce(ctx, d, errs)
|
||||
}
|
||||
|
||||
return d, nil
|
||||
}
|
||||
|
||||
@ -20,6 +20,7 @@ import (
|
||||
"github.com/alcionai/corso/src/internal/operations"
|
||||
"github.com/alcionai/corso/src/internal/tester"
|
||||
"github.com/alcionai/corso/src/internal/tester/tconfig"
|
||||
"github.com/alcionai/corso/src/pkg/path"
|
||||
"github.com/alcionai/corso/src/pkg/selectors"
|
||||
"github.com/alcionai/corso/src/pkg/selectors/testdata"
|
||||
storeTD "github.com/alcionai/corso/src/pkg/storage/testdata"
|
||||
@ -46,7 +47,7 @@ func (suite *NoBackupSharePointE2ESuite) SetupSuite() {
|
||||
ctx, flush := tester.NewContext(t)
|
||||
defer flush()
|
||||
|
||||
suite.dpnd = prepM365Test(t, ctx)
|
||||
suite.dpnd = prepM365Test(t, ctx, path.SharePointService)
|
||||
}
|
||||
|
||||
func (suite *NoBackupSharePointE2ESuite) TestSharePointBackupListCmd_empty() {
|
||||
@ -103,7 +104,7 @@ func (suite *BackupDeleteSharePointE2ESuite) SetupSuite() {
|
||||
ctx, flush := tester.NewContext(t)
|
||||
defer flush()
|
||||
|
||||
suite.dpnd = prepM365Test(t, ctx)
|
||||
suite.dpnd = prepM365Test(t, ctx, path.SharePointService)
|
||||
|
||||
var (
|
||||
m365SiteID = tconfig.M365SiteID(t)
|
||||
|
||||
@ -1,7 +1,6 @@
|
||||
package backup
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
@ -15,11 +14,8 @@ import (
|
||||
flagsTD "github.com/alcionai/corso/src/cli/flags/testdata"
|
||||
cliTD "github.com/alcionai/corso/src/cli/testdata"
|
||||
"github.com/alcionai/corso/src/cli/utils"
|
||||
utilsTD "github.com/alcionai/corso/src/cli/utils/testdata"
|
||||
"github.com/alcionai/corso/src/internal/common/idname"
|
||||
"github.com/alcionai/corso/src/internal/tester"
|
||||
"github.com/alcionai/corso/src/internal/version"
|
||||
dtd "github.com/alcionai/corso/src/pkg/backup/details/testdata"
|
||||
"github.com/alcionai/corso/src/pkg/control"
|
||||
"github.com/alcionai/corso/src/pkg/selectors"
|
||||
)
|
||||
@ -339,51 +335,3 @@ func (suite *SharePointUnitSuite) TestSharePointBackupCreateSelectors() {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func (suite *SharePointUnitSuite) TestSharePointBackupDetailsSelectors() {
|
||||
for v := 0; v <= version.Backup; v++ {
|
||||
suite.Run(fmt.Sprintf("version%d", v), func() {
|
||||
for _, test := range utilsTD.SharePointOptionDetailLookups {
|
||||
suite.Run(test.Name, func() {
|
||||
t := suite.T()
|
||||
|
||||
ctx, flush := tester.NewContext(t)
|
||||
defer flush()
|
||||
|
||||
bg := utilsTD.VersionedBackupGetter{
|
||||
Details: dtd.GetDetailsSetForVersion(t, v),
|
||||
}
|
||||
|
||||
output, err := runDetailsSharePointCmd(
|
||||
ctx,
|
||||
bg,
|
||||
"backup-ID",
|
||||
test.Opts(t, v),
|
||||
false)
|
||||
assert.NoError(t, err, clues.ToCore(err))
|
||||
assert.ElementsMatch(t, test.Expected(t, v), output.Entries)
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func (suite *SharePointUnitSuite) TestSharePointBackupDetailsSelectorsBadFormats() {
|
||||
for _, test := range utilsTD.BadSharePointOptionsFormats {
|
||||
suite.Run(test.Name, func() {
|
||||
t := suite.T()
|
||||
|
||||
ctx, flush := tester.NewContext(t)
|
||||
defer flush()
|
||||
|
||||
output, err := runDetailsSharePointCmd(
|
||||
ctx,
|
||||
test.BackupGetter,
|
||||
"backup-ID",
|
||||
test.Opts(t, version.Backup),
|
||||
false)
|
||||
assert.Error(t, err, clues.ToCore(err))
|
||||
assert.Empty(t, output)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
src/cli/flags/testdata/backup_list.go (vendored, 3 changed lines)
@ -3,9 +3,10 @@ package testdata
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/alcionai/corso/src/cli/flags"
|
||||
"github.com/spf13/cobra"
|
||||
"gotest.tools/v3/assert"
|
||||
|
||||
"github.com/alcionai/corso/src/cli/flags"
|
||||
)
|
||||
|
||||
func PreparedBackupListFlags() []string {
|
||||
|
||||
@ -85,7 +85,7 @@ func initFilesystemCmd(cmd *cobra.Command, args []string) error {
|
||||
|
||||
opt := utils.ControlWithConfig(cfg)
|
||||
// Retention is not supported for filesystem repos.
|
||||
retention := ctrlRepo.Retention{}
|
||||
retentionOpts := ctrlRepo.Retention{}
|
||||
|
||||
// SendStartCorsoEvent uses distict ID as tenant ID because repoID is still not generated
|
||||
utils.SendStartCorsoEvent(
|
||||
@ -116,7 +116,9 @@ func initFilesystemCmd(cmd *cobra.Command, args []string) error {
|
||||
return Only(ctx, clues.Wrap(err, "Failed to construct the repository controller"))
|
||||
}
|
||||
|
||||
if err = r.Initialize(ctx, retention); err != nil {
|
||||
ric := repository.InitConfig{RetentionOpts: retentionOpts}
|
||||
|
||||
if err = r.Initialize(ctx, ric); err != nil {
|
||||
if flags.SucceedIfExistsFV && errors.Is(err, repository.ErrorRepoAlreadyExists) {
|
||||
return nil
|
||||
}
|
||||
@ -207,7 +209,7 @@ func connectFilesystemCmd(cmd *cobra.Command, args []string) error {
|
||||
return Only(ctx, clues.Wrap(err, "Failed to create a repository controller"))
|
||||
}
|
||||
|
||||
if err := r.Connect(ctx); err != nil {
|
||||
if err := r.Connect(ctx, repository.ConnConfig{}); err != nil {
|
||||
return Only(ctx, clues.Stack(ErrConnectingRepo, err))
|
||||
}
|
||||
|
||||
|
||||
@ -16,7 +16,6 @@ import (
|
||||
"github.com/alcionai/corso/src/internal/tester/tconfig"
|
||||
"github.com/alcionai/corso/src/pkg/account"
|
||||
"github.com/alcionai/corso/src/pkg/control"
|
||||
ctrlRepo "github.com/alcionai/corso/src/pkg/control/repository"
|
||||
"github.com/alcionai/corso/src/pkg/repository"
|
||||
"github.com/alcionai/corso/src/pkg/storage"
|
||||
storeTD "github.com/alcionai/corso/src/pkg/storage/testdata"
|
||||
@ -132,13 +131,13 @@ func (suite *FilesystemE2ESuite) TestConnectFilesystemCmd() {
|
||||
// init the repo first
|
||||
r, err := repository.New(
|
||||
ctx,
|
||||
account.Account{},
|
||||
tconfig.NewM365Account(t),
|
||||
st,
|
||||
control.DefaultOptions(),
|
||||
repository.NewRepoID)
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
err = r.Initialize(ctx, ctrlRepo.Retention{})
|
||||
err = r.Initialize(ctx, repository.InitConfig{})
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
// then test it
|
||||
|
||||
@ -143,7 +143,9 @@ func initS3Cmd(cmd *cobra.Command, args []string) error {
|
||||
return Only(ctx, clues.Wrap(err, "Failed to construct the repository controller"))
|
||||
}
|
||||
|
||||
if err = r.Initialize(ctx, retentionOpts); err != nil {
|
||||
ric := repository.InitConfig{RetentionOpts: retentionOpts}
|
||||
|
||||
if err = r.Initialize(ctx, ric); err != nil {
|
||||
if flags.SucceedIfExistsFV && errors.Is(err, repository.ErrorRepoAlreadyExists) {
|
||||
return nil
|
||||
}
|
||||
@ -226,7 +228,7 @@ func connectS3Cmd(cmd *cobra.Command, args []string) error {
|
||||
return Only(ctx, clues.Wrap(err, "Failed to create a repository controller"))
|
||||
}
|
||||
|
||||
if err := r.Connect(ctx); err != nil {
|
||||
if err := r.Connect(ctx, repository.ConnConfig{}); err != nil {
|
||||
return Only(ctx, clues.Stack(ErrConnectingRepo, err))
|
||||
}
|
||||
|
||||
|
||||
@ -18,7 +18,6 @@ import (
|
||||
"github.com/alcionai/corso/src/internal/tester/tconfig"
|
||||
"github.com/alcionai/corso/src/pkg/account"
|
||||
"github.com/alcionai/corso/src/pkg/control"
|
||||
ctrlRepo "github.com/alcionai/corso/src/pkg/control/repository"
|
||||
"github.com/alcionai/corso/src/pkg/repository"
|
||||
"github.com/alcionai/corso/src/pkg/storage"
|
||||
storeTD "github.com/alcionai/corso/src/pkg/storage/testdata"
|
||||
@ -208,13 +207,13 @@ func (suite *S3E2ESuite) TestConnectS3Cmd() {
|
||||
// init the repo first
|
||||
r, err := repository.New(
|
||||
ctx,
|
||||
account.Account{},
|
||||
tconfig.NewM365Account(t),
|
||||
st,
|
||||
control.DefaultOptions(),
|
||||
repository.NewRepoID)
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
err = r.Initialize(ctx, ctrlRepo.Retention{})
|
||||
err = r.Initialize(ctx, repository.InitConfig{})
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
// then test it
|
||||
|
||||
@ -20,7 +20,6 @@ import (
|
||||
"github.com/alcionai/corso/src/internal/tester/tconfig"
|
||||
"github.com/alcionai/corso/src/pkg/account"
|
||||
"github.com/alcionai/corso/src/pkg/control"
|
||||
ctrlRepo "github.com/alcionai/corso/src/pkg/control/repository"
|
||||
"github.com/alcionai/corso/src/pkg/path"
|
||||
"github.com/alcionai/corso/src/pkg/repository"
|
||||
"github.com/alcionai/corso/src/pkg/selectors"
|
||||
@ -92,7 +91,7 @@ func (suite *RestoreExchangeE2ESuite) SetupSuite() {
|
||||
repository.NewRepoID)
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
err = suite.repo.Initialize(ctx, ctrlRepo.Retention{})
|
||||
err = suite.repo.Initialize(ctx, repository.InitConfig{Service: path.ExchangeService})
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
suite.backupOps = make(map[path.CategoryType]operations.BackupOperation)
|
||||
|
||||
@ -78,16 +78,10 @@ func GetAccountAndConnectWithOverrides(
|
||||
return nil, RepoDetailsAndOpts{}, clues.Wrap(err, "creating a repository controller")
|
||||
}
|
||||
|
||||
if err := r.Connect(ctx); err != nil {
|
||||
if err := r.Connect(ctx, repository.ConnConfig{Service: pst}); err != nil {
|
||||
return nil, RepoDetailsAndOpts{}, clues.Wrap(err, "connecting to the "+cfg.Storage.Provider.String()+" repository")
|
||||
}
|
||||
|
||||
// this initializes our graph api client configurations,
|
||||
// including control options such as concurency limitations.
|
||||
if _, err := r.ConnectToM365(ctx, pst); err != nil {
|
||||
return nil, RepoDetailsAndOpts{}, clues.Wrap(err, "connecting to m365")
|
||||
}
|
||||
|
||||
rdao := RepoDetailsAndOpts{
|
||||
Repo: cfg,
|
||||
Opts: opts,
|
||||
|
||||
@ -72,7 +72,7 @@ func deleteBackups(
|
||||
// Only supported for S3 repos currently.
|
||||
func pitrListBackups(
|
||||
ctx context.Context,
|
||||
service path.ServiceType,
|
||||
pst path.ServiceType,
|
||||
pitr time.Time,
|
||||
backupIDs []string,
|
||||
) error {
|
||||
@ -113,14 +113,14 @@ func pitrListBackups(
|
||||
return clues.Wrap(err, "creating a repo")
|
||||
}
|
||||
|
||||
err = r.Connect(ctx)
|
||||
err = r.Connect(ctx, repository.ConnConfig{Service: pst})
|
||||
if err != nil {
|
||||
return clues.Wrap(err, "connecting to the repository")
|
||||
}
|
||||
|
||||
defer r.Close(ctx)
|
||||
|
||||
backups, err := r.BackupsByTag(ctx, store.Service(service))
|
||||
backups, err := r.BackupsByTag(ctx, store.Service(pst))
|
||||
if err != nil {
|
||||
return clues.Wrap(err, "listing backups").WithClues(ctx)
|
||||
}
|
||||
|
||||
@ -229,7 +229,7 @@ elseif (![string]::IsNullOrEmpty($Site)) {
|
||||
}
|
||||
}
|
||||
else {
|
||||
Write-Host "User (for OneDrive) or Site (for Sharepoint) is required"
|
||||
Write-Host "User (for OneDrvie) or Site (for Sharpeoint) is required"
|
||||
Exit
|
||||
}
|
||||
|
||||
|
||||
@ -8,7 +8,7 @@ require (
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.1
|
||||
github.com/alcionai/clues v0.0.0-20230920212840-728ac1a1d8b8
|
||||
github.com/armon/go-metrics v0.4.1
|
||||
github.com/aws/aws-xray-sdk-go v1.8.1
|
||||
github.com/aws/aws-xray-sdk-go v1.8.2
|
||||
github.com/cenkalti/backoff/v4 v4.2.1
|
||||
github.com/google/uuid v1.3.1
|
||||
github.com/h2non/gock v1.2.0
|
||||
|
||||
@ -71,8 +71,8 @@ github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJ
|
||||
github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
|
||||
github.com/aws/aws-sdk-go v1.45.0 h1:qoVOQHuLacxJMO71T49KeE70zm+Tk3vtrl7XO4VUPZc=
|
||||
github.com/aws/aws-sdk-go v1.45.0/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
|
||||
github.com/aws/aws-xray-sdk-go v1.8.1 h1:O4pXV+hnCskaamGsZnFpzHyAmgPGusBMN6i7nnsy0Fo=
|
||||
github.com/aws/aws-xray-sdk-go v1.8.1/go.mod h1:wMmVYzej3sykAttNBkXQHK/+clAPWTOrPiajEk7Cp3A=
|
||||
github.com/aws/aws-xray-sdk-go v1.8.2 h1:PVxNWnQG+rAYjxsmhEN97DTO57Dipg6VS0wsu6bXUB0=
|
||||
github.com/aws/aws-xray-sdk-go v1.8.2/go.mod h1:wMmVYzej3sykAttNBkXQHK/+clAPWTOrPiajEk7Cp3A=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
|
||||
@ -27,7 +27,7 @@ func NewPrefixMap(m map[string]map[string]struct{}) *PrefixMap {
|
||||
|
||||
func (pm PrefixMap) AssertEqual(t *testing.T, r prefixmatcher.StringSetReader) {
|
||||
if pm.Empty() {
|
||||
require.True(t, r.Empty(), "result prefixMap should be empty but contains keys: %+v", r.Keys())
|
||||
require.True(t, r.Empty(), "both prefix maps are empty")
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
src/internal/common/readers/serialization_version.go (new file, 187 lines)
@@ -0,0 +1,187 @@
package readers

import (
	"bytes"
	"encoding/binary"
	"io"
	"os"
	"unsafe"

	"github.com/alcionai/clues"
)

// persistedSerializationVersion is the size of the serialization version in
// storage.
//
// The current on-disk format of this field is written in big endian. The
// highest bit denotes if the item is empty because it was deleted between the
// time we told the storage about it and when we needed to get data for it. The
// lowest two bytes are the version number. All other bits are reserved for
// future use.
//
// MSB 31 30 16 8 0 LSB
// +----------+----+---------+--------+-------+
// | del flag | reserved | version number |
// +----------+----+---------+--------+-------+
type persistedSerializationVersion = uint32

// SerializationVersion is the in-memory size of the version number that gets
// added to the persisted serialization version.
//
// Right now it's only a uint16 but we can expand it to be larger so long as the
// expanded size doesn't clash with the flags in the high-order bits.
type SerializationVersion uint16

// DefaultSerializationVersion is the current (default) version number for all
// services. As services evolve their storage format they should begin tracking
// their own version numbers separate from other services.
const DefaultSerializationVersion SerializationVersion = 1

const (
	VersionFormatSize = int(unsafe.Sizeof(persistedSerializationVersion(0)))
	delInFlightMask persistedSerializationVersion = 1 << ((VersionFormatSize * 8) - 1)
)

// SerializationFormat is a struct describing serialization format versions and
// flags to add for this item.
type SerializationFormat struct {
	Version SerializationVersion
	DelInFlight bool
}

// NewVersionedBackupReader creates a reader that injects format into the first
// bytes of the returned data. After format has been returned, data is returned
// from baseReaders in the order they're passed in.
func NewVersionedBackupReader(
	format SerializationFormat,
	baseReaders ...io.ReadCloser,
) (io.ReadCloser, error) {
	if format.DelInFlight && len(baseReaders) > 0 {
		// This is a conservative check, but we can always loosen it later on if
		// needed. At the moment we really don't expect any data if the item was
		// deleted.
		return nil, clues.New("item marked deleted but has reader(s)")
	}

	formattedVersion := persistedSerializationVersion(format.Version)
	if format.DelInFlight {
		formattedVersion |= delInFlightMask
	}

	formattedBuf := make([]byte, VersionFormatSize)
	binary.BigEndian.PutUint32(formattedBuf, formattedVersion)

	versionReader := io.NopCloser(bytes.NewReader(formattedBuf))

	// Need to add readers individually because types differ.
	allReaders := make([]io.Reader, 0, len(baseReaders)+1)
	allReaders = append(allReaders, versionReader)

	for _, r := range baseReaders {
		allReaders = append(allReaders, r)
	}

	res := &versionedBackupReader{
		baseReaders: append([]io.ReadCloser{versionReader}, baseReaders...),
		combined: io.MultiReader(allReaders...),
	}

	return res, nil
}

type versionedBackupReader struct {
	// baseReaders is a reference to the original readers so we can close them.
	baseReaders []io.ReadCloser
	// combined is the reader that will return all data.
	combined io.Reader
}

func (vbr *versionedBackupReader) Read(p []byte) (int, error) {
	if vbr.combined == nil {
		return 0, os.ErrClosed
	}

	n, err := vbr.combined.Read(p)
	if err == io.EOF {
		// Golang doesn't allow wrapping of EOF. If we wrap it other things start
		// thinking it's an actual error.
		return n, err
	}

	return n, clues.Stack(err).OrNil()
}

func (vbr *versionedBackupReader) Close() error {
	if vbr.combined == nil {
		return nil
	}

	vbr.combined = nil

	var errs *clues.Err

	for i, r := range vbr.baseReaders {
		if err := r.Close(); err != nil {
			errs = clues.Stack(
				errs,
				clues.Wrap(err, "closing reader").With("reader_index", i))
		}
	}

	vbr.baseReaders = nil

	return errs.OrNil()
}

// NewVersionedRestoreReader wraps baseReader and provides easy access to the
// SerializationFormat info in the first bytes of the data contained in
// baseReader.
func NewVersionedRestoreReader(
	baseReader io.ReadCloser,
) (*VersionedRestoreReader, error) {
	versionBuf := make([]byte, VersionFormatSize)

	// Loop to account for the unlikely case where we get a short read.
	for read := 0; read < VersionFormatSize; {
		n, err := baseReader.Read(versionBuf[read:])
		if err != nil {
			return nil, clues.Wrap(err, "reading serialization version")
		}

		read += n
	}

	formattedVersion := binary.BigEndian.Uint32(versionBuf)

	return &VersionedRestoreReader{
		baseReader: baseReader,
		format: SerializationFormat{
			Version: SerializationVersion(formattedVersion),
			DelInFlight: (formattedVersion & delInFlightMask) != 0,
		},
	}, nil
}

type VersionedRestoreReader struct {
	baseReader io.ReadCloser
	format SerializationFormat
}

func (vrr *VersionedRestoreReader) Read(p []byte) (int, error) {
	n, err := vrr.baseReader.Read(p)
	if err == io.EOF {
		// Golang doesn't allow wrapping of EOF. If we wrap it other things start
		// thinking it's an actual error.
		return n, err
	}

	return n, clues.Stack(err).OrNil()
}

func (vrr *VersionedRestoreReader) Close() error {
	return clues.Stack(vrr.baseReader.Close()).OrNil()
}

func (vrr VersionedRestoreReader) Format() SerializationFormat {
	return vrr.format
}
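To make the read/write symmetry of the new versioned readers concrete, here is a small round-trip sketch; it is illustrative only (not part of this commit), uses only the exported functions defined in the file above, and assumes imports of "bytes", "io", and the readers package.

// roundTripSketch wraps a payload with NewVersionedBackupReader, then recovers
// the serialization format and payload with NewVersionedRestoreReader.
func roundTripSketch() ([]byte, error) {
	payload := io.NopCloser(bytes.NewReader([]byte("item body")))

	br, err := readers.NewVersionedBackupReader(
		readers.SerializationFormat{Version: readers.DefaultSerializationVersion},
		payload)
	if err != nil {
		return nil, err
	}

	rr, err := readers.NewVersionedRestoreReader(br)
	if err != nil {
		return nil, err
	}
	defer rr.Close()

	// rr.Format().Version is DefaultSerializationVersion and
	// rr.Format().DelInFlight is false for this payload.
	return io.ReadAll(rr) // returns []byte("item body")
}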
src/internal/common/readers/serialization_version_test.go (new file, 362 lines)
@ -0,0 +1,362 @@
|
||||
package readers_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"testing"
|
||||
|
||||
"github.com/alcionai/clues"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/stretchr/testify/suite"
|
||||
"golang.org/x/exp/slices"
|
||||
|
||||
"github.com/alcionai/corso/src/internal/common/readers"
|
||||
"github.com/alcionai/corso/src/internal/tester"
|
||||
)
|
||||
|
||||
type shortReader struct {
|
||||
maxReadLen int
|
||||
io.ReadCloser
|
||||
}
|
||||
|
||||
func (s *shortReader) Read(p []byte) (int, error) {
|
||||
toRead := s.maxReadLen
|
||||
if len(p) < toRead {
|
||||
toRead = len(p)
|
||||
}
|
||||
|
||||
return s.ReadCloser.Read(p[:toRead])
|
||||
}
|
||||
|
||||
type SerializationReaderUnitSuite struct {
|
||||
tester.Suite
|
||||
}
|
||||
|
||||
func TestSerializationReaderUnitSuite(t *testing.T) {
|
||||
suite.Run(t, &SerializationReaderUnitSuite{Suite: tester.NewUnitSuite(t)})
|
||||
}
|
||||
|
||||
func (suite *SerializationReaderUnitSuite) TestBackupSerializationReader() {
|
||||
baseData := []byte("hello world")
|
||||
|
||||
table := []struct {
|
||||
name string
|
||||
format readers.SerializationFormat
|
||||
inputReaders []io.ReadCloser
|
||||
|
||||
expectErr require.ErrorAssertionFunc
|
||||
expectData []byte
|
||||
}{
|
||||
{
|
||||
name: "DeletedInFlight NoVersion NoReaders",
|
||||
format: readers.SerializationFormat{
|
||||
DelInFlight: true,
|
||||
},
|
||||
expectErr: require.NoError,
|
||||
expectData: []byte{0x80, 0x0, 0x0, 0x0},
|
||||
},
|
||||
{
|
||||
name: "DeletedInFlight NoReaders",
|
||||
format: readers.SerializationFormat{
|
||||
Version: 42,
|
||||
DelInFlight: true,
|
||||
},
|
||||
expectErr: require.NoError,
|
||||
expectData: []byte{0x80, 0x0, 0x0, 42},
|
||||
},
|
||||
{
|
||||
name: "NoVersion NoReaders",
|
||||
expectErr: require.NoError,
|
||||
expectData: []byte{0x00, 0x0, 0x0, 0x0},
|
||||
},
|
||||
{
|
||||
name: "NoReaders",
|
||||
format: readers.SerializationFormat{
|
||||
Version: 42,
|
||||
},
|
||||
expectErr: require.NoError,
|
||||
expectData: []byte{0x00, 0x0, 0x0, 42},
|
||||
},
|
||||
{
|
||||
name: "SingleReader",
|
||||
format: readers.SerializationFormat{
|
||||
Version: 42,
|
||||
},
|
||||
inputReaders: []io.ReadCloser{io.NopCloser(bytes.NewReader(baseData))},
|
||||
expectErr: require.NoError,
|
||||
expectData: append([]byte{0x00, 0x0, 0x0, 42}, baseData...),
|
||||
},
|
||||
{
|
||||
name: "MultipleReaders",
|
||||
format: readers.SerializationFormat{
|
||||
Version: 42,
|
||||
},
|
||||
inputReaders: []io.ReadCloser{
|
||||
io.NopCloser(bytes.NewReader(baseData)),
|
||||
io.NopCloser(bytes.NewReader(baseData)),
|
||||
},
|
||||
expectErr: require.NoError,
|
||||
expectData: append(
|
||||
append([]byte{0x00, 0x0, 0x0, 42}, baseData...),
|
||||
baseData...),
|
||||
},
|
||||
// Uncomment if we expand the version to 32 bits.
|
||||
//{
|
||||
// name: "VersionWithHighBitSet NoReaders Errors",
|
||||
// format: readers.SerializationFormat{
|
||||
// Version: 0x80000000,
|
||||
// },
|
||||
// expectErr: require.Error,
|
||||
//},
|
||||
{
|
||||
name: "DeletedInFlight SingleReader Errors",
|
||||
format: readers.SerializationFormat{
|
||||
DelInFlight: true,
|
||||
},
|
||||
inputReaders: []io.ReadCloser{io.NopCloser(bytes.NewReader(baseData))},
|
||||
expectErr: require.Error,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range table {
|
||||
suite.Run(test.name, func() {
|
||||
t := suite.T()
|
||||
|
||||
r, err := readers.NewVersionedBackupReader(
|
||||
test.format,
|
||||
test.inputReaders...)
|
||||
test.expectErr(t, err, "getting backup reader: %v", clues.ToCore(err))
|
||||
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
defer func() {
|
||||
err := r.Close()
|
||||
assert.NoError(t, err, "closing reader: %v", clues.ToCore(err))
|
||||
}()
|
||||
|
||||
buf, err := io.ReadAll(r)
|
||||
require.NoError(
|
||||
t,
|
||||
err,
|
||||
"reading serialized data: %v",
|
||||
clues.ToCore(err))
|
||||
|
||||
// Need to use equal because output is order-sensitive.
|
||||
assert.Equal(t, test.expectData, buf, "serialized data")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func (suite *SerializationReaderUnitSuite) TestBackupSerializationReader_ShortReads() {
|
||||
t := suite.T()
|
||||
|
||||
baseData := []byte("hello world")
|
||||
expectData := append(
|
||||
append([]byte{0x00, 0x0, 0x0, 42}, baseData...),
|
||||
baseData...)
|
||||
|
||||
r, err := readers.NewVersionedBackupReader(
|
||||
readers.SerializationFormat{Version: 42},
|
||||
io.NopCloser(bytes.NewReader(baseData)),
|
||||
io.NopCloser(bytes.NewReader(baseData)))
|
||||
require.NoError(t, err, "getting backup reader: %v", clues.ToCore(err))
|
||||
|
||||
defer func() {
|
||||
err := r.Close()
|
||||
assert.NoError(t, err, "closing reader: %v", clues.ToCore(err))
|
||||
}()
|
||||
|
||||
buf := make([]byte, len(expectData))
|
||||
r = &shortReader{
|
||||
maxReadLen: 3,
|
||||
ReadCloser: r,
|
||||
}
|
||||
|
||||
for read := 0; ; {
|
||||
n, err := r.Read(buf[read:])
|
||||
|
||||
read += n
|
||||
if read >= len(buf) {
|
||||
break
|
||||
}
|
||||
|
||||
require.NoError(t, err, "reading data: %v", clues.ToCore(err))
|
||||
}
|
||||
|
||||
// Need to use equal because output is order-sensitive.
|
||||
assert.Equal(t, expectData, buf, "serialized data")
|
||||
}
|
||||
|
||||
// TestRestoreSerializationReader checks that we can read previously serialized
|
||||
// data. For simplicity, it uses the versionedBackupReader to generate the
|
||||
// input. This should be relatively safe because the tests for
|
||||
// versionedBackupReader do compare directly against serialized data.
|
||||
func (suite *SerializationReaderUnitSuite) TestRestoreSerializationReader() {
|
||||
baseData := []byte("hello world")
|
||||
|
||||
table := []struct {
|
||||
name string
|
||||
inputReader func(*testing.T) io.ReadCloser
|
||||
|
||||
expectErr require.ErrorAssertionFunc
|
||||
expectVersion readers.SerializationVersion
|
||||
expectDelInFlight bool
|
||||
expectData []byte
|
||||
}{
|
||||
{
|
||||
name: "NoVersion NoReaders",
|
||||
inputReader: func(t *testing.T) io.ReadCloser {
|
||||
r, err := readers.NewVersionedBackupReader(readers.SerializationFormat{})
|
||||
require.NoError(t, err, "making reader: %v", clues.ToCore(err))
|
||||
|
||||
return r
|
||||
},
|
||||
expectErr: require.NoError,
|
||||
expectData: []byte{},
|
||||
},
|
||||
{
|
||||
name: "DeletedInFlight NoReaders",
|
||||
inputReader: func(t *testing.T) io.ReadCloser {
|
||||
r, err := readers.NewVersionedBackupReader(
|
||||
readers.SerializationFormat{
|
||||
Version: 42,
|
||||
DelInFlight: true,
|
||||
})
|
||||
require.NoError(t, err, "making reader: %v", clues.ToCore(err))
|
||||
|
||||
return r
|
||||
},
|
||||
expectErr: require.NoError,
|
||||
expectVersion: 42,
|
||||
expectDelInFlight: true,
|
||||
expectData: []byte{},
|
||||
},
|
||||
{
|
||||
name: "DeletedInFlight SingleReader",
|
||||
inputReader: func(t *testing.T) io.ReadCloser {
|
||||
// Need to specify the bytes manually because the backup reader won't
|
||||
// allow creating something with the deleted flag and data.
|
||||
return io.NopCloser(bytes.NewReader(append(
|
||||
[]byte{0x80, 0x0, 0x0, 42},
|
||||
baseData...)))
|
||||
},
|
||||
expectErr: require.NoError,
|
||||
expectVersion: 42,
|
||||
expectDelInFlight: true,
|
||||
expectData: baseData,
|
||||
},
|
||||
{
|
||||
name: "NoVersion SingleReader",
|
||||
inputReader: func(t *testing.T) io.ReadCloser {
|
||||
r, err := readers.NewVersionedBackupReader(
|
||||
readers.SerializationFormat{},
|
||||
io.NopCloser(bytes.NewReader(baseData)))
|
||||
require.NoError(t, err, "making reader: %v", clues.ToCore(err))
|
||||
|
||||
return r
|
||||
},
|
||||
expectErr: require.NoError,
|
||||
expectData: baseData,
|
||||
},
|
||||
{
|
||||
name: "SingleReader",
|
||||
inputReader: func(t *testing.T) io.ReadCloser {
|
||||
r, err := readers.NewVersionedBackupReader(
|
||||
readers.SerializationFormat{Version: 42},
|
||||
io.NopCloser(bytes.NewReader(baseData)))
|
||||
require.NoError(t, err, "making reader: %v", clues.ToCore(err))
|
||||
|
||||
return r
|
||||
},
|
||||
expectErr: require.NoError,
|
||||
expectVersion: 42,
|
||||
expectData: baseData,
|
||||
},
|
||||
{
|
||||
name: "ShortReads SingleReader",
|
||||
inputReader: func(t *testing.T) io.ReadCloser {
|
||||
r, err := readers.NewVersionedBackupReader(
|
||||
readers.SerializationFormat{Version: 42},
|
||||
io.NopCloser(bytes.NewReader(baseData)))
|
||||
require.NoError(t, err, "making reader: %v", clues.ToCore(err))
|
||||
|
||||
r = &shortReader{
|
||||
maxReadLen: 3,
|
||||
ReadCloser: r,
|
||||
}
|
||||
|
||||
return r
|
||||
},
|
||||
expectErr: require.NoError,
|
||||
expectVersion: 42,
|
||||
expectData: baseData,
|
||||
},
|
||||
{
|
||||
name: "MultipleReaders",
|
||||
inputReader: func(t *testing.T) io.ReadCloser {
|
||||
r, err := readers.NewVersionedBackupReader(
|
||||
readers.SerializationFormat{Version: 42},
|
||||
io.NopCloser(bytes.NewReader(baseData)),
|
||||
io.NopCloser(bytes.NewReader(baseData)))
|
||||
require.NoError(t, err, "making reader: %v", clues.ToCore(err))
|
||||
|
||||
return r
|
||||
},
|
||||
expectErr: require.NoError,
|
||||
expectVersion: 42,
|
||||
expectData: append(slices.Clone(baseData), baseData...),
|
||||
},
|
||||
{
|
||||
name: "EmptyReader Errors",
|
||||
inputReader: func(t *testing.T) io.ReadCloser {
|
||||
return io.NopCloser(bytes.NewReader([]byte{}))
|
||||
},
|
||||
expectErr: require.Error,
|
||||
},
|
||||
{
|
||||
name: "TruncatedVersion Errors",
|
||||
inputReader: func(t *testing.T) io.ReadCloser {
|
||||
return io.NopCloser(bytes.NewReader([]byte{0x80, 0x0}))
|
||||
},
|
||||
expectErr: require.Error,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range table {
|
||||
suite.Run(test.name, func() {
|
||||
t := suite.T()
|
||||
|
||||
r, err := readers.NewVersionedRestoreReader(test.inputReader(t))
|
||||
test.expectErr(t, err, "getting restore reader: %v", clues.ToCore(err))
|
||||
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
defer func() {
|
||||
err := r.Close()
|
||||
assert.NoError(t, err, "closing reader: %v", clues.ToCore(err))
|
||||
}()
|
||||
|
||||
assert.Equal(
|
||||
t,
|
||||
test.expectVersion,
|
||||
r.Format().Version,
|
||||
"version")
|
||||
assert.Equal(
|
||||
t,
|
||||
test.expectDelInFlight,
|
||||
r.Format().DelInFlight,
|
||||
"deleted in flight")
|
||||
|
||||
buf, err := io.ReadAll(r)
|
||||
require.NoError(t, err, "reading serialized data: %v", clues.ToCore(err))
|
||||
|
||||
// Need to use equal because output is order-sensitive.
|
||||
assert.Equal(t, test.expectData, buf, "serialized data")
|
||||
})
|
||||
}
|
||||
}
|
||||
@ -1,7 +1,6 @@
|
||||
package data
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"io"
|
||||
"sync"
|
||||
@ -10,6 +9,7 @@ import (
|
||||
"github.com/alcionai/clues"
|
||||
"github.com/spatialcurrent/go-lazy/pkg/lazy"
|
||||
|
||||
"github.com/alcionai/corso/src/internal/common/readers"
|
||||
"github.com/alcionai/corso/src/pkg/backup/details"
|
||||
"github.com/alcionai/corso/src/pkg/fault"
|
||||
"github.com/alcionai/corso/src/pkg/logger"
|
||||
@ -46,12 +46,19 @@ func NewUnindexedPrefetchedItem(
|
||||
reader io.ReadCloser,
|
||||
itemID string,
|
||||
modTime time.Time,
|
||||
) Item {
|
||||
) (*unindexedPrefetchedItem, error) {
|
||||
r, err := readers.NewVersionedBackupReader(
|
||||
readers.SerializationFormat{Version: readers.DefaultSerializationVersion},
|
||||
reader)
|
||||
if err != nil {
|
||||
return nil, clues.Stack(err)
|
||||
}
|
||||
|
||||
return &unindexedPrefetchedItem{
|
||||
id: itemID,
|
||||
reader: reader,
|
||||
reader: r,
|
||||
modTime: modTime,
|
||||
}
|
||||
}, nil
|
||||
}
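A hedged sketch of the updated call pattern, since the constructor now returns an error when it wraps the payload in a versioned backup reader (payload and itemID are placeholders):

item, err := data.NewUnindexedPrefetchedItem(
	io.NopCloser(bytes.NewReader(payload)),
	itemID,
	time.Now())
if err != nil {
	return clues.Stack(err)
}

// item.ToReader() now emits the serialization header followed by payload.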
|
||||
// unindexedPrefetchedItem represents a single item retrieved from the remote
|
||||
@ -92,15 +99,16 @@ func NewPrefetchedItem(
|
||||
reader io.ReadCloser,
|
||||
itemID string,
|
||||
info details.ItemInfo,
|
||||
) Item {
|
||||
return &prefetchedItem{
|
||||
unindexedPrefetchedItem: unindexedPrefetchedItem{
|
||||
id: itemID,
|
||||
reader: reader,
|
||||
modTime: info.Modified(),
|
||||
},
|
||||
info: info,
|
||||
) (*prefetchedItem, error) {
|
||||
inner, err := NewUnindexedPrefetchedItem(reader, itemID, info.Modified())
|
||||
if err != nil {
|
||||
return nil, clues.Stack(err)
|
||||
}
|
||||
|
||||
return &prefetchedItem{
|
||||
unindexedPrefetchedItem: inner,
|
||||
info: info,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// prefetchedItem represents a single item retrieved from the remote service.
|
||||
@ -108,7 +116,7 @@ func NewPrefetchedItem(
|
||||
// This item implements ItemInfo so it should be used for things that need to
|
||||
// appear in backup details.
|
||||
type prefetchedItem struct {
|
||||
unindexedPrefetchedItem
|
||||
*unindexedPrefetchedItem
|
||||
info details.ItemInfo
|
||||
}
|
||||
|
||||
@ -129,7 +137,7 @@ func NewUnindexedLazyItem(
|
||||
itemID string,
|
||||
modTime time.Time,
|
||||
errs *fault.Bus,
|
||||
) Item {
|
||||
) *unindexedLazyItem {
|
||||
return &unindexedLazyItem{
|
||||
ctx: ctx,
|
||||
id: itemID,
|
||||
@ -182,6 +190,10 @@ func (i *unindexedLazyItem) ToReader() io.ReadCloser {
|
||||
return nil, clues.Stack(err)
|
||||
}
|
||||
|
||||
format := readers.SerializationFormat{
|
||||
Version: readers.DefaultSerializationVersion,
|
||||
}
|
||||
|
||||
// If an item was deleted, return an empty file so we don't fail the
// backup, and return a sentinel error when asked for ItemInfo so we don't
// display the item in the backup.
|
||||
@ -193,13 +205,17 @@ func (i *unindexedLazyItem) ToReader() io.ReadCloser {
|
||||
logger.Ctx(i.ctx).Info("item not found")
|
||||
|
||||
i.delInFlight = true
|
||||
format.DelInFlight = true
|
||||
r, err := readers.NewVersionedBackupReader(format)
|
||||
|
||||
return io.NopCloser(bytes.NewReader([]byte{})), nil
|
||||
return r, clues.Stack(err).OrNil()
|
||||
}
|
||||
|
||||
i.info = info
|
||||
|
||||
return reader, nil
|
||||
r, err := readers.NewVersionedBackupReader(format, reader)
|
||||
|
||||
return r, clues.Stack(err).OrNil()
|
||||
})
|
||||
}
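With the header now injected by ToReader, downstream consumers are expected to unwrap it before using the payload; roughly what the unit tests further down do (a sketch, assuming item is any data.Item produced by this package):

rr, err := readers.NewVersionedRestoreReader(item.ToReader())
if err != nil {
	return clues.Stack(err)
}
defer rr.Close()

if rr.Format().DelInFlight {
	// The item disappeared between enumeration and fetch; skip its content.
	return nil
}

body, err := io.ReadAll(rr)
if err != nil {
	return clues.Stack(err)
}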
|
||||
@ -217,15 +233,14 @@ func NewLazyItem(
|
||||
itemID string,
|
||||
modTime time.Time,
|
||||
errs *fault.Bus,
|
||||
) Item {
|
||||
) *lazyItem {
|
||||
return &lazyItem{
|
||||
unindexedLazyItem: unindexedLazyItem{
|
||||
ctx: ctx,
|
||||
id: itemID,
|
||||
itemGetter: itemGetter,
|
||||
modTime: modTime,
|
||||
errs: errs,
|
||||
},
|
||||
unindexedLazyItem: NewUnindexedLazyItem(
|
||||
ctx,
|
||||
itemGetter,
|
||||
itemID,
|
||||
modTime,
|
||||
errs),
|
||||
}
|
||||
}
|
||||
|
||||
@ -236,7 +251,7 @@ func NewLazyItem(
|
||||
// This item implements ItemInfo so it should be used for things that need to
|
||||
// appear in backup details.
|
||||
type lazyItem struct {
|
||||
unindexedLazyItem
|
||||
*unindexedLazyItem
|
||||
}
|
||||
|
||||
func (i *lazyItem) Info() (details.ItemInfo, error) {
|
||||
|
||||
@ -12,6 +12,7 @@ import (
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/stretchr/testify/suite"
|
||||
|
||||
"github.com/alcionai/corso/src/internal/common/readers"
|
||||
"github.com/alcionai/corso/src/internal/data"
|
||||
"github.com/alcionai/corso/src/internal/tester"
|
||||
"github.com/alcionai/corso/src/pkg/backup/details"
|
||||
@ -50,11 +51,15 @@ func TestItemUnitSuite(t *testing.T) {
|
||||
}
|
||||
|
||||
func (suite *ItemUnitSuite) TestUnindexedPrefetchedItem() {
|
||||
prefetch := data.NewUnindexedPrefetchedItem(
|
||||
prefetch, err := data.NewUnindexedPrefetchedItem(
|
||||
io.NopCloser(bytes.NewReader([]byte{})),
|
||||
"foo",
|
||||
time.Time{})
|
||||
_, ok := prefetch.(data.ItemInfo)
|
||||
require.NoError(suite.T(), err, clues.ToCore(err))
|
||||
|
||||
var item data.Item = prefetch
|
||||
|
||||
_, ok := item.(data.ItemInfo)
|
||||
assert.False(suite.T(), ok, "unindexedPrefetchedItem implements Info()")
|
||||
}
|
||||
|
||||
@ -70,7 +75,10 @@ func (suite *ItemUnitSuite) TestUnindexedLazyItem() {
|
||||
"foo",
|
||||
time.Time{},
|
||||
fault.New(true))
|
||||
_, ok := lazy.(data.ItemInfo)
|
||||
|
||||
var item data.Item = lazy
|
||||
|
||||
_, ok := item.(data.ItemInfo)
|
||||
assert.False(t, ok, "unindexedLazyItem implements Info()")
|
||||
}
|
||||
|
||||
@ -140,18 +148,29 @@ func (suite *ItemUnitSuite) TestPrefetchedItem() {
|
||||
suite.Run(test.name, func() {
|
||||
t := suite.T()
|
||||
|
||||
item := data.NewPrefetchedItem(test.reader, id, test.info)
|
||||
item, err := data.NewPrefetchedItem(test.reader, id, test.info)
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
assert.Equal(t, id, item.ID(), "ID")
|
||||
assert.False(t, item.Deleted(), "deleted")
|
||||
assert.Equal(
|
||||
t,
|
||||
test.info.Modified(),
|
||||
item.(data.ItemModTime).ModTime(),
|
||||
item.ModTime(),
|
||||
"mod time")
|
||||
|
||||
readData, err := io.ReadAll(item.ToReader())
|
||||
test.readErr(t, err, clues.ToCore(err), "read error")
|
||||
r, err := readers.NewVersionedRestoreReader(item.ToReader())
|
||||
require.NoError(t, err, "version error: %v", clues.ToCore(err))
|
||||
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
assert.Equal(t, readers.DefaultSerializationVersion, r.Format().Version)
|
||||
assert.False(t, r.Format().DelInFlight)
|
||||
|
||||
readData, err := io.ReadAll(r)
|
||||
test.readErr(t, err, "read error: %v", clues.ToCore(err))
|
||||
assert.Equal(t, test.expectData, readData, "read data")
|
||||
})
|
||||
}
|
||||
@ -194,6 +213,7 @@ func (suite *ItemUnitSuite) TestLazyItem() {
|
||||
table := []struct {
|
||||
name string
|
||||
mid *mockItemDataGetter
|
||||
versionErr assert.ErrorAssertionFunc
|
||||
readErr assert.ErrorAssertionFunc
|
||||
infoErr assert.ErrorAssertionFunc
|
||||
expectData []byte
|
||||
@ -205,6 +225,7 @@ func (suite *ItemUnitSuite) TestLazyItem() {
|
||||
reader: io.NopCloser(bytes.NewReader([]byte{})),
|
||||
info: &details.ItemInfo{Exchange: &details.ExchangeInfo{Modified: now}},
|
||||
},
|
||||
versionErr: assert.NoError,
|
||||
readErr: assert.NoError,
|
||||
infoErr: assert.NoError,
|
||||
expectData: []byte{},
|
||||
@ -215,6 +236,7 @@ func (suite *ItemUnitSuite) TestLazyItem() {
|
||||
reader: io.NopCloser(bytes.NewReader(baseData)),
|
||||
info: &details.ItemInfo{Exchange: &details.ExchangeInfo{Modified: now}},
|
||||
},
|
||||
versionErr: assert.NoError,
|
||||
readErr: assert.NoError,
|
||||
infoErr: assert.NoError,
|
||||
expectData: baseData,
|
||||
@ -225,6 +247,7 @@ func (suite *ItemUnitSuite) TestLazyItem() {
|
||||
reader: io.NopCloser(bytes.NewReader(baseData)),
|
||||
info: &details.ItemInfo{OneDrive: &details.OneDriveInfo{Modified: now}},
|
||||
},
|
||||
versionErr: assert.NoError,
|
||||
readErr: assert.NoError,
|
||||
infoErr: assert.NoError,
|
||||
expectData: baseData,
|
||||
@ -234,6 +257,7 @@ func (suite *ItemUnitSuite) TestLazyItem() {
|
||||
mid: &mockItemDataGetter{
|
||||
err: assert.AnError,
|
||||
},
|
||||
versionErr: assert.Error,
|
||||
readErr: assert.Error,
|
||||
infoErr: assert.Error,
|
||||
expectData: []byte{},
|
||||
@ -249,6 +273,7 @@ func (suite *ItemUnitSuite) TestLazyItem() {
|
||||
},
|
||||
info: &details.ItemInfo{OneDrive: &details.OneDriveInfo{Modified: now}},
|
||||
},
|
||||
versionErr: assert.NoError,
|
||||
readErr: assert.Error,
|
||||
infoErr: assert.NoError,
|
||||
expectData: baseData[:5],
|
||||
@ -278,15 +303,25 @@ func (suite *ItemUnitSuite) TestLazyItem() {
|
||||
assert.Equal(
|
||||
t,
|
||||
now,
|
||||
item.(data.ItemModTime).ModTime(),
|
||||
item.ModTime(),
|
||||
"mod time")
|
||||
|
||||
// Read data to execute lazy reader.
|
||||
readData, err := io.ReadAll(item.ToReader())
|
||||
r, err := readers.NewVersionedRestoreReader(item.ToReader())
|
||||
test.versionErr(t, err, "version error: %v", clues.ToCore(err))
|
||||
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
assert.Equal(t, readers.DefaultSerializationVersion, r.Format().Version)
|
||||
assert.False(t, r.Format().DelInFlight)
|
||||
|
||||
readData, err := io.ReadAll(r)
|
||||
test.readErr(t, err, clues.ToCore(err), "read error")
|
||||
assert.Equal(t, test.expectData, readData, "read data")
|
||||
|
||||
_, err = item.(data.ItemInfo).Info()
|
||||
_, err = item.Info()
|
||||
test.infoErr(t, err, "Info(): %v", clues.ToCore(err))
|
||||
|
||||
e := errs.Errors()
|
||||
@ -326,15 +361,21 @@ func (suite *ItemUnitSuite) TestLazyItem_DeletedInFlight() {
|
||||
assert.Equal(
|
||||
t,
|
||||
now,
|
||||
item.(data.ItemModTime).ModTime(),
|
||||
item.ModTime(),
|
||||
"mod time")
|
||||
|
||||
// Read data to execute lazy reader.
|
||||
readData, err := io.ReadAll(item.ToReader())
|
||||
r, err := readers.NewVersionedRestoreReader(item.ToReader())
|
||||
require.NoError(t, err, "version error: %v", clues.ToCore(err))
|
||||
|
||||
assert.Equal(t, readers.DefaultSerializationVersion, r.Format().Version)
|
||||
assert.True(t, r.Format().DelInFlight)
|
||||
|
||||
readData, err := io.ReadAll(r)
|
||||
require.NoError(t, err, clues.ToCore(err), "read error")
|
||||
assert.Empty(t, readData, "read data")
|
||||
|
||||
_, err = item.(data.ItemInfo).Info()
|
||||
_, err = item.Info()
|
||||
assert.ErrorIs(t, err, data.ErrNotFound, "Info() error")
|
||||
|
||||
e := errs.Errors()
|
||||
@ -366,9 +407,9 @@ func (suite *ItemUnitSuite) TestLazyItem_InfoBeforeReadErrors() {
|
||||
assert.Equal(
|
||||
t,
|
||||
now,
|
||||
item.(data.ItemModTime).ModTime(),
|
||||
item.ModTime(),
|
||||
"mod time")
|
||||
|
||||
_, err := item.(data.ItemInfo).Info()
|
||||
_, err := item.Info()
|
||||
assert.Error(t, err, "Info() error")
|
||||
}
|
||||
|
||||
@ -3,8 +3,13 @@ package mock
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/alcionai/clues"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/alcionai/corso/src/internal/common/readers"
|
||||
"github.com/alcionai/corso/src/internal/data"
|
||||
"github.com/alcionai/corso/src/pkg/backup/details"
|
||||
"github.com/alcionai/corso/src/pkg/fault"
|
||||
@ -163,3 +168,106 @@ func (rc RestoreCollection) FetchItemByName(
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
var (
|
||||
_ data.BackupCollection = &versionedBackupCollection{}
|
||||
_ data.RestoreCollection = &unversionedRestoreCollection{}
|
||||
_ data.Item = &itemWrapper{}
|
||||
)
|
||||
|
||||
type itemWrapper struct {
|
||||
data.Item
|
||||
reader io.ReadCloser
|
||||
}
|
||||
|
||||
func (i *itemWrapper) ToReader() io.ReadCloser {
|
||||
return i.reader
|
||||
}
|
||||
|
||||
func NewUnversionedRestoreCollection(
|
||||
t *testing.T,
|
||||
col data.RestoreCollection,
|
||||
) *unversionedRestoreCollection {
|
||||
return &unversionedRestoreCollection{
|
||||
RestoreCollection: col,
|
||||
t: t,
|
||||
}
|
||||
}
|
||||
|
||||
// unversionedRestoreCollection strips out version format headers on all items.
|
||||
//
|
||||
// Wrap data.RestoreCollections in this type if you don't need access to the
|
||||
// version format header during tests and you know the item readers can't return
|
||||
// an error.
|
||||
type unversionedRestoreCollection struct {
|
||||
data.RestoreCollection
|
||||
t *testing.T
|
||||
}
|
||||
|
||||
func (c *unversionedRestoreCollection) Items(
|
||||
ctx context.Context,
|
||||
errs *fault.Bus,
|
||||
) <-chan data.Item {
|
||||
res := make(chan data.Item)
|
||||
go func() {
|
||||
defer close(res)
|
||||
|
||||
for item := range c.RestoreCollection.Items(ctx, errs) {
|
||||
r, err := readers.NewVersionedRestoreReader(item.ToReader())
|
||||
require.NoError(c.t, err, clues.ToCore(err))
|
||||
|
||||
res <- &itemWrapper{
|
||||
Item: item,
|
||||
reader: r,
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return res
|
||||
}
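A sketch of how a test might use this wrapper so existing assertions keep seeing raw item bytes (col and expected are placeholders for the collection under test and its expected contents):

rc := dataMock.NewUnversionedRestoreCollection(t, col)

for item := range rc.Items(ctx, fault.New(true)) {
	// Version headers are already stripped from these readers.
	body, err := io.ReadAll(item.ToReader())
	require.NoError(t, err, clues.ToCore(err))

	assert.Equal(t, expected[item.ID()], body)
}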
|
||||
func NewVersionedBackupCollection(
|
||||
t *testing.T,
|
||||
col data.BackupCollection,
|
||||
) *versionedBackupCollection {
|
||||
return &versionedBackupCollection{
|
||||
BackupCollection: col,
|
||||
t: t,
|
||||
}
|
||||
}
|
||||
|
||||
// versionedBackupCollection injects basic version information on all items.
|
||||
//
|
||||
// Wrap data.BackupCollections in this type if you don't need to explicitly set
|
||||
// the version format header during tests, aren't trying to check reader error
// cases, and aren't populating backup details.
|
||||
type versionedBackupCollection struct {
|
||||
data.BackupCollection
|
||||
t *testing.T
|
||||
}
|
||||
|
||||
func (c *versionedBackupCollection) Items(
|
||||
ctx context.Context,
|
||||
errs *fault.Bus,
|
||||
) <-chan data.Item {
|
||||
res := make(chan data.Item)
|
||||
go func() {
|
||||
defer close(res)
|
||||
|
||||
for item := range c.BackupCollection.Items(ctx, errs) {
|
||||
r, err := readers.NewVersionedBackupReader(
|
||||
readers.SerializationFormat{
|
||||
Version: readers.DefaultSerializationVersion,
|
||||
},
|
||||
item.ToReader())
|
||||
require.NoError(c.t, err, clues.ToCore(err))
|
||||
|
||||
res <- &itemWrapper{
|
||||
Item: item,
|
||||
reader: r,
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return res
|
||||
}
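Mirroring how the kopia integration tests below use it, collections that still produce raw item bytes can be wrapped before being handed to the backup consumer (dc1 and dc2 stand for any data.BackupCollection):

cols := []data.BackupCollection{
	dataMock.NewVersionedBackupCollection(t, dc1),
	dataMock.NewVersionedBackupCollection(t, dc2),
}

// cols can then be passed anywhere a []data.BackupCollection is expected,
// e.g. Wrapper.ConsumeBackupCollections in the tests below.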
|
||||
@ -580,6 +580,10 @@ func (w *conn) SnapshotRoot(man *snapshot.Manifest) (fs.Entry, error) {
|
||||
}
|
||||
|
||||
func (w *conn) UpdatePassword(ctx context.Context, password string, opts repository.Options) error {
|
||||
if len(password) == 0 {
|
||||
return clues.New("empty password provided")
|
||||
}
|
||||
|
||||
kopiaRef := NewConn(w.storage)
|
||||
if err := kopiaRef.Connect(ctx, opts); err != nil {
|
||||
return clues.Wrap(err, "connecting kopia client")
|
||||
@ -587,8 +591,10 @@ func (w *conn) UpdatePassword(ctx context.Context, password string, opts reposit
|
||||
|
||||
defer kopiaRef.Close(ctx)
|
||||
|
||||
repository := kopiaRef.Repository.(repo.DirectRepository)
|
||||
err := repository.FormatManager().ChangePassword(ctx, password)
|
||||
kopiaRepo := kopiaRef.Repository.(repo.DirectRepository)
|
||||
if err := kopiaRepo.FormatManager().ChangePassword(ctx, password); err != nil {
|
||||
return clues.Wrap(err, "unable to update password")
|
||||
}
|
||||
|
||||
return errors.Wrap(err, "unable to update password")
|
||||
return nil
|
||||
}
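An illustrative call site for the new method, assuming k is a *conn that has already been initialized (newPassword is a placeholder):

if err := k.UpdatePassword(ctx, newPassword, repository.Options{}); err != nil {
	return clues.Wrap(err, "updating repository password")
}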
|
||||
@ -22,6 +22,20 @@ import (
|
||||
storeTD "github.com/alcionai/corso/src/pkg/storage/testdata"
|
||||
)
|
||||
|
||||
func openLocalKopiaRepo(
|
||||
t tester.TestT,
|
||||
ctx context.Context, //revive:disable-line:context-as-argument
|
||||
) (*conn, error) {
|
||||
st := storeTD.NewFilesystemStorage(t)
|
||||
|
||||
k := NewConn(st)
|
||||
if err := k.Initialize(ctx, repository.Options{}, repository.Retention{}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return k, nil
|
||||
}
|
||||
|
||||
func openKopiaRepo(
|
||||
t tester.TestT,
|
||||
ctx context.Context, //revive:disable-line:context-as-argument
|
||||
@ -81,7 +95,7 @@ func (suite *WrapperIntegrationSuite) TestRepoExistsError() {
|
||||
ctx, flush := tester.NewContext(t)
|
||||
defer flush()
|
||||
|
||||
st := storeTD.NewPrefixedS3Storage(t)
|
||||
st := storeTD.NewFilesystemStorage(t)
|
||||
k := NewConn(st)
|
||||
|
||||
err := k.Initialize(ctx, repository.Options{}, repository.Retention{})
|
||||
@ -101,7 +115,7 @@ func (suite *WrapperIntegrationSuite) TestBadProviderErrors() {
|
||||
ctx, flush := tester.NewContext(t)
|
||||
defer flush()
|
||||
|
||||
st := storeTD.NewPrefixedS3Storage(t)
|
||||
st := storeTD.NewFilesystemStorage(t)
|
||||
st.Provider = storage.ProviderUnknown
|
||||
k := NewConn(st)
|
||||
|
||||
@ -115,7 +129,7 @@ func (suite *WrapperIntegrationSuite) TestConnectWithoutInitErrors() {
|
||||
ctx, flush := tester.NewContext(t)
|
||||
defer flush()
|
||||
|
||||
st := storeTD.NewPrefixedS3Storage(t)
|
||||
st := storeTD.NewFilesystemStorage(t)
|
||||
k := NewConn(st)
|
||||
|
||||
err := k.Connect(ctx, repository.Options{})
|
||||
@ -408,7 +422,7 @@ func (suite *WrapperIntegrationSuite) TestSetUserAndHost() {
|
||||
Host: "bar",
|
||||
}
|
||||
|
||||
st := storeTD.NewPrefixedS3Storage(t)
|
||||
st := storeTD.NewFilesystemStorage(t)
|
||||
k := NewConn(st)
|
||||
|
||||
err := k.Initialize(ctx, opts, repository.Retention{})
|
||||
|
||||
@ -7,6 +7,7 @@ import (
|
||||
"github.com/alcionai/clues"
|
||||
"github.com/kopia/kopia/fs"
|
||||
|
||||
"github.com/alcionai/corso/src/internal/common/readers"
|
||||
"github.com/alcionai/corso/src/internal/data"
|
||||
"github.com/alcionai/corso/src/pkg/fault"
|
||||
"github.com/alcionai/corso/src/pkg/logger"
|
||||
@ -16,6 +17,7 @@ import (
|
||||
var (
|
||||
_ data.RestoreCollection = &kopiaDataCollection{}
|
||||
_ data.Item = &kopiaDataStream{}
|
||||
_ data.ItemSize = &kopiaDataStream{}
|
||||
)
|
||||
|
||||
type kopiaDataCollection struct {
|
||||
@ -23,7 +25,7 @@ type kopiaDataCollection struct {
|
||||
dir fs.Directory
|
||||
items []string
|
||||
counter ByteCounter
|
||||
expectedVersion uint32
|
||||
expectedVersion readers.SerializationVersion
|
||||
}
|
||||
|
||||
func (kdc *kopiaDataCollection) Items(
|
||||
@ -102,7 +104,7 @@ func (kdc kopiaDataCollection) FetchItemByName(
|
||||
return nil, clues.New("object is not a file").WithClues(ctx)
|
||||
}
|
||||
|
||||
size := f.Size() - int64(versionSize)
|
||||
size := f.Size() - int64(readers.VersionFormatSize)
|
||||
if size < 0 {
|
||||
logger.Ctx(ctx).Infow("negative file size; resetting to 0", "file_size", size)
|
||||
|
||||
@ -118,13 +120,32 @@ func (kdc kopiaDataCollection) FetchItemByName(
|
||||
return nil, clues.Wrap(err, "opening file").WithClues(ctx)
|
||||
}
|
||||
|
||||
// TODO(ashmrtn): Remove this when individual services implement checks for
|
||||
// version and deleted items.
|
||||
rr, err := readers.NewVersionedRestoreReader(r)
|
||||
if err != nil {
|
||||
return nil, clues.Stack(err).WithClues(ctx)
|
||||
}
|
||||
|
||||
if rr.Format().Version != kdc.expectedVersion {
|
||||
return nil, clues.New("unexpected data format").
|
||||
WithClues(ctx).
|
||||
With(
|
||||
"read_version", rr.Format().Version,
|
||||
"expected_version", kdc.expectedVersion)
|
||||
}
|
||||
|
||||
// This is a conservative check, but we shouldn't be seeing items that were
|
||||
// deleted in flight during restores because there's no way to select them.
|
||||
if rr.Format().DelInFlight {
|
||||
return nil, clues.New("selected item marked as deleted in flight").
|
||||
WithClues(ctx)
|
||||
}
|
||||
|
||||
return &kopiaDataStream{
|
||||
id: name,
|
||||
reader: &restoreStreamReader{
|
||||
ReadCloser: r,
|
||||
expectedVersion: kdc.expectedVersion,
|
||||
},
|
||||
size: size,
|
||||
id: name,
|
||||
reader: rr,
|
||||
size: size,
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
||||
@ -13,6 +13,7 @@ import (
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/stretchr/testify/suite"
|
||||
|
||||
"github.com/alcionai/corso/src/internal/common/readers"
|
||||
"github.com/alcionai/corso/src/internal/data"
|
||||
dataMock "github.com/alcionai/corso/src/internal/data/mock"
|
||||
"github.com/alcionai/corso/src/internal/tester"
|
||||
@ -121,25 +122,35 @@ func (suite *KopiaDataCollectionUnitSuite) TestReturnsStreams() {
|
||||
)
|
||||
|
||||
// Needs to be a function so the readers get refreshed each time.
|
||||
getLayout := func() fs.Directory {
|
||||
getLayout := func(t *testing.T) fs.Directory {
|
||||
format := readers.SerializationFormat{
|
||||
Version: readers.DefaultSerializationVersion,
|
||||
}
|
||||
|
||||
r1, err := readers.NewVersionedBackupReader(
|
||||
format,
|
||||
io.NopCloser(bytes.NewReader(files[0].data)))
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
r2, err := readers.NewVersionedBackupReader(
|
||||
format,
|
||||
io.NopCloser(bytes.NewReader(files[1].data)))
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
return virtualfs.NewStaticDirectory(encodeAsPath("foo"), []fs.Entry{
|
||||
&mockFile{
|
||||
StreamingFile: virtualfs.StreamingFileFromReader(
|
||||
encodeAsPath(files[0].uuid),
|
||||
nil),
|
||||
r: newBackupStreamReader(
|
||||
serializationVersion,
|
||||
io.NopCloser(bytes.NewReader(files[0].data))),
|
||||
size: int64(len(files[0].data) + versionSize),
|
||||
r: r1,
|
||||
size: int64(len(files[0].data) + readers.VersionFormatSize),
|
||||
},
|
||||
&mockFile{
|
||||
StreamingFile: virtualfs.StreamingFileFromReader(
|
||||
encodeAsPath(files[1].uuid),
|
||||
nil),
|
||||
r: newBackupStreamReader(
|
||||
serializationVersion,
|
||||
io.NopCloser(bytes.NewReader(files[1].data))),
|
||||
size: int64(len(files[1].data) + versionSize),
|
||||
r: r2,
|
||||
size: int64(len(files[1].data) + readers.VersionFormatSize),
|
||||
},
|
||||
&mockFile{
|
||||
StreamingFile: virtualfs.StreamingFileFromReader(
|
||||
@ -224,10 +235,10 @@ func (suite *KopiaDataCollectionUnitSuite) TestReturnsStreams() {
|
||||
}
|
||||
|
||||
c := kopiaDataCollection{
|
||||
dir: getLayout(),
|
||||
dir: getLayout(t),
|
||||
path: nil,
|
||||
items: items,
|
||||
expectedVersion: serializationVersion,
|
||||
expectedVersion: readers.DefaultSerializationVersion,
|
||||
}
|
||||
|
||||
var (
|
||||
@ -291,23 +302,34 @@ func (suite *KopiaDataCollectionUnitSuite) TestFetchItemByName() {
|
||||
|
||||
// Needs to be a function so we can switch the serialization version as
|
||||
// needed.
|
||||
getLayout := func(serVersion uint32) fs.Directory {
|
||||
getLayout := func(
|
||||
t *testing.T,
|
||||
serVersion readers.SerializationVersion,
|
||||
) fs.Directory {
|
||||
format := readers.SerializationFormat{Version: serVersion}
|
||||
|
||||
r1, err := readers.NewVersionedBackupReader(
|
||||
format,
|
||||
io.NopCloser(bytes.NewReader([]byte(noErrFileData))))
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
r2, err := readers.NewVersionedBackupReader(
|
||||
format,
|
||||
errReader.ToReader())
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
return virtualfs.NewStaticDirectory(encodeAsPath(folder2), []fs.Entry{
|
||||
&mockFile{
|
||||
StreamingFile: virtualfs.StreamingFileFromReader(
|
||||
encodeAsPath(noErrFileName),
|
||||
nil),
|
||||
r: newBackupStreamReader(
|
||||
serVersion,
|
||||
io.NopCloser(bytes.NewReader([]byte(noErrFileData)))),
|
||||
r: r1,
|
||||
},
|
||||
&mockFile{
|
||||
StreamingFile: virtualfs.StreamingFileFromReader(
|
||||
encodeAsPath(errFileName),
|
||||
nil),
|
||||
r: newBackupStreamReader(
|
||||
serVersion,
|
||||
errReader.ToReader()),
|
||||
r: r2,
|
||||
},
|
||||
&mockFile{
|
||||
StreamingFile: virtualfs.StreamingFileFromReader(
|
||||
@ -330,7 +352,7 @@ func (suite *KopiaDataCollectionUnitSuite) TestFetchItemByName() {
|
||||
table := []struct {
|
||||
name string
|
||||
inputName string
|
||||
inputSerializationVersion uint32
|
||||
inputSerializationVersion readers.SerializationVersion
|
||||
expectedData []byte
|
||||
lookupErr assert.ErrorAssertionFunc
|
||||
readErr assert.ErrorAssertionFunc
|
||||
@ -339,7 +361,7 @@ func (suite *KopiaDataCollectionUnitSuite) TestFetchItemByName() {
|
||||
{
|
||||
name: "FileFound_NoError",
|
||||
inputName: noErrFileName,
|
||||
inputSerializationVersion: serializationVersion,
|
||||
inputSerializationVersion: readers.DefaultSerializationVersion,
|
||||
expectedData: []byte(noErrFileData),
|
||||
lookupErr: assert.NoError,
|
||||
readErr: assert.NoError,
|
||||
@ -347,21 +369,20 @@ func (suite *KopiaDataCollectionUnitSuite) TestFetchItemByName() {
|
||||
{
|
||||
name: "FileFound_ReadError",
|
||||
inputName: errFileName,
|
||||
inputSerializationVersion: serializationVersion,
|
||||
inputSerializationVersion: readers.DefaultSerializationVersion,
|
||||
lookupErr: assert.NoError,
|
||||
readErr: assert.Error,
|
||||
},
|
||||
{
|
||||
name: "FileFound_VersionError",
|
||||
inputName: noErrFileName,
|
||||
inputSerializationVersion: serializationVersion + 1,
|
||||
lookupErr: assert.NoError,
|
||||
readErr: assert.Error,
|
||||
inputSerializationVersion: readers.DefaultSerializationVersion + 1,
|
||||
lookupErr: assert.Error,
|
||||
},
|
||||
{
|
||||
name: "FileNotFound",
|
||||
inputName: "foo",
|
||||
inputSerializationVersion: serializationVersion + 1,
|
||||
inputSerializationVersion: readers.DefaultSerializationVersion + 1,
|
||||
lookupErr: assert.Error,
|
||||
notFoundErr: true,
|
||||
},
|
||||
@ -373,14 +394,14 @@ func (suite *KopiaDataCollectionUnitSuite) TestFetchItemByName() {
|
||||
ctx, flush := tester.NewContext(t)
|
||||
defer flush()
|
||||
|
||||
root := getLayout(test.inputSerializationVersion)
|
||||
root := getLayout(t, test.inputSerializationVersion)
|
||||
c := &i64counter{}
|
||||
|
||||
col := &kopiaDataCollection{
|
||||
path: pth,
|
||||
dir: root,
|
||||
counter: c,
|
||||
expectedVersion: serializationVersion,
|
||||
expectedVersion: readers.DefaultSerializationVersion,
|
||||
}
|
||||
|
||||
s, err := col.FetchItemByName(ctx, test.inputName)
|
||||
|
||||
@ -12,6 +12,7 @@ import (
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/stretchr/testify/suite"
|
||||
|
||||
"github.com/alcionai/corso/src/internal/common/readers"
|
||||
"github.com/alcionai/corso/src/internal/data"
|
||||
"github.com/alcionai/corso/src/internal/m365/service/exchange/mock"
|
||||
"github.com/alcionai/corso/src/internal/tester"
|
||||
@ -150,20 +151,27 @@ func (suite *MergeCollectionUnitSuite) TestFetchItemByName() {
|
||||
require.NoError(suite.T(), err, clues.ToCore(err))
|
||||
|
||||
// Needs to be a function so the readers get refreshed each time.
|
||||
layouts := []func() fs.Directory{
|
||||
layouts := []func(t *testing.T) fs.Directory{
|
||||
// Has the following;
|
||||
// - file1: data[0]
|
||||
// - errOpen: (error opening file)
|
||||
func() fs.Directory {
|
||||
func(t *testing.T) fs.Directory {
|
||||
format := readers.SerializationFormat{
|
||||
Version: readers.DefaultSerializationVersion,
|
||||
}
|
||||
|
||||
r1, err := readers.NewVersionedBackupReader(
|
||||
format,
|
||||
io.NopCloser(bytes.NewReader(fileData1)))
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
return virtualfs.NewStaticDirectory(encodeAsPath(colPaths[0]), []fs.Entry{
|
||||
&mockFile{
|
||||
StreamingFile: virtualfs.StreamingFileFromReader(
|
||||
encodeAsPath(fileName1),
|
||||
nil),
|
||||
r: newBackupStreamReader(
|
||||
serializationVersion,
|
||||
io.NopCloser(bytes.NewReader(fileData1))),
|
||||
size: int64(len(fileData1) + versionSize),
|
||||
r: r1,
|
||||
size: int64(len(fileData1) + readers.VersionFormatSize),
|
||||
},
|
||||
&mockFile{
|
||||
StreamingFile: virtualfs.StreamingFileFromReader(
|
||||
@ -178,34 +186,47 @@ func (suite *MergeCollectionUnitSuite) TestFetchItemByName() {
|
||||
// - file1: data[1]
|
||||
// - file2: data[0]
|
||||
// - errOpen: data[2]
|
||||
func() fs.Directory {
|
||||
func(t *testing.T) fs.Directory {
|
||||
format := readers.SerializationFormat{
|
||||
Version: readers.DefaultSerializationVersion,
|
||||
}
|
||||
|
||||
r1, err := readers.NewVersionedBackupReader(
|
||||
format,
|
||||
io.NopCloser(bytes.NewReader(fileData2)))
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
r2, err := readers.NewVersionedBackupReader(
|
||||
format,
|
||||
io.NopCloser(bytes.NewReader(fileData1)))
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
r3, err := readers.NewVersionedBackupReader(
|
||||
format,
|
||||
io.NopCloser(bytes.NewReader(fileData3)))
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
return virtualfs.NewStaticDirectory(encodeAsPath(colPaths[1]), []fs.Entry{
|
||||
&mockFile{
|
||||
StreamingFile: virtualfs.StreamingFileFromReader(
|
||||
encodeAsPath(fileName1),
|
||||
nil),
|
||||
r: newBackupStreamReader(
|
||||
serializationVersion,
|
||||
io.NopCloser(bytes.NewReader(fileData2))),
|
||||
size: int64(len(fileData2) + versionSize),
|
||||
r: r1,
|
||||
size: int64(len(fileData2) + readers.VersionFormatSize),
|
||||
},
|
||||
&mockFile{
|
||||
StreamingFile: virtualfs.StreamingFileFromReader(
|
||||
encodeAsPath(fileName2),
|
||||
nil),
|
||||
r: newBackupStreamReader(
|
||||
serializationVersion,
|
||||
io.NopCloser(bytes.NewReader(fileData1))),
|
||||
size: int64(len(fileData1) + versionSize),
|
||||
r: r2,
|
||||
size: int64(len(fileData1) + readers.VersionFormatSize),
|
||||
},
|
||||
&mockFile{
|
||||
StreamingFile: virtualfs.StreamingFileFromReader(
|
||||
encodeAsPath(fileOpenErrName),
|
||||
nil),
|
||||
r: newBackupStreamReader(
|
||||
serializationVersion,
|
||||
io.NopCloser(bytes.NewReader(fileData3))),
|
||||
size: int64(len(fileData3) + versionSize),
|
||||
r: r3,
|
||||
size: int64(len(fileData3) + readers.VersionFormatSize),
|
||||
},
|
||||
})
|
||||
},
|
||||
@ -257,9 +278,9 @@ func (suite *MergeCollectionUnitSuite) TestFetchItemByName() {
|
||||
for i, layout := range layouts {
|
||||
col := &kopiaDataCollection{
|
||||
path: pth,
|
||||
dir: layout(),
|
||||
dir: layout(t),
|
||||
counter: c,
|
||||
expectedVersion: serializationVersion,
|
||||
expectedVersion: readers.DefaultSerializationVersion,
|
||||
}
|
||||
|
||||
err := dc.addCollection(colPaths[i], col)
|
||||
|
||||
@ -29,7 +29,7 @@ type fooModel struct {
|
||||
|
||||
//revive:disable-next-line:context-as-argument
|
||||
func getModelStore(t *testing.T, ctx context.Context) *ModelStore {
|
||||
c, err := openKopiaRepo(t, ctx)
|
||||
c, err := openLocalKopiaRepo(t, ctx)
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
return &ModelStore{c: c, modelVersion: globalModelVersion}
|
||||
@ -856,7 +856,7 @@ func openConnAndModelStore(
|
||||
t *testing.T,
|
||||
ctx context.Context, //revive:disable-line:context-as-argument
|
||||
) (*conn, *ModelStore) {
|
||||
st := storeTD.NewPrefixedS3Storage(t)
|
||||
st := storeTD.NewFilesystemStorage(t)
|
||||
c := NewConn(st)
|
||||
|
||||
err := c.Initialize(ctx, repository.Options{}, repository.Retention{})
|
||||
|
||||
@ -1,19 +1,14 @@
|
||||
package kopia
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"io"
|
||||
"os"
|
||||
"runtime/trace"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
"github.com/alcionai/clues"
|
||||
"github.com/kopia/kopia/fs"
|
||||
@ -37,101 +32,6 @@ import (
|
||||
|
||||
const maxInflateTraversalDepth = 500
|
||||
|
||||
var versionSize = int(unsafe.Sizeof(serializationVersion))
|
||||
|
||||
func newBackupStreamReader(version uint32, reader io.ReadCloser) *backupStreamReader {
|
||||
buf := make([]byte, versionSize)
|
||||
binary.BigEndian.PutUint32(buf, version)
|
||||
bufReader := io.NopCloser(bytes.NewReader(buf))
|
||||
|
||||
return &backupStreamReader{
|
||||
readers: []io.ReadCloser{bufReader, reader},
|
||||
combined: io.NopCloser(io.MultiReader(bufReader, reader)),
|
||||
}
|
||||
}
|
||||
|
||||
// backupStreamReader is a wrapper around the io.Reader that other Corso
|
||||
// components return when backing up information. It injects a version number at
|
||||
// the start of the data stream. Future versions of Corso may not need this if
|
||||
// they use more complex serialization logic as serialization/version injection
|
||||
// will be handled by other components.
|
||||
type backupStreamReader struct {
|
||||
readers []io.ReadCloser
|
||||
combined io.ReadCloser
|
||||
}
|
||||
|
||||
func (rw *backupStreamReader) Read(p []byte) (n int, err error) {
|
||||
if rw.combined == nil {
|
||||
return 0, os.ErrClosed
|
||||
}
|
||||
|
||||
return rw.combined.Read(p)
|
||||
}
|
||||
|
||||
func (rw *backupStreamReader) Close() error {
|
||||
if rw.combined == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
rw.combined = nil
|
||||
|
||||
var errs *clues.Err
|
||||
|
||||
for _, r := range rw.readers {
|
||||
err := r.Close()
|
||||
if err != nil {
|
||||
errs = clues.Stack(clues.Wrap(err, "closing reader"), errs)
|
||||
}
|
||||
}
|
||||
|
||||
return errs.OrNil()
|
||||
}
|
||||
|
||||
// restoreStreamReader is a wrapper around the io.Reader that kopia returns when
|
||||
// reading data from an item. It examines and strips off the version number of
|
||||
// the restored data. Future versions of Corso may not need this if they use
|
||||
// more complex serialization logic as version checking/deserialization will be
|
||||
// handled by other components. A reader that returns a version error is no
|
||||
// longer valid and should not be used once the version error is returned.
|
||||
type restoreStreamReader struct {
|
||||
io.ReadCloser
|
||||
expectedVersion uint32
|
||||
readVersion bool
|
||||
}
|
||||
|
||||
func (rw *restoreStreamReader) checkVersion() error {
|
||||
versionBuf := make([]byte, versionSize)
|
||||
|
||||
for newlyRead := 0; newlyRead < versionSize; {
|
||||
n, err := rw.ReadCloser.Read(versionBuf[newlyRead:])
|
||||
if err != nil {
|
||||
return clues.Wrap(err, "reading data format version")
|
||||
}
|
||||
|
||||
newlyRead += n
|
||||
}
|
||||
|
||||
version := binary.BigEndian.Uint32(versionBuf)
|
||||
|
||||
if version != rw.expectedVersion {
|
||||
return clues.New("unexpected data format").With("read_version", version)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rw *restoreStreamReader) Read(p []byte) (n int, err error) {
|
||||
if !rw.readVersion {
|
||||
rw.readVersion = true
|
||||
|
||||
if err := rw.checkVersion(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
|
||||
return rw.ReadCloser.Read(p)
|
||||
}
|
||||
|
||||
type itemDetails struct {
|
||||
infoer data.ItemInfo
|
||||
repoPath path.Path
|
||||
@ -436,7 +336,7 @@ func collectionEntries(
|
||||
entry := virtualfs.StreamingFileWithModTimeFromReader(
|
||||
encodedName,
|
||||
modTime,
|
||||
newBackupStreamReader(serializationVersion, e.ToReader()))
|
||||
e.ToReader())
|
||||
|
||||
err = ctr(ctx, entry)
|
||||
if err != nil {
|
||||
|
||||
@ -14,7 +14,6 @@ import (
|
||||
"github.com/kopia/kopia/repo/manifest"
|
||||
"github.com/kopia/kopia/snapshot"
|
||||
"github.com/kopia/kopia/snapshot/snapshotfs"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/stretchr/testify/suite"
|
||||
@ -124,12 +123,6 @@ func expectFileData(
|
||||
return
|
||||
}
|
||||
|
||||
// Need to wrap with a restore stream reader to remove the version.
|
||||
r = &restoreStreamReader{
|
||||
ReadCloser: io.NopCloser(r),
|
||||
expectedVersion: serializationVersion,
|
||||
}
|
||||
|
||||
got, err := io.ReadAll(r)
|
||||
if !assert.NoError(t, err, "reading data in file", name, clues.ToCore(err)) {
|
||||
return
|
||||
@ -226,135 +219,6 @@ func getDirEntriesForEntry(
|
||||
// ---------------
|
||||
// unit tests
|
||||
// ---------------
|
||||
type limitedRangeReader struct {
|
||||
readLen int
|
||||
io.ReadCloser
|
||||
}
|
||||
|
||||
func (lrr *limitedRangeReader) Read(p []byte) (int, error) {
|
||||
if len(p) == 0 {
|
||||
// Not well specified behavior, defer to underlying reader.
|
||||
return lrr.ReadCloser.Read(p)
|
||||
}
|
||||
|
||||
toRead := lrr.readLen
|
||||
if len(p) < toRead {
|
||||
toRead = len(p)
|
||||
}
|
||||
|
||||
return lrr.ReadCloser.Read(p[:toRead])
|
||||
}
|
||||
|
||||
type VersionReadersUnitSuite struct {
|
||||
tester.Suite
|
||||
}
|
||||
|
||||
func TestVersionReadersUnitSuite(t *testing.T) {
|
||||
suite.Run(t, &VersionReadersUnitSuite{Suite: tester.NewUnitSuite(t)})
|
||||
}
|
||||
|
||||
func (suite *VersionReadersUnitSuite) TestWriteAndRead() {
|
||||
inputData := []byte("This is some data for the reader to test with")
|
||||
table := []struct {
|
||||
name string
|
||||
readVersion uint32
|
||||
writeVersion uint32
|
||||
check assert.ErrorAssertionFunc
|
||||
}{
|
||||
{
|
||||
name: "SameVersionSucceeds",
|
||||
readVersion: 42,
|
||||
writeVersion: 42,
|
||||
check: assert.NoError,
|
||||
},
|
||||
{
|
||||
name: "DifferentVersionsFail",
|
||||
readVersion: 7,
|
||||
writeVersion: 42,
|
||||
check: assert.Error,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range table {
|
||||
suite.Run(test.name, func() {
|
||||
t := suite.T()
|
||||
|
||||
baseReader := bytes.NewReader(inputData)
|
||||
|
||||
reversible := &restoreStreamReader{
|
||||
expectedVersion: test.readVersion,
|
||||
ReadCloser: newBackupStreamReader(
|
||||
test.writeVersion,
|
||||
io.NopCloser(baseReader)),
|
||||
}
|
||||
|
||||
defer reversible.Close()
|
||||
|
||||
allData, err := io.ReadAll(reversible)
|
||||
test.check(t, err, clues.ToCore(err))
|
||||
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
assert.Equal(t, inputData, allData)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func readAllInParts(
|
||||
t *testing.T,
|
||||
partLen int,
|
||||
reader io.ReadCloser,
|
||||
) ([]byte, int) {
|
||||
res := []byte{}
|
||||
read := 0
|
||||
tmp := make([]byte, partLen)
|
||||
|
||||
for {
|
||||
n, err := reader.Read(tmp)
|
||||
if errors.Is(err, io.EOF) {
|
||||
break
|
||||
}
|
||||
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
read += n
|
||||
res = append(res, tmp[:n]...)
|
||||
}
|
||||
|
||||
return res, read
|
||||
}
|
||||
|
||||
func (suite *VersionReadersUnitSuite) TestWriteHandlesShortReads() {
|
||||
t := suite.T()
|
||||
inputData := []byte("This is some data for the reader to test with")
|
||||
version := uint32(42)
|
||||
baseReader := bytes.NewReader(inputData)
|
||||
versioner := newBackupStreamReader(version, io.NopCloser(baseReader))
|
||||
expectedToWrite := len(inputData) + int(versionSize)
|
||||
|
||||
// "Write" all the data.
|
||||
versionedData, writtenLen := readAllInParts(t, 1, versioner)
|
||||
assert.Equal(t, expectedToWrite, writtenLen)
|
||||
|
||||
// Read all of the data back.
|
||||
baseReader = bytes.NewReader(versionedData)
|
||||
reader := &restoreStreamReader{
|
||||
expectedVersion: version,
|
||||
// Be adversarial and only allow reads of length 1 from the byte reader.
|
||||
ReadCloser: &limitedRangeReader{
|
||||
readLen: 1,
|
||||
ReadCloser: io.NopCloser(baseReader),
|
||||
},
|
||||
}
|
||||
readData, readLen := readAllInParts(t, 1, reader)
|
||||
// This reports the bytes read and returned to the user, excluding the version
|
||||
// that is stripped off at the start.
|
||||
assert.Equal(t, len(inputData), readLen)
|
||||
assert.Equal(t, inputData, readData)
|
||||
}
|
||||
|
||||
type CorsoProgressUnitSuite struct {
|
||||
tester.Suite
|
||||
targetFilePath path.Path
|
||||
@ -2420,9 +2284,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSelectsCorrectSubt
|
||||
encodeElements(inboxFileName1)[0],
|
||||
time.Time{},
|
||||
// Wrap with a backup reader so it gets the version injected.
|
||||
newBackupStreamReader(
|
||||
serializationVersion,
|
||||
io.NopCloser(bytes.NewReader(inboxFileData1v2)))),
|
||||
io.NopCloser(bytes.NewReader(inboxFileData1v2))),
|
||||
}),
|
||||
}),
|
||||
virtualfs.NewStaticDirectory(
|
||||
@ -2582,9 +2444,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSelectsMigrateSubt
|
||||
virtualfs.StreamingFileWithModTimeFromReader(
|
||||
encodeElements(inboxFileName1)[0],
|
||||
time.Time{},
|
||||
newBackupStreamReader(
|
||||
serializationVersion,
|
||||
io.NopCloser(bytes.NewReader(inboxFileData1)))),
|
||||
io.NopCloser(bytes.NewReader(inboxFileData1))),
|
||||
}),
|
||||
}),
|
||||
virtualfs.NewStaticDirectory(
|
||||
@ -2596,9 +2456,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSelectsMigrateSubt
|
||||
virtualfs.StreamingFileWithModTimeFromReader(
|
||||
encodeElements(contactsFileName1)[0],
|
||||
time.Time{},
|
||||
newBackupStreamReader(
|
||||
serializationVersion,
|
||||
io.NopCloser(bytes.NewReader(contactsFileData1)))),
|
||||
io.NopCloser(bytes.NewReader(contactsFileData1))),
|
||||
}),
|
||||
}),
|
||||
})
|
||||
@ -2817,15 +2675,11 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_SelectiveSubtreeP
|
||||
virtualfs.StreamingFileWithModTimeFromReader(
|
||||
encodeElements(fileName5)[0],
|
||||
time.Time{},
|
||||
newBackupStreamReader(
|
||||
serializationVersion,
|
||||
io.NopCloser(bytes.NewReader(fileData5)))),
|
||||
io.NopCloser(bytes.NewReader(fileData5))),
|
||||
virtualfs.StreamingFileWithModTimeFromReader(
|
||||
encodeElements(fileName6)[0],
|
||||
time.Time{},
|
||||
newBackupStreamReader(
|
||||
serializationVersion,
|
||||
io.NopCloser(bytes.NewReader(fileData6)))),
|
||||
io.NopCloser(bytes.NewReader(fileData6))),
|
||||
})
|
||||
counters[folderID3] = count
|
||||
|
||||
@ -2835,15 +2689,11 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_SelectiveSubtreeP
|
||||
virtualfs.StreamingFileWithModTimeFromReader(
|
||||
encodeElements(fileName3)[0],
|
||||
time.Time{},
|
||||
newBackupStreamReader(
|
||||
serializationVersion,
|
||||
io.NopCloser(bytes.NewReader(fileData3)))),
|
||||
io.NopCloser(bytes.NewReader(fileData3))),
|
||||
virtualfs.StreamingFileWithModTimeFromReader(
|
||||
encodeElements(fileName4)[0],
|
||||
time.Time{},
|
||||
newBackupStreamReader(
|
||||
serializationVersion,
|
||||
io.NopCloser(bytes.NewReader(fileData4)))),
|
||||
io.NopCloser(bytes.NewReader(fileData4))),
|
||||
folder,
|
||||
})
|
||||
counters[folderID2] = count
|
||||
@ -2859,15 +2709,11 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_SelectiveSubtreeP
|
||||
virtualfs.StreamingFileWithModTimeFromReader(
|
||||
encodeElements(fileName1)[0],
|
||||
time.Time{},
|
||||
newBackupStreamReader(
|
||||
serializationVersion,
|
||||
io.NopCloser(bytes.NewReader(fileData1)))),
|
||||
io.NopCloser(bytes.NewReader(fileData1))),
|
||||
virtualfs.StreamingFileWithModTimeFromReader(
|
||||
encodeElements(fileName2)[0],
|
||||
time.Time{},
|
||||
newBackupStreamReader(
|
||||
serializationVersion,
|
||||
io.NopCloser(bytes.NewReader(fileData2)))),
|
||||
io.NopCloser(bytes.NewReader(fileData2))),
|
||||
folder,
|
||||
folder4,
|
||||
})
|
||||
@ -2879,15 +2725,11 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_SelectiveSubtreeP
|
||||
virtualfs.StreamingFileWithModTimeFromReader(
|
||||
encodeElements(fileName7)[0],
|
||||
time.Time{},
|
||||
newBackupStreamReader(
|
||||
serializationVersion,
|
||||
io.NopCloser(bytes.NewReader(fileData7)))),
|
||||
io.NopCloser(bytes.NewReader(fileData7))),
|
||||
virtualfs.StreamingFileWithModTimeFromReader(
|
||||
encodeElements(fileName8)[0],
|
||||
time.Time{},
|
||||
newBackupStreamReader(
|
||||
serializationVersion,
|
||||
io.NopCloser(bytes.NewReader(fileData8)))),
|
||||
io.NopCloser(bytes.NewReader(fileData8))),
|
||||
})
|
||||
counters[folderID5] = count
|
||||
|
||||
|
||||
@ -18,6 +18,7 @@ import (
|
||||
"golang.org/x/exp/maps"
|
||||
|
||||
"github.com/alcionai/corso/src/internal/common/prefixmatcher"
|
||||
"github.com/alcionai/corso/src/internal/common/readers"
|
||||
"github.com/alcionai/corso/src/internal/data"
|
||||
"github.com/alcionai/corso/src/internal/diagnostics"
|
||||
"github.com/alcionai/corso/src/internal/observe"
|
||||
@ -36,8 +37,6 @@ const (
|
||||
// possibly corresponding to who is making the backup.
|
||||
corsoHost = "corso-host"
|
||||
corsoUser = "corso"
|
||||
|
||||
serializationVersion uint32 = 1
|
||||
)
|
||||
|
||||
// common manifest tags
|
||||
@ -447,7 +446,7 @@ func loadDirsAndItems(
|
||||
dir: dir,
|
||||
items: dirItems.items,
|
||||
counter: bcounter,
|
||||
expectedVersion: serializationVersion,
|
||||
expectedVersion: readers.DefaultSerializationVersion,
|
||||
}
|
||||
|
||||
if err := mergeCol.addCollection(dirItems.dir.String(), dc); err != nil {
|
||||
|
||||
@ -184,7 +184,7 @@ func (suite *BasicKopiaIntegrationSuite) TestMaintenance_FirstRun_NoChanges() {
|
||||
ctx, flush := tester.NewContext(t)
|
||||
defer flush()
|
||||
|
||||
k, err := openKopiaRepo(t, ctx)
|
||||
k, err := openLocalKopiaRepo(t, ctx)
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
w := &Wrapper{k}
|
||||
@ -204,7 +204,7 @@ func (suite *BasicKopiaIntegrationSuite) TestMaintenance_WrongUser_NoForce_Fails
|
||||
ctx, flush := tester.NewContext(t)
|
||||
defer flush()
|
||||
|
||||
k, err := openKopiaRepo(t, ctx)
|
||||
k, err := openLocalKopiaRepo(t, ctx)
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
w := &Wrapper{k}
|
||||
@ -241,7 +241,7 @@ func (suite *BasicKopiaIntegrationSuite) TestMaintenance_WrongUser_Force_Succeed
|
||||
ctx, flush := tester.NewContext(t)
|
||||
defer flush()
|
||||
|
||||
k, err := openKopiaRepo(t, ctx)
|
||||
k, err := openLocalKopiaRepo(t, ctx)
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
w := &Wrapper{k}
|
||||
@ -754,7 +754,7 @@ func (suite *KopiaIntegrationSuite) SetupTest() {
|
||||
t := suite.T()
|
||||
suite.ctx, suite.flush = tester.NewContext(t)
|
||||
|
||||
c, err := openKopiaRepo(t, suite.ctx)
|
||||
c, err := openLocalKopiaRepo(t, suite.ctx)
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
suite.w = &Wrapper{c}
|
||||
@ -1245,7 +1245,7 @@ func (suite *KopiaIntegrationSuite) TestRestoreAfterCompressionChange() {
|
||||
ctx, flush := tester.NewContext(t)
|
||||
defer flush()
|
||||
|
||||
k, err := openKopiaRepo(t, ctx)
|
||||
k, err := openLocalKopiaRepo(t, ctx)
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
err = k.Compression(ctx, "s2-default")
|
||||
@ -1268,7 +1268,10 @@ func (suite *KopiaIntegrationSuite) TestRestoreAfterCompressionChange() {
|
||||
ctx,
|
||||
[]identity.Reasoner{r},
|
||||
nil,
|
||||
[]data.BackupCollection{dc1, dc2},
|
||||
[]data.BackupCollection{
|
||||
dataMock.NewVersionedBackupCollection(t, dc1),
|
||||
dataMock.NewVersionedBackupCollection(t, dc2),
|
||||
},
|
||||
nil,
|
||||
nil,
|
||||
true,
|
||||
@ -1556,7 +1559,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) SetupTest() {
|
||||
//nolint:forbidigo
|
||||
suite.ctx, _ = logger.CtxOrSeed(context.Background(), ls)
|
||||
|
||||
c, err := openKopiaRepo(t, suite.ctx)
|
||||
c, err := openLocalKopiaRepo(t, suite.ctx)
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
suite.w = &Wrapper{c}
|
||||
@ -1577,12 +1580,15 @@ func (suite *KopiaSimpleRepoIntegrationSuite) SetupTest() {
|
||||
})
|
||||
}
|
||||
|
||||
collections = append(collections, collection)
|
||||
collections = append(
|
||||
collections,
|
||||
dataMock.NewVersionedBackupCollection(t, collection))
|
||||
}
|
||||
|
||||
r := NewReason(testTenant, testUser, path.ExchangeService, path.EmailCategory)
|
||||
|
||||
stats, deets, _, err := suite.w.ConsumeBackupCollections(
|
||||
// Other tests check basic things about deets so not doing that again here.
|
||||
stats, _, _, err := suite.w.ConsumeBackupCollections(
|
||||
suite.ctx,
|
||||
[]identity.Reasoner{r},
|
||||
nil,
|
||||
@ -1597,8 +1603,6 @@ func (suite *KopiaSimpleRepoIntegrationSuite) SetupTest() {
|
||||
require.Equal(t, stats.TotalDirectoryCount, expectedDirs)
|
||||
require.Equal(t, stats.IgnoredErrorCount, 0)
|
||||
require.False(t, stats.Incomplete)
|
||||
// 6 file and 2 folder entries.
|
||||
assert.Len(t, deets.Details().Entries, expectedFiles+2)
|
||||
|
||||
suite.snapshotID = manifest.ID(stats.SnapshotID)
|
||||
}
|
||||
@ -1629,7 +1633,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() {
|
||||
excludePrefix bool
|
||||
expectedCachedItems int
|
||||
expectedUncachedItems int
|
||||
cols func() []data.BackupCollection
|
||||
cols func(t *testing.T) []data.BackupCollection
|
||||
backupIDCheck require.ValueAssertionFunc
|
||||
restoreCheck assert.ErrorAssertionFunc
|
||||
}{
|
||||
@ -1638,7 +1642,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() {
|
||||
excludeItem: true,
|
||||
expectedCachedItems: len(suite.filesByPath) - 1,
|
||||
expectedUncachedItems: 0,
|
||||
cols: func() []data.BackupCollection {
|
||||
cols: func(t *testing.T) []data.BackupCollection {
|
||||
return nil
|
||||
},
|
||||
backupIDCheck: require.NotEmpty,
|
||||
@ -1650,7 +1654,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() {
|
||||
excludePrefix: true,
|
||||
expectedCachedItems: len(suite.filesByPath) - 1,
|
||||
expectedUncachedItems: 0,
|
||||
cols: func() []data.BackupCollection {
|
||||
cols: func(t *testing.T) []data.BackupCollection {
|
||||
return nil
|
||||
},
|
||||
backupIDCheck: require.NotEmpty,
|
||||
@ -1661,7 +1665,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() {
|
||||
// No snapshot should be made since there were no changes.
|
||||
expectedCachedItems: 0,
|
||||
expectedUncachedItems: 0,
|
||||
cols: func() []data.BackupCollection {
|
||||
cols: func(t *testing.T) []data.BackupCollection {
|
||||
return nil
|
||||
},
|
||||
// Backup doesn't run.
|
||||
@ -1671,7 +1675,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() {
|
||||
name: "NoExcludeItemWithChanges",
|
||||
expectedCachedItems: len(suite.filesByPath),
|
||||
expectedUncachedItems: 1,
|
||||
cols: func() []data.BackupCollection {
|
||||
cols: func(t *testing.T) []data.BackupCollection {
|
||||
c := exchMock.NewCollection(
|
||||
suite.testPath1,
|
||||
suite.testPath1,
|
||||
@ -1679,7 +1683,9 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() {
|
||||
c.ColState = data.NotMovedState
|
||||
c.PrevPath = suite.testPath1
|
||||
|
||||
return []data.BackupCollection{c}
|
||||
return []data.BackupCollection{
|
||||
dataMock.NewVersionedBackupCollection(t, c),
|
||||
}
|
||||
},
|
||||
backupIDCheck: require.NotEmpty,
|
||||
restoreCheck: assert.NoError,
|
||||
@ -1717,7 +1723,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() {
|
||||
Manifest: man,
|
||||
Reasons: []identity.Reasoner{r},
|
||||
}),
|
||||
test.cols(),
|
||||
test.cols(t),
|
||||
excluded,
|
||||
nil,
|
||||
true,
|
||||
|
||||
@ -100,7 +100,7 @@ func (ctrl *Controller) ProduceBackupCollections(
|
||||
}
|
||||
|
||||
case path.GroupsService:
|
||||
colls, ssmb, canUsePreviousBackup, err = groups.ProduceBackupCollections(
|
||||
colls, ssmb, err = groups.ProduceBackupCollections(
|
||||
ctx,
|
||||
bpc,
|
||||
ctrl.AC,
|
||||
@ -111,6 +111,10 @@ func (ctrl *Controller) ProduceBackupCollections(
|
||||
return nil, nil, false, err
|
||||
}
|
||||
|
||||
// canUsePreviousBackup can always be returned as true for groups because we
// return a tombstone collection if the metadata read fails.
|
||||
canUsePreviousBackup = true
|
||||
|
||||
default:
|
||||
return nil, nil, false, clues.Wrap(clues.New(service.String()), "service not supported").WithClues(ctx)
|
||||
}
|
||||
|
||||
@ -11,6 +11,9 @@ import (
|
||||
"github.com/stretchr/testify/suite"
|
||||
|
||||
inMock "github.com/alcionai/corso/src/internal/common/idname/mock"
|
||||
"github.com/alcionai/corso/src/internal/common/ptr"
|
||||
"github.com/alcionai/corso/src/internal/data"
|
||||
"github.com/alcionai/corso/src/internal/data/mock"
|
||||
"github.com/alcionai/corso/src/internal/m365/service/exchange"
|
||||
odConsts "github.com/alcionai/corso/src/internal/m365/service/onedrive/consts"
|
||||
"github.com/alcionai/corso/src/internal/m365/service/sharepoint"
|
||||
@ -458,9 +461,8 @@ func (suite *SPCollectionIntgSuite) TestCreateSharePointCollection_Lists() {
|
||||
for item := range collection.Items(ctx, fault.New(true)) {
|
||||
t.Log("File: " + item.ID())
|
||||
|
||||
bs, err := io.ReadAll(item.ToReader())
|
||||
_, err := io.ReadAll(item.ToReader())
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
t.Log(string(bs))
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -575,3 +577,123 @@ func (suite *GroupsCollectionIntgSuite) TestCreateGroupsCollection_SharePoint()
|
||||
assert.NotZero(t, status.Successes)
|
||||
t.Log(status.String())
|
||||
}
|
||||
|
||||
func (suite *GroupsCollectionIntgSuite) TestCreateGroupsCollection_SharePoint_InvalidMetadata() {
|
||||
t := suite.T()
|
||||
|
||||
ctx, flush := tester.NewContext(t)
|
||||
defer flush()
|
||||
|
||||
var (
|
||||
groupID = tconfig.M365GroupID(t)
|
||||
ctrl = newController(ctx, t, path.GroupsService)
|
||||
groupIDs = []string{groupID}
|
||||
)
|
||||
|
||||
id, name, err := ctrl.PopulateProtectedResourceIDAndName(ctx, groupID, nil)
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
sel := selectors.NewGroupsBackup(groupIDs)
|
||||
sel.Include(sel.LibraryFolders([]string{"test"}, selectors.PrefixMatch()))
|
||||
|
||||
sel.SetDiscreteOwnerIDName(id, name)
|
||||
|
||||
site, err := suite.connector.AC.Groups().GetRootSite(ctx, groupID)
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
pth, err := path.Build(
|
||||
suite.tenantID,
|
||||
groupID,
|
||||
path.GroupsService,
|
||||
path.LibrariesCategory,
|
||||
true,
|
||||
odConsts.SitesPathDir,
|
||||
ptr.Val(site.GetId()))
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
mmc := []data.RestoreCollection{
|
||||
mock.Collection{
|
||||
Path: pth,
|
||||
ItemData: []data.Item{
|
||||
&mock.Item{
|
||||
ItemID: "previouspath",
|
||||
Reader: io.NopCloser(bytes.NewReader([]byte("invalid"))),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
bpc := inject.BackupProducerConfig{
|
||||
LastBackupVersion: version.NoBackup,
|
||||
Options: control.DefaultOptions(),
|
||||
ProtectedResource: inMock.NewProvider(id, name),
|
||||
Selector: sel.Selector,
|
||||
MetadataCollections: mmc,
|
||||
}
|
||||
|
||||
collections, excludes, canUsePreviousBackup, err := ctrl.ProduceBackupCollections(
|
||||
ctx,
|
||||
bpc,
|
||||
fault.New(true))
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
assert.True(t, canUsePreviousBackup, "can use previous backup")
|
||||
// No excludes yet as this isn't an incremental backup.
|
||||
assert.True(t, excludes.Empty())
|
||||
|
||||
// we don't know an exact count of drives this will produce,
|
||||
// but it should be more than one.
|
||||
assert.Greater(t, len(collections), 1)
|
||||
|
||||
p, err := path.BuildMetadata(
|
||||
suite.tenantID,
|
||||
groupID,
|
||||
path.GroupsService,
|
||||
path.LibrariesCategory,
|
||||
false)
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
p, err = p.Append(false, odConsts.SitesPathDir)
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
foundSitesMetadata := false
|
||||
foundRootTombstone := false
|
||||
|
||||
sp, err := path.BuildPrefix(
|
||||
suite.tenantID,
|
||||
groupID,
|
||||
path.GroupsService,
|
||||
path.LibrariesCategory)
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
sp, err = sp.Append(false, odConsts.SitesPathDir, ptr.Val(site.GetId()))
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
for _, coll := range collections {
|
||||
if coll.State() == data.DeletedState {
|
||||
if coll.PreviousPath() != nil && coll.PreviousPath().String() == sp.String() {
|
||||
foundRootTombstone = true
|
||||
}
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
sitesMetadataCollection := coll.FullPath().String() == p.String()
|
||||
|
||||
for object := range coll.Items(ctx, fault.New(true)) {
|
||||
if object.ID() == "previouspath" && sitesMetadataCollection {
|
||||
foundSitesMetadata = true
|
||||
}
|
||||
|
||||
buf := &bytes.Buffer{}
|
||||
_, err := buf.ReadFrom(object.ToReader())
|
||||
assert.NoError(t, err, "reading item", clues.ToCore(err))
|
||||
}
|
||||
}
|
||||
|
||||
assert.True(t, foundSitesMetadata, "missing sites metadata")
|
||||
assert.True(t, foundRootTombstone, "missing root tombstone")
|
||||
|
||||
status := ctrl.Wait()
|
||||
assert.NotZero(t, status.Successes)
|
||||
t.Log(status.String())
|
||||
}
|
||||
|
||||
@ -584,15 +584,24 @@ func (oc *Collection) streamDriveItem(
|
||||
return progReader, nil
|
||||
})
|
||||
|
||||
// We wrap the reader with a lazy reader so that the progress bar is only
|
||||
// initialized if the file is read. Since we're not actually lazily reading
|
||||
// data, just use the eager item implementation.
|
||||
oc.data <- data.NewUnindexedPrefetchedItem(
|
||||
storeItem, err := data.NewUnindexedPrefetchedItem(
|
||||
metaReader,
|
||||
metaFileName+metaSuffix,
|
||||
// Metadata file should always use the latest time as
|
||||
// a permissions change does not update the mod time.
|
||||
time.Now())
|
||||
if err != nil {
|
||||
errs.AddRecoverable(ctx, clues.Stack(err).
|
||||
WithClues(ctx).
|
||||
Label(fault.LabelForceNoBackupCreation))
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// We wrap the reader with a lazy reader so that the progress bar is only
|
||||
// initialized if the file is read. Since we're not actually lazily reading
|
||||
// data, just use the eager item implementation.
|
||||
oc.data <- storeItem
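The comment above explains why a lazy reader exists in this path: the progress bar should only be initialized if the file body is actually read. A small, self-contained sketch of such a wrapper, independent of corso's item types (names here are illustrative):

import "io"

// lazyReader defers its init hook (for example, registering a progress bar)
// until the first Read call, so untouched items never show a bar.
type lazyReader struct {
	init   func()
	opened bool
	r      io.Reader
}

func (lr *lazyReader) Read(p []byte) (int, error) {
	if !lr.opened {
		lr.opened = true

		if lr.init != nil {
			lr.init()
		}
	}

	return lr.r.Read(p)
}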
|
||||
|
||||
// Item read successfully, add to collection
|
||||
if isFile {
|
||||
|
||||
@ -19,6 +19,7 @@ import (
|
||||
"github.com/stretchr/testify/suite"
|
||||
|
||||
"github.com/alcionai/corso/src/internal/common/ptr"
|
||||
"github.com/alcionai/corso/src/internal/common/readers"
|
||||
"github.com/alcionai/corso/src/internal/data"
|
||||
"github.com/alcionai/corso/src/internal/m365/collection/drive/metadata"
|
||||
metaTD "github.com/alcionai/corso/src/internal/m365/collection/drive/metadata/testdata"
|
||||
@ -256,7 +257,7 @@ func (suite *CollectionUnitSuite) TestCollection() {
|
||||
mt := readItem.(data.ItemModTime)
|
||||
assert.Equal(t, now, mt.ModTime())
|
||||
|
||||
readData, err := io.ReadAll(readItem.ToReader())
|
||||
rr, err := readers.NewVersionedRestoreReader(readItem.ToReader())
|
||||
test.expectErr(t, err)
|
||||
|
||||
if err != nil {
|
||||
@ -267,13 +268,25 @@ func (suite *CollectionUnitSuite) TestCollection() {
|
||||
return
|
||||
}
|
||||
|
||||
assert.Equal(t, readers.DefaultSerializationVersion, rr.Format().Version)
|
||||
assert.False(t, rr.Format().DelInFlight)
|
||||
|
||||
readData, err := io.ReadAll(rr)
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
assert.Equal(t, stubItemContent, readData)
|
||||
|
||||
readItemMeta := readItems[1]
|
||||
assert.Equal(t, stubItemID+metadata.MetaFileSuffix, readItemMeta.ID())
|
||||
|
||||
rr, err = readers.NewVersionedRestoreReader(readItemMeta.ToReader())
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
assert.Equal(t, readers.DefaultSerializationVersion, rr.Format().Version)
|
||||
assert.False(t, rr.Format().DelInFlight)
|
||||
|
||||
readMeta := metadata.Metadata{}
|
||||
err = json.NewDecoder(readItemMeta.ToReader()).Decode(&readMeta)
|
||||
err = json.NewDecoder(rr).Decode(&readMeta)
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
metaTD.AssertMetadataEqual(t, stubMeta, readMeta)
|
||||
@ -485,12 +498,18 @@ func (suite *CollectionUnitSuite) TestCollectionPermissionBackupLatestModTime()
|
||||
|
||||
for _, i := range readItems {
|
||||
if strings.HasSuffix(i.ID(), metadata.MetaFileSuffix) {
|
||||
content, err := io.ReadAll(i.ToReader())
|
||||
rr, err := readers.NewVersionedRestoreReader(i.ToReader())
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
assert.Equal(t, readers.DefaultSerializationVersion, rr.Format().Version)
|
||||
assert.False(t, rr.Format().DelInFlight)
|
||||
|
||||
content, err := io.ReadAll(rr)
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
require.Equal(t, `{"filename":"Fake Item","permissionMode":1}`, string(content))
|
||||
|
||||
im, ok := i.(data.ItemModTime)
|
||||
require.Equal(t, ok, true, "modtime interface")
|
||||
require.True(t, ok, "modtime interface")
|
||||
require.Greater(t, im.ModTime(), mtime, "permissions time greater than mod time")
|
||||
}
|
||||
}
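The tests above now wrap each item's reader in readers.NewVersionedRestoreReader and check the serialization version before consuming the payload. The following is a simplified stand-in for that check-version-then-read flow; the single-byte header is an assumption for illustration, not corso's actual serialization format:

import (
	"fmt"
	"io"
)

// readVersionedPayload verifies a (hypothetical) one-byte version header
// before returning the remaining bytes of the payload.
func readVersionedPayload(r io.Reader, wantVersion byte) ([]byte, error) {
	header := make([]byte, 1)
	if _, err := io.ReadFull(r, header); err != nil {
		return nil, fmt.Errorf("reading version header: %w", err)
	}

	if header[0] != wantVersion {
		return nil, fmt.Errorf("unexpected serialization version %d", header[0])
	}

	return io.ReadAll(r)
}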
|
||||
|
||||
@ -135,11 +135,6 @@ func deserializeMetadata(
|
||||
continue
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
// Successful decode.
|
||||
continue
|
||||
}
|
||||
|
||||
// This is conservative, but report an error if either any of the items
|
||||
// for any of the deserialized maps have duplicate drive IDs or there's
|
||||
// some other problem deserializing things. This will cause the entire
|
||||
@ -147,7 +142,9 @@ func deserializeMetadata(
|
||||
// these cases. We can make the logic for deciding when to continue vs.
|
||||
// when to fail less strict in the future if needed.
|
||||
if err != nil {
|
||||
return nil, nil, false, clues.Stack(err).WithClues(ictx)
|
||||
errs.Fail(clues.Stack(err).WithClues(ictx))
|
||||
|
||||
return map[string]string{}, map[string]map[string]string{}, false, nil
|
||||
}
|
||||
}
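The hunk above changes deserializeMetadata from returning a hard error to recording the failure on the fault bus and handing back empty maps, which makes the caller fall back to a full backup instead of aborting. A minimal sketch of that fail-soft pattern, with a simplified metadata shape assumed for illustration:

import "encoding/json"

// loadPrevState reports a metadata problem but still returns empty maps and
// false, so the caller runs a full (non-incremental) backup rather than failing.
func loadPrevState(raw []byte, report func(error)) (map[string]string, map[string]map[string]string, bool) {
	var state struct {
		Deltas map[string]string            `json:"deltas"`
		Paths  map[string]map[string]string `json:"paths"`
	}

	if err := json.Unmarshal(raw, &state); err != nil {
		report(err)

		// Empty maps plus false: nothing to merge with, do a full enumeration.
		return map[string]string{}, map[string]map[string]string{}, false
	}

	return state.Deltas, state.Paths, true
}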
|
||||
}
|
||||
@ -230,16 +227,16 @@ func (c *Collections) Get(
|
||||
ssmb *prefixmatcher.StringSetMatchBuilder,
|
||||
errs *fault.Bus,
|
||||
) ([]data.BackupCollection, bool, error) {
|
||||
prevDriveIDToDelta, oldPrevPathsByDriveID, canUsePrevBackup, err := deserializeMetadata(ctx, prevMetadata)
|
||||
prevDeltas, oldPathsByDriveID, canUsePreviousBackup, err := deserializeMetadata(ctx, prevMetadata)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
|
||||
ctx = clues.Add(ctx, "can_use_previous_backup", canUsePrevBackup)
|
||||
ctx = clues.Add(ctx, "can_use_previous_backup", canUsePreviousBackup)
|
||||
|
||||
driveTombstones := map[string]struct{}{}
|
||||
|
||||
for driveID := range oldPrevPathsByDriveID {
|
||||
for driveID := range oldPathsByDriveID {
|
||||
driveTombstones[driveID] = struct{}{}
|
||||
}
|
||||
|
||||
@ -257,88 +254,76 @@ func (c *Collections) Get(
|
||||
}
|
||||
|
||||
var (
|
||||
driveIDToDeltaLink = map[string]string{}
|
||||
driveIDToPrevPaths = map[string]map[string]string{}
|
||||
numPrevItems = 0
|
||||
// Drive ID -> delta URL for drive
|
||||
deltaURLs = map[string]string{}
|
||||
// Drive ID -> folder ID -> folder path
|
||||
folderPaths = map[string]map[string]string{}
|
||||
numPrevItems = 0
|
||||
)
|
||||
|
||||
for _, d := range drives {
|
||||
var (
|
||||
driveID = ptr.Val(d.GetId())
|
||||
driveName = ptr.Val(d.GetName())
|
||||
ictx = clues.Add(
|
||||
ctx,
|
||||
"drive_id", driveID,
|
||||
"drive_name", clues.Hide(driveName))
|
||||
|
||||
excludedItemIDs = map[string]struct{}{}
|
||||
oldPrevPaths = oldPrevPathsByDriveID[driveID]
|
||||
prevDeltaLink = prevDriveIDToDelta[driveID]
|
||||
|
||||
// itemCollection is used to identify which collection a
|
||||
// file belongs to. This is useful to delete a file from the
|
||||
// collection it was previously in, in case it was moved to a
|
||||
// different collection within the same delta query
|
||||
// item ID -> item ID
|
||||
itemCollection = map[string]string{}
|
||||
driveID = ptr.Val(d.GetId())
|
||||
driveName = ptr.Val(d.GetName())
|
||||
prevDelta = prevDeltas[driveID]
|
||||
oldPaths = oldPathsByDriveID[driveID]
|
||||
numOldDelta = 0
|
||||
ictx = clues.Add(ctx, "drive_id", driveID, "drive_name", driveName)
|
||||
)
|
||||
|
||||
delete(driveTombstones, driveID)
|
||||
|
||||
if _, ok := driveIDToPrevPaths[driveID]; !ok {
|
||||
driveIDToPrevPaths[driveID] = map[string]string{}
|
||||
}
|
||||
|
||||
if _, ok := c.CollectionMap[driveID]; !ok {
|
||||
c.CollectionMap[driveID] = map[string]*Collection{}
|
||||
}
|
||||
|
||||
if len(prevDelta) > 0 {
|
||||
numOldDelta++
|
||||
}
|
||||
|
||||
logger.Ctx(ictx).Infow(
|
||||
"previous metadata for drive",
|
||||
"num_paths_entries", len(oldPrevPaths))
|
||||
"num_paths_entries", len(oldPaths),
|
||||
"num_deltas_entries", numOldDelta)
|
||||
|
||||
items, du, err := c.handler.EnumerateDriveItemsDelta(
|
||||
delta, paths, excluded, err := collectItems(
|
||||
ictx,
|
||||
c.handler.NewItemPager(driveID, "", api.DriveItemSelectDefault()),
|
||||
driveID,
|
||||
prevDeltaLink)
|
||||
driveName,
|
||||
c.UpdateCollections,
|
||||
oldPaths,
|
||||
prevDelta,
|
||||
errs)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
|
||||
// Used for logging below.
|
||||
numDeltas := 0
|
||||
|
||||
// It's alright to have an empty folders map (i.e. no folders found) but not
|
||||
// an empty delta token. This is because when deserializing the metadata we
|
||||
// remove entries for which there is no corresponding delta token/folder. If
|
||||
// we leave empty delta tokens then we may end up setting the State field
|
||||
// for collections when not actually getting delta results.
|
||||
if len(du.URL) > 0 {
|
||||
driveIDToDeltaLink[driveID] = du.URL
|
||||
}
|
||||
|
||||
newPrevPaths, err := c.UpdateCollections(
|
||||
ctx,
|
||||
driveID,
|
||||
driveName,
|
||||
items,
|
||||
oldPrevPaths,
|
||||
itemCollection,
|
||||
excludedItemIDs,
|
||||
du.Reset,
|
||||
errs)
|
||||
if err != nil {
|
||||
return nil, false, clues.Stack(err)
|
||||
if len(delta.URL) > 0 {
|
||||
deltaURLs[driveID] = delta.URL
|
||||
numDeltas++
|
||||
}
|
||||
|
||||
// Avoid the edge case where there's no paths but we do have a valid delta
|
||||
// token. We can accomplish this by adding an empty paths map for this
|
||||
// drive. If we don't have this then the next backup won't use the delta
|
||||
// token because it thinks the folder paths weren't persisted.
|
||||
driveIDToPrevPaths[driveID] = map[string]string{}
|
||||
maps.Copy(driveIDToPrevPaths[driveID], newPrevPaths)
|
||||
folderPaths[driveID] = map[string]string{}
|
||||
maps.Copy(folderPaths[driveID], paths)
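The comment above ("Avoid the edge case where there's no paths but we do have a valid delta token...") boils down to always persisting a non-nil paths map per drive. A short sketch of that persistence shape, with a hypothetical JSON layout standing in for the real metadata entries:

import "encoding/json"

// driveState is a hypothetical persisted layout; the empty-but-present Paths
// map records that the delta token is valid even when no folders were seen.
type driveState struct {
	Delta string            `json:"delta"`
	Paths map[string]string `json:"paths"`
}

func persistDriveState(deltaURL string, paths map[string]string) ([]byte, error) {
	st := driveState{
		Delta: deltaURL,
		Paths: map[string]string{}, // always written, even if empty
	}

	for id, p := range paths {
		st.Paths[id] = p
	}

	return json.Marshal(st)
}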
|
||||
|
||||
logger.Ctx(ictx).Infow(
|
||||
"persisted metadata for drive",
|
||||
"num_new_paths_entries", len(newPrevPaths),
|
||||
"delta_reset", du.Reset)
|
||||
"num_paths_entries", len(paths),
|
||||
"num_deltas_entries", numDeltas,
|
||||
"delta_reset", delta.Reset)
|
||||
|
||||
numDriveItems := c.NumItems - numPrevItems
|
||||
numPrevItems = c.NumItems
|
||||
@ -350,7 +335,7 @@ func (c *Collections) Get(
|
||||
err = c.addURLCacheToDriveCollections(
|
||||
ictx,
|
||||
driveID,
|
||||
prevDeltaLink,
|
||||
prevDelta,
|
||||
errs)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
@ -359,8 +344,8 @@ func (c *Collections) Get(
|
||||
|
||||
// For both cases we don't need to do set difference on folder map if the
|
||||
// delta token was valid because we should see all the changes.
|
||||
if !du.Reset {
|
||||
if len(excludedItemIDs) == 0 {
|
||||
if !delta.Reset {
|
||||
if len(excluded) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
@ -369,7 +354,7 @@ func (c *Collections) Get(
|
||||
return nil, false, clues.Wrap(err, "making exclude prefix").WithClues(ictx)
|
||||
}
|
||||
|
||||
ssmb.Add(p.String(), excludedItemIDs)
|
||||
ssmb.Add(p.String(), excluded)
|
||||
|
||||
continue
|
||||
}
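When the delta token survived (no reset), the excluded item IDs above are registered under the drive's path prefix so later merge logic can drop stale copies of those items from the base backup. A sketch of a prefix-keyed exclusion set in that spirit, with simplified types rather than the actual prefixmatcher API:

import "strings"

// prefixExcludes maps a path prefix to the item IDs excluded under it.
type prefixExcludes map[string]map[string]struct{}

func (pe prefixExcludes) add(prefix string, ids map[string]struct{}) {
	if pe[prefix] == nil {
		pe[prefix] = map[string]struct{}{}
	}

	for id := range ids {
		pe[prefix][id] = struct{}{}
	}
}

// excluded reports whether an item under itemPath was marked for exclusion.
func (pe prefixExcludes) excluded(itemPath, itemID string) bool {
	for prefix, ids := range pe {
		if !strings.HasPrefix(itemPath, prefix) {
			continue
		}

		if _, ok := ids[itemID]; ok {
			return true
		}
	}

	return false
}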
|
||||
@ -384,11 +369,13 @@ func (c *Collections) Get(
|
||||
foundFolders[id] = struct{}{}
|
||||
}
|
||||
|
||||
for fldID, p := range oldPrevPaths {
|
||||
for fldID, p := range oldPaths {
|
||||
if _, ok := foundFolders[fldID]; ok {
|
||||
continue
|
||||
}
|
||||
|
||||
delete(paths, fldID)
|
||||
|
||||
prevPath, err := path.FromDataLayerPath(p, false)
|
||||
if err != nil {
|
||||
err = clues.Wrap(err, "invalid previous path").WithClues(ictx).With("deleted_path", p)
|
||||
@ -456,14 +443,14 @@ func (c *Collections) Get(
|
||||
// empty/missing and default to a full backup.
|
||||
logger.CtxErr(ctx, err).Info("making metadata collection path prefixes")
|
||||
|
||||
return collections, canUsePrevBackup, nil
|
||||
return collections, canUsePreviousBackup, nil
|
||||
}
|
||||
|
||||
md, err := graph.MakeMetadataCollection(
|
||||
pathPrefix,
|
||||
[]graph.MetadataCollectionEntry{
|
||||
graph.NewMetadataEntry(bupMD.PreviousPathFileName, driveIDToPrevPaths),
|
||||
graph.NewMetadataEntry(bupMD.DeltaURLsFileName, driveIDToDeltaLink),
|
||||
graph.NewMetadataEntry(bupMD.PreviousPathFileName, folderPaths),
|
||||
graph.NewMetadataEntry(bupMD.DeltaURLsFileName, deltaURLs),
|
||||
},
|
||||
c.statusUpdater)
|
||||
|
||||
@ -476,7 +463,7 @@ func (c *Collections) Get(
|
||||
collections = append(collections, md)
|
||||
}
|
||||
|
||||
return collections, canUsePrevBackup, nil
|
||||
return collections, canUsePreviousBackup, nil
|
||||
}
|
||||
|
||||
// addURLCacheToDriveCollections adds a URL cache to all collections belonging to
|
||||
@ -490,7 +477,7 @@ func (c *Collections) addURLCacheToDriveCollections(
|
||||
driveID,
|
||||
prevDelta,
|
||||
urlCacheRefreshInterval,
|
||||
c.handler,
|
||||
c.handler.NewItemPager(driveID, "", api.DriveItemSelectURLCache()),
|
||||
errs)
|
||||
if err != nil {
|
||||
return err
|
||||
@ -546,21 +533,22 @@ func updateCollectionPaths(
|
||||
|
||||
func (c *Collections) handleDelete(
|
||||
itemID, driveID string,
|
||||
oldPrevPaths, currPrevPaths, newPrevPaths map[string]string,
|
||||
oldPaths, newPaths map[string]string,
|
||||
isFolder bool,
|
||||
excluded map[string]struct{},
|
||||
itemCollection map[string]map[string]string,
|
||||
invalidPrevDelta bool,
|
||||
) error {
|
||||
if !isFolder {
|
||||
// Try to remove the item from the Collection if an entry exists for this
|
||||
// item. This handles cases where an item was created and deleted during the
|
||||
// same delta query.
|
||||
if parentID, ok := currPrevPaths[itemID]; ok {
|
||||
if parentID, ok := itemCollection[driveID][itemID]; ok {
|
||||
if col := c.CollectionMap[driveID][parentID]; col != nil {
|
||||
col.Remove(itemID)
|
||||
}
|
||||
|
||||
delete(currPrevPaths, itemID)
|
||||
delete(itemCollection[driveID], itemID)
|
||||
}
|
||||
|
||||
// Don't need to add to exclude list if the delta is invalid since the
|
||||
@ -581,7 +569,7 @@ func (c *Collections) handleDelete(
|
||||
|
||||
var prevPath path.Path
|
||||
|
||||
prevPathStr, ok := oldPrevPaths[itemID]
|
||||
prevPathStr, ok := oldPaths[itemID]
|
||||
if ok {
|
||||
var err error
|
||||
|
||||
@ -598,7 +586,7 @@ func (c *Collections) handleDelete(
|
||||
// Nested folders also return deleted delta results so we don't have to
|
||||
// worry about doing a prefix search in the map to remove the subtree of
|
||||
// the deleted folder/package.
|
||||
delete(newPrevPaths, itemID)
|
||||
delete(newPaths, itemID)
|
||||
|
||||
if prevPath == nil || invalidPrevDelta {
|
||||
// It is possible that an item was created and deleted between two delta
|
||||
@ -688,29 +676,21 @@ func (c *Collections) getCollectionPath(
|
||||
|
||||
// UpdateCollections initializes and adds the provided drive items to Collections
|
||||
// A new collection is created for every drive folder (or package).
|
||||
// oldPrevPaths is the unchanged data that was loaded from the metadata file.
|
||||
// This map is not modified during the call.
|
||||
// currPrevPaths starts as a copy of oldPrevPaths and is updated as changes are found in
|
||||
// the returned results. Items are added to this collection throughout the call.
|
||||
// newPrevPaths, i.e. the items added during this call, is returned as a map.
|
||||
// oldPaths is the unchanged data that was loaded from the metadata file.
|
||||
// newPaths starts as a copy of oldPaths and is updated as changes are found in
|
||||
// the returned results.
|
||||
func (c *Collections) UpdateCollections(
|
||||
ctx context.Context,
|
||||
driveID, driveName string,
|
||||
items []models.DriveItemable,
|
||||
oldPrevPaths map[string]string,
|
||||
currPrevPaths map[string]string,
|
||||
oldPaths map[string]string,
|
||||
newPaths map[string]string,
|
||||
excluded map[string]struct{},
|
||||
itemCollection map[string]map[string]string,
|
||||
invalidPrevDelta bool,
|
||||
errs *fault.Bus,
|
||||
) (map[string]string, error) {
|
||||
var (
|
||||
el = errs.Local()
|
||||
newPrevPaths = map[string]string{}
|
||||
)
|
||||
|
||||
if !invalidPrevDelta {
|
||||
maps.Copy(newPrevPaths, oldPrevPaths)
|
||||
}
|
||||
) error {
|
||||
el := errs.Local()
|
||||
|
||||
for _, item := range items {
|
||||
if el.Failure() != nil {
|
||||
@ -720,12 +700,8 @@ func (c *Collections) UpdateCollections(
|
||||
var (
|
||||
itemID = ptr.Val(item.GetId())
|
||||
itemName = ptr.Val(item.GetName())
|
||||
ictx = clues.Add(ctx, "item_id", itemID, "item_name", clues.Hide(itemName))
|
||||
isFolder = item.GetFolder() != nil || item.GetPackageEscaped() != nil
|
||||
ictx = clues.Add(
|
||||
ctx,
|
||||
"item_id", itemID,
|
||||
"item_name", clues.Hide(itemName),
|
||||
"item_is_folder", isFolder)
|
||||
)
|
||||
|
||||
if item.GetMalware() != nil {
|
||||
@ -747,13 +723,13 @@ func (c *Collections) UpdateCollections(
|
||||
if err := c.handleDelete(
|
||||
itemID,
|
||||
driveID,
|
||||
oldPrevPaths,
|
||||
currPrevPaths,
|
||||
newPrevPaths,
|
||||
oldPaths,
|
||||
newPaths,
|
||||
isFolder,
|
||||
excluded,
|
||||
itemCollection,
|
||||
invalidPrevDelta); err != nil {
|
||||
return nil, clues.Stack(err).WithClues(ictx)
|
||||
return clues.Stack(err).WithClues(ictx)
|
||||
}
|
||||
|
||||
continue
|
||||
@ -779,13 +755,13 @@ func (c *Collections) UpdateCollections(
|
||||
// Deletions are handled above so this is just moves/renames.
|
||||
var prevPath path.Path
|
||||
|
||||
prevPathStr, ok := oldPrevPaths[itemID]
|
||||
prevPathStr, ok := oldPaths[itemID]
|
||||
if ok {
|
||||
prevPath, err = path.FromDataLayerPath(prevPathStr, false)
|
||||
if err != nil {
|
||||
el.AddRecoverable(ctx, clues.Wrap(err, "invalid previous path").
|
||||
WithClues(ictx).
|
||||
With("prev_path_string", path.LoggableDir(prevPathStr)))
|
||||
With("path_string", prevPathStr))
|
||||
}
|
||||
} else if item.GetRoot() != nil {
|
||||
// Root doesn't move or get renamed.
|
||||
@ -795,11 +771,11 @@ func (c *Collections) UpdateCollections(
|
||||
// Moved folders don't cause delta results for any subfolders nested in
|
||||
// them. We need to go through and update paths to handle that. We only
|
||||
// update newPaths so we don't accidentally clobber previous deletes.
|
||||
updatePath(newPrevPaths, itemID, collectionPath.String())
|
||||
updatePath(newPaths, itemID, collectionPath.String())
|
||||
|
||||
found, err := updateCollectionPaths(driveID, itemID, c.CollectionMap, collectionPath)
|
||||
if err != nil {
|
||||
return nil, clues.Stack(err).WithClues(ictx)
|
||||
return clues.Stack(err).WithClues(ictx)
|
||||
}
|
||||
|
||||
if found {
|
||||
@ -822,7 +798,7 @@ func (c *Collections) UpdateCollections(
|
||||
invalidPrevDelta,
|
||||
nil)
|
||||
if err != nil {
|
||||
return nil, clues.Stack(err).WithClues(ictx)
|
||||
return clues.Stack(err).WithClues(ictx)
|
||||
}
|
||||
|
||||
col.driveName = driveName
|
||||
@ -844,38 +820,35 @@ func (c *Collections) UpdateCollections(
|
||||
case item.GetFile() != nil:
|
||||
// Deletions are handled above so this is just moves/renames.
|
||||
if len(ptr.Val(item.GetParentReference().GetId())) == 0 {
|
||||
return nil, clues.New("file without parent ID").WithClues(ictx)
|
||||
return clues.New("file without parent ID").WithClues(ictx)
|
||||
}
|
||||
|
||||
// Get the collection for this item.
|
||||
parentID := ptr.Val(item.GetParentReference().GetId())
|
||||
ictx = clues.Add(ictx, "parent_id", parentID)
|
||||
|
||||
collection, ok := c.CollectionMap[driveID][parentID]
|
||||
if !ok {
|
||||
return nil, clues.New("item seen before parent folder").WithClues(ictx)
|
||||
collection, found := c.CollectionMap[driveID][parentID]
|
||||
if !found {
|
||||
return clues.New("item seen before parent folder").WithClues(ictx)
|
||||
}
|
||||
|
||||
// This will only kick in if the file was moved multiple times
|
||||
// within a single delta query. We delete the file from the previous
|
||||
// collection so that it doesn't appear in two places.
|
||||
prevParentContainerID, ok := currPrevPaths[itemID]
|
||||
if ok {
|
||||
prevColl, found := c.CollectionMap[driveID][prevParentContainerID]
|
||||
// Delete the file from the previous collection. This will
|
||||
// only kick in if the file was moved multiple times
|
||||
// within a single delta query
|
||||
icID, found := itemCollection[driveID][itemID]
|
||||
if found {
|
||||
pcollection, found := c.CollectionMap[driveID][icID]
|
||||
if !found {
|
||||
return nil, clues.New("previous collection not found").
|
||||
With("prev_parent_container_id", prevParentContainerID).
|
||||
WithClues(ictx)
|
||||
return clues.New("previous collection not found").WithClues(ictx)
|
||||
}
|
||||
|
||||
if ok := prevColl.Remove(itemID); !ok {
|
||||
return nil, clues.New("removing item from prev collection").
|
||||
With("prev_parent_container_id", prevParentContainerID).
|
||||
WithClues(ictx)
|
||||
removed := pcollection.Remove(itemID)
|
||||
if !removed {
|
||||
return clues.New("removing from prev collection").WithClues(ictx)
|
||||
}
|
||||
}
|
||||
|
||||
currPrevPaths[itemID] = parentID
|
||||
itemCollection[driveID][itemID] = parentID
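The surrounding code keeps an item ID to parent ID map so that a file moved more than once within a single delta query is removed from its earlier collection before being added to the new one. A compact sketch of that bookkeeping with simplified stand-in types:

// Sketch only: simplified stand-ins for corso's collection map.
type sketchColl struct{ items map[string]struct{} }

func placeItem(
	itemID, parentID string,
	collections map[string]*sketchColl,
	itemParent map[string]string,
) {
	// If the item was already placed under a different parent in this delta,
	// pull it out of that collection first so it doesn't appear twice.
	if prev, ok := itemParent[itemID]; ok && prev != parentID {
		if old := collections[prev]; old != nil {
			delete(old.items, itemID)
		}
	}

	c := collections[parentID]
	if c == nil {
		c = &sketchColl{items: map[string]struct{}{}}
		collections[parentID] = c
	}

	c.items[itemID] = struct{}{}
	itemParent[itemID] = parentID
}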
|
||||
|
||||
if collection.Add(item) {
|
||||
c.NumItems++
|
||||
@ -896,13 +869,11 @@ func (c *Collections) UpdateCollections(
|
||||
}
|
||||
|
||||
default:
|
||||
el.AddRecoverable(ictx, clues.New("item is neither folder nor file").
|
||||
WithClues(ictx).
|
||||
Label(fault.LabelForceNoBackupCreation))
|
||||
return clues.New("item type not supported").WithClues(ictx)
|
||||
}
|
||||
}
|
||||
|
||||
return newPrevPaths, el.Failure()
|
||||
return el.Failure()
|
||||
}
|
||||
|
||||
type dirScopeChecker interface {
|
||||
|
||||
@ -8,6 +8,7 @@ import (
|
||||
"github.com/alcionai/clues"
|
||||
"github.com/google/uuid"
|
||||
"github.com/microsoftgraph/msgraph-sdk-go/models"
|
||||
"github.com/microsoftgraph/msgraph-sdk-go/models/odataerrors"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/stretchr/testify/suite"
|
||||
@ -16,6 +17,7 @@ import (
|
||||
"github.com/alcionai/corso/src/internal/common/prefixmatcher"
|
||||
pmMock "github.com/alcionai/corso/src/internal/common/prefixmatcher/mock"
|
||||
"github.com/alcionai/corso/src/internal/data"
|
||||
dataMock "github.com/alcionai/corso/src/internal/data/mock"
|
||||
"github.com/alcionai/corso/src/internal/m365/collection/drive/metadata"
|
||||
"github.com/alcionai/corso/src/internal/m365/graph"
|
||||
odConsts "github.com/alcionai/corso/src/internal/m365/service/onedrive/consts"
|
||||
@ -135,7 +137,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
expectedStatePath := getExpectedStatePathGenerator(suite.T(), bh, tenant, testBaseDrivePath)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
testCase string
|
||||
items []models.DriveItemable
|
||||
inputFolderMap map[string]string
|
||||
scope selectors.OneDriveScope
|
||||
@ -145,11 +147,11 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
expectedContainerCount int
|
||||
expectedFileCount int
|
||||
expectedSkippedCount int
|
||||
expectedPrevPaths map[string]string
|
||||
expectedMetadataPaths map[string]string
|
||||
expectedExcludes map[string]struct{}
|
||||
}{
|
||||
{
|
||||
name: "Invalid item",
|
||||
testCase: "Invalid item",
|
||||
items: []models.DriveItemable{
|
||||
driveRootItem("root"),
|
||||
driveItem("item", "item", testBaseDrivePath, "root", false, false, false),
|
||||
@ -161,13 +163,13 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
"root": expectedStatePath(data.NotMovedState, ""),
|
||||
},
|
||||
expectedContainerCount: 1,
|
||||
expectedPrevPaths: map[string]string{
|
||||
expectedMetadataPaths: map[string]string{
|
||||
"root": expectedPath(""),
|
||||
},
|
||||
expectedExcludes: map[string]struct{}{},
|
||||
},
|
||||
{
|
||||
name: "Single File",
|
||||
testCase: "Single File",
|
||||
items: []models.DriveItemable{
|
||||
driveRootItem("root"),
|
||||
driveItem("file", "file", testBaseDrivePath, "root", true, false, false),
|
||||
@ -182,13 +184,13 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
expectedFileCount: 1,
|
||||
expectedContainerCount: 1,
|
||||
// Root folder is skipped since it's always present.
|
||||
expectedPrevPaths: map[string]string{
|
||||
expectedMetadataPaths: map[string]string{
|
||||
"root": expectedPath(""),
|
||||
},
|
||||
expectedExcludes: getDelList("file"),
|
||||
},
|
||||
{
|
||||
name: "Single Folder",
|
||||
testCase: "Single Folder",
|
||||
items: []models.DriveItemable{
|
||||
driveRootItem("root"),
|
||||
driveItem("folder", "folder", testBaseDrivePath, "root", false, true, false),
|
||||
@ -200,7 +202,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
"root": expectedStatePath(data.NotMovedState, ""),
|
||||
"folder": expectedStatePath(data.NewState, folder),
|
||||
},
|
||||
expectedPrevPaths: map[string]string{
|
||||
expectedMetadataPaths: map[string]string{
|
||||
"root": expectedPath(""),
|
||||
"folder": expectedPath("/folder"),
|
||||
},
|
||||
@ -209,7 +211,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
expectedExcludes: map[string]struct{}{},
|
||||
},
|
||||
{
|
||||
name: "Single Package",
|
||||
testCase: "Single Package",
|
||||
items: []models.DriveItemable{
|
||||
driveRootItem("root"),
|
||||
driveItem("package", "package", testBaseDrivePath, "root", false, false, true),
|
||||
@ -221,7 +223,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
"root": expectedStatePath(data.NotMovedState, ""),
|
||||
"package": expectedStatePath(data.NewState, pkg),
|
||||
},
|
||||
expectedPrevPaths: map[string]string{
|
||||
expectedMetadataPaths: map[string]string{
|
||||
"root": expectedPath(""),
|
||||
"package": expectedPath("/package"),
|
||||
},
|
||||
@ -230,7 +232,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
expectedExcludes: map[string]struct{}{},
|
||||
},
|
||||
{
|
||||
name: "1 root file, 1 folder, 1 package, 2 files, 3 collections",
|
||||
testCase: "1 root file, 1 folder, 1 package, 2 files, 3 collections",
|
||||
items: []models.DriveItemable{
|
||||
driveRootItem("root"),
|
||||
driveItem("fileInRoot", "fileInRoot", testBaseDrivePath, "root", true, false, false),
|
||||
@ -250,7 +252,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
expectedItemCount: 5,
|
||||
expectedFileCount: 3,
|
||||
expectedContainerCount: 3,
|
||||
expectedPrevPaths: map[string]string{
|
||||
expectedMetadataPaths: map[string]string{
|
||||
"root": expectedPath(""),
|
||||
"folder": expectedPath("/folder"),
|
||||
"package": expectedPath("/package"),
|
||||
@ -258,7 +260,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
expectedExcludes: getDelList("fileInRoot", "fileInFolder", "fileInPackage"),
|
||||
},
|
||||
{
|
||||
name: "contains folder selector",
|
||||
testCase: "contains folder selector",
|
||||
items: []models.DriveItemable{
|
||||
driveRootItem("root"),
|
||||
driveItem("fileInRoot", "fileInRoot", testBaseDrivePath, "root", true, false, false),
|
||||
@ -283,7 +285,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
expectedContainerCount: 3,
|
||||
// just "folder" isn't added here because the include check is done on the
|
||||
// parent path since we only check later if something is a folder or not.
|
||||
expectedPrevPaths: map[string]string{
|
||||
expectedMetadataPaths: map[string]string{
|
||||
"folder": expectedPath(folder),
|
||||
"subfolder": expectedPath(folderSub),
|
||||
"folder2": expectedPath(folderSub + folder),
|
||||
@ -291,7 +293,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
expectedExcludes: getDelList("fileInFolder", "fileInFolder2"),
|
||||
},
|
||||
{
|
||||
name: "prefix subfolder selector",
|
||||
testCase: "prefix subfolder selector",
|
||||
items: []models.DriveItemable{
|
||||
driveRootItem("root"),
|
||||
driveItem("fileInRoot", "fileInRoot", testBaseDrivePath, "root", true, false, false),
|
||||
@ -314,14 +316,14 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
expectedItemCount: 3,
|
||||
expectedFileCount: 1,
|
||||
expectedContainerCount: 2,
|
||||
expectedPrevPaths: map[string]string{
|
||||
expectedMetadataPaths: map[string]string{
|
||||
"subfolder": expectedPath(folderSub),
|
||||
"folder2": expectedPath(folderSub + folder),
|
||||
},
|
||||
expectedExcludes: getDelList("fileInFolder2"),
|
||||
},
|
||||
{
|
||||
name: "match subfolder selector",
|
||||
testCase: "match subfolder selector",
|
||||
items: []models.DriveItemable{
|
||||
driveRootItem("root"),
|
||||
driveItem("fileInRoot", "fileInRoot", testBaseDrivePath, "root", true, false, false),
|
||||
@ -342,13 +344,13 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
expectedFileCount: 1,
|
||||
expectedContainerCount: 1,
|
||||
// No child folders for subfolder so nothing here.
|
||||
expectedPrevPaths: map[string]string{
|
||||
expectedMetadataPaths: map[string]string{
|
||||
"subfolder": expectedPath(folderSub),
|
||||
},
|
||||
expectedExcludes: getDelList("fileInSubfolder"),
|
||||
},
|
||||
{
|
||||
name: "not moved folder tree",
|
||||
testCase: "not moved folder tree",
|
||||
items: []models.DriveItemable{
|
||||
driveRootItem("root"),
|
||||
driveItem("folder", "folder", testBaseDrivePath, "root", false, true, false),
|
||||
@ -366,7 +368,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
expectedItemCount: 1,
|
||||
expectedFileCount: 0,
|
||||
expectedContainerCount: 2,
|
||||
expectedPrevPaths: map[string]string{
|
||||
expectedMetadataPaths: map[string]string{
|
||||
"root": expectedPath(""),
|
||||
"folder": expectedPath(folder),
|
||||
"subfolder": expectedPath(folderSub),
|
||||
@ -374,7 +376,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
expectedExcludes: map[string]struct{}{},
|
||||
},
|
||||
{
|
||||
name: "moved folder tree",
|
||||
testCase: "moved folder tree",
|
||||
items: []models.DriveItemable{
|
||||
driveRootItem("root"),
|
||||
driveItem("folder", "folder", testBaseDrivePath, "root", false, true, false),
|
||||
@ -392,7 +394,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
expectedItemCount: 1,
|
||||
expectedFileCount: 0,
|
||||
expectedContainerCount: 2,
|
||||
expectedPrevPaths: map[string]string{
|
||||
expectedMetadataPaths: map[string]string{
|
||||
"root": expectedPath(""),
|
||||
"folder": expectedPath(folder),
|
||||
"subfolder": expectedPath(folderSub),
|
||||
@ -400,7 +402,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
expectedExcludes: map[string]struct{}{},
|
||||
},
|
||||
{
|
||||
name: "moved folder tree with file no previous",
|
||||
testCase: "moved folder tree with file no previous",
|
||||
items: []models.DriveItemable{
|
||||
driveRootItem("root"),
|
||||
driveItem("folder", "folder", testBaseDrivePath, "root", false, true, false),
|
||||
@ -417,14 +419,14 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
expectedItemCount: 2,
|
||||
expectedFileCount: 1,
|
||||
expectedContainerCount: 2,
|
||||
expectedPrevPaths: map[string]string{
|
||||
expectedMetadataPaths: map[string]string{
|
||||
"root": expectedPath(""),
|
||||
"folder": expectedPath("/folder2"),
|
||||
},
|
||||
expectedExcludes: getDelList("file"),
|
||||
},
|
||||
{
|
||||
name: "moved folder tree with file no previous 1",
|
||||
testCase: "moved folder tree with file no previous 1",
|
||||
items: []models.DriveItemable{
|
||||
driveRootItem("root"),
|
||||
driveItem("folder", "folder", testBaseDrivePath, "root", false, true, false),
|
||||
@ -440,14 +442,14 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
expectedItemCount: 2,
|
||||
expectedFileCount: 1,
|
||||
expectedContainerCount: 2,
|
||||
expectedPrevPaths: map[string]string{
|
||||
expectedMetadataPaths: map[string]string{
|
||||
"root": expectedPath(""),
|
||||
"folder": expectedPath(folder),
|
||||
},
|
||||
expectedExcludes: getDelList("file"),
|
||||
},
|
||||
{
|
||||
name: "moved folder tree and subfolder 1",
|
||||
testCase: "moved folder tree and subfolder 1",
|
||||
items: []models.DriveItemable{
|
||||
driveRootItem("root"),
|
||||
driveItem("folder", "folder", testBaseDrivePath, "root", false, true, false),
|
||||
@ -467,7 +469,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
expectedItemCount: 2,
|
||||
expectedFileCount: 0,
|
||||
expectedContainerCount: 3,
|
||||
expectedPrevPaths: map[string]string{
|
||||
expectedMetadataPaths: map[string]string{
|
||||
"root": expectedPath(""),
|
||||
"folder": expectedPath(folder),
|
||||
"subfolder": expectedPath("/subfolder"),
|
||||
@ -475,7 +477,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
expectedExcludes: map[string]struct{}{},
|
||||
},
|
||||
{
|
||||
name: "moved folder tree and subfolder 2",
|
||||
testCase: "moved folder tree and subfolder 2",
|
||||
items: []models.DriveItemable{
|
||||
driveRootItem("root"),
|
||||
driveItem("subfolder", "subfolder", testBaseDrivePath, "root", false, true, false),
|
||||
@ -495,7 +497,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
expectedItemCount: 2,
|
||||
expectedFileCount: 0,
|
||||
expectedContainerCount: 3,
|
||||
expectedPrevPaths: map[string]string{
|
||||
expectedMetadataPaths: map[string]string{
|
||||
"root": expectedPath(""),
|
||||
"folder": expectedPath(folder),
|
||||
"subfolder": expectedPath("/subfolder"),
|
||||
@ -503,7 +505,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
expectedExcludes: map[string]struct{}{},
|
||||
},
|
||||
{
|
||||
name: "move subfolder when moving parent",
|
||||
testCase: "move subfolder when moving parent",
|
||||
items: []models.DriveItemable{
|
||||
driveRootItem("root"),
|
||||
driveItem("folder2", "folder2", testBaseDrivePath, "root", false, true, false),
|
||||
@ -537,7 +539,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
expectedItemCount: 5,
|
||||
expectedFileCount: 2,
|
||||
expectedContainerCount: 4,
|
||||
expectedPrevPaths: map[string]string{
|
||||
expectedMetadataPaths: map[string]string{
|
||||
"root": expectedPath(""),
|
||||
"folder": expectedPath("/folder"),
|
||||
"folder2": expectedPath("/folder2"),
|
||||
@ -546,7 +548,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
expectedExcludes: getDelList("itemInSubfolder", "itemInFolder2"),
|
||||
},
|
||||
{
|
||||
name: "moved folder tree multiple times",
|
||||
testCase: "moved folder tree multiple times",
|
||||
items: []models.DriveItemable{
|
||||
driveRootItem("root"),
|
||||
driveItem("folder", "folder", testBaseDrivePath, "root", false, true, false),
|
||||
@ -566,7 +568,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
expectedItemCount: 2,
|
||||
expectedFileCount: 1,
|
||||
expectedContainerCount: 2,
|
||||
expectedPrevPaths: map[string]string{
|
||||
expectedMetadataPaths: map[string]string{
|
||||
"root": expectedPath(""),
|
||||
"folder": expectedPath("/folder2"),
|
||||
"subfolder": expectedPath("/folder2/subfolder"),
|
||||
@ -574,7 +576,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
expectedExcludes: getDelList("file"),
|
||||
},
|
||||
{
|
||||
name: "deleted folder and package",
|
||||
testCase: "deleted folder and package",
|
||||
items: []models.DriveItemable{
|
||||
driveRootItem("root"), // root is always present, but not necessary here
|
||||
delItem("folder", testBaseDrivePath, "root", false, true, false),
|
||||
@ -595,13 +597,13 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
expectedItemCount: 0,
|
||||
expectedFileCount: 0,
|
||||
expectedContainerCount: 1,
|
||||
expectedPrevPaths: map[string]string{
|
||||
expectedMetadataPaths: map[string]string{
|
||||
"root": expectedPath(""),
|
||||
},
|
||||
expectedExcludes: map[string]struct{}{},
|
||||
},
|
||||
{
|
||||
name: "delete folder without previous",
|
||||
testCase: "delete folder without previous",
|
||||
items: []models.DriveItemable{
|
||||
driveRootItem("root"),
|
||||
delItem("folder", testBaseDrivePath, "root", false, true, false),
|
||||
@ -617,13 +619,13 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
expectedItemCount: 0,
|
||||
expectedFileCount: 0,
|
||||
expectedContainerCount: 1,
|
||||
expectedPrevPaths: map[string]string{
|
||||
expectedMetadataPaths: map[string]string{
|
||||
"root": expectedPath(""),
|
||||
},
|
||||
expectedExcludes: map[string]struct{}{},
|
||||
},
|
||||
{
|
||||
name: "delete folder tree move subfolder",
|
||||
testCase: "delete folder tree move subfolder",
|
||||
items: []models.DriveItemable{
|
||||
driveRootItem("root"),
|
||||
delItem("folder", testBaseDrivePath, "root", false, true, false),
|
||||
@ -644,14 +646,14 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
expectedItemCount: 1,
|
||||
expectedFileCount: 0,
|
||||
expectedContainerCount: 2,
|
||||
expectedPrevPaths: map[string]string{
|
||||
expectedMetadataPaths: map[string]string{
|
||||
"root": expectedPath(""),
|
||||
"subfolder": expectedPath("/subfolder"),
|
||||
},
|
||||
expectedExcludes: map[string]struct{}{},
|
||||
},
|
||||
{
|
||||
name: "delete file",
|
||||
testCase: "delete file",
|
||||
items: []models.DriveItemable{
|
||||
driveRootItem("root"),
|
||||
delItem("item", testBaseDrivePath, "root", true, false, false),
|
||||
@ -667,13 +669,13 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
expectedItemCount: 1,
|
||||
expectedFileCount: 1,
|
||||
expectedContainerCount: 1,
|
||||
expectedPrevPaths: map[string]string{
|
||||
expectedMetadataPaths: map[string]string{
|
||||
"root": expectedPath(""),
|
||||
},
|
||||
expectedExcludes: getDelList("item"),
|
||||
},
|
||||
{
|
||||
name: "item before parent errors",
|
||||
testCase: "item before parent errors",
|
||||
items: []models.DriveItemable{
|
||||
driveRootItem("root"),
|
||||
driveItem("file", "file", testBaseDrivePath+"/folder", "folder", true, false, false),
|
||||
@ -688,11 +690,13 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
expectedItemCount: 0,
|
||||
expectedFileCount: 0,
|
||||
expectedContainerCount: 1,
|
||||
expectedPrevPaths: nil,
|
||||
expectedExcludes: map[string]struct{}{},
|
||||
expectedMetadataPaths: map[string]string{
|
||||
"root": expectedPath(""),
|
||||
},
|
||||
expectedExcludes: map[string]struct{}{},
|
||||
},
|
||||
{
|
||||
name: "1 root file, 1 folder, 1 package, 1 good file, 1 malware",
|
||||
testCase: "1 root file, 1 folder, 1 package, 1 good file, 1 malware",
|
||||
items: []models.DriveItemable{
|
||||
driveRootItem("root"),
|
||||
driveItem("fileInRoot", "fileInRoot", testBaseDrivePath, "root", true, false, false),
|
||||
@ -713,7 +717,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
expectedFileCount: 2,
|
||||
expectedContainerCount: 3,
|
||||
expectedSkippedCount: 1,
|
||||
expectedPrevPaths: map[string]string{
|
||||
expectedMetadataPaths: map[string]string{
|
||||
"root": expectedPath(""),
|
||||
"folder": expectedPath("/folder"),
|
||||
"package": expectedPath("/package"),
|
||||
@ -722,23 +726,26 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
suite.Run(test.name, func() {
|
||||
for _, tt := range tests {
|
||||
suite.Run(tt.testCase, func() {
|
||||
t := suite.T()
|
||||
|
||||
ctx, flush := tester.NewContext(t)
|
||||
defer flush()
|
||||
|
||||
var (
|
||||
excludes = map[string]struct{}{}
|
||||
currPrevPaths = map[string]string{}
|
||||
errs = fault.New(true)
|
||||
excludes = map[string]struct{}{}
|
||||
outputFolderMap = map[string]string{}
|
||||
itemCollection = map[string]map[string]string{
|
||||
driveID: {},
|
||||
}
|
||||
errs = fault.New(true)
|
||||
)
|
||||
|
||||
maps.Copy(currPrevPaths, test.inputFolderMap)
|
||||
maps.Copy(outputFolderMap, tt.inputFolderMap)
|
||||
|
||||
c := NewCollections(
|
||||
&itemBackupHandler{api.Drives{}, user, test.scope},
|
||||
&itemBackupHandler{api.Drives{}, user, tt.scope},
|
||||
tenant,
|
||||
user,
|
||||
nil,
|
||||
@ -746,24 +753,25 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
|
||||
c.CollectionMap[driveID] = map[string]*Collection{}
|
||||
|
||||
newPrevPaths, err := c.UpdateCollections(
|
||||
err := c.UpdateCollections(
|
||||
ctx,
|
||||
driveID,
|
||||
"General",
|
||||
test.items,
|
||||
test.inputFolderMap,
|
||||
currPrevPaths,
|
||||
tt.items,
|
||||
tt.inputFolderMap,
|
||||
outputFolderMap,
|
||||
excludes,
|
||||
itemCollection,
|
||||
false,
|
||||
errs)
|
||||
test.expect(t, err, clues.ToCore(err))
|
||||
assert.Equal(t, len(test.expectedCollectionIDs), len(c.CollectionMap[driveID]), "total collections")
|
||||
assert.Equal(t, test.expectedItemCount, c.NumItems, "item count")
|
||||
assert.Equal(t, test.expectedFileCount, c.NumFiles, "file count")
|
||||
assert.Equal(t, test.expectedContainerCount, c.NumContainers, "container count")
|
||||
assert.Equal(t, test.expectedSkippedCount, len(errs.Skipped()), "skipped items")
|
||||
tt.expect(t, err, clues.ToCore(err))
|
||||
assert.Equal(t, len(tt.expectedCollectionIDs), len(c.CollectionMap[driveID]), "total collections")
|
||||
assert.Equal(t, tt.expectedItemCount, c.NumItems, "item count")
|
||||
assert.Equal(t, tt.expectedFileCount, c.NumFiles, "file count")
|
||||
assert.Equal(t, tt.expectedContainerCount, c.NumContainers, "container count")
|
||||
assert.Equal(t, tt.expectedSkippedCount, len(errs.Skipped()), "skipped items")
|
||||
|
||||
for id, sp := range test.expectedCollectionIDs {
|
||||
for id, sp := range tt.expectedCollectionIDs {
|
||||
if !assert.Containsf(t, c.CollectionMap[driveID], id, "missing collection with id %s", id) {
|
||||
// Skip collections we don't find so we don't get an NPE.
|
||||
continue
|
||||
@ -774,8 +782,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
||||
assert.Equalf(t, sp.prevPath, c.CollectionMap[driveID][id].PreviousPath(), "prev path for collection %s", id)
|
||||
}
|
||||
|
||||
assert.Equal(t, test.expectedPrevPaths, newPrevPaths, "metadata paths")
|
||||
assert.Equal(t, test.expectedExcludes, excludes, "exclude list")
|
||||
assert.Equal(t, tt.expectedMetadataPaths, outputFolderMap, "metadata paths")
|
||||
assert.Equal(t, tt.expectedExcludes, excludes, "exclude list")
|
||||
})
|
||||
}
|
||||
}
|
||||
@ -977,7 +985,9 @@ func (suite *OneDriveCollectionsUnitSuite) TestDeserializeMetadata() {
|
||||
{
|
||||
// Bad formats are logged but skip adding entries to the maps and don't
|
||||
// return an error.
|
||||
name: "BadFormat",
|
||||
name: "BadFormat",
|
||||
expectedDeltas: map[string]string{},
|
||||
expectedPaths: map[string]map[string]string{},
|
||||
cols: []func() []graph.MetadataCollectionEntry{
|
||||
func() []graph.MetadataCollectionEntry {
|
||||
return []graph.MetadataCollectionEntry{
|
||||
@ -988,7 +998,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestDeserializeMetadata() {
|
||||
},
|
||||
},
|
||||
canUsePreviousBackup: false,
|
||||
errCheck: assert.Error,
|
||||
errCheck: assert.NoError,
|
||||
},
|
||||
{
|
||||
// Unexpected files are logged and skipped. They don't cause an error to
|
||||
@ -1053,10 +1063,10 @@ func (suite *OneDriveCollectionsUnitSuite) TestDeserializeMetadata() {
|
||||
}
|
||||
},
|
||||
},
|
||||
expectedDeltas: nil,
|
||||
expectedPaths: nil,
|
||||
expectedDeltas: map[string]string{},
|
||||
expectedPaths: map[string]map[string]string{},
|
||||
canUsePreviousBackup: false,
|
||||
errCheck: assert.Error,
|
||||
errCheck: assert.NoError,
|
||||
},
|
||||
{
|
||||
name: "DriveAlreadyFound_Deltas",
|
||||
@ -1083,10 +1093,10 @@ func (suite *OneDriveCollectionsUnitSuite) TestDeserializeMetadata() {
|
||||
}
|
||||
},
|
||||
},
|
||||
expectedDeltas: nil,
|
||||
expectedPaths: nil,
|
||||
expectedDeltas: map[string]string{},
|
||||
expectedPaths: map[string]map[string]string{},
|
||||
canUsePreviousBackup: false,
|
||||
errCheck: assert.Error,
|
||||
errCheck: assert.NoError,
|
||||
},
|
||||
}
|
||||
|
||||
@ -1114,7 +1124,9 @@ func (suite *OneDriveCollectionsUnitSuite) TestDeserializeMetadata() {
|
||||
func(*support.ControllerOperationStatus) {})
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
cols = append(cols, data.NoFetchRestoreCollection{Collection: mc})
|
||||
cols = append(cols, dataMock.NewUnversionedRestoreCollection(
|
||||
t,
|
||||
data.NoFetchRestoreCollection{Collection: mc}))
|
||||
}
|
||||
|
||||
deltas, paths, canUsePreviousBackup, err := deserializeMetadata(ctx, cols)
|
||||
@ -1293,8 +1305,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
|
||||
driveItem("folder", "folder", driveBasePath1, "root", false, true, false),
|
||||
driveItem("file", "file", driveBasePath1+"/folder", "folder", true, false, false),
|
||||
},
|
||||
DeltaLink: &delta,
|
||||
ResetDelta: true,
|
||||
DeltaLink: &delta,
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -1332,8 +1343,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
|
||||
driveItem("file", "file", driveBasePath1+"/folder", "folder", true, false, false),
|
||||
driveItem("file", "file2", driveBasePath1+"/folder", "folder", true, false, false),
|
||||
},
|
||||
DeltaLink: &delta,
|
||||
ResetDelta: true,
|
||||
DeltaLink: &delta,
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -1410,8 +1420,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
|
||||
driveItem("folder", "folder", driveBasePath1, "root", false, true, false),
|
||||
driveItem("file", "file", driveBasePath1+"/folder", "folder", true, false, false),
|
||||
},
|
||||
DeltaLink: &empty, // probably will never happen with graph
|
||||
ResetDelta: true,
|
||||
DeltaLink: &empty, // probably will never happen with graph
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -1448,8 +1457,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
|
||||
driveItem("folder", "folder", driveBasePath1, "root", false, true, false),
|
||||
driveItem("file", "file", driveBasePath1+"/folder", "folder", true, false, false),
|
||||
},
|
||||
NextLink: &next,
|
||||
ResetDelta: true,
|
||||
NextLink: &next,
|
||||
},
|
||||
{
|
||||
Values: []models.DriveItemable{
|
||||
@ -1457,8 +1465,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
|
||||
driveItem("folder", "folder", driveBasePath1, "root", false, true, false),
|
||||
driveItem("file2", "file2", driveBasePath1+"/folder", "folder", true, false, false),
|
||||
},
|
||||
DeltaLink: &delta,
|
||||
ResetDelta: true,
|
||||
DeltaLink: &delta,
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -1500,8 +1507,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
|
||||
driveItem("folder", "folder", driveBasePath1, "root", false, true, false),
|
||||
driveItem("file", "file", driveBasePath1+"/folder", "folder", true, false, false),
|
||||
},
|
||||
DeltaLink: &delta,
|
||||
ResetDelta: true,
|
||||
DeltaLink: &delta,
|
||||
},
|
||||
},
|
||||
driveID2: {
|
||||
@ -1511,8 +1517,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
|
||||
driveItem("folder2", "folder", driveBasePath2, "root2", false, true, false),
|
||||
driveItem("file2", "file", driveBasePath2+"/folder", "folder2", true, false, false),
|
||||
},
|
||||
DeltaLink: &delta2,
|
||||
ResetDelta: true,
|
||||
DeltaLink: &delta2,
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -1564,8 +1569,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
|
||||
driveItem("folder", "folder", driveBasePath1, "root", false, true, false),
|
||||
driveItem("file", "file", driveBasePath1+"/folder", "folder", true, false, false),
|
||||
},
|
||||
DeltaLink: &delta,
|
||||
ResetDelta: true,
|
||||
DeltaLink: &delta,
|
||||
},
|
||||
},
|
||||
driveID2: {
|
||||
@ -1575,8 +1579,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
|
||||
driveItem("folder", "folder", driveBasePath2, "root", false, true, false),
|
||||
driveItem("file2", "file", driveBasePath2+"/folder", "folder", true, false, false),
|
||||
},
|
||||
DeltaLink: &delta2,
|
||||
ResetDelta: true,
|
||||
DeltaLink: &delta2,
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -1634,6 +1637,87 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
|
||||
expectedFolderPaths: nil,
|
||||
expectedDelList: nil,
|
||||
},
|
||||
{
|
||||
name: "OneDrive_OneItemPage_DeltaError",
|
||||
drives: []models.Driveable{drive1},
|
||||
items: map[string][]apiMock.PagerResult[models.DriveItemable]{
|
||||
driveID1: {
|
||||
{
|
||||
Err: getDeltaError(),
|
||||
},
|
||||
{
|
||||
Values: []models.DriveItemable{
|
||||
driveRootItem("root"),
|
||||
driveItem("file", "file", driveBasePath1, "root", true, false, false),
|
||||
},
|
||||
DeltaLink: &delta,
|
||||
},
|
||||
},
|
||||
},
|
||||
canUsePreviousBackup: true,
|
||||
errCheck: assert.NoError,
|
||||
expectedCollections: map[string]map[data.CollectionState][]string{
|
||||
rootFolderPath1: {data.NotMovedState: {"file"}},
|
||||
},
|
||||
expectedDeltaURLs: map[string]string{
|
||||
driveID1: delta,
|
||||
},
|
||||
expectedFolderPaths: map[string]map[string]string{
|
||||
driveID1: {
|
||||
"root": rootFolderPath1,
|
||||
},
|
||||
},
|
||||
expectedDelList: pmMock.NewPrefixMap(map[string]map[string]struct{}{}),
|
||||
doNotMergeItems: map[string]bool{
|
||||
rootFolderPath1: true,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "OneDrive_TwoItemPage_DeltaError",
|
||||
drives: []models.Driveable{drive1},
|
||||
items: map[string][]apiMock.PagerResult[models.DriveItemable]{
|
||||
driveID1: {
|
||||
{
|
||||
Err: getDeltaError(),
|
||||
},
|
||||
{
|
||||
Values: []models.DriveItemable{
|
||||
driveRootItem("root"),
|
||||
driveItem("file", "file", driveBasePath1, "root", true, false, false),
|
||||
},
|
||||
NextLink: &next,
|
||||
},
|
||||
{
|
||||
Values: []models.DriveItemable{
|
||||
driveRootItem("root"),
|
||||
driveItem("folder", "folder", driveBasePath1, "root", false, true, false),
|
||||
driveItem("file2", "file", driveBasePath1+"/folder", "folder", true, false, false),
|
||||
},
|
||||
DeltaLink: &delta,
|
||||
},
|
||||
},
|
||||
},
|
||||
canUsePreviousBackup: true,
|
||||
errCheck: assert.NoError,
|
||||
expectedCollections: map[string]map[data.CollectionState][]string{
|
||||
rootFolderPath1: {data.NotMovedState: {"file"}},
|
||||
expectedPath1("/folder"): {data.NewState: {"folder", "file2"}},
|
||||
},
|
||||
expectedDeltaURLs: map[string]string{
|
||||
driveID1: delta,
|
||||
},
|
||||
expectedFolderPaths: map[string]map[string]string{
|
||||
driveID1: {
|
||||
"root": rootFolderPath1,
|
||||
"folder": folderPath1,
|
||||
},
|
||||
},
|
||||
expectedDelList: pmMock.NewPrefixMap(map[string]map[string]struct{}{}),
|
||||
doNotMergeItems: map[string]bool{
|
||||
rootFolderPath1: true,
|
||||
folderPath1: true,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "OneDrive_TwoItemPage_NoDeltaError",
|
||||
drives: []models.Driveable{drive1},
|
||||
@ -1686,14 +1770,16 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
|
||||
drives: []models.Driveable{drive1},
|
||||
items: map[string][]apiMock.PagerResult[models.DriveItemable]{
|
||||
driveID1: {
|
||||
{
|
||||
Err: getDeltaError(),
|
||||
},
|
||||
{
|
||||
Values: []models.DriveItemable{
|
||||
driveRootItem("root"),
|
||||
driveItem("folder2", "folder2", driveBasePath1, "root", false, true, false),
|
||||
driveItem("file", "file", driveBasePath1+"/folder2", "folder2", true, false, false),
|
||||
},
|
||||
DeltaLink: &delta,
|
||||
ResetDelta: true,
|
||||
DeltaLink: &delta,
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -1731,14 +1817,16 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
|
||||
drives: []models.Driveable{drive1},
|
||||
items: map[string][]apiMock.PagerResult[models.DriveItemable]{
|
||||
driveID1: {
|
||||
{
|
||||
Err: getDeltaError(),
|
||||
},
|
||||
{
|
||||
Values: []models.DriveItemable{
|
||||
driveRootItem("root"),
|
||||
driveItem("folder2", "folder", driveBasePath1, "root", false, true, false),
|
||||
driveItem("file", "file", driveBasePath1+"/folder", "folder2", true, false, false),
|
||||
},
|
||||
DeltaLink: &delta,
|
||||
ResetDelta: true,
|
||||
DeltaLink: &delta,
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -1795,8 +1883,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
|
||||
driveItem("file2", "file2", driveBasePath1+"/folder", "folder", true, false, false),
|
||||
malwareItem("malware2", "malware2", driveBasePath1+"/folder", "folder", true, false, false),
|
||||
},
|
||||
DeltaLink: &delta,
|
||||
ResetDelta: true,
|
||||
DeltaLink: &delta,
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -1826,10 +1913,13 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
|
||||
expectedSkippedCount: 2,
|
||||
},
|
||||
{
|
||||
name: "One Drive Deleted Folder In New Results",
|
||||
name: "One Drive Delta Error Deleted Folder In New Results",
|
||||
drives: []models.Driveable{drive1},
|
||||
items: map[string][]apiMock.PagerResult[models.DriveItemable]{
|
||||
driveID1: {
|
||||
{
|
||||
Err: getDeltaError(),
|
||||
},
|
||||
{
|
||||
Values: []models.DriveItemable{
|
||||
driveRootItem("root"),
|
||||
@ -1846,8 +1936,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
|
||||
delItem("folder2", driveBasePath1, "root", false, true, false),
|
||||
delItem("file2", driveBasePath1, "root", true, false, false),
|
||||
},
|
||||
DeltaLink: &delta2,
|
||||
ResetDelta: true,
|
||||
DeltaLink: &delta2,
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -1882,17 +1971,19 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "One Drive Random Folder Delete",
|
||||
name: "One Drive Delta Error Random Folder Delete",
|
||||
drives: []models.Driveable{drive1},
|
||||
items: map[string][]apiMock.PagerResult[models.DriveItemable]{
|
||||
driveID1: {
|
||||
{
|
||||
Err: getDeltaError(),
|
||||
},
|
||||
{
|
||||
Values: []models.DriveItemable{
|
||||
driveRootItem("root"),
|
||||
delItem("folder", driveBasePath1, "root", false, true, false),
|
||||
},
|
||||
DeltaLink: &delta,
|
||||
ResetDelta: true,
|
||||
DeltaLink: &delta,
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -1923,17 +2014,19 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "One Drive Random Item Delete",
|
||||
name: "One Drive Delta Error Random Item Delete",
|
||||
drives: []models.Driveable{drive1},
|
||||
items: map[string][]apiMock.PagerResult[models.DriveItemable]{
|
||||
driveID1: {
|
||||
{
|
||||
Err: getDeltaError(),
|
||||
},
|
||||
{
|
||||
Values: []models.DriveItemable{
|
||||
driveRootItem("root"),
|
||||
delItem("file", driveBasePath1, "root", true, false, false),
|
||||
},
|
||||
DeltaLink: &delta,
|
||||
ResetDelta: true,
|
||||
DeltaLink: &delta,
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -1979,8 +2072,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
|
||||
delItem("folder", driveBasePath1, "root", false, true, false),
|
||||
delItem("file", driveBasePath1, "root", true, false, false),
|
||||
},
|
||||
DeltaLink: &delta2,
|
||||
ResetDelta: true,
|
||||
DeltaLink: &delta2,
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -2023,8 +2115,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
|
||||
driveRootItem("root"),
|
||||
delItem("file", driveBasePath1, "root", true, false, false),
|
||||
},
|
||||
DeltaLink: &delta,
|
||||
ResetDelta: true,
|
||||
DeltaLink: &delta,
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -2062,8 +2153,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
|
||||
driveRootItem("root"),
|
||||
delItem("folder", driveBasePath1, "root", false, true, false),
|
||||
},
|
||||
DeltaLink: &delta,
|
||||
ResetDelta: true,
|
||||
DeltaLink: &delta,
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -2098,8 +2188,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
|
||||
driveRootItem("root"),
|
||||
delItem("file", driveBasePath1, "root", true, false, false),
|
||||
},
|
||||
DeltaLink: &delta,
|
||||
ResetDelta: true,
|
||||
DeltaLink: &delta,
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -2181,7 +2270,6 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
|
||||
mbh := mock.DefaultOneDriveBH("a-user")
|
||||
mbh.DrivePagerV = mockDrivePager
|
||||
mbh.ItemPagerV = itemPagers
|
||||
mbh.DriveItemEnumeration = mock.PagerResultToEDID(test.items)
|
||||
|
||||
c := NewCollections(
|
||||
mbh,
|
||||
@ -2211,7 +2299,9 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
|
||||
func(*support.ControllerOperationStatus) {})
|
||||
assert.NoError(t, err, "creating metadata collection", clues.ToCore(err))
|
||||
|
||||
prevMetadata := []data.RestoreCollection{data.NoFetchRestoreCollection{Collection: mc}}
|
||||
prevMetadata := []data.RestoreCollection{
|
||||
dataMock.NewUnversionedRestoreCollection(t, data.NoFetchRestoreCollection{Collection: mc}),
|
||||
}
|
||||
errs := fault.New(true)
|
||||
|
||||
delList := prefixmatcher.NewStringSetBuilder()
|
||||
@ -2238,7 +2328,9 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
|
||||
deltas, paths, _, err := deserializeMetadata(
|
||||
ctx,
|
||||
[]data.RestoreCollection{
|
||||
data.NoFetchRestoreCollection{Collection: baseCol},
|
||||
dataMock.NewUnversionedRestoreCollection(
|
||||
t,
|
||||
data.NoFetchRestoreCollection{Collection: baseCol}),
|
||||
})
|
||||
if !assert.NoError(t, err, "deserializing metadata", clues.ToCore(err)) {
|
||||
continue
|
||||
@ -2408,6 +2500,121 @@ func delItem(
|
||||
return item
|
||||
}
|
||||
|
||||
func getDeltaError() error {
|
||||
syncStateNotFound := "SyncStateNotFound"
|
||||
me := odataerrors.NewMainError()
|
||||
me.SetCode(&syncStateNotFound)
|
||||
|
||||
deltaError := odataerrors.NewODataError()
|
||||
deltaError.SetErrorEscaped(me)
|
||||
|
||||
return deltaError
|
||||
}
|
||||
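Illustrative note, not from the commit: getDeltaError builds the ODataError shape Graph returns when a delta token has expired, which is what the tests in this file rely on graph.IsErrInvalidDelta to detect. A minimal sketch of how the enumeration reacts to it (names and flow are illustrative):

// Illustrative only: the tests feed this error through the pager so that
// graph.IsErrInvalidDelta reports true and the enumeration falls back to a
// full (reset) delta query instead of failing the backup.
err := getDeltaError()
if graph.IsErrInvalidDelta(err) {
    // expected path for the "invalid prev delta" style cases above
}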
|
||||
func (suite *OneDriveCollectionsUnitSuite) TestCollectItems() {
|
||||
next := "next"
|
||||
delta := "delta"
|
||||
prevDelta := "prev-delta"
|
||||
|
||||
table := []struct {
|
||||
name string
|
||||
items []apiMock.PagerResult[models.DriveItemable]
|
||||
deltaURL string
|
||||
prevDeltaSuccess bool
|
||||
prevDelta string
|
||||
err error
|
||||
}{
|
||||
{
|
||||
name: "delta on first run",
|
||||
deltaURL: delta,
|
||||
items: []apiMock.PagerResult[models.DriveItemable]{
|
||||
{DeltaLink: &delta},
|
||||
},
|
||||
prevDeltaSuccess: true,
|
||||
prevDelta: prevDelta,
|
||||
},
|
||||
{
|
||||
name: "empty prev delta",
|
||||
deltaURL: delta,
|
||||
items: []apiMock.PagerResult[models.DriveItemable]{
|
||||
{DeltaLink: &delta},
|
||||
},
|
||||
prevDeltaSuccess: false,
|
||||
prevDelta: "",
|
||||
},
|
||||
{
|
||||
name: "next then delta",
|
||||
deltaURL: delta,
|
||||
items: []apiMock.PagerResult[models.DriveItemable]{
|
||||
{NextLink: &next},
|
||||
{DeltaLink: &delta},
|
||||
},
|
||||
prevDeltaSuccess: true,
|
||||
prevDelta: prevDelta,
|
||||
},
|
||||
{
|
||||
name: "invalid prev delta",
|
||||
deltaURL: delta,
|
||||
items: []apiMock.PagerResult[models.DriveItemable]{
|
||||
{Err: getDeltaError()},
|
||||
{DeltaLink: &delta}, // works on retry
|
||||
},
|
||||
prevDelta: prevDelta,
|
||||
prevDeltaSuccess: false,
|
||||
},
|
||||
{
|
||||
name: "fail a normal delta query",
|
||||
items: []apiMock.PagerResult[models.DriveItemable]{
|
||||
{NextLink: &next},
|
||||
{Err: assert.AnError},
|
||||
},
|
||||
prevDelta: prevDelta,
|
||||
prevDeltaSuccess: true,
|
||||
err: assert.AnError,
|
||||
},
|
||||
}
|
||||
for _, test := range table {
|
||||
suite.Run(test.name, func() {
|
||||
t := suite.T()
|
||||
|
||||
ctx, flush := tester.NewContext(t)
|
||||
defer flush()
|
||||
|
||||
itemPager := &apiMock.DeltaPager[models.DriveItemable]{
|
||||
ToReturn: test.items,
|
||||
}
|
||||
|
||||
collectorFunc := func(
|
||||
ctx context.Context,
|
||||
driveID, driveName string,
|
||||
driveItems []models.DriveItemable,
|
||||
oldPaths map[string]string,
|
||||
newPaths map[string]string,
|
||||
excluded map[string]struct{},
|
||||
itemCollection map[string]map[string]string,
|
||||
doNotMergeItems bool,
|
||||
errs *fault.Bus,
|
||||
) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
delta, _, _, err := collectItems(
|
||||
ctx,
|
||||
itemPager,
|
||||
"",
|
||||
"General",
|
||||
collectorFunc,
|
||||
map[string]string{},
|
||||
test.prevDelta,
|
||||
fault.New(true))
|
||||
|
||||
require.ErrorIs(t, err, test.err, "delta fetch err", clues.ToCore(err))
|
||||
require.Equal(t, test.deltaURL, delta.URL, "delta url")
|
||||
require.Equal(t, !test.prevDeltaSuccess, delta.Reset, "delta reset")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func (suite *OneDriveCollectionsUnitSuite) TestAddURLCacheToDriveCollections() {
|
||||
driveID := "test-drive"
|
||||
collCount := 3
|
||||
|
||||
@ -36,7 +36,6 @@ type BackupHandler interface {
|
||||
GetItemPermissioner
|
||||
GetItemer
|
||||
NewDrivePagerer
|
||||
EnumerateDriveItemsDeltaer
|
||||
|
||||
// PathPrefix constructs the service and category specific path prefix for
|
||||
// the given values.
|
||||
@ -51,7 +50,7 @@ type BackupHandler interface {
|
||||
|
||||
// ServiceCat returns the service and category used by this implementation.
|
||||
ServiceCat() (path.ServiceType, path.CategoryType)
|
||||
|
||||
NewItemPager(driveID, link string, fields []string) api.DeltaPager[models.DriveItemable]
|
||||
// FormatDisplayPath creates a human-readable string to represent the
|
||||
// provided path.
|
||||
FormatDisplayPath(driveName string, parentPath *path.Builder) string
|
||||
@ -80,17 +79,6 @@ type GetItemer interface {
|
||||
) (models.DriveItemable, error)
|
||||
}
|
||||
|
||||
type EnumerateDriveItemsDeltaer interface {
|
||||
EnumerateDriveItemsDelta(
|
||||
ctx context.Context,
|
||||
driveID, prevDeltaLink string,
|
||||
) (
|
||||
[]models.DriveItemable,
|
||||
api.DeltaUpdate,
|
||||
error,
|
||||
)
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// restore
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
src/internal/m365/collection/drive/item_collector.go (new file, 142 lines)
@ -0,0 +1,142 @@
package drive

import (
    "context"

    "github.com/microsoftgraph/msgraph-sdk-go/models"
    "golang.org/x/exp/maps"

    "github.com/alcionai/corso/src/internal/m365/graph"
    "github.com/alcionai/corso/src/pkg/fault"
    "github.com/alcionai/corso/src/pkg/logger"
    "github.com/alcionai/corso/src/pkg/services/m365/api"
)

// DeltaUpdate holds the results of a current delta token. It normally
// gets produced when aggregating the addition and removal of items in
// a delta-queryable folder.
// FIXME: This is same as exchange.api.DeltaUpdate
type DeltaUpdate struct {
    // the deltaLink itself
    URL string
    // true if the old delta was marked as invalid
    Reset bool
}

// itemCollector functions collect the items found in a drive
type itemCollector func(
    ctx context.Context,
    driveID, driveName string,
    driveItems []models.DriveItemable,
    oldPaths map[string]string,
    newPaths map[string]string,
    excluded map[string]struct{},
    itemCollections map[string]map[string]string,
    validPrevDelta bool,
    errs *fault.Bus,
) error

// collectItems will enumerate all items in the specified drive and hand them to the
// provided `collector` method
func collectItems(
    ctx context.Context,
    pager api.DeltaPager[models.DriveItemable],
    driveID, driveName string,
    collector itemCollector,
    oldPaths map[string]string,
    prevDelta string,
    errs *fault.Bus,
) (
    DeltaUpdate,
    map[string]string, // newPaths
    map[string]struct{}, // excluded
    error,
) {
    var (
        newDeltaURL      = ""
        newPaths         = map[string]string{}
        excluded         = map[string]struct{}{}
        invalidPrevDelta = len(prevDelta) == 0

        // itemCollection is used to identify which collection a
        // file belongs to. This is useful to delete a file from the
        // collection it was previously in, in case it was moved to a
        // different collection within the same delta query
        // drive ID -> item ID -> item ID
        itemCollection = map[string]map[string]string{
            driveID: {},
        }
    )

    if !invalidPrevDelta {
        maps.Copy(newPaths, oldPaths)
        pager.SetNextLink(prevDelta)
    }

    for {
        // assume delta urls here, which allows single-token consumption
        page, err := pager.GetPage(graph.ConsumeNTokens(ctx, graph.SingleGetOrDeltaLC))

        if graph.IsErrInvalidDelta(err) {
            logger.Ctx(ctx).Infow("Invalid previous delta link", "link", prevDelta)

            invalidPrevDelta = true
            newPaths = map[string]string{}

            pager.Reset(ctx)

            continue
        }

        if err != nil {
            return DeltaUpdate{}, nil, nil, graph.Wrap(ctx, err, "getting page")
        }

        vals := page.GetValue()

        err = collector(
            ctx,
            driveID,
            driveName,
            vals,
            oldPaths,
            newPaths,
            excluded,
            itemCollection,
            invalidPrevDelta,
            errs)
        if err != nil {
            return DeltaUpdate{}, nil, nil, err
        }

        nextLink, deltaLink := api.NextAndDeltaLink(page)

        if len(deltaLink) > 0 {
            newDeltaURL = deltaLink
        }

        // Check if there are more items
        if len(nextLink) == 0 {
            break
        }

        logger.Ctx(ctx).Debugw("Found nextLink", "link", nextLink)
        pager.SetNextLink(nextLink)
    }

    return DeltaUpdate{URL: newDeltaURL, Reset: invalidPrevDelta}, newPaths, excluded, nil
}

// newItem initializes a `models.DriveItemable` that can be used as input to `createItem`
func newItem(name string, folder bool) *models.DriveItem {
    itemToCreate := models.NewDriveItem()
    itemToCreate.SetName(&name)

    if folder {
        itemToCreate.SetFolder(models.NewFolder())
    } else {
        itemToCreate.SetFile(models.NewFile())
    }

    return itemToCreate
}
|
||||
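Not part of the commit: a minimal sketch of how a caller wires a collector callback into collectItems, assuming it sits inside package drive where the imports above are already available. examplePager and exampleCollector are placeholder names standing in for any api.DeltaPager[models.DriveItemable] and itemCollector.

// Illustrative only: a no-op collector matching the itemCollector signature.
exampleCollector := func(
    ctx context.Context,
    driveID, driveName string,
    driveItems []models.DriveItemable,
    oldPaths, newPaths map[string]string,
    excluded map[string]struct{},
    itemCollections map[string]map[string]string,
    validPrevDelta bool,
    errs *fault.Bus,
) error {
    // inspect or bucket driveItems here; collectItems handles paging and
    // delta-token resets before each invocation
    return nil
}

du, newPaths, excluded, err := collectItems(
    ctx,
    examplePager,
    "drive-id",
    "drive-name",
    exampleCollector,
    map[string]string{}, // oldPaths carried over from the previous backup
    "",                  // empty prevDelta forces a full enumeration
    fault.New(true))
if err != nil {
    // a non-retryable paging or collector error; invalid-delta errors are
    // handled inside collectItems by resetting the pager
}

// du.URL is the delta token to persist for the next backup; du.Reset reports
// whether the previous token was invalidated mid-run.
_ = du
_ = newPaths
_ = excluded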
@ -87,6 +87,13 @@ func (h itemBackupHandler) NewDrivePager(
|
||||
return h.ac.NewUserDrivePager(resourceOwner, fields)
|
||||
}
|
||||
|
||||
func (h itemBackupHandler) NewItemPager(
|
||||
driveID, link string,
|
||||
fields []string,
|
||||
) api.DeltaPager[models.DriveItemable] {
|
||||
return h.ac.NewDriveItemDeltaPager(driveID, link, fields)
|
||||
}
|
||||
|
||||
func (h itemBackupHandler) AugmentItemInfo(
|
||||
dii details.ItemInfo,
|
||||
item models.DriveItemable,
|
||||
@ -132,13 +139,6 @@ func (h itemBackupHandler) IncludesDir(dir string) bool {
|
||||
return h.scope.Matches(selectors.OneDriveFolder, dir)
|
||||
}
|
||||
|
||||
func (h itemBackupHandler) EnumerateDriveItemsDelta(
|
||||
ctx context.Context,
|
||||
driveID, prevDeltaLink string,
|
||||
) ([]models.DriveItemable, api.DeltaUpdate, error) {
|
||||
return h.ac.EnumerateDriveItemsDelta(ctx, driveID, prevDeltaLink)
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Restore
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
@ -20,6 +20,8 @@ import (
|
||||
"github.com/alcionai/corso/src/internal/tester/tconfig"
|
||||
"github.com/alcionai/corso/src/pkg/control"
|
||||
"github.com/alcionai/corso/src/pkg/control/testdata"
|
||||
"github.com/alcionai/corso/src/pkg/fault"
|
||||
"github.com/alcionai/corso/src/pkg/selectors"
|
||||
"github.com/alcionai/corso/src/pkg/services/m365/api"
|
||||
)
|
||||
|
||||
@ -58,6 +60,83 @@ func (suite *ItemIntegrationSuite) SetupSuite() {
|
||||
suite.userDriveID = ptr.Val(odDrives[0].GetId())
|
||||
}
|
||||
|
||||
// TestItemReader is an integration test that makes a few assumptions
// about the test environment:
// 1) It assumes the test user has a drive
// 2) It assumes the drive has a file it can use to test `driveItemReader`
// The test checks both of these below.
|
||||
func (suite *ItemIntegrationSuite) TestItemReader_oneDrive() {
|
||||
t := suite.T()
|
||||
|
||||
ctx, flush := tester.NewContext(t)
|
||||
defer flush()
|
||||
|
||||
var driveItem models.DriveItemable
|
||||
// This item collector tries to find "a" drive item that is a non-empty
|
||||
// file to test the reader function
|
||||
itemCollector := func(
|
||||
_ context.Context,
|
||||
_, _ string,
|
||||
items []models.DriveItemable,
|
||||
_ map[string]string,
|
||||
_ map[string]string,
|
||||
_ map[string]struct{},
|
||||
_ map[string]map[string]string,
|
||||
_ bool,
|
||||
_ *fault.Bus,
|
||||
) error {
|
||||
if driveItem != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, item := range items {
|
||||
if item.GetFile() != nil && ptr.Val(item.GetSize()) > 0 {
|
||||
driveItem = item
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
ip := suite.service.ac.
|
||||
Drives().
|
||||
NewDriveItemDeltaPager(suite.userDriveID, "", api.DriveItemSelectDefault())
|
||||
|
||||
_, _, _, err := collectItems(
|
||||
ctx,
|
||||
ip,
|
||||
suite.userDriveID,
|
||||
"General",
|
||||
itemCollector,
|
||||
map[string]string{},
|
||||
"",
|
||||
fault.New(true))
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
// Test Requirement 2: Need a file
|
||||
require.NotEmpty(
|
||||
t,
|
||||
driveItem,
|
||||
"no file item found for user %s drive %s",
|
||||
suite.user,
|
||||
suite.userDriveID)
|
||||
|
||||
bh := itemBackupHandler{
|
||||
suite.service.ac.Drives(),
|
||||
suite.user,
|
||||
(&selectors.OneDriveBackup{}).Folders(selectors.Any())[0],
|
||||
}
|
||||
|
||||
// Read data for the file
|
||||
itemData, err := downloadItem(ctx, bh, driveItem)
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
size, err := io.Copy(io.Discard, itemData)
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
require.NotZero(t, size)
|
||||
}
|
||||
|
||||
// TestItemWriter is an integration test for uploading data to OneDrive
|
||||
// It creates a new folder with a new item and writes data to it
|
||||
func (suite *ItemIntegrationSuite) TestItemWriter() {
|
||||
@ -92,7 +171,7 @@ func (suite *ItemIntegrationSuite) TestItemWriter() {
|
||||
ctx,
|
||||
test.driveID,
|
||||
ptr.Val(root.GetId()),
|
||||
api.NewDriveItem(newFolderName, true),
|
||||
newItem(newFolderName, true),
|
||||
control.Copy)
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
require.NotNil(t, newFolder.GetId())
|
||||
@ -104,7 +183,7 @@ func (suite *ItemIntegrationSuite) TestItemWriter() {
|
||||
ctx,
|
||||
test.driveID,
|
||||
ptr.Val(newFolder.GetId()),
|
||||
api.NewDriveItem(newItemName, false),
|
||||
newItem(newItemName, false),
|
||||
control.Copy)
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
require.NotNil(t, newItem.GetId())
|
||||
@ -238,7 +317,7 @@ func (suite *ItemUnitTestSuite) TestDownloadItem() {
|
||||
{
|
||||
name: "success",
|
||||
itemFunc: func() models.DriveItemable {
|
||||
di := api.NewDriveItem("test", false)
|
||||
di := newItem("test", false)
|
||||
di.SetAdditionalData(map[string]any{
|
||||
"@microsoft.graph.downloadUrl": url,
|
||||
})
|
||||
@ -257,7 +336,7 @@ func (suite *ItemUnitTestSuite) TestDownloadItem() {
|
||||
{
|
||||
name: "success, content url set instead of download url",
|
||||
itemFunc: func() models.DriveItemable {
|
||||
di := api.NewDriveItem("test", false)
|
||||
di := newItem("test", false)
|
||||
di.SetAdditionalData(map[string]any{
|
||||
"@content.downloadUrl": url,
|
||||
})
|
||||
@ -276,7 +355,7 @@ func (suite *ItemUnitTestSuite) TestDownloadItem() {
|
||||
{
|
||||
name: "api getter returns error",
|
||||
itemFunc: func() models.DriveItemable {
|
||||
di := api.NewDriveItem("test", false)
|
||||
di := newItem("test", false)
|
||||
di.SetAdditionalData(map[string]any{
|
||||
"@microsoft.graph.downloadUrl": url,
|
||||
})
|
||||
@ -292,7 +371,7 @@ func (suite *ItemUnitTestSuite) TestDownloadItem() {
|
||||
{
|
||||
name: "download url is empty",
|
||||
itemFunc: func() models.DriveItemable {
|
||||
di := api.NewDriveItem("test", false)
|
||||
di := newItem("test", false)
|
||||
return di
|
||||
},
|
||||
GetFunc: func(ctx context.Context, url string) (*http.Response, error) {
|
||||
@ -307,7 +386,7 @@ func (suite *ItemUnitTestSuite) TestDownloadItem() {
|
||||
{
|
||||
name: "malware",
|
||||
itemFunc: func() models.DriveItemable {
|
||||
di := api.NewDriveItem("test", false)
|
||||
di := newItem("test", false)
|
||||
di.SetAdditionalData(map[string]any{
|
||||
"@microsoft.graph.downloadUrl": url,
|
||||
})
|
||||
@ -329,7 +408,7 @@ func (suite *ItemUnitTestSuite) TestDownloadItem() {
|
||||
{
|
||||
name: "non-2xx http response",
|
||||
itemFunc: func() models.DriveItemable {
|
||||
di := api.NewDriveItem("test", false)
|
||||
di := newItem("test", false)
|
||||
di.SetAdditionalData(map[string]any{
|
||||
"@microsoft.graph.downloadUrl": url,
|
||||
})
|
||||
@ -378,7 +457,7 @@ func (suite *ItemUnitTestSuite) TestDownloadItem_ConnectionResetErrorOnFirstRead
|
||||
url = "https://example.com"
|
||||
|
||||
itemFunc = func() models.DriveItemable {
|
||||
di := api.NewDriveItem("test", false)
|
||||
di := newItem("test", false)
|
||||
di.SetAdditionalData(map[string]any{
|
||||
"@microsoft.graph.downloadUrl": url,
|
||||
})
|
||||
|
||||
@ -92,6 +92,13 @@ func (h libraryBackupHandler) NewDrivePager(
|
||||
return h.ac.NewSiteDrivePager(resourceOwner, fields)
|
||||
}
|
||||
|
||||
func (h libraryBackupHandler) NewItemPager(
|
||||
driveID, link string,
|
||||
fields []string,
|
||||
) api.DeltaPager[models.DriveItemable] {
|
||||
return h.ac.NewDriveItemDeltaPager(driveID, link, fields)
|
||||
}
|
||||
|
||||
func (h libraryBackupHandler) AugmentItemInfo(
|
||||
dii details.ItemInfo,
|
||||
item models.DriveItemable,
|
||||
@ -170,13 +177,6 @@ func (h libraryBackupHandler) IncludesDir(dir string) bool {
|
||||
return h.scope.Matches(selectors.SharePointLibraryFolder, dir)
|
||||
}
|
||||
|
||||
func (h libraryBackupHandler) EnumerateDriveItemsDelta(
|
||||
ctx context.Context,
|
||||
driveID, prevDeltaLink string,
|
||||
) ([]models.DriveItemable, api.DeltaUpdate, error) {
|
||||
return h.ac.EnumerateDriveItemsDelta(ctx, driveID, prevDeltaLink)
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Restore
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
@ -671,7 +671,7 @@ func createFolder(
|
||||
ctx,
|
||||
driveID,
|
||||
parentFolderID,
|
||||
api.NewDriveItem(folderName, true),
|
||||
newItem(folderName, true),
|
||||
control.Replace)
|
||||
|
||||
// ErrItemAlreadyExistsConflict can only occur for folders if the
|
||||
@ -692,7 +692,7 @@ func createFolder(
|
||||
ctx,
|
||||
driveID,
|
||||
parentFolderID,
|
||||
api.NewDriveItem(folderName, true),
|
||||
newItem(folderName, true),
|
||||
control.Copy)
|
||||
if err != nil {
|
||||
return nil, clues.Wrap(err, "creating folder")
|
||||
@ -733,7 +733,7 @@ func restoreFile(
|
||||
}
|
||||
|
||||
var (
|
||||
item = api.NewDriveItem(name, false)
|
||||
item = newItem(name, false)
|
||||
collisionKey = api.DriveItemCollisionKey(item)
|
||||
collision api.DriveItemIDType
|
||||
shouldDeleteOriginal bool
|
||||
|
||||
@ -12,6 +12,7 @@ import (
|
||||
"github.com/alcionai/corso/src/internal/common/str"
|
||||
"github.com/alcionai/corso/src/pkg/fault"
|
||||
"github.com/alcionai/corso/src/pkg/logger"
|
||||
"github.com/alcionai/corso/src/pkg/services/m365/api"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -46,7 +47,7 @@ type urlCache struct {
|
||||
refreshMu sync.Mutex
|
||||
deltaQueryCount int
|
||||
|
||||
edid EnumerateDriveItemsDeltaer
|
||||
itemPager api.DeltaPager[models.DriveItemable]
|
||||
|
||||
errs *fault.Bus
|
||||
}
|
||||
@ -55,10 +56,13 @@ type urlCache struct {
|
||||
func newURLCache(
|
||||
driveID, prevDelta string,
|
||||
refreshInterval time.Duration,
|
||||
edid EnumerateDriveItemsDeltaer,
|
||||
itemPager api.DeltaPager[models.DriveItemable],
|
||||
errs *fault.Bus,
|
||||
) (*urlCache, error) {
|
||||
err := validateCacheParams(driveID, refreshInterval, edid)
|
||||
err := validateCacheParams(
|
||||
driveID,
|
||||
refreshInterval,
|
||||
itemPager)
|
||||
if err != nil {
|
||||
return nil, clues.Wrap(err, "cache params")
|
||||
}
|
||||
@ -67,9 +71,9 @@ func newURLCache(
|
||||
idToProps: make(map[string]itemProps),
|
||||
lastRefreshTime: time.Time{},
|
||||
driveID: driveID,
|
||||
edid: edid,
|
||||
prevDelta: prevDelta,
|
||||
refreshInterval: refreshInterval,
|
||||
itemPager: itemPager,
|
||||
errs: errs,
|
||||
},
|
||||
nil
|
||||
@ -79,7 +83,7 @@ func newURLCache(
|
||||
func validateCacheParams(
|
||||
driveID string,
|
||||
refreshInterval time.Duration,
|
||||
edid EnumerateDriveItemsDeltaer,
|
||||
itemPager api.DeltaPager[models.DriveItemable],
|
||||
) error {
|
||||
if len(driveID) == 0 {
|
||||
return clues.New("drive id is empty")
|
||||
@ -89,8 +93,8 @@ func validateCacheParams(
|
||||
return clues.New("invalid refresh interval")
|
||||
}
|
||||
|
||||
if edid == nil {
|
||||
return clues.New("nil item enumerator")
|
||||
if itemPager == nil {
|
||||
return clues.New("nil item pager")
|
||||
}
|
||||
|
||||
return nil
|
||||
@ -156,23 +160,44 @@ func (uc *urlCache) refreshCache(
|
||||
// Issue a delta query to graph
|
||||
logger.Ctx(ctx).Info("refreshing url cache")
|
||||
|
||||
items, du, err := uc.edid.EnumerateDriveItemsDelta(ctx, uc.driveID, uc.prevDelta)
|
||||
err := uc.deltaQuery(ctx)
|
||||
if err != nil {
|
||||
// clear cache
|
||||
uc.idToProps = make(map[string]itemProps)
|
||||
return clues.Stack(err)
|
||||
}
|
||||
|
||||
uc.deltaQueryCount++
|
||||
|
||||
if err := uc.updateCache(ctx, items, uc.errs); err != nil {
|
||||
return clues.Stack(err)
|
||||
return err
|
||||
}
|
||||
|
||||
logger.Ctx(ctx).Info("url cache refreshed")
|
||||
|
||||
// Update last refresh time
|
||||
uc.lastRefreshTime = time.Now()
|
||||
uc.prevDelta = du.URL
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// deltaQuery performs a delta query on the drive and updates the cache
|
||||
func (uc *urlCache) deltaQuery(
|
||||
ctx context.Context,
|
||||
) error {
|
||||
logger.Ctx(ctx).Debug("starting delta query")
|
||||
// Reset item pager to remove any previous state
|
||||
uc.itemPager.Reset(ctx)
|
||||
|
||||
_, _, _, err := collectItems(
|
||||
ctx,
|
||||
uc.itemPager,
|
||||
uc.driveID,
|
||||
"",
|
||||
uc.updateCache,
|
||||
map[string]string{},
|
||||
uc.prevDelta,
|
||||
uc.errs)
|
||||
if err != nil {
|
||||
return clues.Wrap(err, "delta query")
|
||||
}
|
||||
|
||||
uc.deltaQueryCount++
|
||||
|
||||
return nil
|
||||
}
|
||||
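A hypothetical helper, not in the commit, showing how the pieces above fit together: needsRefresh, refreshMu, deltaQuery, and lastRefreshTime are real urlCache members touched by this change, while refreshIfStale is an illustrative name for the guard a read path would use.

// Illustrative sketch only; assumes the surrounding url_cache.go imports.
func (uc *urlCache) refreshIfStale(ctx context.Context) error {
    if !uc.needsRefresh() {
        return nil
    }

    uc.refreshMu.Lock()
    defer uc.refreshMu.Unlock()

    // re-check under the lock in case another goroutine refreshed first
    if !uc.needsRefresh() {
        return nil
    }

    if err := uc.deltaQuery(ctx); err != nil {
        return clues.Stack(err)
    }

    uc.lastRefreshTime = time.Now()

    return nil
}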
@ -199,7 +224,13 @@ func (uc *urlCache) readCache(
|
||||
// It assumes that cacheMu is held by caller in write mode
|
||||
func (uc *urlCache) updateCache(
|
||||
ctx context.Context,
|
||||
_, _ string,
|
||||
items []models.DriveItemable,
|
||||
_ map[string]string,
|
||||
_ map[string]string,
|
||||
_ map[string]struct{},
|
||||
_ map[string]map[string]string,
|
||||
_ bool,
|
||||
errs *fault.Bus,
|
||||
) error {
|
||||
el := errs.Local()
|
||||
|
||||
@ -1,6 +1,7 @@
|
||||
package drive
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"io"
|
||||
"math/rand"
|
||||
@ -17,19 +18,15 @@ import (
|
||||
"github.com/alcionai/corso/src/internal/common/dttm"
|
||||
"github.com/alcionai/corso/src/internal/common/ptr"
|
||||
"github.com/alcionai/corso/src/internal/m365/graph"
|
||||
"github.com/alcionai/corso/src/internal/m365/service/onedrive/mock"
|
||||
"github.com/alcionai/corso/src/internal/tester"
|
||||
"github.com/alcionai/corso/src/internal/tester/tconfig"
|
||||
"github.com/alcionai/corso/src/pkg/control"
|
||||
"github.com/alcionai/corso/src/pkg/control/testdata"
|
||||
"github.com/alcionai/corso/src/pkg/fault"
|
||||
"github.com/alcionai/corso/src/pkg/services/m365/api"
|
||||
apiMock "github.com/alcionai/corso/src/pkg/services/m365/api/mock"
|
||||
)
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// integration
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
type URLCacheIntegrationSuite struct {
|
||||
tester.Suite
|
||||
ac api.Client
|
||||
@ -71,10 +68,11 @@ func (suite *URLCacheIntegrationSuite) SetupSuite() {
|
||||
// url cache
|
||||
func (suite *URLCacheIntegrationSuite) TestURLCacheBasic() {
|
||||
var (
|
||||
t = suite.T()
|
||||
ac = suite.ac.Drives()
|
||||
driveID = suite.driveID
|
||||
newFolderName = testdata.DefaultRestoreConfig("folder").Location
|
||||
t = suite.T()
|
||||
ac = suite.ac.Drives()
|
||||
driveID = suite.driveID
|
||||
newFolderName = testdata.DefaultRestoreConfig("folder").Location
|
||||
driveItemPager = suite.ac.Drives().NewDriveItemDeltaPager(driveID, "", api.DriveItemSelectDefault())
|
||||
)
|
||||
|
||||
ctx, flush := tester.NewContext(t)
|
||||
@ -84,11 +82,11 @@ func (suite *URLCacheIntegrationSuite) TestURLCacheBasic() {
|
||||
root, err := ac.GetRootFolder(ctx, driveID)
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
newFolder, err := ac.PostItemInContainer(
|
||||
newFolder, err := ac.Drives().PostItemInContainer(
|
||||
ctx,
|
||||
driveID,
|
||||
ptr.Val(root.GetId()),
|
||||
api.NewDriveItem(newFolderName, true),
|
||||
newItem(newFolderName, true),
|
||||
control.Copy)
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
@ -96,10 +94,33 @@ func (suite *URLCacheIntegrationSuite) TestURLCacheBasic() {
|
||||
|
||||
nfid := ptr.Val(newFolder.GetId())
|
||||
|
||||
collectorFunc := func(
|
||||
context.Context,
|
||||
string,
|
||||
string,
|
||||
[]models.DriveItemable,
|
||||
map[string]string,
|
||||
map[string]string,
|
||||
map[string]struct{},
|
||||
map[string]map[string]string,
|
||||
bool,
|
||||
*fault.Bus,
|
||||
) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get the previous delta to feed into url cache
|
||||
_, du, err := ac.EnumerateDriveItemsDelta(ctx, suite.driveID, "")
|
||||
prevDelta, _, _, err := collectItems(
|
||||
ctx,
|
||||
suite.ac.Drives().NewDriveItemDeltaPager(driveID, "", api.DriveItemSelectURLCache()),
|
||||
suite.driveID,
|
||||
"drive-name",
|
||||
collectorFunc,
|
||||
map[string]string{},
|
||||
"",
|
||||
fault.New(true))
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
require.NotEmpty(t, du.URL)
|
||||
require.NotNil(t, prevDelta.URL)
|
||||
|
||||
// Create a bunch of files in the new folder
|
||||
var items []models.DriveItemable
|
||||
@ -107,11 +128,11 @@ func (suite *URLCacheIntegrationSuite) TestURLCacheBasic() {
|
||||
for i := 0; i < 5; i++ {
|
||||
newItemName := "test_url_cache_basic_" + dttm.FormatNow(dttm.SafeForTesting)
|
||||
|
||||
item, err := ac.PostItemInContainer(
|
||||
item, err := ac.Drives().PostItemInContainer(
|
||||
ctx,
|
||||
driveID,
|
||||
nfid,
|
||||
api.NewDriveItem(newItemName, false),
|
||||
newItem(newItemName, false),
|
||||
control.Copy)
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
@ -121,9 +142,9 @@ func (suite *URLCacheIntegrationSuite) TestURLCacheBasic() {
|
||||
// Create a new URL cache with a long TTL
|
||||
uc, err := newURLCache(
|
||||
suite.driveID,
|
||||
du.URL,
|
||||
prevDelta.URL,
|
||||
1*time.Hour,
|
||||
suite.ac.Drives(),
|
||||
driveItemPager,
|
||||
fault.New(true))
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
@ -174,10 +195,6 @@ func (suite *URLCacheIntegrationSuite) TestURLCacheBasic() {
|
||||
require.Equal(t, 1, uc.deltaQueryCount)
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// unit
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
type URLCacheUnitSuite struct {
|
||||
tester.Suite
|
||||
}
|
||||
@ -188,20 +205,27 @@ func TestURLCacheUnitSuite(t *testing.T) {
|
||||
|
||||
func (suite *URLCacheUnitSuite) TestGetItemProperties() {
|
||||
deltaString := "delta"
|
||||
next := "next"
|
||||
driveID := "drive1"
|
||||
|
||||
table := []struct {
|
||||
name string
|
||||
pagerItems map[string][]models.DriveItemable
|
||||
pagerErr map[string]error
|
||||
pagerResult map[string][]apiMock.PagerResult[models.DriveItemable]
|
||||
expectedItemProps map[string]itemProps
|
||||
expectedErr require.ErrorAssertionFunc
|
||||
cacheAssert func(*urlCache, time.Time)
|
||||
}{
|
||||
{
|
||||
name: "single item in cache",
|
||||
pagerItems: map[string][]models.DriveItemable{
|
||||
driveID: {fileItem("1", "file1", "root", "root", "https://dummy1.com", false)},
|
||||
pagerResult: map[string][]apiMock.PagerResult[models.DriveItemable]{
|
||||
driveID: {
|
||||
{
|
||||
Values: []models.DriveItemable{
|
||||
fileItem("1", "file1", "root", "root", "https://dummy1.com", false),
|
||||
},
|
||||
DeltaLink: &deltaString,
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedItemProps: map[string]itemProps{
|
||||
"1": {
|
||||
@ -218,13 +242,18 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() {
|
||||
},
|
||||
{
|
||||
name: "multiple items in cache",
|
||||
pagerItems: map[string][]models.DriveItemable{
|
||||
pagerResult: map[string][]apiMock.PagerResult[models.DriveItemable]{
|
||||
driveID: {
|
||||
fileItem("1", "file1", "root", "root", "https://dummy1.com", false),
|
||||
fileItem("2", "file2", "root", "root", "https://dummy2.com", false),
|
||||
fileItem("3", "file3", "root", "root", "https://dummy3.com", false),
|
||||
fileItem("4", "file4", "root", "root", "https://dummy4.com", false),
|
||||
fileItem("5", "file5", "root", "root", "https://dummy5.com", false),
|
||||
{
|
||||
Values: []models.DriveItemable{
|
||||
fileItem("1", "file1", "root", "root", "https://dummy1.com", false),
|
||||
fileItem("2", "file2", "root", "root", "https://dummy2.com", false),
|
||||
fileItem("3", "file3", "root", "root", "https://dummy3.com", false),
|
||||
fileItem("4", "file4", "root", "root", "https://dummy4.com", false),
|
||||
fileItem("5", "file5", "root", "root", "https://dummy5.com", false),
|
||||
},
|
||||
DeltaLink: &deltaString,
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedItemProps: map[string]itemProps{
|
||||
@ -258,13 +287,18 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() {
|
||||
},
|
||||
{
|
||||
name: "duplicate items with potentially new urls",
|
||||
pagerItems: map[string][]models.DriveItemable{
|
||||
pagerResult: map[string][]apiMock.PagerResult[models.DriveItemable]{
|
||||
driveID: {
|
||||
fileItem("1", "file1", "root", "root", "https://dummy1.com", false),
|
||||
fileItem("2", "file2", "root", "root", "https://dummy2.com", false),
|
||||
fileItem("3", "file3", "root", "root", "https://dummy3.com", false),
|
||||
fileItem("1", "file1", "root", "root", "https://test1.com", false),
|
||||
fileItem("2", "file2", "root", "root", "https://test2.com", false),
|
||||
{
|
||||
Values: []models.DriveItemable{
|
||||
fileItem("1", "file1", "root", "root", "https://dummy1.com", false),
|
||||
fileItem("2", "file2", "root", "root", "https://dummy2.com", false),
|
||||
fileItem("3", "file3", "root", "root", "https://dummy3.com", false),
|
||||
fileItem("1", "file1", "root", "root", "https://test1.com", false),
|
||||
fileItem("2", "file2", "root", "root", "https://test2.com", false),
|
||||
},
|
||||
DeltaLink: &deltaString,
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedItemProps: map[string]itemProps{
|
||||
@ -290,11 +324,16 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() {
|
||||
},
|
||||
{
|
||||
name: "deleted items",
|
||||
pagerItems: map[string][]models.DriveItemable{
|
||||
pagerResult: map[string][]apiMock.PagerResult[models.DriveItemable]{
|
||||
driveID: {
|
||||
fileItem("1", "file1", "root", "root", "https://dummy1.com", false),
|
||||
fileItem("2", "file2", "root", "root", "https://dummy2.com", false),
|
||||
fileItem("1", "file1", "root", "root", "https://dummy1.com", true),
|
||||
{
|
||||
Values: []models.DriveItemable{
|
||||
fileItem("1", "file1", "root", "root", "https://dummy1.com", false),
|
||||
fileItem("2", "file2", "root", "root", "https://dummy2.com", false),
|
||||
fileItem("1", "file1", "root", "root", "https://dummy1.com", true),
|
||||
},
|
||||
DeltaLink: &deltaString,
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedItemProps: map[string]itemProps{
|
||||
@ -316,8 +355,15 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() {
|
||||
},
|
||||
{
|
||||
name: "item not found in cache",
|
||||
pagerItems: map[string][]models.DriveItemable{
|
||||
driveID: {fileItem("1", "file1", "root", "root", "https://dummy1.com", false)},
|
||||
pagerResult: map[string][]apiMock.PagerResult[models.DriveItemable]{
|
||||
driveID: {
|
||||
{
|
||||
Values: []models.DriveItemable{
|
||||
fileItem("1", "file1", "root", "root", "https://dummy1.com", false),
|
||||
},
|
||||
DeltaLink: &deltaString,
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedItemProps: map[string]itemProps{
|
||||
"2": {},
|
||||
@ -330,10 +376,23 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() {
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "delta query error",
|
||||
pagerItems: map[string][]models.DriveItemable{},
|
||||
pagerErr: map[string]error{
|
||||
driveID: errors.New("delta query error"),
|
||||
name: "multi-page delta query error",
|
||||
pagerResult: map[string][]apiMock.PagerResult[models.DriveItemable]{
|
||||
driveID: {
|
||||
{
|
||||
Values: []models.DriveItemable{
|
||||
fileItem("1", "file1", "root", "root", "https://dummy1.com", false),
|
||||
},
|
||||
NextLink: &next,
|
||||
},
|
||||
{
|
||||
Values: []models.DriveItemable{
|
||||
fileItem("2", "file2", "root", "root", "https://dummy2.com", false),
|
||||
},
|
||||
DeltaLink: &deltaString,
|
||||
Err: errors.New("delta query error"),
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedItemProps: map[string]itemProps{
|
||||
"1": {},
|
||||
@ -349,10 +408,15 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() {
|
||||
|
||||
{
|
||||
name: "folder item",
|
||||
pagerItems: map[string][]models.DriveItemable{
|
||||
pagerResult: map[string][]apiMock.PagerResult[models.DriveItemable]{
|
||||
driveID: {
|
||||
fileItem("1", "file1", "root", "root", "https://dummy1.com", false),
|
||||
driveItem("2", "folder2", "root", "root", false, true, false),
|
||||
{
|
||||
Values: []models.DriveItemable{
|
||||
fileItem("1", "file1", "root", "root", "https://dummy1.com", false),
|
||||
driveItem("2", "folder2", "root", "root", false, true, false),
|
||||
},
|
||||
DeltaLink: &deltaString,
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedItemProps: map[string]itemProps{
|
||||
@ -373,17 +437,15 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() {
|
||||
ctx, flush := tester.NewContext(t)
|
||||
defer flush()
|
||||
|
||||
medi := mock.EnumeratesDriveItemsDelta{
|
||||
Items: test.pagerItems,
|
||||
Err: test.pagerErr,
|
||||
DeltaUpdate: map[string]api.DeltaUpdate{driveID: {URL: deltaString}},
|
||||
itemPager := &apiMock.DeltaPager[models.DriveItemable]{
|
||||
ToReturn: test.pagerResult[driveID],
|
||||
}
|
||||
|
||||
cache, err := newURLCache(
|
||||
driveID,
|
||||
"",
|
||||
1*time.Hour,
|
||||
&medi,
|
||||
itemPager,
|
||||
fault.New(true))
|
||||
|
||||
require.NoError(suite.T(), err, clues.ToCore(err))
|
||||
@ -418,17 +480,15 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() {
|
||||
|
||||
// Test needsRefresh
|
||||
func (suite *URLCacheUnitSuite) TestNeedsRefresh() {
|
||||
var (
|
||||
t = suite.T()
|
||||
driveID = "drive1"
|
||||
refreshInterval = 1 * time.Second
|
||||
)
|
||||
driveID := "drive1"
|
||||
t := suite.T()
|
||||
refreshInterval := 1 * time.Second
|
||||
|
||||
cache, err := newURLCache(
|
||||
driveID,
|
||||
"",
|
||||
refreshInterval,
|
||||
&mock.EnumeratesDriveItemsDelta{},
|
||||
&apiMock.DeltaPager[models.DriveItemable]{},
|
||||
fault.New(true))
|
||||
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
@ -450,12 +510,14 @@ func (suite *URLCacheUnitSuite) TestNeedsRefresh() {
|
||||
require.False(t, cache.needsRefresh())
|
||||
}
|
||||
|
||||
// Test newURLCache
|
||||
func (suite *URLCacheUnitSuite) TestNewURLCache() {
|
||||
// table driven tests
|
||||
table := []struct {
|
||||
name string
|
||||
driveID string
|
||||
refreshInt time.Duration
|
||||
itemPager EnumerateDriveItemsDeltaer
|
||||
itemPager api.DeltaPager[models.DriveItemable]
|
||||
errors *fault.Bus
|
||||
expectedErr require.ErrorAssertionFunc
|
||||
}{
|
||||
@ -463,7 +525,7 @@ func (suite *URLCacheUnitSuite) TestNewURLCache() {
|
||||
name: "invalid driveID",
|
||||
driveID: "",
|
||||
refreshInt: 1 * time.Hour,
|
||||
itemPager: &mock.EnumeratesDriveItemsDelta{},
|
||||
itemPager: &apiMock.DeltaPager[models.DriveItemable]{},
|
||||
errors: fault.New(true),
|
||||
expectedErr: require.Error,
|
||||
},
|
||||
@ -471,12 +533,12 @@ func (suite *URLCacheUnitSuite) TestNewURLCache() {
|
||||
name: "invalid refresh interval",
|
||||
driveID: "drive1",
|
||||
refreshInt: 100 * time.Millisecond,
|
||||
itemPager: &mock.EnumeratesDriveItemsDelta{},
|
||||
itemPager: &apiMock.DeltaPager[models.DriveItemable]{},
|
||||
errors: fault.New(true),
|
||||
expectedErr: require.Error,
|
||||
},
|
||||
{
|
||||
name: "invalid item enumerator",
|
||||
name: "invalid itemPager",
|
||||
driveID: "drive1",
|
||||
refreshInt: 1 * time.Hour,
|
||||
itemPager: nil,
|
||||
@ -487,7 +549,7 @@ func (suite *URLCacheUnitSuite) TestNewURLCache() {
|
||||
name: "valid",
|
||||
driveID: "drive1",
|
||||
refreshInt: 1 * time.Hour,
|
||||
itemPager: &mock.EnumeratesDriveItemsDelta{},
|
||||
itemPager: &apiMock.DeltaPager[models.DriveItemable]{},
|
||||
errors: fault.New(true),
|
||||
expectedErr: require.NoError,
|
||||
},
|
||||
|
||||
@ -15,7 +15,9 @@ import (
|
||||
|
||||
inMock "github.com/alcionai/corso/src/internal/common/idname/mock"
|
||||
"github.com/alcionai/corso/src/internal/common/ptr"
|
||||
"github.com/alcionai/corso/src/internal/common/readers"
|
||||
"github.com/alcionai/corso/src/internal/data"
|
||||
dataMock "github.com/alcionai/corso/src/internal/data/mock"
|
||||
"github.com/alcionai/corso/src/internal/m365/graph"
|
||||
"github.com/alcionai/corso/src/internal/m365/support"
|
||||
"github.com/alcionai/corso/src/internal/operations/inject"
|
||||
@ -322,7 +324,7 @@ func (suite *DataCollectionsUnitSuite) TestParseMetadataCollections() {
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
cdps, canUsePreviousBackup, err := ParseMetadataCollections(ctx, []data.RestoreCollection{
|
||||
data.NoFetchRestoreCollection{Collection: coll},
|
||||
dataMock.NewUnversionedRestoreCollection(t, data.NoFetchRestoreCollection{Collection: coll}),
|
||||
})
|
||||
test.expectError(t, err, clues.ToCore(err))
|
||||
|
||||
@ -591,7 +593,7 @@ func (suite *BackupIntgSuite) TestDelta() {
|
||||
require.NotNil(t, metadata, "collections contains a metadata collection")
|
||||
|
||||
cdps, canUsePreviousBackup, err := ParseMetadataCollections(ctx, []data.RestoreCollection{
|
||||
data.NoFetchRestoreCollection{Collection: metadata},
|
||||
dataMock.NewUnversionedRestoreCollection(t, data.NoFetchRestoreCollection{Collection: metadata}),
|
||||
})
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
assert.True(t, canUsePreviousBackup, "can use previous backup")
|
||||
@ -666,7 +668,12 @@ func (suite *BackupIntgSuite) TestMailSerializationRegression() {
|
||||
for stream := range streamChannel {
|
||||
buf := &bytes.Buffer{}
|
||||
|
||||
read, err := buf.ReadFrom(stream.ToReader())
|
||||
rr, err := readers.NewVersionedRestoreReader(stream.ToReader())
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
assert.Equal(t, readers.DefaultSerializationVersion, rr.Format().Version)
|
||||
|
||||
read, err := buf.ReadFrom(rr)
|
||||
assert.NoError(t, err, clues.ToCore(err))
|
||||
assert.NotZero(t, read)
|
||||
|
||||
@ -744,7 +751,13 @@ func (suite *BackupIntgSuite) TestContactSerializationRegression() {
|
||||
|
||||
for stream := range edc.Items(ctx, fault.New(true)) {
|
||||
buf := &bytes.Buffer{}
|
||||
read, err := buf.ReadFrom(stream.ToReader())
|
||||
|
||||
rr, err := readers.NewVersionedRestoreReader(stream.ToReader())
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
assert.Equal(t, readers.DefaultSerializationVersion, rr.Format().Version)
|
||||
|
||||
read, err := buf.ReadFrom(rr)
|
||||
assert.NoError(t, err, clues.ToCore(err))
|
||||
assert.NotZero(t, read)
|
||||
|
||||
@ -878,7 +891,12 @@ func (suite *BackupIntgSuite) TestEventsSerializationRegression() {
|
||||
for item := range edc.Items(ctx, fault.New(true)) {
|
||||
buf := &bytes.Buffer{}
|
||||
|
||||
read, err := buf.ReadFrom(item.ToReader())
|
||||
rr, err := readers.NewVersionedRestoreReader(item.ToReader())
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
assert.Equal(t, readers.DefaultSerializationVersion, rr.Format().Version)
|
||||
|
||||
read, err := buf.ReadFrom(rr)
|
||||
assert.NoError(t, err, clues.ToCore(err))
|
||||
assert.NotZero(t, read)
|
||||
|
||||
@ -1198,7 +1216,9 @@ func checkMetadata(
|
||||
) {
|
||||
catPaths, _, err := ParseMetadataCollections(
|
||||
ctx,
|
||||
[]data.RestoreCollection{data.NoFetchRestoreCollection{Collection: c}})
|
||||
[]data.RestoreCollection{
|
||||
dataMock.NewUnversionedRestoreCollection(t, data.NoFetchRestoreCollection{Collection: c}),
|
||||
})
|
||||
if !assert.NoError(t, err, "getting metadata", clues.ToCore(err)) {
|
||||
return
|
||||
}
|
||||
|
||||
@ -278,10 +278,21 @@ func (col *prefetchCollection) streamItems(
|
||||
return
|
||||
}
|
||||
|
||||
stream <- data.NewPrefetchedItem(
|
||||
item, err := data.NewPrefetchedItem(
|
||||
io.NopCloser(bytes.NewReader(itemData)),
|
||||
id,
|
||||
details.ItemInfo{Exchange: info})
|
||||
if err != nil {
|
||||
el.AddRecoverable(
|
||||
ctx,
|
||||
clues.Stack(err).
|
||||
WithClues(ctx).
|
||||
Label(fault.LabelForceNoBackupCreation))
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
stream <- item
|
||||
|
||||
atomic.AddInt64(&success, 1)
|
||||
atomic.AddInt64(&totalBytes, info.Size)
|
||||
|
||||
@ -17,6 +17,7 @@ import (
|
||||
"golang.org/x/exp/slices"
|
||||
|
||||
"github.com/alcionai/corso/src/internal/common/ptr"
|
||||
"github.com/alcionai/corso/src/internal/common/readers"
|
||||
"github.com/alcionai/corso/src/internal/data"
|
||||
"github.com/alcionai/corso/src/internal/m365/collection/exchange/mock"
|
||||
"github.com/alcionai/corso/src/internal/m365/graph"
|
||||
@ -55,13 +56,20 @@ func (suite *CollectionUnitSuite) TestPrefetchedItem_Reader() {
|
||||
suite.Run(test.name, func() {
|
||||
t := suite.T()
|
||||
|
||||
ed := data.NewPrefetchedItem(
|
||||
ed, err := data.NewPrefetchedItem(
|
||||
io.NopCloser(bytes.NewReader(test.readData)),
|
||||
"itemID",
|
||||
details.ItemInfo{})
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
r, err := readers.NewVersionedRestoreReader(ed.ToReader())
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
assert.Equal(t, readers.DefaultSerializationVersion, r.Format().Version)
|
||||
assert.False(t, r.Format().DelInFlight)
|
||||
|
||||
buf := &bytes.Buffer{}
|
||||
_, err := buf.ReadFrom(ed.ToReader())
|
||||
_, err = buf.ReadFrom(r)
|
||||
assert.NoError(t, err, "reading data: %v", clues.ToCore(err))
|
||||
assert.Equal(t, test.readData, buf.Bytes(), "read data")
|
||||
assert.Equal(t, "itemID", ed.ID(), "item ID")
|
||||
@ -493,11 +501,11 @@ func (suite *CollectionUnitSuite) TestLazyItem_NoRead_GetInfo_Errors() {
|
||||
time.Now(),
|
||||
fault.New(true))
|
||||
|
||||
_, err := li.(data.ItemInfo).Info()
|
||||
_, err := li.Info()
|
||||
assert.Error(suite.T(), err, "Info without reading data should error")
|
||||
}
|
||||
|
||||
func (suite *CollectionUnitSuite) TestLazyItem() {
|
||||
func (suite *CollectionUnitSuite) TestLazyItem_GetDataErrors() {
|
||||
var (
|
||||
parentPath = "inbox/private/silly cats"
|
||||
now = time.Now()
|
||||
@ -505,44 +513,19 @@ func (suite *CollectionUnitSuite) TestLazyItem() {
|
||||
|
||||
table := []struct {
|
||||
name string
|
||||
modTime time.Time
|
||||
getErr error
|
||||
serializeErr error
|
||||
expectModTime time.Time
|
||||
expectReadErrType error
|
||||
dataCheck assert.ValueAssertionFunc
|
||||
expectInfoErr bool
|
||||
expectInfoErrType error
|
||||
}{
|
||||
{
|
||||
name: "ReturnsEmptyReaderOnDeletedInFlight",
|
||||
modTime: now,
|
||||
getErr: graph.ErrDeletedInFlight,
|
||||
dataCheck: assert.Empty,
|
||||
expectInfoErr: true,
|
||||
expectInfoErrType: data.ErrNotFound,
|
||||
},
|
||||
{
|
||||
name: "ReturnsValidReaderAndInfo",
|
||||
modTime: now,
|
||||
dataCheck: assert.NotEmpty,
|
||||
expectModTime: now,
|
||||
},
|
||||
{
|
||||
name: "ReturnsErrorOnGenericGetError",
|
||||
modTime: now,
|
||||
getErr: assert.AnError,
|
||||
expectReadErrType: assert.AnError,
|
||||
dataCheck: assert.Empty,
|
||||
expectInfoErr: true,
|
||||
},
|
||||
{
|
||||
name: "ReturnsErrorOnGenericSerializeError",
|
||||
modTime: now,
|
||||
serializeErr: assert.AnError,
|
||||
expectReadErrType: assert.AnError,
|
||||
dataCheck: assert.Empty,
|
||||
expectInfoErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
@ -575,47 +558,128 @@ func (suite *CollectionUnitSuite) TestLazyItem() {
|
||||
userID: "userID",
|
||||
itemID: "itemID",
|
||||
getter: getter,
|
||||
modTime: test.modTime,
|
||||
modTime: now,
|
||||
immutableIDs: false,
|
||||
parentPath: parentPath,
|
||||
},
|
||||
"itemID",
|
||||
test.modTime,
|
||||
now,
|
||||
fault.New(true))
|
||||
|
||||
assert.False(t, li.Deleted(), "item shouldn't be marked deleted")
|
||||
assert.Equal(
|
||||
t,
|
||||
test.modTime,
|
||||
li.(data.ItemModTime).ModTime(),
|
||||
"item mod time")
|
||||
assert.Equal(t, now, li.ModTime(), "item mod time")
|
||||
|
||||
readData, err := io.ReadAll(li.ToReader())
|
||||
if test.expectReadErrType == nil {
|
||||
assert.NoError(t, err, "reading item data: %v", clues.ToCore(err))
|
||||
} else {
|
||||
assert.ErrorIs(t, err, test.expectReadErrType, "read error")
|
||||
}
|
||||
|
||||
test.dataCheck(t, readData, "read item data")
|
||||
|
||||
info, err := li.(data.ItemInfo).Info()
|
||||
|
||||
// Didn't expect an error getting info, it should be valid.
|
||||
if !test.expectInfoErr {
|
||||
assert.NoError(t, err, "getting item info: %v", clues.ToCore(err))
|
||||
assert.Equal(t, parentPath, info.Exchange.ParentPath)
|
||||
assert.Equal(t, test.expectModTime, info.Modified())
|
||||
|
||||
return
|
||||
}
|
||||
_, err := readers.NewVersionedRestoreReader(li.ToReader())
|
||||
assert.ErrorIs(t, err, test.expectReadErrType)
|
||||
|
||||
// Should get some form of error when trying to get info.
|
||||
_, err = li.Info()
|
||||
assert.Error(t, err, "Info()")
|
||||
|
||||
if test.expectInfoErrType != nil {
|
||||
assert.ErrorIs(t, err, test.expectInfoErrType, "Info() error")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func (suite *CollectionUnitSuite) TestLazyItem_ReturnsEmptyReaderOnDeletedInFlight() {
|
||||
var (
|
||||
t = suite.T()
|
||||
|
||||
parentPath = "inbox/private/silly cats"
|
||||
now = time.Now()
|
||||
)
|
||||
|
||||
ctx, flush := tester.NewContext(t)
|
||||
defer flush()
|
||||
|
||||
getter := &mock.ItemGetSerialize{GetErr: graph.ErrDeletedInFlight}
|
||||
|
||||
li := data.NewLazyItem(
|
||||
ctx,
|
||||
&lazyItemGetter{
|
||||
userID: "userID",
|
||||
itemID: "itemID",
|
||||
getter: getter,
|
||||
modTime: now,
|
||||
immutableIDs: false,
|
||||
parentPath: parentPath,
|
||||
},
|
||||
"itemID",
|
||||
now,
|
||||
fault.New(true))
|
||||
|
||||
assert.False(t, li.Deleted(), "item shouldn't be marked deleted")
|
||||
assert.Equal(
|
||||
t,
|
||||
now,
|
||||
li.ModTime(),
|
||||
"item mod time")
|
||||
|
||||
r, err := readers.NewVersionedRestoreReader(li.ToReader())
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
assert.Equal(t, readers.DefaultSerializationVersion, r.Format().Version)
|
||||
assert.True(t, r.Format().DelInFlight)
|
||||
|
||||
readData, err := io.ReadAll(r)
|
||||
assert.NoError(t, err, "reading item data: %v", clues.ToCore(err))
|
||||
|
||||
assert.Empty(t, readData, "read item data")
|
||||
|
||||
_, err = li.Info()
|
||||
assert.ErrorIs(t, err, data.ErrNotFound, "Info() error")
|
||||
}
|
||||
|
||||
func (suite *CollectionUnitSuite) TestLazyItem() {
|
||||
var (
|
||||
t = suite.T()
|
||||
|
||||
parentPath = "inbox/private/silly cats"
|
||||
now = time.Now()
|
||||
)
|
||||
|
||||
ctx, flush := tester.NewContext(t)
|
||||
defer flush()
|
||||
|
||||
// Exact data type doesn't really matter.
|
||||
testData := models.NewMessage()
|
||||
testData.SetSubject(ptr.To("hello world"))
|
||||
|
||||
getter := &mock.ItemGetSerialize{GetData: testData}
|
||||
|
||||
li := data.NewLazyItem(
|
||||
ctx,
|
||||
&lazyItemGetter{
|
||||
userID: "userID",
|
||||
itemID: "itemID",
|
||||
getter: getter,
|
||||
modTime: now,
|
||||
immutableIDs: false,
|
||||
parentPath: parentPath,
|
||||
},
|
||||
"itemID",
|
||||
now,
|
||||
fault.New(true))
|
||||
|
||||
assert.False(t, li.Deleted(), "item shouldn't be marked deleted")
|
||||
assert.Equal(
|
||||
t,
|
||||
now,
|
||||
li.ModTime(),
|
||||
"item mod time")
|
||||
|
||||
r, err := readers.NewVersionedRestoreReader(li.ToReader())
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
assert.Equal(t, readers.DefaultSerializationVersion, r.Format().Version)
|
||||
assert.False(t, r.Format().DelInFlight)
|
||||
|
||||
readData, err := io.ReadAll(r)
|
||||
assert.NoError(t, err, "reading item data: %v", clues.ToCore(err))
|
||||
|
||||
assert.NotEmpty(t, readData, "read item data")
|
||||
|
||||
info, err := li.Info()
|
||||
assert.NoError(t, err, "getting item info: %v", clues.ToCore(err))
|
||||
|
||||
assert.Equal(t, parentPath, info.Exchange.ParentPath)
|
||||
assert.Equal(t, now, info.Modified())
|
||||
}
|
||||
|
||||
@ -2,6 +2,7 @@ package groups
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@ -526,6 +527,8 @@ func (suite *BackupIntgSuite) TestCreateCollections() {
|
||||
|
||||
require.NotEmpty(t, c.FullPath().Folder(false))
|
||||
|
||||
fmt.Printf("\n-----\nfolder %+v\n-----\n", c.FullPath().Folder(false))
|
||||
|
||||
// TODO(ashmrtn): Remove when LocationPath is made part of BackupCollection
|
||||
// interface.
|
||||
if !assert.Implements(t, (*data.LocationPather)(nil), c) {
|
||||
@ -534,6 +537,8 @@ func (suite *BackupIntgSuite) TestCreateCollections() {
|
||||
|
||||
loc := c.(data.LocationPather).LocationPath().String()
|
||||
|
||||
fmt.Printf("\n-----\nloc %+v\n-----\n", c.(data.LocationPather).LocationPath().String())
|
||||
|
||||
require.NotEmpty(t, loc)
|
||||
|
||||
delete(test.channelNames, loc)
|
||||
|
||||
@ -67,6 +67,15 @@ func (bh channelsBackupHandler) canonicalPath(
|
||||
false)
|
||||
}
|
||||
|
||||
func (bh channelsBackupHandler) PathPrefix(tenantID string) (path.Path, error) {
|
||||
return path.Build(
|
||||
tenantID,
|
||||
bh.protectedResource,
|
||||
path.GroupsService,
|
||||
path.ChannelMessagesCategory,
|
||||
false)
|
||||
}
|
||||
|
||||
func (bh channelsBackupHandler) GetChannelMessage(
|
||||
ctx context.Context,
|
||||
teamID, channelID, itemID string,
|
||||
|
||||
@ -150,27 +150,47 @@ func (col *Collection) streamItems(ctx context.Context, errs *fault.Bus) {
|
||||
parentFolderID,
|
||||
id)
|
||||
if err != nil {
|
||||
el.AddRecoverable(ctx, clues.Wrap(err, "writing channel message to serializer"))
|
||||
el.AddRecoverable(
|
||||
ctx,
|
||||
clues.Wrap(err, "writing channel message to serializer").Label(fault.LabelForceNoBackupCreation))
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
if err := writer.WriteObjectValue("", item); err != nil {
|
||||
el.AddRecoverable(ctx, clues.Wrap(err, "writing channel message to serializer"))
|
||||
el.AddRecoverable(
|
||||
ctx,
|
||||
clues.Wrap(err, "writing channel message to serializer").Label(fault.LabelForceNoBackupCreation))
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
itemData, err := writer.GetSerializedContent()
|
||||
if err != nil {
|
||||
el.AddRecoverable(ctx, clues.Wrap(err, "serializing channel message"))
|
||||
el.AddRecoverable(
|
||||
ctx,
|
||||
clues.Wrap(err, "serializing channel message").Label(fault.LabelForceNoBackupCreation))
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
info.ParentPath = col.LocationPath().String()
|
||||
|
||||
col.stream <- data.NewPrefetchedItem(
|
||||
storeItem, err := data.NewPrefetchedItem(
|
||||
io.NopCloser(bytes.NewReader(itemData)),
|
||||
id,
|
||||
details.ItemInfo{Groups: info})
|
||||
if err != nil {
|
||||
el.AddRecoverable(
|
||||
ctx,
|
||||
clues.Stack(err).
|
||||
WithClues(ctx).
|
||||
Label(fault.LabelForceNoBackupCreation))
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
col.stream <- storeItem
|
||||
|
||||
atomic.AddInt64(&streamedItems, 1)
|
||||
atomic.AddInt64(&totalBytes, info.Size)
|
||||
|
||||
@ -11,6 +11,7 @@ import (
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/stretchr/testify/suite"
|
||||
|
||||
"github.com/alcionai/corso/src/internal/common/readers"
|
||||
"github.com/alcionai/corso/src/internal/data"
|
||||
"github.com/alcionai/corso/src/internal/m365/collection/groups/mock"
|
||||
"github.com/alcionai/corso/src/internal/m365/support"
|
||||
@ -48,13 +49,20 @@ func (suite *CollectionUnitSuite) TestPrefetchedItem_Reader() {
|
||||
suite.Run(test.name, func() {
|
||||
t := suite.T()
|
||||
|
||||
ed := data.NewPrefetchedItem(
|
||||
ed, err := data.NewPrefetchedItem(
|
||||
io.NopCloser(bytes.NewReader(test.readData)),
|
||||
"itemID",
|
||||
details.ItemInfo{})
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
r, err := readers.NewVersionedRestoreReader(ed.ToReader())
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
assert.Equal(t, readers.DefaultSerializationVersion, r.Format().Version)
|
||||
assert.False(t, r.Format().DelInFlight)
|
||||
|
||||
buf := &bytes.Buffer{}
|
||||
_, err := buf.ReadFrom(ed.ToReader())
|
||||
_, err = buf.ReadFrom(r)
|
||||
assert.NoError(t, err, "reading data: %v", clues.ToCore(err))
|
||||
assert.Equal(t, test.readData, buf.Bytes(), "read data")
|
||||
assert.Equal(t, "itemID", ed.ID(), "item ID")
|
||||
|
||||
@ -211,11 +211,17 @@ func (sc *Collection) retrieveLists(
|
||||
metrics.Bytes += size
|
||||
|
||||
metrics.Successes++
|
||||
sc.data <- data.NewPrefetchedItem(
|
||||
|
||||
item, err := data.NewPrefetchedItem(
|
||||
io.NopCloser(bytes.NewReader(byteArray)),
|
||||
ptr.Val(lst.GetId()),
|
||||
details.ItemInfo{SharePoint: ListToSPInfo(lst, size)})
|
||||
if err != nil {
|
||||
el.AddRecoverable(ctx, clues.Stack(err).WithClues(ctx).Label(fault.LabelForceNoBackupCreation))
|
||||
continue
|
||||
}
|
||||
|
||||
sc.data <- item
|
||||
progress <- struct{}{}
|
||||
}
|
||||
}
|
||||
@ -272,11 +278,17 @@ func (sc *Collection) retrievePages(
|
||||
if size > 0 {
|
||||
metrics.Bytes += size
|
||||
metrics.Successes++
|
||||
sc.data <- data.NewPrefetchedItem(
|
||||
|
||||
item, err := data.NewPrefetchedItem(
|
||||
io.NopCloser(bytes.NewReader(byteArray)),
|
||||
ptr.Val(pg.GetId()),
|
||||
details.ItemInfo{SharePoint: pageToSPInfo(pg, root, size)})
|
||||
if err != nil {
|
||||
el.AddRecoverable(ctx, clues.Stack(err).WithClues(ctx).Label(fault.LabelForceNoBackupCreation))
|
||||
continue
|
||||
}
|
||||
|
||||
sc.data <- item
|
||||
progress <- struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
@ -103,10 +103,11 @@ func (suite *SharePointCollectionSuite) TestCollection_Items() {
|
||||
byteArray, err := ow.GetSerializedContent()
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
data := data.NewPrefetchedItem(
|
||||
data, err := data.NewPrefetchedItem(
|
||||
io.NopCloser(bytes.NewReader(byteArray)),
|
||||
name,
|
||||
details.ItemInfo{SharePoint: ListToSPInfo(listing, int64(len(byteArray)))})
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
return data
|
||||
},
|
||||
@ -132,10 +133,11 @@ func (suite *SharePointCollectionSuite) TestCollection_Items() {
|
||||
page, err := betaAPI.CreatePageFromBytes(byteArray)
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
data := data.NewPrefetchedItem(
|
||||
data, err := data.NewPrefetchedItem(
|
||||
io.NopCloser(bytes.NewReader(byteArray)),
|
||||
itemName,
|
||||
details.ItemInfo{SharePoint: betaAPI.PageInfo(page, int64(len(byteArray)))})
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
return data
|
||||
},
|
||||
@ -194,10 +196,11 @@ func (suite *SharePointCollectionSuite) TestListCollection_Restore() {
|
||||
byteArray, err := service.Serialize(listing)
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
listData := data.NewPrefetchedItem(
|
||||
listData, err := data.NewPrefetchedItem(
|
||||
io.NopCloser(bytes.NewReader(byteArray)),
|
||||
testName,
|
||||
details.ItemInfo{SharePoint: ListToSPInfo(listing, int64(len(byteArray)))})
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
destName := testdata.DefaultRestoreConfig("").Location
|
||||
|
||||
|
||||
@ -79,20 +79,29 @@ func NewController(
|
||||
return nil, clues.Wrap(err, "creating api client").WithClues(ctx)
|
||||
}
|
||||
|
||||
rc := resource.UnknownResource
|
||||
var rCli *resourceClient
|
||||
|
||||
switch pst {
|
||||
case path.ExchangeService, path.OneDriveService:
|
||||
rc = resource.Users
|
||||
case path.GroupsService:
|
||||
rc = resource.Groups
|
||||
case path.SharePointService:
|
||||
rc = resource.Sites
|
||||
}
|
||||
// No failure for an unknown service. In that case we create a controller that
// doesn't attempt to look up any resource data. This helps avoid unnecessary
// service calls when the end user is running repo init and connect commands via
// the CLI. All other callers are expected to pass in a known service, or else
// expect downstream failures.
|
||||
if pst != path.UnknownService {
|
||||
rc := resource.UnknownResource
|
||||
|
||||
rCli, err := getResourceClient(rc, ac)
|
||||
if err != nil {
|
||||
return nil, clues.Wrap(err, "creating resource client").WithClues(ctx)
|
||||
switch pst {
|
||||
case path.ExchangeService, path.OneDriveService:
|
||||
rc = resource.Users
|
||||
case path.GroupsService:
|
||||
rc = resource.Groups
|
||||
case path.SharePointService:
|
||||
rc = resource.Sites
|
||||
}
|
||||
|
||||
rCli, err = getResourceClient(rc, ac)
|
||||
if err != nil {
|
||||
return nil, clues.Wrap(err, "creating resource client").WithClues(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
ctrl := Controller{
|
||||
@ -110,6 +119,10 @@ func NewController(
|
||||
return &ctrl, nil
|
||||
}
|
||||
|
||||
func (ctrl *Controller) VerifyAccess(ctx context.Context) error {
|
||||
return ctrl.AC.Access().GetToken(ctx)
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Processing Status
|
||||
// ---------------------------------------------------------------------------
|
||||
@ -195,7 +208,7 @@ func getResourceClient(rc resource.Category, ac api.Client) (*resourceClient, er
|
||||
case resource.Groups:
|
||||
return &resourceClient{enum: rc, getter: ac.Groups()}, nil
|
||||
default:
|
||||
return nil, clues.New("unrecognized owner resource enum").With("resource_enum", rc)
|
||||
return nil, clues.New("unrecognized owner resource type").With("resource_enum", rc)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -861,7 +861,7 @@ func (suite *ControllerIntegrationSuite) TestRestoreAndBackup_core() {
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "MultipleContactsSingleFolder",
|
||||
name: "MultipleContactsInRestoreFolder",
|
||||
service: path.ExchangeService,
|
||||
collections: []stub.ColInfo{
|
||||
{
|
||||
@ -887,49 +887,77 @@ func (suite *ControllerIntegrationSuite) TestRestoreAndBackup_core() {
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "MultipleContactsMultipleFolders",
|
||||
service: path.ExchangeService,
|
||||
collections: []stub.ColInfo{
|
||||
{
|
||||
PathElements: []string{"Work"},
|
||||
Category: path.ContactsCategory,
|
||||
Items: []stub.ItemInfo{
|
||||
{
|
||||
Name: "someencodeditemID",
|
||||
Data: exchMock.ContactBytes("Ghimley"),
|
||||
LookupKey: "Ghimley",
|
||||
},
|
||||
{
|
||||
Name: "someencodeditemID2",
|
||||
Data: exchMock.ContactBytes("Irgot"),
|
||||
LookupKey: "Irgot",
|
||||
},
|
||||
{
|
||||
Name: "someencodeditemID3",
|
||||
Data: exchMock.ContactBytes("Jannes"),
|
||||
LookupKey: "Jannes",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
PathElements: []string{"Personal"},
|
||||
Category: path.ContactsCategory,
|
||||
Items: []stub.ItemInfo{
|
||||
{
|
||||
Name: "someencodeditemID4",
|
||||
Data: exchMock.ContactBytes("Argon"),
|
||||
LookupKey: "Argon",
|
||||
},
|
||||
{
|
||||
Name: "someencodeditemID5",
|
||||
Data: exchMock.ContactBytes("Bernard"),
|
||||
LookupKey: "Bernard",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
// TODO(ashmrtn): Re-enable when we can restore contacts to nested folders.
|
||||
//{
|
||||
// name: "MultipleContactsSingleFolder",
|
||||
// service: path.ExchangeService,
|
||||
// collections: []stub.ColInfo{
|
||||
// {
|
||||
// PathElements: []string{"Contacts"},
|
||||
// Category: path.ContactsCategory,
|
||||
// Items: []stub.ItemInfo{
|
||||
// {
|
||||
// Name: "someencodeditemID",
|
||||
// Data: exchMock.ContactBytes("Ghimley"),
|
||||
// LookupKey: "Ghimley",
|
||||
// },
|
||||
// {
|
||||
// Name: "someencodeditemID2",
|
||||
// Data: exchMock.ContactBytes("Irgot"),
|
||||
// LookupKey: "Irgot",
|
||||
// },
|
||||
// {
|
||||
// Name: "someencodeditemID3",
|
||||
// Data: exchMock.ContactBytes("Jannes"),
|
||||
// LookupKey: "Jannes",
|
||||
// },
|
||||
// },
|
||||
// },
|
||||
// },
|
||||
//},
|
||||
//{
|
||||
// name: "MultipleContactsMultipleFolders",
|
||||
// service: path.ExchangeService,
|
||||
// collections: []stub.ColInfo{
|
||||
// {
|
||||
// PathElements: []string{"Work"},
|
||||
// Category: path.ContactsCategory,
|
||||
// Items: []stub.ItemInfo{
|
||||
// {
|
||||
// Name: "someencodeditemID",
|
||||
// Data: exchMock.ContactBytes("Ghimley"),
|
||||
// LookupKey: "Ghimley",
|
||||
// },
|
||||
// {
|
||||
// Name: "someencodeditemID2",
|
||||
// Data: exchMock.ContactBytes("Irgot"),
|
||||
// LookupKey: "Irgot",
|
||||
// },
|
||||
// {
|
||||
// Name: "someencodeditemID3",
|
||||
// Data: exchMock.ContactBytes("Jannes"),
|
||||
// LookupKey: "Jannes",
|
||||
// },
|
||||
// },
|
||||
// },
|
||||
// {
|
||||
// PathElements: []string{"Personal"},
|
||||
// Category: path.ContactsCategory,
|
||||
// Items: []stub.ItemInfo{
|
||||
// {
|
||||
// Name: "someencodeditemID4",
|
||||
// Data: exchMock.ContactBytes("Argon"),
|
||||
// LookupKey: "Argon",
|
||||
// },
|
||||
// {
|
||||
// Name: "someencodeditemID5",
|
||||
// Data: exchMock.ContactBytes("Bernard"),
|
||||
// LookupKey: "Bernard",
|
||||
// },
|
||||
// },
|
||||
// },
|
||||
// },
|
||||
//},
|
||||
// {
|
||||
// name: "MultipleEventsSingleCalendar",
|
||||
// service: path.ExchangeService,
|
||||
@ -1017,34 +1045,35 @@ func (suite *ControllerIntegrationSuite) TestRestoreAndBackup_core() {
|
||||
|
||||
func (suite *ControllerIntegrationSuite) TestMultiFolderBackupDifferentNames() {
|
||||
table := []restoreBackupInfo{
|
||||
{
|
||||
name: "Contacts",
|
||||
service: path.ExchangeService,
|
||||
collections: []stub.ColInfo{
|
||||
{
|
||||
PathElements: []string{"Work"},
|
||||
Category: path.ContactsCategory,
|
||||
Items: []stub.ItemInfo{
|
||||
{
|
||||
Name: "someencodeditemID",
|
||||
Data: exchMock.ContactBytes("Ghimley"),
|
||||
LookupKey: "Ghimley",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
PathElements: []string{"Personal"},
|
||||
Category: path.ContactsCategory,
|
||||
Items: []stub.ItemInfo{
|
||||
{
|
||||
Name: "someencodeditemID2",
|
||||
Data: exchMock.ContactBytes("Irgot"),
|
||||
LookupKey: "Irgot",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
// TODO(ashmrtn): Re-enable when we can restore contacts to nested folders.
|
||||
//{
|
||||
// name: "Contacts",
|
||||
// service: path.ExchangeService,
|
||||
// collections: []stub.ColInfo{
|
||||
// {
|
||||
// PathElements: []string{"Work"},
|
||||
// Category: path.ContactsCategory,
|
||||
// Items: []stub.ItemInfo{
|
||||
// {
|
||||
// Name: "someencodeditemID",
|
||||
// Data: exchMock.ContactBytes("Ghimley"),
|
||||
// LookupKey: "Ghimley",
|
||||
// },
|
||||
// },
|
||||
// },
|
||||
// {
|
||||
// PathElements: []string{"Personal"},
|
||||
// Category: path.ContactsCategory,
|
||||
// Items: []stub.ItemInfo{
|
||||
// {
|
||||
// Name: "someencodeditemID2",
|
||||
// Data: exchMock.ContactBytes("Irgot"),
|
||||
// LookupKey: "Irgot",
|
||||
// },
|
||||
// },
|
||||
// },
|
||||
// },
|
||||
//},
|
||||
// {
|
||||
// name: "Events",
|
||||
// service: path.ExchangeService,
|
||||
|
||||
@ -70,6 +70,7 @@ const (
|
||||
NoSPLicense errorMessage = "Tenant does not have a SPO license"
|
||||
parameterDeltaTokenNotSupported errorMessage = "Parameter 'DeltaToken' not supported for this request"
|
||||
usersCannotBeResolved errorMessage = "One or more users could not be resolved"
|
||||
requestedSiteCouldNotBeFound errorMessage = "Requested site could not be found"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -259,6 +260,10 @@ func IsErrUsersCannotBeResolved(err error) bool {
return hasErrorCode(err, noResolvedUsers) || hasErrorMessage(err, usersCannotBeResolved)
}

func IsErrSiteNotFound(err error) bool {
return hasErrorMessage(err, requestedSiteCouldNotBeFound)
}

// ---------------------------------------------------------------------------
// error parsers
// ---------------------------------------------------------------------------
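A minimal sketch of the classification this helper enables, assuming the Sites GetByID call used elsewhere in this change; siteID is a placeholder:

site, err := ac.Sites().GetByID(ctx, siteID, api.CallConfig{})

switch {
case graph.IsErrSiteNotFound(err):
	// the site was deleted after it was backed up; treat it as skippable
case err != nil:
	// some other, unexpected Graph failure
default:
	_ = site
}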
@ -628,6 +628,51 @@ func (suite *GraphErrorsUnitSuite) TestIsErrUsersCannotBeResolved() {
|
||||
}
|
||||
}
|
||||
|
||||
func (suite *GraphErrorsUnitSuite) TestIsErrSiteCouldNotBeFound() {
|
||||
table := []struct {
|
||||
name string
|
||||
err error
|
||||
expect assert.BoolAssertionFunc
|
||||
}{
|
||||
{
|
||||
name: "nil",
|
||||
err: nil,
|
||||
expect: assert.False,
|
||||
},
|
||||
{
|
||||
name: "non-matching",
|
||||
err: assert.AnError,
|
||||
expect: assert.False,
|
||||
},
|
||||
{
|
||||
name: "non-matching oDataErr",
|
||||
err: odErrMsg("InvalidRequest", "cant resolve sites"),
|
||||
expect: assert.False,
|
||||
},
|
||||
{
|
||||
name: "matching oDataErr msg",
|
||||
err: odErrMsg("InvalidRequest", string(requestedSiteCouldNotBeFound)),
|
||||
expect: assert.True,
|
||||
},
|
||||
// next two tests are to make sure the checks are case insensitive
|
||||
{
|
||||
name: "oDataErr uppercase",
|
||||
err: odErrMsg("InvalidRequest", strings.ToUpper(string(requestedSiteCouldNotBeFound))),
|
||||
expect: assert.True,
|
||||
},
|
||||
{
|
||||
name: "oDataErr lowercase",
|
||||
err: odErrMsg("InvalidRequest", strings.ToLower(string(requestedSiteCouldNotBeFound))),
|
||||
expect: assert.True,
|
||||
},
|
||||
}
|
||||
for _, test := range table {
|
||||
suite.Run(test.name, func() {
|
||||
test.expect(suite.T(), IsErrSiteNotFound(test.err))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func (suite *GraphErrorsUnitSuite) TestIsErrCannotOpenFileAttachment() {
|
||||
table := []struct {
|
||||
name string
|
||||
|
||||
@ -57,11 +57,16 @@ func (mce MetadataCollectionEntry) toMetadataItem() (metadataItem, error) {
|
||||
return metadataItem{}, clues.Wrap(err, "serializing metadata")
|
||||
}
|
||||
|
||||
item, err := data.NewUnindexedPrefetchedItem(
|
||||
io.NopCloser(buf),
|
||||
mce.fileName,
|
||||
time.Now())
|
||||
if err != nil {
|
||||
return metadataItem{}, clues.Stack(err)
|
||||
}
|
||||
|
||||
return metadataItem{
|
||||
Item: data.NewUnindexedPrefetchedItem(
|
||||
io.NopCloser(buf),
|
||||
mce.fileName,
|
||||
time.Now()),
|
||||
Item: item,
|
||||
size: int64(buf.Len()),
|
||||
}, nil
|
||||
}
|
||||
|
||||
@ -13,6 +13,7 @@ import (
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/stretchr/testify/suite"
|
||||
|
||||
"github.com/alcionai/corso/src/internal/common/readers"
|
||||
"github.com/alcionai/corso/src/internal/data"
|
||||
"github.com/alcionai/corso/src/internal/m365/support"
|
||||
"github.com/alcionai/corso/src/internal/tester"
|
||||
@ -69,13 +70,16 @@ func (suite *MetadataCollectionUnitSuite) TestItems() {
|
||||
items := []metadataItem{}
|
||||
|
||||
for i := 0; i < len(itemNames); i++ {
|
||||
item, err := data.NewUnindexedPrefetchedItem(
|
||||
io.NopCloser(bytes.NewReader(itemData[i])),
|
||||
itemNames[i],
|
||||
time.Time{})
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
items = append(
|
||||
items,
|
||||
metadataItem{
|
||||
Item: data.NewUnindexedPrefetchedItem(
|
||||
io.NopCloser(bytes.NewReader(itemData[i])),
|
||||
itemNames[i],
|
||||
time.Time{}),
|
||||
Item: item,
|
||||
size: int64(len(itemData[i])),
|
||||
})
|
||||
}
|
||||
@ -103,7 +107,13 @@ func (suite *MetadataCollectionUnitSuite) TestItems() {
|
||||
for s := range c.Items(ctx, fault.New(true)) {
|
||||
gotNames = append(gotNames, s.ID())
|
||||
|
||||
buf, err := io.ReadAll(s.ToReader())
|
||||
rr, err := readers.NewVersionedRestoreReader(s.ToReader())
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
assert.Equal(t, readers.DefaultSerializationVersion, rr.Format().Version)
|
||||
assert.False(t, rr.Format().DelInFlight)
|
||||
|
||||
buf, err := io.ReadAll(rr)
|
||||
if !assert.NoError(t, err, clues.ToCore(err)) {
|
||||
continue
|
||||
}
|
||||
@ -204,11 +214,17 @@ func (suite *MetadataCollectionUnitSuite) TestMakeMetadataCollection() {
|
||||
for item := range col.Items(ctx, fault.New(true)) {
|
||||
assert.Equal(t, test.metadata.fileName, item.ID())
|
||||
|
||||
rr, err := readers.NewVersionedRestoreReader(item.ToReader())
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
assert.Equal(t, readers.DefaultSerializationVersion, rr.Format().Version)
|
||||
assert.False(t, rr.Format().DelInFlight)
|
||||
|
||||
gotMap := map[string]string{}
|
||||
decoder := json.NewDecoder(item.ToReader())
|
||||
decoder := json.NewDecoder(rr)
|
||||
itemCount++
|
||||
|
||||
err := decoder.Decode(&gotMap)
|
||||
err = decoder.Decode(&gotMap)
|
||||
if !assert.NoError(t, err, clues.ToCore(err)) {
|
||||
continue
|
||||
}
|
||||
|
||||
@ -16,6 +16,7 @@ import (
|
||||
"golang.org/x/exp/slices"
|
||||
|
||||
"github.com/alcionai/corso/src/internal/common/ptr"
|
||||
"github.com/alcionai/corso/src/internal/common/readers"
|
||||
"github.com/alcionai/corso/src/internal/data"
|
||||
"github.com/alcionai/corso/src/internal/m365/collection/drive/metadata"
|
||||
odStub "github.com/alcionai/corso/src/internal/m365/service/onedrive/stub"
|
||||
@ -573,7 +574,12 @@ func compareExchangeEmail(
|
||||
expected map[string][]byte,
|
||||
item data.Item,
|
||||
) {
|
||||
itemData, err := io.ReadAll(item.ToReader())
|
||||
rr := versionedReadWrapper(t, item.ToReader())
|
||||
if rr == nil {
|
||||
return
|
||||
}
|
||||
|
||||
itemData, err := io.ReadAll(rr)
|
||||
if !assert.NoError(t, err, "reading collection item", item.ID(), clues.ToCore(err)) {
|
||||
return
|
||||
}
|
||||
@ -600,7 +606,12 @@ func compareExchangeContact(
|
||||
expected map[string][]byte,
|
||||
item data.Item,
|
||||
) {
|
||||
itemData, err := io.ReadAll(item.ToReader())
|
||||
rr := versionedReadWrapper(t, item.ToReader())
|
||||
if rr == nil {
|
||||
return
|
||||
}
|
||||
|
||||
itemData, err := io.ReadAll(rr)
|
||||
if !assert.NoError(t, err, "reading collection item", item.ID(), clues.ToCore(err)) {
|
||||
return
|
||||
}
|
||||
@ -628,7 +639,12 @@ func compareExchangeEvent(
|
||||
expected map[string][]byte,
|
||||
item data.Item,
|
||||
) {
|
||||
itemData, err := io.ReadAll(item.ToReader())
|
||||
rr := versionedReadWrapper(t, item.ToReader())
|
||||
if rr == nil {
|
||||
return
|
||||
}
|
||||
|
||||
itemData, err := io.ReadAll(rr)
|
||||
if !assert.NoError(t, err, "reading collection item", item.ID(), clues.ToCore(err)) {
|
||||
return
|
||||
}
|
||||
@ -718,7 +734,12 @@ func compareDriveItem(
|
||||
return false
|
||||
}
|
||||
|
||||
buf, err := io.ReadAll(item.ToReader())
|
||||
rr := versionedReadWrapper(t, item.ToReader())
|
||||
if rr == nil {
|
||||
return true
|
||||
}
|
||||
|
||||
buf, err := io.ReadAll(rr)
|
||||
if !assert.NoError(t, err, clues.ToCore(err)) {
|
||||
return true
|
||||
}
|
||||
@ -850,6 +871,29 @@ func compareDriveItem(
|
||||
return true
|
||||
}
|
||||
|
||||
// versionedReadWrapper strips out the version format header and checks it
// meets the current standard for all service types. If it doesn't meet the
// standard, returns nil. Else returns the versionedRestoreReader.
|
||||
func versionedReadWrapper(
|
||||
t *testing.T,
|
||||
reader io.ReadCloser,
|
||||
) io.ReadCloser {
|
||||
rr, err := readers.NewVersionedRestoreReader(reader)
|
||||
if !assert.NoError(t, err, clues.ToCore(err)) {
|
||||
return nil
|
||||
}
|
||||
|
||||
if !assert.Equal(t, readers.DefaultSerializationVersion, rr.Format().Version) {
|
||||
return nil
|
||||
}
|
||||
|
||||
if !assert.False(t, rr.Format().DelInFlight) {
|
||||
return nil
|
||||
}
|
||||
|
||||
return rr
|
||||
}
|
||||
|
||||
// compareItem compares the data returned by backup with the expected data.
|
||||
// Returns true if a comparison was done else false. Bool return is mostly used
|
||||
// to exclude OneDrive permissions for the root right now.
|
||||
@ -919,30 +963,9 @@ func checkHasCollections(
|
||||
continue
|
||||
}
|
||||
|
||||
fp := g.FullPath()
|
||||
loc := g.(data.LocationPather).LocationPath()
|
||||
|
||||
if fp.Service() == path.OneDriveService ||
|
||||
(fp.Service() == path.SharePointService && fp.Category() == path.LibrariesCategory) {
|
||||
dp, err := path.ToDrivePath(fp)
|
||||
if !assert.NoError(t, err, clues.ToCore(err)) {
|
||||
continue
|
||||
}
|
||||
|
||||
loc = path.BuildDriveLocation(dp.DriveID, loc.Elements()...)
|
||||
}
|
||||
|
||||
p, err := loc.ToDataLayerPath(
|
||||
fp.Tenant(),
|
||||
fp.ProtectedResource(),
|
||||
fp.Service(),
|
||||
fp.Category(),
|
||||
false)
|
||||
if !assert.NoError(t, err, clues.ToCore(err)) {
|
||||
continue
|
||||
}
|
||||
|
||||
gotNames = append(gotNames, p.String())
|
||||
gotNames = append(gotNames, loc.String())
|
||||
}
|
||||
|
||||
assert.ElementsMatch(t, expectedNames, gotNames, "returned collections")
|
||||
@ -963,14 +986,18 @@ func checkCollections(
|
||||
|
||||
for _, returned := range got {
|
||||
var (
|
||||
hasItems bool
|
||||
service = returned.FullPath().Service()
|
||||
category = returned.FullPath().Category()
|
||||
expectedColData = expected[returned.FullPath().String()]
|
||||
folders = returned.FullPath().Elements()
|
||||
rootDir = folders[len(folders)-1] == mci.RestoreCfg.Location
|
||||
expectedColDataByLoc map[string][]byte
|
||||
hasItems bool
|
||||
service = returned.FullPath().Service()
|
||||
category = returned.FullPath().Category()
|
||||
folders = returned.FullPath().Elements()
|
||||
rootDir = folders[len(folders)-1] == mci.RestoreCfg.Location
|
||||
)
|
||||
|
||||
if p, ok := returned.(data.LocationPather); ok {
|
||||
expectedColDataByLoc = expected[p.LocationPath().String()]
|
||||
}
|
||||
|
||||
// Need to iterate through all items even if we don't expect to find a match
|
||||
// because otherwise we'll deadlock waiting for the status. Unexpected or
|
||||
// missing collection paths will be reported by checkHasCollections.
|
||||
@ -990,14 +1017,14 @@ func checkCollections(
|
||||
hasItems = true
|
||||
gotItems++
|
||||
|
||||
if expectedColData == nil {
|
||||
if expectedColDataByLoc == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if !compareItem(
|
||||
t,
|
||||
returned.FullPath(),
|
||||
expectedColData,
|
||||
expectedColDataByLoc,
|
||||
service,
|
||||
category,
|
||||
item,
|
||||
|
||||
@ -84,6 +84,7 @@ func (ctrl *Controller) ConsumeRestoreCollections(
|
||||
rcc,
|
||||
ctrl.AC,
|
||||
ctrl.backupDriveIDNames,
|
||||
ctrl.backupSiteIDWebURL,
|
||||
dcs,
|
||||
deets,
|
||||
errs,
|
||||
|
||||
@ -22,6 +22,7 @@ import (
|
||||
"github.com/alcionai/corso/src/pkg/account"
|
||||
"github.com/alcionai/corso/src/pkg/backup/identity"
|
||||
"github.com/alcionai/corso/src/pkg/backup/metadata"
|
||||
"github.com/alcionai/corso/src/pkg/control"
|
||||
"github.com/alcionai/corso/src/pkg/fault"
|
||||
"github.com/alcionai/corso/src/pkg/logger"
|
||||
"github.com/alcionai/corso/src/pkg/path"
|
||||
@ -35,19 +36,18 @@ func ProduceBackupCollections(
|
||||
creds account.M365Config,
|
||||
su support.StatusUpdater,
|
||||
errs *fault.Bus,
|
||||
) ([]data.BackupCollection, *prefixmatcher.StringSetMatcher, bool, error) {
|
||||
) ([]data.BackupCollection, *prefixmatcher.StringSetMatcher, error) {
|
||||
b, err := bpc.Selector.ToGroupsBackup()
|
||||
if err != nil {
|
||||
return nil, nil, false, clues.Wrap(err, "groupsDataCollection: parsing selector")
|
||||
return nil, nil, clues.Wrap(err, "groupsDataCollection: parsing selector")
|
||||
}
|
||||
|
||||
var (
|
||||
el = errs.Local()
|
||||
collections = []data.BackupCollection{}
|
||||
categories = map[path.CategoryType]struct{}{}
|
||||
ssmb = prefixmatcher.NewStringSetBuilder()
|
||||
canUsePreviousBackup bool
|
||||
sitesPreviousPaths = map[string]string{}
|
||||
el = errs.Local()
|
||||
collections = []data.BackupCollection{}
|
||||
categories = map[path.CategoryType]struct{}{}
|
||||
ssmb = prefixmatcher.NewStringSetBuilder()
|
||||
sitesPreviousPaths = map[string]string{}
|
||||
)
|
||||
|
||||
ctx = clues.Add(
|
||||
@ -60,7 +60,7 @@ func ProduceBackupCollections(
|
||||
bpc.ProtectedResource.ID(),
|
||||
api.CallConfig{})
|
||||
if err != nil {
|
||||
return nil, nil, false, clues.Wrap(err, "getting group").WithClues(ctx)
|
||||
return nil, nil, clues.Wrap(err, "getting group").WithClues(ctx)
|
||||
}
|
||||
|
||||
isTeam := api.IsTeam(ctx, group)
|
||||
@ -79,12 +79,9 @@ func ProduceBackupCollections(
|
||||
|
||||
switch scope.Category().PathType() {
|
||||
case path.LibrariesCategory:
|
||||
// TODO(meain): Private channels get a separate SharePoint
|
||||
// site. We should also back those up and not just the
|
||||
// default one.
|
||||
resp, err := ac.Groups().GetRootSite(ctx, bpc.ProtectedResource.ID())
|
||||
sites, err := ac.Groups().GetAllSites(ctx, bpc.ProtectedResource.ID(), errs)
|
||||
if err != nil {
|
||||
return nil, nil, false, err
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
siteMetadataCollection := map[string][]data.RestoreCollection{}
|
||||
@ -95,39 +92,47 @@ func ProduceBackupCollections(
|
||||
siteMetadataCollection[siteID] = append(siteMetadataCollection[siteID], c)
|
||||
}
|
||||
|
||||
pr := idname.NewProvider(ptr.Val(resp.GetId()), ptr.Val(resp.GetName()))
|
||||
sbpc := inject.BackupProducerConfig{
|
||||
LastBackupVersion: bpc.LastBackupVersion,
|
||||
Options: bpc.Options,
|
||||
ProtectedResource: pr,
|
||||
Selector: bpc.Selector,
|
||||
MetadataCollections: siteMetadataCollection[ptr.Val(resp.GetId())],
|
||||
}
|
||||
for _, s := range sites {
|
||||
pr := idname.NewProvider(ptr.Val(s.GetId()), ptr.Val(s.GetName()))
|
||||
sbpc := inject.BackupProducerConfig{
|
||||
LastBackupVersion: bpc.LastBackupVersion,
|
||||
Options: bpc.Options,
|
||||
ProtectedResource: pr,
|
||||
Selector: bpc.Selector,
|
||||
MetadataCollections: siteMetadataCollection[ptr.Val(s.GetId())],
|
||||
}
|
||||
|
||||
bh := drive.NewGroupBackupHandler(
|
||||
bpc.ProtectedResource.ID(),
|
||||
ptr.Val(resp.GetId()),
|
||||
ac.Drives(),
|
||||
scope)
|
||||
bh := drive.NewGroupBackupHandler(
|
||||
bpc.ProtectedResource.ID(),
|
||||
ptr.Val(s.GetId()),
|
||||
ac.Drives(),
|
||||
scope)
|
||||
|
||||
cp, err := bh.SitePathPrefix(creds.AzureTenantID)
|
||||
if err != nil {
|
||||
return nil, nil, false, clues.Wrap(err, "getting canonical path")
|
||||
}
|
||||
sp, err := bh.SitePathPrefix(creds.AzureTenantID)
|
||||
if err != nil {
|
||||
return nil, nil, clues.Wrap(err, "getting site path")
|
||||
}
|
||||
|
||||
sitesPreviousPaths[ptr.Val(resp.GetId())] = cp.String()
|
||||
sitesPreviousPaths[ptr.Val(s.GetId())] = sp.String()
|
||||
|
||||
dbcs, canUsePreviousBackup, err = site.CollectLibraries(
|
||||
ctx,
|
||||
sbpc,
|
||||
bh,
|
||||
creds.AzureTenantID,
|
||||
ssmb,
|
||||
su,
|
||||
errs)
|
||||
if err != nil {
|
||||
el.AddRecoverable(ctx, err)
|
||||
continue
|
||||
cs, canUsePreviousBackup, err := site.CollectLibraries(
|
||||
ctx,
|
||||
sbpc,
|
||||
bh,
|
||||
creds.AzureTenantID,
|
||||
ssmb,
|
||||
su,
|
||||
errs)
|
||||
if err != nil {
|
||||
el.AddRecoverable(ctx, err)
|
||||
continue
|
||||
}
|
||||
|
||||
if !canUsePreviousBackup {
|
||||
dbcs = append(dbcs, data.NewTombstoneCollection(sp, control.Options{}))
|
||||
}
|
||||
|
||||
dbcs = append(dbcs, cs...)
|
||||
}
|
||||
|
||||
case path.ChannelMessagesCategory:
|
||||
@ -135,10 +140,12 @@ func ProduceBackupCollections(
|
||||
continue
|
||||
}
|
||||
|
||||
dbcs, canUsePreviousBackup, err = groups.CreateCollections(
|
||||
bh := groups.NewChannelBackupHandler(bpc.ProtectedResource.ID(), ac.Channels())
|
||||
|
||||
cs, canUsePreviousBackup, err := groups.CreateCollections(
|
||||
ctx,
|
||||
bpc,
|
||||
groups.NewChannelBackupHandler(bpc.ProtectedResource.ID(), ac.Channels()),
|
||||
bh,
|
||||
creds.AzureTenantID,
|
||||
scope,
|
||||
su,
|
||||
@ -147,6 +154,17 @@ func ProduceBackupCollections(
|
||||
el.AddRecoverable(ctx, err)
|
||||
continue
|
||||
}
|
||||
|
||||
if !canUsePreviousBackup {
|
||||
tp, err := bh.PathPrefix(creds.AzureTenantID)
|
||||
if err != nil {
|
||||
return nil, nil, clues.Wrap(err, "getting message path")
|
||||
}
|
||||
|
||||
dbcs = append(dbcs, data.NewTombstoneCollection(tp, control.Options{}))
|
||||
}
|
||||
|
||||
dbcs = append(dbcs, cs...)
|
||||
}
|
||||
|
||||
collections = append(collections, dbcs...)
|
||||
@ -165,7 +183,7 @@ func ProduceBackupCollections(
|
||||
su,
|
||||
errs)
|
||||
if err != nil {
|
||||
return nil, nil, false, err
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
collections = append(collections, baseCols...)
|
||||
@ -178,12 +196,12 @@ func ProduceBackupCollections(
|
||||
sitesPreviousPaths,
|
||||
su)
|
||||
if err != nil {
|
||||
return nil, nil, false, err
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
collections = append(collections, md)
|
||||
|
||||
return collections, ssmb.ToReader(), canUsePreviousBackup, el.Failure()
|
||||
return collections, ssmb.ToReader(), el.Failure()
|
||||
}
|
||||
|
||||
func getSitesMetadataCollection(
|
||||
|
||||
@ -12,6 +12,7 @@ import (
|
||||
"github.com/alcionai/corso/src/internal/common/ptr"
|
||||
"github.com/alcionai/corso/src/internal/data"
|
||||
"github.com/alcionai/corso/src/internal/m365/collection/drive"
|
||||
"github.com/alcionai/corso/src/internal/m365/graph"
|
||||
"github.com/alcionai/corso/src/internal/m365/support"
|
||||
"github.com/alcionai/corso/src/internal/operations/inject"
|
||||
"github.com/alcionai/corso/src/pkg/backup/details"
|
||||
@ -29,24 +30,20 @@ func ConsumeRestoreCollections(
|
||||
rcc inject.RestoreConsumerConfig,
|
||||
ac api.Client,
|
||||
backupDriveIDNames idname.Cacher,
|
||||
backupSiteIDWebURL idname.Cacher,
|
||||
dcs []data.RestoreCollection,
|
||||
deets *details.Builder,
|
||||
errs *fault.Bus,
|
||||
ctr *count.Bus,
|
||||
) (*support.ControllerOperationStatus, error) {
|
||||
var (
|
||||
restoreMetrics support.CollectionMetrics
|
||||
caches = drive.NewRestoreCaches(backupDriveIDNames)
|
||||
lrh = drive.NewLibraryRestoreHandler(ac, rcc.Selector.PathService())
|
||||
el = errs.Local()
|
||||
restoreMetrics support.CollectionMetrics
|
||||
caches = drive.NewRestoreCaches(backupDriveIDNames)
|
||||
lrh = drive.NewLibraryRestoreHandler(ac, rcc.Selector.PathService())
|
||||
el = errs.Local()
|
||||
webURLToSiteNames = map[string]string{}
|
||||
)
|
||||
|
||||
// TODO: uncomment when a handler is available
|
||||
// err := caches.Populate(ctx, lrh, rcc.ProtectedResource.ID())
|
||||
// if err != nil {
|
||||
// return nil, clues.Wrap(err, "initializing restore caches")
|
||||
// }
|
||||
|
||||
// Reorder collections so that the parents directories are created
|
||||
// before the child directories; a requirement for permissions.
|
||||
data.SortRestoreCollections(dcs)
|
||||
@ -59,7 +56,7 @@ func ConsumeRestoreCollections(
|
||||
|
||||
var (
|
||||
err error
|
||||
resp models.Siteable
|
||||
siteName string
|
||||
category = dc.FullPath().Category()
|
||||
metrics support.CollectionMetrics
|
||||
ictx = clues.Add(ctx,
|
||||
@ -71,16 +68,25 @@ func ConsumeRestoreCollections(
|
||||
|
||||
switch dc.FullPath().Category() {
|
||||
case path.LibrariesCategory:
|
||||
// TODO(meain): As of now we only restore the root site
|
||||
// and that too to whatever is currently the root site of the
|
||||
// group and not the original one. Not sure if the
|
||||
// original can be changed.
|
||||
resp, err = ac.Groups().GetRootSite(ctx, rcc.ProtectedResource.ID())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
siteID := dc.FullPath().Folders()[1]
|
||||
|
||||
webURL, ok := backupSiteIDWebURL.NameOf(siteID)
|
||||
if !ok {
|
||||
// This should not happen, but just in case
|
||||
logger.Ctx(ctx).With("site_id", siteID).Info("site weburl not found, using site id")
|
||||
}
|
||||
|
||||
pr := idname.NewProvider(ptr.Val(resp.GetId()), ptr.Val(resp.GetName()))
|
||||
siteName, err = getSiteName(ctx, siteID, webURL, ac.Sites(), webURLToSiteNames)
|
||||
if err != nil {
|
||||
el.AddRecoverable(ctx, clues.Wrap(err, "getting site").
|
||||
With("web_url", webURL, "site_id", siteID))
|
||||
} else if len(siteName) == 0 {
|
||||
// Site was deleted between backup and restore and is not
// available anymore.
|
||||
continue
|
||||
}
|
||||
|
||||
pr := idname.NewProvider(siteID, siteName)
|
||||
srcc := inject.RestoreConsumerConfig{
|
||||
BackupVersion: rcc.BackupVersion,
|
||||
Options: rcc.Options,
|
||||
@ -133,3 +139,38 @@ func ConsumeRestoreCollections(
|
||||
|
||||
return status, el.Failure()
|
||||
}
|
||||
|
||||
func getSiteName(
|
||||
ctx context.Context,
|
||||
siteID string,
|
||||
webURL string,
|
||||
ac api.GetByIDer[models.Siteable],
|
||||
webURLToSiteNames map[string]string,
|
||||
) (string, error) {
|
||||
siteName, ok := webURLToSiteNames[webURL]
|
||||
if ok {
|
||||
return siteName, nil
|
||||
}
|
||||
|
||||
site, err := ac.GetByID(ctx, siteID, api.CallConfig{})
|
||||
if err != nil {
|
||||
webURLToSiteNames[webURL] = ""
|
||||
|
||||
if graph.IsErrSiteNotFound(err) {
|
||||
// TODO(meain): Should we surface this to the user somehow?
|
||||
// In case a site that we had previously backed up was
|
||||
// deleted, skip that site with a warning.
|
||||
logger.Ctx(ctx).With("web_url", webURL, "site_id", siteID).
|
||||
Info("Site does not exist, skipping restore.")
|
||||
|
||||
return "", nil
|
||||
}
|
||||
|
||||
return "", err
|
||||
}
|
||||
|
||||
siteName = ptr.Val(site.GetDisplayName())
|
||||
webURLToSiteNames[webURL] = siteName
|
||||
|
||||
return siteName, nil
|
||||
}
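A minimal usage sketch with placeholder values, showing why the webURL-keyed map is shared across collections:

cache := map[string]string{}

// the first lookup for a site hits Graph and memoizes the result
// (an empty name is memoized for a deleted site)
name, err := getSiteName(ctx, "site-id", "https://contoso.example/sites/team", ac.Sites(), cache)
if err != nil {
	return err
}

// later collections with the same webURL are answered from the map without another Graph call
name, _ = getSiteName(ctx, "site-id", "https://contoso.example/sites/team", ac.Sites(), cache)
_ = name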
|
||||
|
||||
@ -7,12 +7,17 @@ import (
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/stretchr/testify/suite"
|
||||
"golang.org/x/exp/slices"
|
||||
|
||||
"github.com/alcionai/corso/src/internal/common/idname"
|
||||
"github.com/alcionai/corso/src/internal/common/ptr"
|
||||
"github.com/alcionai/corso/src/internal/data"
|
||||
"github.com/alcionai/corso/src/internal/data/mock"
|
||||
"github.com/alcionai/corso/src/internal/m365/graph"
|
||||
"github.com/alcionai/corso/src/internal/operations/inject"
|
||||
"github.com/alcionai/corso/src/internal/tester"
|
||||
"github.com/alcionai/corso/src/internal/tester/tconfig"
|
||||
"github.com/alcionai/corso/src/pkg/control"
|
||||
"github.com/alcionai/corso/src/pkg/fault"
|
||||
"github.com/alcionai/corso/src/pkg/path"
|
||||
"github.com/alcionai/corso/src/pkg/services/m365/api"
|
||||
@ -52,9 +57,118 @@ func (suite *GroupsUnitSuite) TestConsumeRestoreCollections_noErrorOnGroups() {
|
||||
rcc,
|
||||
api.Client{},
|
||||
idname.NewCache(map[string]string{}),
|
||||
idname.NewCache(map[string]string{}),
|
||||
dcs,
|
||||
nil,
|
||||
fault.New(false),
|
||||
nil)
|
||||
assert.NoError(t, err, "Groups Channels restore")
|
||||
}
|
||||
|
||||
type groupsIntegrationSuite struct {
|
||||
tester.Suite
|
||||
resource string
|
||||
tenantID string
|
||||
ac api.Client
|
||||
}
|
||||
|
||||
func TestGroupsIntegrationSuite(t *testing.T) {
|
||||
suite.Run(t, &groupsIntegrationSuite{
|
||||
Suite: tester.NewIntegrationSuite(
|
||||
t,
|
||||
[][]string{tconfig.M365AcctCredEnvs}),
|
||||
})
|
||||
}
|
||||
|
||||
func (suite *groupsIntegrationSuite) SetupSuite() {
|
||||
t := suite.T()
|
||||
|
||||
ctx, flush := tester.NewContext(t)
|
||||
defer flush()
|
||||
|
||||
graph.InitializeConcurrencyLimiter(ctx, true, 4)
|
||||
|
||||
suite.resource = tconfig.M365TeamID(t)
|
||||
|
||||
acct := tconfig.NewM365Account(t)
|
||||
creds, err := acct.M365Config()
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
suite.ac, err = api.NewClient(creds, control.DefaultOptions())
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
suite.tenantID = creds.AzureTenantID
|
||||
}
|
||||
|
||||
// test for getSiteName
|
||||
func (suite *groupsIntegrationSuite) TestGetSiteName() {
|
||||
t := suite.T()
|
||||
|
||||
ctx, flush := tester.NewContext(t)
|
||||
defer flush()
|
||||
|
||||
rootSite, err := suite.ac.Groups().GetRootSite(ctx, suite.resource)
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
// Generate a fake site ID that appears valid to graph API but doesn't actually exist.
|
||||
// This "could" be flaky, but highly unlikely
|
||||
unavailableSiteID := []rune(ptr.Val(rootSite.GetId()))
|
||||
firstIDChar := slices.Index(unavailableSiteID, ',') + 1
|
||||
|
||||
if unavailableSiteID[firstIDChar] != '2' {
|
||||
unavailableSiteID[firstIDChar] = '2'
|
||||
} else {
|
||||
unavailableSiteID[firstIDChar] = '1'
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
siteID string
|
||||
webURL string
|
||||
siteName string
|
||||
webURLToSiteNames map[string]string
|
||||
expectErr assert.ErrorAssertionFunc
|
||||
}{
|
||||
{
|
||||
name: "valid",
|
||||
siteID: ptr.Val(rootSite.GetId()),
|
||||
webURL: ptr.Val(rootSite.GetWebUrl()),
|
||||
siteName: *rootSite.GetDisplayName(),
|
||||
webURLToSiteNames: map[string]string{},
|
||||
expectErr: assert.NoError,
|
||||
},
|
||||
{
|
||||
name: "unavailable",
|
||||
siteID: string(unavailableSiteID),
|
||||
webURL: "https://does-not-matter",
|
||||
siteName: "",
|
||||
webURLToSiteNames: map[string]string{},
|
||||
expectErr: assert.NoError,
|
||||
},
|
||||
{
|
||||
name: "previously found",
|
||||
siteID: "random-id",
|
||||
webURL: "https://random-url",
|
||||
siteName: "random-name",
|
||||
webURLToSiteNames: map[string]string{"https://random-url": "random-name"},
|
||||
expectErr: assert.NoError,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
suite.Run(test.name, func() {
|
||||
t := suite.T()
|
||||
|
||||
siteName, err := getSiteName(
|
||||
ctx,
|
||||
test.siteID,
|
||||
test.webURL,
|
||||
suite.ac.Sites(),
|
||||
test.webURLToSiteNames)
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
test.expectErr(t, err)
|
||||
assert.Equal(t, test.siteName, siteName)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@ -8,13 +8,11 @@ import (
|
||||
"github.com/microsoftgraph/msgraph-sdk-go/drives"
|
||||
"github.com/microsoftgraph/msgraph-sdk-go/models"
|
||||
|
||||
"github.com/alcionai/corso/src/internal/common/ptr"
|
||||
odConsts "github.com/alcionai/corso/src/internal/m365/service/onedrive/consts"
|
||||
"github.com/alcionai/corso/src/pkg/backup/details"
|
||||
"github.com/alcionai/corso/src/pkg/control"
|
||||
"github.com/alcionai/corso/src/pkg/path"
|
||||
"github.com/alcionai/corso/src/pkg/services/m365/api"
|
||||
apiMock "github.com/alcionai/corso/src/pkg/services/m365/api/mock"
|
||||
)
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
@ -24,8 +22,6 @@ import (
|
||||
type BackupHandler struct {
|
||||
ItemInfo details.ItemInfo
|
||||
|
||||
DriveItemEnumeration EnumeratesDriveItemsDelta
|
||||
|
||||
GI GetsItem
|
||||
GIP GetsItemPermission
|
||||
|
||||
@ -59,7 +55,6 @@ func DefaultOneDriveBH(resourceOwner string) *BackupHandler {
|
||||
OneDrive: &details.OneDriveInfo{},
|
||||
Extension: &details.ExtensionData{},
|
||||
},
|
||||
DriveItemEnumeration: EnumeratesDriveItemsDelta{},
|
||||
GI: GetsItem{Err: clues.New("not defined")},
|
||||
GIP: GetsItemPermission{Err: clues.New("not defined")},
|
||||
PathPrefixFn: defaultOneDrivePathPrefixer,
|
||||
@ -129,6 +124,10 @@ func (h BackupHandler) NewDrivePager(string, []string) api.Pager[models.Driveabl
|
||||
return h.DrivePagerV
|
||||
}
|
||||
|
||||
func (h BackupHandler) NewItemPager(driveID string, _ string, _ []string) api.DeltaPager[models.DriveItemable] {
|
||||
return h.ItemPagerV[driveID]
|
||||
}
|
||||
|
||||
func (h BackupHandler) FormatDisplayPath(_ string, pb *path.Builder) string {
|
||||
return "/" + pb.String()
|
||||
}
|
||||
@ -153,13 +152,6 @@ func (h *BackupHandler) Get(context.Context, string, map[string]string) (*http.R
|
||||
return h.GetResps[c], h.GetErrs[c]
|
||||
}
|
||||
|
||||
func (h BackupHandler) EnumerateDriveItemsDelta(
|
||||
ctx context.Context,
|
||||
driveID, prevDeltaLink string,
|
||||
) ([]models.DriveItemable, api.DeltaUpdate, error) {
|
||||
return h.DriveItemEnumeration.EnumerateDriveItemsDelta(ctx, driveID, prevDeltaLink)
|
||||
}
|
||||
|
||||
func (h BackupHandler) GetItem(ctx context.Context, _, _ string) (models.DriveItemable, error) {
|
||||
return h.GI.GetItem(ctx, "", "")
|
||||
}
|
||||
@ -262,65 +254,6 @@ func (m GetsItem) GetItem(
|
||||
return m.Item, m.Err
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Enumerates Drive Items
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
type EnumeratesDriveItemsDelta struct {
|
||||
Items map[string][]models.DriveItemable
|
||||
DeltaUpdate map[string]api.DeltaUpdate
|
||||
Err map[string]error
|
||||
}
|
||||
|
||||
func (edi EnumeratesDriveItemsDelta) EnumerateDriveItemsDelta(
|
||||
_ context.Context,
|
||||
driveID, _ string,
|
||||
) (
|
||||
[]models.DriveItemable,
|
||||
api.DeltaUpdate,
|
||||
error,
|
||||
) {
|
||||
return edi.Items[driveID], edi.DeltaUpdate[driveID], edi.Err[driveID]
|
||||
}
|
||||
|
||||
func PagerResultToEDID(
|
||||
m map[string][]apiMock.PagerResult[models.DriveItemable],
|
||||
) EnumeratesDriveItemsDelta {
|
||||
edi := EnumeratesDriveItemsDelta{
|
||||
Items: map[string][]models.DriveItemable{},
|
||||
DeltaUpdate: map[string]api.DeltaUpdate{},
|
||||
Err: map[string]error{},
|
||||
}
|
||||
|
||||
for driveID, results := range m {
|
||||
var (
|
||||
err error
|
||||
items = []models.DriveItemable{}
|
||||
deltaUpdate api.DeltaUpdate
|
||||
)
|
||||
|
||||
for _, pr := range results {
|
||||
items = append(items, pr.Values...)
|
||||
|
||||
if pr.DeltaLink != nil {
|
||||
deltaUpdate = api.DeltaUpdate{URL: ptr.Val(pr.DeltaLink)}
|
||||
}
|
||||
|
||||
if pr.Err != nil {
|
||||
err = pr.Err
|
||||
}
|
||||
|
||||
deltaUpdate.Reset = deltaUpdate.Reset || pr.ResetDelta
|
||||
}
|
||||
|
||||
edi.Items[driveID] = items
|
||||
edi.Err[driveID] = err
|
||||
edi.DeltaUpdate[driveID] = deltaUpdate
|
||||
}
|
||||
|
||||
return edi
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Get Item Permissioner
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
@ -109,10 +109,11 @@ func (suite *SharePointPageSuite) TestRestoreSinglePage() {
|
||||
//nolint:lll
|
||||
byteArray := spMock.Page("Byte Test")
|
||||
|
||||
pageData := data.NewUnindexedPrefetchedItem(
|
||||
pageData, err := data.NewUnindexedPrefetchedItem(
|
||||
io.NopCloser(bytes.NewReader(byteArray)),
|
||||
testName,
|
||||
time.Now())
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
info, err := api.RestoreSitePage(
|
||||
ctx,
|
||||
|
||||
@ -90,9 +90,12 @@ func (suite *LibrariesBackupUnitSuite) TestUpdateCollections() {
|
||||
|
||||
var (
|
||||
paths = map[string]string{}
|
||||
currPaths = map[string]string{}
|
||||
newPaths = map[string]string{}
|
||||
excluded = map[string]struct{}{}
|
||||
collMap = map[string]map[string]*drive.Collection{
|
||||
itemColls = map[string]map[string]string{
|
||||
driveID: {},
|
||||
}
|
||||
collMap = map[string]map[string]*drive.Collection{
|
||||
driveID: {},
|
||||
}
|
||||
)
|
||||
@ -106,14 +109,15 @@ func (suite *LibrariesBackupUnitSuite) TestUpdateCollections() {
|
||||
|
||||
c.CollectionMap = collMap
|
||||
|
||||
_, err := c.UpdateCollections(
|
||||
err := c.UpdateCollections(
|
||||
ctx,
|
||||
driveID,
|
||||
"General",
|
||||
test.items,
|
||||
paths,
|
||||
currPaths,
|
||||
newPaths,
|
||||
excluded,
|
||||
itemColls,
|
||||
true,
|
||||
fault.New(true))
|
||||
|
||||
|
||||
@ -4,6 +4,7 @@ import (
|
||||
"bytes"
|
||||
"io"
|
||||
|
||||
"github.com/alcionai/clues"
|
||||
"golang.org/x/exp/maps"
|
||||
|
||||
"github.com/alcionai/corso/src/internal/data"
|
||||
@ -163,28 +164,29 @@ func CollectionsForInfo(
func backupOutputPathFromRestore(
restoreCfg control.RestoreConfig,
inputPath path.Path,
) (path.Path, error) {
) (*path.Builder, error) {
base := []string{restoreCfg.Location}
folders := inputPath.Folders()

switch inputPath.Service() {
// OneDrive has leading information like the drive ID.
if inputPath.Service() == path.OneDriveService || inputPath.Service() == path.SharePointService {
folders := inputPath.Folders()
base = append(append([]string{}, folders[:3]...), restoreCfg.Location)
case path.OneDriveService, path.SharePointService:
p, err := path.ToDrivePath(inputPath)
if err != nil {
return nil, clues.Stack(err)
}

if len(folders) > 3 {
base = append(base, folders[3:]...)
// Remove driveID, root, etc.
folders = p.Folders
// Re-add root, but it needs to be in front of the restore folder.
base = append([]string{p.Root}, base...)

// Currently contacts restore doesn't have nested folders.
case path.ExchangeService:
if inputPath.Category() == path.ContactsCategory {
folders = nil
}
}

if inputPath.Service() == path.ExchangeService && inputPath.Category() == path.EmailCategory {
base = append(base, inputPath.Folders()...)
}

return path.Build(
inputPath.Tenant(),
inputPath.ProtectedResource(),
inputPath.Service(),
inputPath.Category(),
false,
base...)
return path.Builder{}.Append(append(base, folders...)...), nil
}
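A hypothetical worked example of the rewritten helper, assuming a drive-based input path whose ToDrivePath yields Root "root:" and Folders ["a", "b"], and a restore location of "Corso_Restore":

// base starts as ["Corso_Restore"]
// drive branch: folders = ["a", "b"]; base = ["root:", "Corso_Restore"]
// result: a path.Builder equivalent to root:/Corso_Restore/a/b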
|
||||
|
||||
@ -762,11 +762,10 @@ func runDriveIncrementalTest(
|
||||
true)
|
||||
|
||||
// do some additional checks to ensure the incremental dealt with fewer items.
|
||||
// +2 on read/writes to account for metadata: 1 delta and 1 path.
|
||||
var (
|
||||
expectWrites = test.itemsWritten + 2
|
||||
expectWrites = test.itemsWritten
|
||||
expectNonMetaWrites = test.nonMetaItemsWritten
|
||||
expectReads = test.itemsRead + 2
|
||||
expectReads = test.itemsRead
|
||||
assertReadWrite = assert.Equal
|
||||
)
|
||||
|
||||
@ -775,6 +774,17 @@ func runDriveIncrementalTest(
|
||||
// /libraries/sites/previouspath
|
||||
expectWrites++
|
||||
expectReads++
|
||||
|
||||
// +2 on read/writes to account for metadata: 1 delta and 1 path (for each site)
|
||||
sites, err := ac.Groups().GetAllSites(ctx, owner, fault.New(true))
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
expectWrites += len(sites) * 2
|
||||
expectReads += len(sites) * 2
|
||||
} else {
|
||||
// +2 on read/writes to account for metadata: 1 delta and 1 path.
|
||||
expectWrites += 2
|
||||
expectReads += 2
|
||||
}
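For example, a Groups backup whose team has three attached sites would raise the expected counts by one for the sites previous-path collection plus six more (one delta token and one path file per site), while the non-Groups branch keeps the flat +2.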
|
||||
|
||||
// Sharepoint can produce a superset of permissions by nature of
|
||||
|
||||
@ -182,12 +182,17 @@ func collect(
|
||||
return nil, clues.Wrap(err, "marshalling body").WithClues(ctx)
|
||||
}
|
||||
|
||||
item, err := data.NewUnindexedPrefetchedItem(
|
||||
io.NopCloser(bytes.NewReader(bs)),
|
||||
col.itemName,
|
||||
time.Now())
|
||||
if err != nil {
|
||||
return nil, clues.Stack(err).WithClues(ctx)
|
||||
}
|
||||
|
||||
dc := streamCollection{
|
||||
folderPath: p,
|
||||
item: data.NewUnindexedPrefetchedItem(
|
||||
io.NopCloser(bytes.NewReader(bs)),
|
||||
col.itemName,
|
||||
time.Now()),
|
||||
item: item,
|
||||
}
|
||||
|
||||
return &dc, nil
|
||||
|
||||
@ -384,20 +384,20 @@ func (pec printableErrCore) Values() []string {
// funcs, and the function that spawned the local bus should always
// return `local.Failure()` to ensure that hard failures are propagated
// back upstream.
func (e *Bus) Local() *LocalBus {
return &LocalBus{
func (e *Bus) Local() *localBus {
return &localBus{
mu: &sync.Mutex{},
bus: e,
}
}

type LocalBus struct {
type localBus struct {
mu *sync.Mutex
bus *Bus
current error
}

func (e *LocalBus) AddRecoverable(ctx context.Context, err error) {
func (e *localBus) AddRecoverable(ctx context.Context, err error) {
if err == nil {
return
}
@ -422,7 +422,7 @@ func (e *LocalBus) AddRecoverable(ctx context.Context, err error) {
// 2. Skipping avoids a permanent and consistent failure. If
// the underlying reason is transient or otherwise recoverable,
// the item should not be skipped.
func (e *LocalBus) AddSkip(ctx context.Context, s *Skipped) {
func (e *localBus) AddSkip(ctx context.Context, s *Skipped) {
if s == nil {
return
}
@ -437,7 +437,7 @@ func (e *LocalBus) AddSkip(ctx context.Context, s *Skipped) {
// It does not return the underlying bus.Failure(), only the failure
// that was recorded within the local bus instance. This error should
// get returned by any func which created a local bus.
func (e *LocalBus) Failure() error {
func (e *localBus) Failure() error {
return e.current
}
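A minimal sketch of the intended pattern, assuming errs is the *Bus handed to a producer; items and process are placeholders:

el := errs.Local()

for _, item := range items {
	if err := process(item); err != nil {
		el.AddRecoverable(ctx, clues.Stack(err))
		continue
	}
}

// the spawning func returns the local failure so hard errors propagate upstream
return el.Failure()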
|
||||
|
||||
|
||||
@ -96,14 +96,10 @@ var serviceCategories = map[ServiceType]map[CategoryType]struct{}{
|
||||
ChannelMessagesCategory: {},
|
||||
LibrariesCategory: {},
|
||||
},
|
||||
TeamsService: {
|
||||
ChannelMessagesCategory: {},
|
||||
LibrariesCategory: {},
|
||||
},
|
||||
}
|
||||
|
||||
func validateServiceAndCategoryStrings(s, c string) (ServiceType, CategoryType, error) {
|
||||
service := toServiceType(s)
|
||||
service := ToServiceType(s)
|
||||
if service == UnknownService {
|
||||
return UnknownService, UnknownCategory, clues.Stack(ErrorUnknownService).With("service", fmt.Sprintf("%q", s))
|
||||
}
|
||||
|
||||
@ -157,7 +157,7 @@ func (suite *ServiceCategoryUnitSuite) TestToServiceType() {
|
||||
suite.Run(test.name, func() {
|
||||
t := suite.T()
|
||||
|
||||
assert.Equal(t, test.expected, toServiceType(test.service))
|
||||
assert.Equal(t, test.expected, ToServiceType(test.service))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@ -15,27 +15,25 @@ var ErrorUnknownService = clues.New("unknown service string")
// Metadata services are not considered valid service types for resource paths
// though they can be used for metadata paths.
//
// The order of the enums below can be changed, but the string representation of
// each enum must remain the same or migration code needs to be added to handle
// changes to the string format.
// The string representation of each enum _must remain the same_. In case of
// changes to those values, we'll need migration code to handle transitions
// across states else we'll get marshalling/unmarshalling errors.
type ServiceType int

//go:generate stringer -type=ServiceType -linecomment
const (
UnknownService ServiceType = 0
ExchangeService ServiceType = 1 // exchange
OneDriveService ServiceType = 2 // onedrive
SharePointService ServiceType = 3 // sharepoint
ExchangeMetadataService ServiceType = 4 // exchangeMetadata
OneDriveMetadataService ServiceType = 5 // onedriveMetadata
SharePointMetadataService ServiceType = 6 // sharepointMetadata
GroupsService ServiceType = 7 // groups
GroupsMetadataService ServiceType = 8 // groupsMetadata
TeamsService ServiceType = 9 // teams
TeamsMetadataService ServiceType = 10 // teamsMetadata
ExchangeService ServiceType = 1 // exchange
OneDriveService ServiceType = 2 // onedrive
SharePointService ServiceType = 3 // sharepoint
ExchangeMetadataService ServiceType = 4 // exchangeMetadata
OneDriveMetadataService ServiceType = 5 // onedriveMetadata
SharePointMetadataService ServiceType = 6 // sharepointMetadata
GroupsService ServiceType = 7 // groups
GroupsMetadataService ServiceType = 8 // groupsMetadata
)

func toServiceType(service string) ServiceType {
func ToServiceType(service string) ServiceType {
s := strings.ToLower(service)

switch s {
@ -47,8 +45,6 @@ func toServiceType(service string) ServiceType {
return SharePointService
case strings.ToLower(GroupsService.String()):
return GroupsService
case strings.ToLower(TeamsService.String()):
return TeamsService
case strings.ToLower(ExchangeMetadataService.String()):
return ExchangeMetadataService
case strings.ToLower(OneDriveMetadataService.String()):
@ -57,8 +53,6 @@ func toServiceType(service string) ServiceType {
return SharePointMetadataService
case strings.ToLower(GroupsMetadataService.String()):
return GroupsMetadataService
case strings.ToLower(TeamsMetadataService.String()):
return TeamsMetadataService
default:
return UnknownService
}
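A minimal caller-side sketch of the newly exported helper; the input string mirrors the -linecomment values above:

svc := path.ToServiceType("groups")
if svc == path.UnknownService {
	// unrecognized service string; treat as a caller error
}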
|
||||
|
||||
@ -17,13 +17,11 @@ func _() {
|
||||
_ = x[SharePointMetadataService-6]
|
||||
_ = x[GroupsService-7]
|
||||
_ = x[GroupsMetadataService-8]
|
||||
_ = x[TeamsService-9]
|
||||
_ = x[TeamsMetadataService-10]
|
||||
}
|
||||
|
||||
const _ServiceType_name = "UnknownServiceexchangeonedrivesharepointexchangeMetadataonedriveMetadatasharepointMetadatagroupsgroupsMetadatateamsteamsMetadata"
|
||||
const _ServiceType_name = "UnknownServiceexchangeonedrivesharepointexchangeMetadataonedriveMetadatasharepointMetadatagroupsgroupsMetadata"
|
||||
|
||||
var _ServiceType_index = [...]uint8{0, 14, 22, 30, 40, 56, 72, 90, 96, 110, 115, 128}
|
||||
var _ServiceType_index = [...]uint8{0, 14, 22, 30, 40, 56, 72, 90, 96, 110}
|
||||
|
||||
func (i ServiceType) String() string {
|
||||
if i < 0 || i >= ServiceType(len(_ServiceType_index)-1) {
|
||||
|
||||
359
src/pkg/repository/backups.go
Normal file
@ -0,0 +1,359 @@
|
||||
package repository
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/alcionai/clues"
|
||||
"github.com/kopia/kopia/repo/manifest"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/alcionai/corso/src/internal/common/idname"
|
||||
"github.com/alcionai/corso/src/internal/data"
|
||||
"github.com/alcionai/corso/src/internal/kopia"
|
||||
"github.com/alcionai/corso/src/internal/m365/collection/drive/metadata"
|
||||
"github.com/alcionai/corso/src/internal/model"
|
||||
"github.com/alcionai/corso/src/internal/operations"
|
||||
"github.com/alcionai/corso/src/internal/streamstore"
|
||||
"github.com/alcionai/corso/src/internal/version"
|
||||
"github.com/alcionai/corso/src/pkg/backup"
|
||||
"github.com/alcionai/corso/src/pkg/backup/details"
|
||||
"github.com/alcionai/corso/src/pkg/fault"
|
||||
"github.com/alcionai/corso/src/pkg/selectors"
|
||||
"github.com/alcionai/corso/src/pkg/store"
|
||||
)
|
||||
|
||||
// BackupGetter deals with retrieving metadata about backups from the
|
||||
// repository.
|
||||
type BackupGetter interface {
|
||||
Backup(ctx context.Context, id string) (*backup.Backup, error)
|
||||
Backups(ctx context.Context, ids []string) ([]*backup.Backup, *fault.Bus)
|
||||
BackupsByTag(ctx context.Context, fs ...store.FilterOption) ([]*backup.Backup, error)
|
||||
GetBackupDetails(
|
||||
ctx context.Context,
|
||||
backupID string,
|
||||
) (*details.Details, *backup.Backup, *fault.Bus)
|
||||
GetBackupErrors(
|
||||
ctx context.Context,
|
||||
backupID string,
|
||||
) (*fault.Errors, *backup.Backup, *fault.Bus)
|
||||
}
|
||||
|
||||
type Backuper interface {
|
||||
NewBackup(
|
||||
ctx context.Context,
|
||||
self selectors.Selector,
|
||||
) (operations.BackupOperation, error)
|
||||
NewBackupWithLookup(
|
||||
ctx context.Context,
|
||||
self selectors.Selector,
|
||||
ins idname.Cacher,
|
||||
) (operations.BackupOperation, error)
|
||||
DeleteBackups(
|
||||
ctx context.Context,
|
||||
failOnMissing bool,
|
||||
ids ...string,
|
||||
) error
|
||||
}
|
||||
|
||||
// NewBackup generates a BackupOperation runner.
|
||||
func (r repository) NewBackup(
|
||||
ctx context.Context,
|
||||
sel selectors.Selector,
|
||||
) (operations.BackupOperation, error) {
|
||||
return r.NewBackupWithLookup(ctx, sel, nil)
|
||||
}
|
||||
|
||||
// NewBackupWithLookup generates a BackupOperation runner.
|
||||
// ins is an optional cache of protected resource IDs and names, in case the
// caller has already generated those values.
|
||||
func (r repository) NewBackupWithLookup(
|
||||
ctx context.Context,
|
||||
sel selectors.Selector,
|
||||
ins idname.Cacher,
|
||||
) (operations.BackupOperation, error) {
|
||||
err := r.ConnectDataProvider(ctx, sel.PathService())
|
||||
if err != nil {
|
||||
return operations.BackupOperation{}, clues.Wrap(err, "connecting to m365")
|
||||
}
|
||||
|
||||
ownerID, ownerName, err := r.Provider.PopulateProtectedResourceIDAndName(ctx, sel.DiscreteOwner, ins)
|
||||
if err != nil {
|
||||
return operations.BackupOperation{}, clues.Wrap(err, "resolving resource owner details")
|
||||
}
|
||||
|
||||
// TODO: retrieve display name from gc
|
||||
sel = sel.SetDiscreteOwnerIDName(ownerID, ownerName)
|
||||
|
||||
return operations.NewBackupOperation(
|
||||
ctx,
|
||||
r.Opts,
|
||||
r.dataLayer,
|
||||
store.NewWrapper(r.modelStore),
|
||||
r.Provider,
|
||||
r.Account,
|
||||
sel,
|
||||
sel, // the selector acts as an IDNamer for its discrete resource owner.
|
||||
r.Bus)
|
||||
}
|
||||
|
||||
// Backup retrieves a backup by id.
|
||||
func (r repository) Backup(ctx context.Context, id string) (*backup.Backup, error) {
|
||||
return getBackup(ctx, id, store.NewWrapper(r.modelStore))
|
||||
}
|
||||
|
||||
// getBackup handles the processing for Backup.
|
||||
func getBackup(
|
||||
ctx context.Context,
|
||||
id string,
|
||||
sw store.BackupGetter,
|
||||
) (*backup.Backup, error) {
|
||||
b, err := sw.GetBackup(ctx, model.StableID(id))
|
||||
if err != nil {
|
||||
return nil, errWrapper(err)
|
||||
}
|
||||
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// Backups lists backups by ID. Returns as many backups as possible with
|
||||
// errors for the backups it was unable to retrieve.
|
||||
func (r repository) Backups(ctx context.Context, ids []string) ([]*backup.Backup, *fault.Bus) {
|
||||
var (
|
||||
bups []*backup.Backup
|
||||
errs = fault.New(false)
|
||||
sw = store.NewWrapper(r.modelStore)
|
||||
)
|
||||
|
||||
for _, id := range ids {
|
||||
ictx := clues.Add(ctx, "backup_id", id)
|
||||
|
||||
b, err := sw.GetBackup(ictx, model.StableID(id))
|
||||
if err != nil {
|
||||
errs.AddRecoverable(ctx, errWrapper(err))
|
||||
}
|
||||
|
||||
bups = append(bups, b)
|
||||
}
|
||||
|
||||
return bups, errs
|
||||
}
|
||||
|
||||
// BackupsByTag lists all backups in a repository that contain all the tags
|
||||
// specified.
|
||||
func (r repository) BackupsByTag(ctx context.Context, fs ...store.FilterOption) ([]*backup.Backup, error) {
|
||||
sw := store.NewWrapper(r.modelStore)
|
||||
return backupsByTag(ctx, sw, fs)
|
||||
}
|
||||
|
||||
// backupsByTag returns all backups matching all provided tags.
|
||||
//
|
||||
// TODO(ashmrtn): This exists mostly for testing, but we could restructure the
|
||||
// code in this file so there's a more elegant mocking solution.
|
||||
func backupsByTag(
|
||||
ctx context.Context,
|
||||
sw store.BackupWrapper,
|
||||
fs []store.FilterOption,
|
||||
) ([]*backup.Backup, error) {
|
||||
bs, err := sw.GetBackups(ctx, fs...)
|
||||
if err != nil {
|
||||
return nil, clues.Stack(err)
|
||||
}
|
||||
|
||||
// Filter out assist backup bases as they're considered incomplete and we
|
||||
// haven't been displaying them before now.
|
||||
res := make([]*backup.Backup, 0, len(bs))
|
||||
|
||||
for _, b := range bs {
|
||||
if t := b.Tags[model.BackupTypeTag]; t != model.AssistBackup {
|
||||
res = append(res, b)
|
||||
}
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// BackupDetails returns the specified backup.Details
|
||||
func (r repository) GetBackupDetails(
|
||||
ctx context.Context,
|
||||
backupID string,
|
||||
) (*details.Details, *backup.Backup, *fault.Bus) {
|
||||
errs := fault.New(false)
|
||||
|
||||
deets, bup, err := getBackupDetails(
|
||||
ctx,
|
||||
backupID,
|
||||
r.Account.ID(),
|
||||
r.dataLayer,
|
||||
store.NewWrapper(r.modelStore),
|
||||
errs)
|
||||
|
||||
return deets, bup, errs.Fail(err)
|
||||
}
|
||||
|
||||
// getBackupDetails handles the processing for GetBackupDetails.
|
||||
func getBackupDetails(
|
||||
ctx context.Context,
|
||||
backupID, tenantID string,
|
||||
kw *kopia.Wrapper,
|
||||
sw store.BackupGetter,
|
||||
errs *fault.Bus,
|
||||
) (*details.Details, *backup.Backup, error) {
|
||||
b, err := sw.GetBackup(ctx, model.StableID(backupID))
|
||||
if err != nil {
|
||||
return nil, nil, errWrapper(err)
|
||||
}
|
||||
|
||||
ssid := b.StreamStoreID
|
||||
if len(ssid) == 0 {
|
||||
ssid = b.DetailsID
|
||||
}
|
||||
|
||||
if len(ssid) == 0 {
|
||||
return nil, b, clues.New("no streamstore id in backup").WithClues(ctx)
|
||||
}
|
||||
|
||||
var (
|
||||
sstore = streamstore.NewStreamer(kw, tenantID, b.Selector.PathService())
|
||||
deets details.Details
|
||||
)
|
||||
|
||||
err = sstore.Read(
|
||||
ctx,
|
||||
ssid,
|
||||
streamstore.DetailsReader(details.UnmarshalTo(&deets)),
|
||||
errs)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// Retroactively fill in isMeta information for items in older
|
||||
// backup versions without that info
|
||||
// version.Restore2 introduces the IsMeta flag, so only v1 needs a check.
|
||||
if b.Version >= version.OneDrive1DataAndMetaFiles && b.Version < version.OneDrive3IsMetaMarker {
|
||||
for _, d := range deets.Entries {
|
||||
if d.OneDrive != nil {
|
||||
d.OneDrive.IsMeta = metadata.HasMetaSuffix(d.RepoRef)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
deets.DetailsModel = deets.FilterMetaFiles()
|
||||
|
||||
return &deets, b, nil
|
||||
}
|
||||
|
||||
// BackupErrors returns the specified backup's fault.Errors
|
||||
func (r repository) GetBackupErrors(
|
||||
ctx context.Context,
|
||||
backupID string,
|
||||
) (*fault.Errors, *backup.Backup, *fault.Bus) {
|
||||
errs := fault.New(false)
|
||||
|
||||
fe, bup, err := getBackupErrors(
|
||||
ctx,
|
||||
backupID,
|
||||
r.Account.ID(),
|
||||
r.dataLayer,
|
||||
store.NewWrapper(r.modelStore),
|
||||
errs)
|
||||
|
||||
return fe, bup, errs.Fail(err)
|
||||
}
|
||||
|
||||
// getBackupErrors handles the processing for GetBackupErrors.
|
||||
func getBackupErrors(
|
||||
ctx context.Context,
|
||||
backupID, tenantID string,
|
||||
kw *kopia.Wrapper,
|
||||
sw store.BackupGetter,
|
||||
errs *fault.Bus,
|
||||
) (*fault.Errors, *backup.Backup, error) {
|
||||
b, err := sw.GetBackup(ctx, model.StableID(backupID))
|
||||
if err != nil {
|
||||
return nil, nil, errWrapper(err)
|
||||
}
|
||||
|
||||
ssid := b.StreamStoreID
|
||||
if len(ssid) == 0 {
|
||||
return nil, b, clues.New("missing streamstore id in backup").WithClues(ctx)
|
||||
}
|
||||
|
||||
var (
|
||||
sstore = streamstore.NewStreamer(kw, tenantID, b.Selector.PathService())
|
||||
fe fault.Errors
|
||||
)
|
||||
|
||||
err = sstore.Read(
|
||||
ctx,
|
||||
ssid,
|
||||
streamstore.FaultErrorsReader(fault.UnmarshalErrorsTo(&fe)),
|
||||
errs)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return &fe, b, nil
|
||||
}
|
||||
|
||||
// DeleteBackups removes the backups from both the model store and the backup
|
||||
// storage.
|
||||
//
|
||||
// If failOnMissing is true then returns an error if a backup model can't be
|
||||
// found. Otherwise ignores missing backup models.
|
||||
//
|
||||
// Missing models or snapshots during the actual deletion do not cause errors.
|
||||
//
|
||||
// All backups are delete as an atomic unit so any failures will result in no
|
||||
// deletions.
|
||||
func (r repository) DeleteBackups(
|
||||
ctx context.Context,
|
||||
failOnMissing bool,
|
||||
ids ...string,
|
||||
) error {
|
||||
return deleteBackups(ctx, store.NewWrapper(r.modelStore), failOnMissing, ids...)
|
||||
}
|
||||
|
||||
// deleteBackup handles the processing for backup deletion.
|
||||
func deleteBackups(
|
||||
ctx context.Context,
|
||||
sw store.BackupGetterModelDeleter,
|
||||
failOnMissing bool,
|
||||
ids ...string,
|
||||
) error {
|
||||
// Although we haven't explicitly stated it, snapshots are technically
|
||||
// manifests in kopia. This means we can use the same delete API to remove
|
||||
// them and backup models. Deleting all of them together gives us both
|
||||
// atomicity guarantees (around when data will be flushed) and helps reduce
|
||||
// the number of manifest blobs that kopia will create.
|
||||
var toDelete []manifest.ID
|
||||
|
||||
for _, id := range ids {
|
||||
b, err := sw.GetBackup(ctx, model.StableID(id))
|
||||
if err != nil {
|
||||
if !failOnMissing && errors.Is(err, data.ErrNotFound) {
|
||||
continue
|
||||
}
|
||||
|
||||
return clues.Stack(errWrapper(err)).
|
||||
WithClues(ctx).
|
||||
With("delete_backup_id", id)
|
||||
}
|
||||
|
||||
toDelete = append(toDelete, b.ModelStoreID)
|
||||
|
||||
if len(b.SnapshotID) > 0 {
|
||||
toDelete = append(toDelete, manifest.ID(b.SnapshotID))
|
||||
}
|
||||
|
||||
ssid := b.StreamStoreID
|
||||
if len(ssid) == 0 {
|
||||
ssid = b.DetailsID
|
||||
}
|
||||
|
||||
if len(ssid) > 0 {
|
||||
toDelete = append(toDelete, manifest.ID(ssid))
|
||||
}
|
||||
}
|
||||
|
||||
return sw.DeleteWithModelStoreIDs(ctx, toDelete...)
|
||||
}
|
||||
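A rough consumer-side sketch of the interfaces above; the helper name and flow are illustrative, not part of the package:

// inspectThenDelete is a hypothetical helper: look up a backup and its
// recorded faults, then remove it. failOnMissing=false makes deletion
// tolerant of ids that no longer resolve to a backup model.
func inspectThenDelete(ctx context.Context, bg BackupGetter, bu Backuper, id string) error {
    if _, err := bg.Backup(ctx, id); err != nil {
        return err
    }

    ferrs, _, bus := bg.GetBackupErrors(ctx, id)
    if bus.Failure() != nil {
        return bus.Failure()
    }

    _ = ferrs // e.g. report recoverable item failures before deleting

    return bu.DeleteBackups(ctx, false, id)
}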
88  src/pkg/repository/data_providers.go  Normal file
@ -0,0 +1,88 @@
package repository

import (
    "context"
    "fmt"

    "github.com/alcionai/clues"

    "github.com/alcionai/corso/src/internal/m365"
    "github.com/alcionai/corso/src/internal/observe"
    "github.com/alcionai/corso/src/internal/operations/inject"
    "github.com/alcionai/corso/src/pkg/account"
    "github.com/alcionai/corso/src/pkg/path"
)

type DataProvider interface {
    inject.BackupProducer
    inject.ExportConsumer
    inject.RestoreConsumer

    VerifyAccess(ctx context.Context) error
}

type DataProviderConnector interface {
    // ConnectDataProvider initializes configurations
    // and establishes the client connection with the
    // data provider for this operation.
    ConnectDataProvider(
        ctx context.Context,
        pst path.ServiceType,
    ) error
}

func (r *repository) ConnectDataProvider(
    ctx context.Context,
    pst path.ServiceType,
) error {
    var (
        provider DataProvider
        err      error
    )

    switch r.Account.Provider {
    case account.ProviderM365:
        provider, err = connectToM365(ctx, *r, pst)
    default:
        err = clues.New("unrecognized provider").WithClues(ctx)
    }

    if err != nil {
        return clues.Wrap(err, "connecting data provider")
    }

    if err := provider.VerifyAccess(ctx); err != nil {
        return clues.Wrap(err, fmt.Sprintf("verifying %s account connection", r.Account.Provider))
    }

    r.Provider = provider

    return nil
}

func connectToM365(
    ctx context.Context,
    r repository,
    pst path.ServiceType,
) (*m365.Controller, error) {
    if r.Provider != nil {
        ctrl, ok := r.Provider.(*m365.Controller)
        if !ok {
            // if the provider is initialized to a non-m365 controller, we should not
            // attempt to connect to m365 afterward.
            return nil, clues.New("Attempted to connect to multiple data providers")
        }

        return ctrl, nil
    }

    progressBar := observe.MessageWithCompletion(ctx, "Connecting to M365")
    defer close(progressBar)

    ctrl, err := m365.NewController(ctx, r.Account, pst, r.Opts)
    if err != nil {
        return nil, clues.Wrap(err, "creating m365 client controller")
    }

    return ctrl, nil
}
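A small usage sketch, assuming an account configured for M365: ConnectDataProvider resolves the controller, verifies access, and caches it on r.Provider so later operation constructors can reuse it. The helper name is hypothetical:

// ensureProvider shows the expected call order before building
// backup/restore/export operations.
func ensureProvider(ctx context.Context, r *repository, svc path.ServiceType) error {
    // safe to call more than once: connectToM365 returns the already
    // cached controller when r.Provider is set.
    return r.ConnectDataProvider(ctx, svc)
}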
40  src/pkg/repository/exports.go  Normal file
@ -0,0 +1,40 @@
package repository

import (
    "context"

    "github.com/alcionai/corso/src/internal/model"
    "github.com/alcionai/corso/src/internal/operations"
    "github.com/alcionai/corso/src/pkg/control"
    "github.com/alcionai/corso/src/pkg/selectors"
    "github.com/alcionai/corso/src/pkg/store"
)

type Exporter interface {
    NewExport(
        ctx context.Context,
        backupID string,
        sel selectors.Selector,
        exportCfg control.ExportConfig,
    ) (operations.ExportOperation, error)
}

// NewExport generates an exportOperation runner.
func (r repository) NewExport(
    ctx context.Context,
    backupID string,
    sel selectors.Selector,
    exportCfg control.ExportConfig,
) (operations.ExportOperation, error) {
    return operations.NewExportOperation(
        ctx,
        r.Opts,
        r.dataLayer,
        store.NewWrapper(r.modelStore),
        r.Provider,
        r.Account,
        model.StableID(backupID),
        sel,
        exportCfg,
        r.Bus)
}
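A hedged usage sketch of the Exporter interface; the zero-value ExportConfig stands in for real export settings, and the wrapper name is hypothetical:

// buildExport only constructs the operation, leaving execution to the caller.
func buildExport(
    ctx context.Context,
    exp Exporter,
    backupID string,
    sel selectors.Selector,
) (operations.ExportOperation, error) {
    return exp.NewExport(ctx, backupID, sel, control.ExportConfig{})
}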
@ -21,7 +21,6 @@ import (
    "github.com/alcionai/corso/src/pkg/backup"
    "github.com/alcionai/corso/src/pkg/backup/details"
    "github.com/alcionai/corso/src/pkg/control"
    ctrlRepo "github.com/alcionai/corso/src/pkg/control/repository"
    ctrlTD "github.com/alcionai/corso/src/pkg/control/testdata"
    "github.com/alcionai/corso/src/pkg/fault"
    "github.com/alcionai/corso/src/pkg/path"
@ -111,7 +110,7 @@ func initM365Repo(t *testing.T) (
        repository.NewRepoID)
    require.NoError(t, err, clues.ToCore(err))

    err = r.Initialize(ctx, ctrlRepo.Retention{})
    err = r.Initialize(ctx, repository.InitConfig{})
    require.NoError(t, err, clues.ToCore(err))

    return ctx, r, ac, st
@ -6,31 +6,20 @@ import (

    "github.com/alcionai/clues"
    "github.com/google/uuid"
    "github.com/kopia/kopia/repo/manifest"
    "github.com/pkg/errors"

    "github.com/alcionai/corso/src/internal/common/crash"
    "github.com/alcionai/corso/src/internal/common/idname"
    "github.com/alcionai/corso/src/internal/data"
    "github.com/alcionai/corso/src/internal/events"
    "github.com/alcionai/corso/src/internal/kopia"
    "github.com/alcionai/corso/src/internal/m365"
    "github.com/alcionai/corso/src/internal/m365/collection/drive/metadata"
    "github.com/alcionai/corso/src/internal/model"
    "github.com/alcionai/corso/src/internal/observe"
    "github.com/alcionai/corso/src/internal/operations"
    "github.com/alcionai/corso/src/internal/streamstore"
    "github.com/alcionai/corso/src/internal/version"
    "github.com/alcionai/corso/src/pkg/account"
    "github.com/alcionai/corso/src/pkg/backup"
    "github.com/alcionai/corso/src/pkg/backup/details"
    "github.com/alcionai/corso/src/pkg/control"
    ctrlRepo "github.com/alcionai/corso/src/pkg/control/repository"
    "github.com/alcionai/corso/src/pkg/count"
    "github.com/alcionai/corso/src/pkg/fault"
    "github.com/alcionai/corso/src/pkg/logger"
    "github.com/alcionai/corso/src/pkg/path"
    "github.com/alcionai/corso/src/pkg/selectors"
    "github.com/alcionai/corso/src/pkg/storage"
    "github.com/alcionai/corso/src/pkg/store"
)
@ -42,48 +31,24 @@ var (
    ErrorBackupNotFound = clues.New("no backup exists with that id")
)

// BackupGetter deals with retrieving metadata about backups from the
// repository.
type BackupGetter interface {
    Backup(ctx context.Context, id string) (*backup.Backup, error)
    Backups(ctx context.Context, ids []string) ([]*backup.Backup, *fault.Bus)
    BackupsByTag(ctx context.Context, fs ...store.FilterOption) ([]*backup.Backup, error)
    GetBackupDetails(
        ctx context.Context,
        backupID string,
    ) (*details.Details, *backup.Backup, *fault.Bus)
    GetBackupErrors(
        ctx context.Context,
        backupID string,
    ) (*fault.Errors, *backup.Backup, *fault.Bus)
}

type Repositoryer interface {
    Initialize(ctx context.Context, retentionOpts ctrlRepo.Retention) error
    Connect(ctx context.Context) error
    Backuper
    BackupGetter
    Restorer
    Exporter
    DataProviderConnector

    Initialize(
        ctx context.Context,
        cfg InitConfig,
    ) error
    Connect(
        ctx context.Context,
        cfg ConnConfig,
    ) error
    GetID() string
    Close(context.Context) error
    NewBackup(
        ctx context.Context,
        self selectors.Selector,
    ) (operations.BackupOperation, error)
    NewBackupWithLookup(
        ctx context.Context,
        self selectors.Selector,
        ins idname.Cacher,
    ) (operations.BackupOperation, error)
    NewRestore(
        ctx context.Context,
        backupID string,
        sel selectors.Selector,
        restoreCfg control.RestoreConfig,
    ) (operations.RestoreOperation, error)
    NewExport(
        ctx context.Context,
        backupID string,
        sel selectors.Selector,
        exportCfg control.ExportConfig,
    ) (operations.ExportOperation, error)

    NewMaintenance(
        ctx context.Context,
        mOpts ctrlRepo.Maintenance,
@ -92,14 +57,6 @@ type Repositoryer interface {
        ctx context.Context,
        rcOpts ctrlRepo.Retention,
    ) (operations.RetentionConfigOperation, error)
    DeleteBackups(ctx context.Context, failOnMissing bool, ids ...string) error
    BackupGetter
    // ConnectToM365 establishes graph api connections
    // and initializes api client configurations.
    ConnectToM365(
        ctx context.Context,
        pst path.ServiceType,
    ) (*m365.Controller, error)
}

// Repository contains storage provider information.
@ -108,9 +65,10 @@ type repository struct {
    CreatedAt time.Time
    Version   string // in case of future breaking changes

    Account account.Account // the user's m365 account connection details
    Storage storage.Storage // the storage provider details and configuration
    Opts    control.Options
    Account  account.Account // the user's m365 account connection details
    Storage  storage.Storage // the storage provider details and configuration
    Opts     control.Options
    Provider DataProvider // the client controller used for external user data CRUD

    Bus       events.Eventer
    dataLayer *kopia.Wrapper
@ -125,7 +83,7 @@ func (r repository) GetID() string {
func New(
    ctx context.Context,
    acct account.Account,
    s storage.Storage,
    st storage.Storage,
    opts control.Options,
    configFileRepoID string,
) (singleRepo *repository, err error) {
@ -133,16 +91,16 @@ func New(
        ctx,
        "acct_provider", acct.Provider.String(),
        "acct_id", clues.Hide(acct.ID()),
        "storage_provider", s.Provider.String())
        "storage_provider", st.Provider.String())

    bus, err := events.NewBus(ctx, s, acct.ID(), opts)
    bus, err := events.NewBus(ctx, st, acct.ID(), opts)
    if err != nil {
        return nil, clues.Wrap(err, "constructing event bus").WithClues(ctx)
    }

    repoID := configFileRepoID
    if len(configFileRepoID) == 0 {
        repoID = newRepoID(s)
        repoID = newRepoID(st)
    }

    bus.SetRepoID(repoID)
@ -151,7 +109,7 @@ func New(
        ID:      repoID,
        Version: "v1",
        Account: acct,
        Storage: s,
        Storage: st,
        Bus:     bus,
        Opts:    opts,
    }
@ -163,17 +121,22 @@ func New(
    return &r, nil
}

type InitConfig struct {
    // tells the data provider which service to
    // use for its connection pattern. Optional.
    Service       path.ServiceType
    RetentionOpts ctrlRepo.Retention
}

// Initialize will:
// - validate the m365 account & secrets
// - connect to the m365 account to ensure communication capability
// - validate the provider config & secrets
// - initialize the kopia repo with the provider and retention parameters
// - update maintenance retention parameters as needed
// - store the configuration details
// - connect to the provider
func (r *repository) Initialize(
    ctx context.Context,
    retentionOpts ctrlRepo.Retention,
    cfg InitConfig,
) (err error) {
    ctx = clues.Add(
        ctx,
@ -187,10 +150,14 @@ func (r *repository) Initialize(
        }
    }()

    if err := r.ConnectDataProvider(ctx, cfg.Service); err != nil {
        return clues.Stack(err)
    }

    observe.Message(ctx, "Initializing repository")

    kopiaRef := kopia.NewConn(r.Storage)
    if err := kopiaRef.Initialize(ctx, r.Opts.Repo, retentionOpts); err != nil {
    if err := kopiaRef.Initialize(ctx, r.Opts.Repo, cfg.RetentionOpts); err != nil {
        // replace common internal errors so that sdk users can check results with errors.Is()
        if errors.Is(err, kopia.ErrorRepoAlreadyExists) {
            return clues.Stack(ErrorRepoAlreadyExists, err).WithClues(ctx)
@ -221,12 +188,21 @@ func (r *repository) Initialize(
    return nil
}

type ConnConfig struct {
    // tells the data provider which service to
    // use for its connection pattern. Leave empty
    // to skip the provider connection.
    Service path.ServiceType
}

// Connect will:
// - validate the m365 account details
// - connect to the m365 account to ensure communication capability
// - connect to the m365 account
// - connect to the provider storage
// - return the connected repository
func (r *repository) Connect(ctx context.Context) (err error) {
func (r *repository) Connect(
    ctx context.Context,
    cfg ConnConfig,
) (err error) {
    ctx = clues.Add(
        ctx,
        "acct_provider", r.Account.Provider.String(),
@ -239,6 +215,10 @@ func (r *repository) Connect(ctx context.Context) (err error) {
        }
    }()

    if err := r.ConnectDataProvider(ctx, cfg.Service); err != nil {
        return clues.Stack(err)
    }

    observe.Message(ctx, "Connecting to repository")

    kopiaRef := kopia.NewConn(r.Storage)
@ -297,14 +277,13 @@ func (r *repository) UpdatePassword(ctx context.Context, password string) (err e
        return clues.Wrap(err, "connecting kopia client")
    }

    if err := kopiaRef.UpdatePassword(ctx, password, r.Opts.Repo); err != nil {
    err = kopiaRef.UpdatePassword(ctx, password, r.Opts.Repo)
    if err != nil {
        return clues.Wrap(err, "updating on kopia")
    }

    defer kopiaRef.Close(ctx)

    r.Bus.Event(ctx, events.RepoUpdate, nil)

    return nil
}

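Putting the two config structs together, the intended lifecycle looks roughly like this (a sketch that mirrors the updated tests further down; InitConfig.Service is optional and ConnConfig may be left empty, and the helper name is hypothetical):

// initThenReconnect: startup flow for a fresh repo followed by a later reconnect.
func initThenReconnect(ctx context.Context, r Repositoryer) error {
    // first run: create the kopia repo; a service hint lets the data
    // provider pick the right connection pattern.
    if err := r.Initialize(ctx, InitConfig{Service: path.ExchangeService}); err != nil {
        return err
    }

    // later runs: connect to the existing repo; an empty ConnConfig
    // leaves the provider's service hint unset.
    return r.Connect(ctx, ConnConfig{})
}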
@ -332,98 +311,6 @@ func (r *repository) Close(ctx context.Context) error {
    return nil
}

// NewBackup generates a BackupOperation runner.
func (r repository) NewBackup(
    ctx context.Context,
    sel selectors.Selector,
) (operations.BackupOperation, error) {
    return r.NewBackupWithLookup(ctx, sel, nil)
}

// NewBackupWithLookup generates a BackupOperation runner.
// ownerIDToName and ownerNameToID are optional populations, in case the caller has
// already generated those values.
func (r repository) NewBackupWithLookup(
    ctx context.Context,
    sel selectors.Selector,
    ins idname.Cacher,
) (operations.BackupOperation, error) {
    ctrl, err := connectToM365(ctx, sel.PathService(), r.Account, r.Opts)
    if err != nil {
        return operations.BackupOperation{}, clues.Wrap(err, "connecting to m365")
    }

    ownerID, ownerName, err := ctrl.PopulateProtectedResourceIDAndName(ctx, sel.DiscreteOwner, ins)
    if err != nil {
        return operations.BackupOperation{}, clues.Wrap(err, "resolving resource owner details")
    }

    // TODO: retrieve display name from gc
    sel = sel.SetDiscreteOwnerIDName(ownerID, ownerName)

    return operations.NewBackupOperation(
        ctx,
        r.Opts,
        r.dataLayer,
        store.NewWrapper(r.modelStore),
        ctrl,
        r.Account,
        sel,
        sel, // the selector acts as an IDNamer for its discrete resource owner.
        r.Bus)
}

// NewExport generates a exportOperation runner.
func (r repository) NewExport(
    ctx context.Context,
    backupID string,
    sel selectors.Selector,
    exportCfg control.ExportConfig,
) (operations.ExportOperation, error) {
    ctrl, err := connectToM365(ctx, sel.PathService(), r.Account, r.Opts)
    if err != nil {
        return operations.ExportOperation{}, clues.Wrap(err, "connecting to m365")
    }

    return operations.NewExportOperation(
        ctx,
        r.Opts,
        r.dataLayer,
        store.NewWrapper(r.modelStore),
        ctrl,
        r.Account,
        model.StableID(backupID),
        sel,
        exportCfg,
        r.Bus)
}

// NewRestore generates a restoreOperation runner.
func (r repository) NewRestore(
    ctx context.Context,
    backupID string,
    sel selectors.Selector,
    restoreCfg control.RestoreConfig,
) (operations.RestoreOperation, error) {
    ctrl, err := connectToM365(ctx, sel.PathService(), r.Account, r.Opts)
    if err != nil {
        return operations.RestoreOperation{}, clues.Wrap(err, "connecting to m365")
    }

    return operations.NewRestoreOperation(
        ctx,
        r.Opts,
        r.dataLayer,
        store.NewWrapper(r.modelStore),
        ctrl,
        r.Account,
        model.StableID(backupID),
        sel,
        restoreCfg,
        r.Bus,
        count.New())
}

func (r repository) NewMaintenance(
    ctx context.Context,
    mOpts ctrlRepo.Maintenance,
@ -449,280 +336,6 @@ func (r repository) NewRetentionConfig(
        r.Bus)
}

// Backup retrieves a backup by id.
func (r repository) Backup(ctx context.Context, id string) (*backup.Backup, error) {
    return getBackup(ctx, id, store.NewWrapper(r.modelStore))
}

// getBackup handles the processing for Backup.
func getBackup(
    ctx context.Context,
    id string,
    sw store.BackupGetter,
) (*backup.Backup, error) {
    b, err := sw.GetBackup(ctx, model.StableID(id))
    if err != nil {
        return nil, errWrapper(err)
    }

    return b, nil
}

// Backups lists backups by ID. Returns as many backups as possible with
// errors for the backups it was unable to retrieve.
func (r repository) Backups(ctx context.Context, ids []string) ([]*backup.Backup, *fault.Bus) {
    var (
        bups []*backup.Backup
        errs = fault.New(false)
        sw   = store.NewWrapper(r.modelStore)
    )

    for _, id := range ids {
        ictx := clues.Add(ctx, "backup_id", id)

        b, err := sw.GetBackup(ictx, model.StableID(id))
        if err != nil {
            errs.AddRecoverable(ctx, errWrapper(err))
        }

        bups = append(bups, b)
    }

    return bups, errs
}

// BackupsByTag lists all backups in a repository that contain all the tags
// specified.
func (r repository) BackupsByTag(ctx context.Context, fs ...store.FilterOption) ([]*backup.Backup, error) {
    sw := store.NewWrapper(r.modelStore)
    return backupsByTag(ctx, sw, fs)
}

// backupsByTag returns all backups matching all provided tags.
//
// TODO(ashmrtn): This exists mostly for testing, but we could restructure the
// code in this file so there's a more elegant mocking solution.
func backupsByTag(
    ctx context.Context,
    sw store.BackupWrapper,
    fs []store.FilterOption,
) ([]*backup.Backup, error) {
    bs, err := sw.GetBackups(ctx, fs...)
    if err != nil {
        return nil, clues.Stack(err)
    }

    // Filter out assist backup bases as they're considered incomplete and we
    // haven't been displaying them before now.
    res := make([]*backup.Backup, 0, len(bs))

    for _, b := range bs {
        if t := b.Tags[model.BackupTypeTag]; t != model.AssistBackup {
            res = append(res, b)
        }
    }

    return res, nil
}

// BackupDetails returns the specified backup.Details
func (r repository) GetBackupDetails(
    ctx context.Context,
    backupID string,
) (*details.Details, *backup.Backup, *fault.Bus) {
    errs := fault.New(false)

    deets, bup, err := getBackupDetails(
        ctx,
        backupID,
        r.Account.ID(),
        r.dataLayer,
        store.NewWrapper(r.modelStore),
        errs)

    return deets, bup, errs.Fail(err)
}

// getBackupDetails handles the processing for GetBackupDetails.
func getBackupDetails(
    ctx context.Context,
    backupID, tenantID string,
    kw *kopia.Wrapper,
    sw store.BackupGetter,
    errs *fault.Bus,
) (*details.Details, *backup.Backup, error) {
    b, err := sw.GetBackup(ctx, model.StableID(backupID))
    if err != nil {
        return nil, nil, errWrapper(err)
    }

    ssid := b.StreamStoreID
    if len(ssid) == 0 {
        ssid = b.DetailsID
    }

    if len(ssid) == 0 {
        return nil, b, clues.New("no streamstore id in backup").WithClues(ctx)
    }

    var (
        sstore = streamstore.NewStreamer(kw, tenantID, b.Selector.PathService())
        deets  details.Details
    )

    err = sstore.Read(
        ctx,
        ssid,
        streamstore.DetailsReader(details.UnmarshalTo(&deets)),
        errs)
    if err != nil {
        return nil, nil, err
    }

    // Retroactively fill in isMeta information for items in older
    // backup versions without that info
    // version.Restore2 introduces the IsMeta flag, so only v1 needs a check.
    if b.Version >= version.OneDrive1DataAndMetaFiles && b.Version < version.OneDrive3IsMetaMarker {
        for _, d := range deets.Entries {
            if d.OneDrive != nil {
                d.OneDrive.IsMeta = metadata.HasMetaSuffix(d.RepoRef)
            }
        }
    }

    deets.DetailsModel = deets.FilterMetaFiles()

    return &deets, b, nil
}

// BackupErrors returns the specified backup's fault.Errors
func (r repository) GetBackupErrors(
    ctx context.Context,
    backupID string,
) (*fault.Errors, *backup.Backup, *fault.Bus) {
    errs := fault.New(false)

    fe, bup, err := getBackupErrors(
        ctx,
        backupID,
        r.Account.ID(),
        r.dataLayer,
        store.NewWrapper(r.modelStore),
        errs)

    return fe, bup, errs.Fail(err)
}

// getBackupErrors handles the processing for GetBackupErrors.
func getBackupErrors(
    ctx context.Context,
    backupID, tenantID string,
    kw *kopia.Wrapper,
    sw store.BackupGetter,
    errs *fault.Bus,
) (*fault.Errors, *backup.Backup, error) {
    b, err := sw.GetBackup(ctx, model.StableID(backupID))
    if err != nil {
        return nil, nil, errWrapper(err)
    }

    ssid := b.StreamStoreID
    if len(ssid) == 0 {
        return nil, b, clues.New("missing streamstore id in backup").WithClues(ctx)
    }

    var (
        sstore = streamstore.NewStreamer(kw, tenantID, b.Selector.PathService())
        fe     fault.Errors
    )

    err = sstore.Read(
        ctx,
        ssid,
        streamstore.FaultErrorsReader(fault.UnmarshalErrorsTo(&fe)),
        errs)
    if err != nil {
        return nil, nil, err
    }

    return &fe, b, nil
}

// DeleteBackups removes the backups from both the model store and the backup
// storage.
//
// If failOnMissing is true then returns an error if a backup model can't be
// found. Otherwise ignores missing backup models.
//
// Missing models or snapshots during the actual deletion do not cause errors.
//
// All backups are delete as an atomic unit so any failures will result in no
// deletions.
func (r repository) DeleteBackups(
    ctx context.Context,
    failOnMissing bool,
    ids ...string,
) error {
    return deleteBackups(ctx, store.NewWrapper(r.modelStore), failOnMissing, ids...)
}

// deleteBackup handles the processing for backup deletion.
func deleteBackups(
    ctx context.Context,
    sw store.BackupGetterModelDeleter,
    failOnMissing bool,
    ids ...string,
) error {
    // Although we haven't explicitly stated it, snapshots are technically
    // manifests in kopia. This means we can use the same delete API to remove
    // them and backup models. Deleting all of them together gives us both
    // atomicity guarantees (around when data will be flushed) and helps reduce
    // the number of manifest blobs that kopia will create.
    var toDelete []manifest.ID

    for _, id := range ids {
        b, err := sw.GetBackup(ctx, model.StableID(id))
        if err != nil {
            if !failOnMissing && errors.Is(err, data.ErrNotFound) {
                continue
            }

            return clues.Stack(errWrapper(err)).
                WithClues(ctx).
                With("delete_backup_id", id)
        }

        toDelete = append(toDelete, b.ModelStoreID)

        if len(b.SnapshotID) > 0 {
            toDelete = append(toDelete, manifest.ID(b.SnapshotID))
        }

        ssid := b.StreamStoreID
        if len(ssid) == 0 {
            ssid = b.DetailsID
        }

        if len(ssid) > 0 {
            toDelete = append(toDelete, manifest.ID(ssid))
        }
    }

    return sw.DeleteWithModelStoreIDs(ctx, toDelete...)
}

func (r repository) ConnectToM365(
    ctx context.Context,
    pst path.ServiceType,
) (*m365.Controller, error) {
    ctrl, err := connectToM365(ctx, pst, r.Account, r.Opts)
    if err != nil {
        return nil, clues.Wrap(err, "connecting to m365")
    }

    return ctrl, nil
}

// ---------------------------------------------------------------------------
// Repository ID Model
// ---------------------------------------------------------------------------
@ -771,29 +384,6 @@ func newRepoID(s storage.Storage) string {
// helpers
// ---------------------------------------------------------------------------

var m365nonce bool

func connectToM365(
    ctx context.Context,
    pst path.ServiceType,
    acct account.Account,
    co control.Options,
) (*m365.Controller, error) {
    if !m365nonce {
        m365nonce = true

        progressBar := observe.MessageWithCompletion(ctx, "Connecting to M365")
        defer close(progressBar)
    }

    ctrl, err := m365.NewController(ctx, acct, pst, co)
    if err != nil {
        return nil, err
    }

    return ctrl, nil
}

func errWrapper(err error) error {
    if errors.Is(err, data.ErrNotFound) {
        return clues.Stack(ErrorBackupNotFound, err)
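errWrapper maps store-level not-found errors onto the exported sentinel so SDK callers can branch with errors.Is; a brief consumer sketch (the helper is hypothetical):

// describeLookupFailure is illustrative only.
func describeLookupFailure(err error) string {
    if errors.Is(err, ErrorBackupNotFound) {
        return "no backup exists with that id"
    }

    return "backup lookup failed: " + err.Error()
}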
@ -17,6 +17,7 @@ import (
    ctrlRepo "github.com/alcionai/corso/src/pkg/control/repository"
    "github.com/alcionai/corso/src/pkg/control/testdata"
    "github.com/alcionai/corso/src/pkg/extensions"
    "github.com/alcionai/corso/src/pkg/path"
    "github.com/alcionai/corso/src/pkg/selectors"
    "github.com/alcionai/corso/src/pkg/services/m365/api"
    "github.com/alcionai/corso/src/pkg/storage"
@ -69,7 +70,7 @@ func (suite *RepositoryUnitSuite) TestInitialize() {
                NewRepoID)
            require.NoError(t, err, clues.ToCore(err))

            err = r.Initialize(ctx, ctrlRepo.Retention{})
            err = r.Initialize(ctx, InitConfig{})
            test.errCheck(t, err, clues.ToCore(err))
        })
    }
@ -85,12 +86,12 @@ func (suite *RepositoryUnitSuite) TestConnect() {
        errCheck assert.ErrorAssertionFunc
    }{
        {
            storage.ProviderUnknown.String(),
            func() (storage.Storage, error) {
            name: storage.ProviderUnknown.String(),
            storage: func() (storage.Storage, error) {
                return storage.NewStorage(storage.ProviderUnknown)
            },
            account.Account{},
            assert.Error,
            account:  account.Account{},
            errCheck: assert.Error,
        },
    }
    for _, test := range table {
@ -111,7 +112,7 @@ func (suite *RepositoryUnitSuite) TestConnect() {
                NewRepoID)
            require.NoError(t, err, clues.ToCore(err))

            err = r.Connect(ctx)
            err = r.Connect(ctx, ConnConfig{})
            test.errCheck(t, err, clues.ToCore(err))
        })
    }
@ -136,12 +137,13 @@ func TestRepositoryIntegrationSuite(t *testing.T) {
func (suite *RepositoryIntegrationSuite) TestInitialize() {
    table := []struct {
        name     string
        account  account.Account
        account  func(*testing.T) account.Account
        storage  func(tester.TestT) storage.Storage
        errCheck assert.ErrorAssertionFunc
    }{
        {
            name:     "success",
            account:  tconfig.NewM365Account,
            storage:  storeTD.NewPrefixedS3Storage,
            errCheck: assert.NoError,
        },
@ -156,13 +158,13 @@ func (suite *RepositoryIntegrationSuite) TestInitialize() {
            st := test.storage(t)
            r, err := New(
                ctx,
                test.account,
                test.account(t),
                st,
                control.DefaultOptions(),
                NewRepoID)
            require.NoError(t, err, clues.ToCore(err))

            err = r.Initialize(ctx, ctrlRepo.Retention{})
            err = r.Initialize(ctx, InitConfig{})
            if err == nil {
                defer func() {
                    err := r.Close(ctx)
@ -204,7 +206,7 @@ func (suite *RepositoryIntegrationSuite) TestInitializeWithRole() {
        NewRepoID)
    require.NoError(t, err, clues.ToCore(err))

    err = r.Initialize(ctx, ctrlRepo.Retention{})
    err = r.Initialize(ctx, InitConfig{})
    require.NoError(t, err)

    defer func() {
@ -218,21 +220,23 @@ func (suite *RepositoryIntegrationSuite) TestConnect() {
    ctx, flush := tester.NewContext(t)
    defer flush()

    acct := tconfig.NewM365Account(t)

    // need to initialize the repository before we can test connecting to it.
    st := storeTD.NewPrefixedS3Storage(t)
    r, err := New(
        ctx,
        account.Account{},
        acct,
        st,
        control.DefaultOptions(),
        NewRepoID)
    require.NoError(t, err, clues.ToCore(err))

    err = r.Initialize(ctx, ctrlRepo.Retention{})
    err = r.Initialize(ctx, InitConfig{})
    require.NoError(t, err, clues.ToCore(err))

    // now re-connect
    err = r.Connect(ctx)
    err = r.Connect(ctx, ConnConfig{})
    assert.NoError(t, err, clues.ToCore(err))
}
@ -242,29 +246,36 @@ func (suite *RepositoryIntegrationSuite) TestRepository_UpdatePassword() {
    ctx, flush := tester.NewContext(t)
    defer flush()

    acct := tconfig.NewM365Account(t)

    // need to initialize the repository before we can test connecting to it.
    st := storeTD.NewPrefixedS3Storage(t)
    r, err := New(
        ctx,
        account.Account{},
        acct,
        st,
        control.DefaultOptions(),
        NewRepoID)
    require.NoError(t, err, clues.ToCore(err))

    err = r.Initialize(ctx, ctrlRepo.Retention{})
    err = r.Initialize(ctx, InitConfig{})
    require.NoError(t, err, clues.ToCore(err))

    // now re-connect
    err = r.Connect(ctx)
    err = r.Connect(ctx, ConnConfig{})
    assert.NoError(t, err, clues.ToCore(err))

    err = r.UpdatePassword(ctx, "newpass")
    require.NoError(t, err, clues.ToCore(err))

    tmp := st.Config["common_corsoPassphrase"]
    st.Config["common_corsoPassphrase"] = "newpass"

    // now reconnect with new pass
    err = r.Connect(ctx)
    assert.Error(t, err, clues.ToCore(err))
    err = r.Connect(ctx, ConnConfig{})
    assert.NoError(t, err, clues.ToCore(err))

    st.Config["common_corsoPassphrase"] = tmp
}

@ -273,17 +284,19 @@ func (suite *RepositoryIntegrationSuite) TestConnect_sameID() {
    ctx, flush := tester.NewContext(t)
    defer flush()

    acct := tconfig.NewM365Account(t)

    // need to initialize the repository before we can test connecting to it.
    st := storeTD.NewPrefixedS3Storage(t)
    r, err := New(
        ctx,
        account.Account{},
        acct,
        st,
        control.DefaultOptions(),
        NewRepoID)
    require.NoError(t, err, clues.ToCore(err))

    err = r.Initialize(ctx, ctrlRepo.Retention{})
    err = r.Initialize(ctx, InitConfig{})
    require.NoError(t, err, clues.ToCore(err))

    oldID := r.GetID()
@ -292,7 +305,7 @@ func (suite *RepositoryIntegrationSuite) TestConnect_sameID() {
    require.NoError(t, err, clues.ToCore(err))

    // now re-connect
    err = r.Connect(ctx)
    err = r.Connect(ctx, ConnConfig{})
    require.NoError(t, err, clues.ToCore(err))
    assert.Equal(t, oldID, r.GetID())
}
@ -315,7 +328,8 @@ func (suite *RepositoryIntegrationSuite) TestNewBackup() {
        NewRepoID)
    require.NoError(t, err, clues.ToCore(err))

    err = r.Initialize(ctx, ctrlRepo.Retention{})
    // service doesn't matter here, we just need a valid value.
    err = r.Initialize(ctx, InitConfig{Service: path.ExchangeService})
    require.NoError(t, err, clues.ToCore(err))

    userID := tconfig.M365UserID(t)
@ -344,7 +358,7 @@ func (suite *RepositoryIntegrationSuite) TestNewRestore() {
        "")
    require.NoError(t, err, clues.ToCore(err))

    err = r.Initialize(ctx, ctrlRepo.Retention{})
    err = r.Initialize(ctx, InitConfig{})
    require.NoError(t, err, clues.ToCore(err))

    ro, err := r.NewRestore(
@ -374,7 +388,8 @@ func (suite *RepositoryIntegrationSuite) TestNewBackupAndDelete() {
        NewRepoID)
    require.NoError(t, err, clues.ToCore(err))

    err = r.Initialize(ctx, ctrlRepo.Retention{})
    // service doesn't matter here, we just need a valid value.
    err = r.Initialize(ctx, InitConfig{Service: path.ExchangeService})
    require.NoError(t, err, clues.ToCore(err))

    userID := tconfig.M365UserID(t)
@ -427,7 +442,7 @@ func (suite *RepositoryIntegrationSuite) TestNewMaintenance() {
        NewRepoID)
    require.NoError(t, err, clues.ToCore(err))

    err = r.Initialize(ctx, ctrlRepo.Retention{})
    err = r.Initialize(ctx, InitConfig{})
    require.NoError(t, err, clues.ToCore(err))

    mo, err := r.NewMaintenance(ctx, ctrlRepo.Maintenance{})
@ -496,11 +511,11 @@ func (suite *RepositoryIntegrationSuite) Test_Options() {
        NewRepoID)
    require.NoError(t, err, clues.ToCore(err))

    err = r.Initialize(ctx, ctrlRepo.Retention{})
    err = r.Initialize(ctx, InitConfig{})
    require.NoError(t, err)
    assert.Equal(t, test.expectedLen, len(r.Opts.ItemExtensionFactory))

    err = r.Connect(ctx)
    err = r.Connect(ctx, ConnConfig{})
    assert.NoError(t, err)
    assert.Equal(t, test.expectedLen, len(r.Opts.ItemExtensionFactory))
    })
42  src/pkg/repository/restores.go  Normal file
@ -0,0 +1,42 @@
package repository

import (
    "context"

    "github.com/alcionai/corso/src/internal/model"
    "github.com/alcionai/corso/src/internal/operations"
    "github.com/alcionai/corso/src/pkg/control"
    "github.com/alcionai/corso/src/pkg/count"
    "github.com/alcionai/corso/src/pkg/selectors"
    "github.com/alcionai/corso/src/pkg/store"
)

type Restorer interface {
    NewRestore(
        ctx context.Context,
        backupID string,
        sel selectors.Selector,
        restoreCfg control.RestoreConfig,
    ) (operations.RestoreOperation, error)
}

// NewRestore generates a restoreOperation runner.
func (r repository) NewRestore(
    ctx context.Context,
    backupID string,
    sel selectors.Selector,
    restoreCfg control.RestoreConfig,
) (operations.RestoreOperation, error) {
    return operations.NewRestoreOperation(
        ctx,
        r.Opts,
        r.dataLayer,
        store.NewWrapper(r.modelStore),
        r.Provider,
        r.Account,
        model.StableID(backupID),
        sel,
        restoreCfg,
        r.Bus,
        count.New())
}
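And a matching sketch for the Restorer interface; restoreCfg would normally arrive from CLI flags, and the wrapper name is hypothetical:

// buildRestore constructs the operation only; running it is up to the caller.
func buildRestore(
    ctx context.Context,
    rst Restorer,
    backupID string,
    sel selectors.Selector,
    restoreCfg control.RestoreConfig,
) (operations.RestoreOperation, error) {
    return rst.NewRestore(ctx, backupID, sel, restoreCfg)
}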
@ -697,7 +697,7 @@ func (s ExchangeScope) IncludesCategory(cat exchangeCategory) bool {
// returns true if the category is included in the scope's data type,
// and the value is set to Any().
func (s ExchangeScope) IsAny(cat exchangeCategory) bool {
    return IsAnyTarget(s, cat)
    return isAnyTarget(s, cat)
}

// Get returns the data category in the scope. If the scope

@ -699,7 +699,7 @@ func (s GroupsScope) IncludesCategory(cat groupsCategory) bool {
// returns true if the category is included in the scope's data type,
// and the value is set to Any().
func (s GroupsScope) IsAny(cat groupsCategory) bool {
    return IsAnyTarget(s, cat)
    return isAnyTarget(s, cat)
}

// Get returns the data category in the scope. If the scope

@ -484,7 +484,7 @@ func (s OneDriveScope) Matches(cat oneDriveCategory, target string) bool {
// returns true if the category is included in the scope's data type,
// and the value is set to Any().
func (s OneDriveScope) IsAny(cat oneDriveCategory) bool {
    return IsAnyTarget(s, cat)
    return isAnyTarget(s, cat)
}

// Get returns the data category in the scope. If the scope

@ -694,7 +694,7 @@ func matchesPathValues[T scopeT, C categoryT](
            return false
        }

        if IsAnyTarget(sc, cc) {
        if isAnyTarget(sc, cc) {
            // continue, not return: all path keys must match the entry to succeed
            continue
        }
@ -795,7 +795,7 @@ func isNoneTarget[T scopeT, C categoryT](s T, cat C) bool {

// returns true if the category is included in the scope's category type,
// and the value is set to Any().
func IsAnyTarget[T scopeT, C categoryT](s T, cat C) bool {
func isAnyTarget[T scopeT, C categoryT](s T, cat C) bool {
    if !typeAndCategoryMatches(cat, s.categorizer()) {
        return false
    }

@ -125,14 +125,14 @@ func (suite *SelectorScopesSuite) TestGetCatValue() {
func (suite *SelectorScopesSuite) TestIsAnyTarget() {
    t := suite.T()
    stub := stubScope("")
    assert.True(t, IsAnyTarget(stub, rootCatStub))
    assert.True(t, IsAnyTarget(stub, leafCatStub))
    assert.False(t, IsAnyTarget(stub, mockCategorizer("smarf")))
    assert.True(t, isAnyTarget(stub, rootCatStub))
    assert.True(t, isAnyTarget(stub, leafCatStub))
    assert.False(t, isAnyTarget(stub, mockCategorizer("smarf")))

    stub = stubScope("none")
    assert.False(t, IsAnyTarget(stub, rootCatStub))
    assert.False(t, IsAnyTarget(stub, leafCatStub))
    assert.False(t, IsAnyTarget(stub, mockCategorizer("smarf")))
    assert.False(t, isAnyTarget(stub, rootCatStub))
    assert.False(t, isAnyTarget(stub, leafCatStub))
    assert.False(t, isAnyTarget(stub, mockCategorizer("smarf")))
}

var reduceTestTable = []struct {

@ -625,7 +625,7 @@ func (s SharePointScope) IncludesCategory(cat sharePointCategory) bool {
// returns true if the category is included in the scope's data type,
// and the value is set to Any().
func (s SharePointScope) IsAny(cat sharePointCategory) bool {
    return IsAnyTarget(s, cat)
    return isAnyTarget(s, cat)
}

// Get returns the data category in the scope. If the scope
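With isAnyTarget unexported, code outside the selectors package checks wildcard scopes through each scope type's IsAny method. A sketch, assuming the exported ExchangeMailFolder category and a Scopes accessor on an ExchangeBackup selector (both assumptions, and the helper itself is hypothetical):

// backsUpAllMailFolders: does this selector back up all mail folders?
func backsUpAllMailFolders(sel selectors.ExchangeBackup) bool {
    for _, scope := range sel.Scopes() {
        if scope.IsAny(selectors.ExchangeMailFolder) {
            return true
        }
    }

    return false
}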