Merge branch 'updateKopiaPassword' of https://github.com/alcionai/corso into updateKopiaPassword
Commit: 1ee148554d
@@ -12,6 +12,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 - Reduce backup runtime for OneDrive and SharePoint incremental backups that have no file changes.
 - Increase Exchange backup performance by lazily fetching data only for items whose content changed.
 - Added `--backups` flag to delete multiple backups in `corso backup delete` command.
+- Backup now includes all sites that belong to a team, not just the root site.

 ## Fixed

 - Teams Channels that cannot support delta tokens (those without messages) fall back to non-delta enumeration and no longer fail a backup.
@@ -16,6 +16,8 @@ import (
 	"github.com/alcionai/corso/src/internal/data"
 	"github.com/alcionai/corso/src/internal/m365/graph"
 	"github.com/alcionai/corso/src/pkg/backup"
+	"github.com/alcionai/corso/src/pkg/backup/details"
+	"github.com/alcionai/corso/src/pkg/control"
 	"github.com/alcionai/corso/src/pkg/logger"
 	"github.com/alcionai/corso/src/pkg/path"
 	"github.com/alcionai/corso/src/pkg/repository"
@@ -163,7 +165,7 @@ func handleDeleteCmd(cmd *cobra.Command, args []string) error {
 // standard set of selector behavior that we want used in the cli
 var defaultSelectorConfig = selectors.Config{OnlyMatchItemNames: true}

-func runBackups(
+func genericCreateCommand(
 	ctx context.Context,
 	r repository.Repositoryer,
 	serviceName string,
@@ -332,6 +334,65 @@ func genericListCommand(
 	return nil
 }

+func genericDetailsCommand(
+	cmd *cobra.Command,
+	backupID string,
+	sel selectors.Selector,
+) (*details.Details, error) {
+	ctx := cmd.Context()
+
+	r, rdao, err := utils.GetAccountAndConnect(ctx, cmd, path.OneDriveService)
+	if err != nil {
+		return nil, clues.Stack(err)
+	}
+
+	defer utils.CloseRepo(ctx, r)
+
+	return genericDetailsCore(
+		ctx,
+		r,
+		backupID,
+		sel,
+		rdao.Opts)
+}
+
+func genericDetailsCore(
+	ctx context.Context,
+	bg repository.BackupGetter,
+	backupID string,
+	sel selectors.Selector,
+	opts control.Options,
+) (*details.Details, error) {
+	ctx = clues.Add(ctx, "backup_id", backupID)
+
+	sel.Configure(selectors.Config{OnlyMatchItemNames: true})
+
+	d, _, errs := bg.GetBackupDetails(ctx, backupID)
+	// TODO: log/track recoverable errors
+	if errs.Failure() != nil {
+		if errors.Is(errs.Failure(), data.ErrNotFound) {
+			return nil, clues.New("no backup exists with the id " + backupID)
+		}
+
+		return nil, clues.Wrap(errs.Failure(), "Failed to get backup details in the repository")
+	}
+
+	if opts.SkipReduce {
+		return d, nil
+	}
+
+	d, err := sel.Reduce(ctx, d, errs)
+	if err != nil {
+		return nil, clues.Wrap(err, "filtering backup details to selection")
+	}
+
+	return d, nil
+}
+
+// ---------------------------------------------------------------------------
+// helper funcs
+// ---------------------------------------------------------------------------
+
 func ifShow(flag string) bool {
 	return strings.ToLower(strings.TrimSpace(flag)) == "show"
 }
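For orientation, the per-service `details` commands further down in this diff all adopt the same calling pattern around these new helpers. The sketch below is condensed from the OneDrive hunk later in this change set (it is not additional new code) and is included here only so the caller side is visible next to the helper it uses:

```go
// Condensed from the detailsOneDriveCmd hunk below; shown for context only.
func runDetailsOneDriveCmd(cmd *cobra.Command) error {
	ctx := cmd.Context()
	opts := utils.MakeOneDriveOpts(cmd)

	// Build the selector up front, then delegate the repo lookup to the shared helper.
	sel := utils.IncludeOneDriveRestoreDataSelectors(opts)
	sel.Configure(selectors.Config{OnlyMatchItemNames: true})
	utils.FilterOneDriveRestoreInfoSelectors(sel, opts)

	ds, err := genericDetailsCommand(cmd, flags.BackupIDFV, sel.Selector)
	if err != nil {
		return Only(ctx, err)
	}

	if len(ds.Entries) > 0 {
		ds.PrintEntries(ctx)
	} else {
		Info(ctx, selectors.ErrorNoMatchingItems)
	}

	return nil
}
```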
src/cli/backup/backup_test.go (new file, 68 lines)
@@ -0,0 +1,68 @@
+package backup
+
+import (
+	"testing"
+
+	"github.com/alcionai/clues"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/suite"
+
+	"github.com/alcionai/corso/src/cli/utils/testdata"
+	"github.com/alcionai/corso/src/internal/tester"
+	dtd "github.com/alcionai/corso/src/pkg/backup/details/testdata"
+	"github.com/alcionai/corso/src/pkg/control"
+	"github.com/alcionai/corso/src/pkg/path"
+	"github.com/alcionai/corso/src/pkg/selectors"
+)
+
+type BackupUnitSuite struct {
+	tester.Suite
+}
+
+func TestBackupUnitSuite(t *testing.T) {
+	suite.Run(t, &BackupUnitSuite{Suite: tester.NewUnitSuite(t)})
+}
+
+func (suite *BackupUnitSuite) TestGenericDetailsCore() {
+	t := suite.T()
+
+	expected := append(
+		append(
+			dtd.GetItemsForVersion(
+				t,
+				path.ExchangeService,
+				path.EmailCategory,
+				0,
+				-1),
+			dtd.GetItemsForVersion(
+				t,
+				path.ExchangeService,
+				path.EventsCategory,
+				0,
+				-1)...),
+		dtd.GetItemsForVersion(
+			t,
+			path.ExchangeService,
+			path.ContactsCategory,
+			0,
+			-1)...)
+
+	ctx, flush := tester.NewContext(t)
+	defer flush()
+
+	bg := testdata.VersionedBackupGetter{
+		Details: dtd.GetDetailsSetForVersion(t, 0),
+	}
+
+	sel := selectors.NewExchangeBackup([]string{"user-id"})
+	sel.Include(sel.AllData())
+
+	output, err := genericDetailsCore(
+		ctx,
+		bg,
+		"backup-ID",
+		sel.Selector,
+		control.DefaultOptions())
+	assert.NoError(t, err, clues.ToCore(err))
+	assert.ElementsMatch(t, expected, output.Entries)
+}
@@ -1,21 +1,15 @@
 package backup

 import (
-	"context"
-
 	"github.com/alcionai/clues"
-	"github.com/pkg/errors"
 	"github.com/spf13/cobra"
 	"github.com/spf13/pflag"

 	"github.com/alcionai/corso/src/cli/flags"
 	. "github.com/alcionai/corso/src/cli/print"
 	"github.com/alcionai/corso/src/cli/utils"
-	"github.com/alcionai/corso/src/internal/data"
-	"github.com/alcionai/corso/src/pkg/backup/details"
 	"github.com/alcionai/corso/src/pkg/fault"
 	"github.com/alcionai/corso/src/pkg/path"
-	"github.com/alcionai/corso/src/pkg/repository"
 	"github.com/alcionai/corso/src/pkg/selectors"
 )

@@ -182,7 +176,7 @@ func createExchangeCmd(cmd *cobra.Command, args []string) error {
 		selectorSet = append(selectorSet, discSel.Selector)
 	}

-	return runBackups(
+	return genericCreateCommand(
 		ctx,
 		r,
 		"Exchange",
@@ -272,74 +266,31 @@ func detailsExchangeCmd(cmd *cobra.Command, args []string) error {
 		return nil
 	}

+	return runDetailsExchangeCmd(cmd)
+}
+
+func runDetailsExchangeCmd(cmd *cobra.Command) error {
 	ctx := cmd.Context()
 	opts := utils.MakeExchangeOpts(cmd)

-	r, rdao, err := utils.GetAccountAndConnect(ctx, cmd, path.ExchangeService)
+	sel := utils.IncludeExchangeRestoreDataSelectors(opts)
+	sel.Configure(selectors.Config{OnlyMatchItemNames: true})
+	utils.FilterExchangeRestoreInfoSelectors(sel, opts)
+
+	ds, err := genericDetailsCommand(cmd, flags.BackupIDFV, sel.Selector)
 	if err != nil {
 		return Only(ctx, err)
 	}

-	defer utils.CloseRepo(ctx, r)
-
-	ds, err := runDetailsExchangeCmd(
-		ctx,
-		r,
-		flags.BackupIDFV,
-		opts,
-		rdao.Opts.SkipReduce)
-	if err != nil {
-		return Only(ctx, err)
-	}
-
-	if len(ds.Entries) == 0 {
+	if len(ds.Entries) > 0 {
+		ds.PrintEntries(ctx)
+	} else {
 		Info(ctx, selectors.ErrorNoMatchingItems)
-		return nil
 	}
-
-	ds.PrintEntries(ctx)

 	return nil
 }

-// runDetailsExchangeCmd actually performs the lookup in backup details.
-// the fault.Errors return is always non-nil. Callers should check if
-// errs.Failure() == nil.
-func runDetailsExchangeCmd(
-	ctx context.Context,
-	r repository.BackupGetter,
-	backupID string,
-	opts utils.ExchangeOpts,
-	skipReduce bool,
-) (*details.Details, error) {
-	if err := utils.ValidateExchangeRestoreFlags(backupID, opts); err != nil {
-		return nil, err
-	}
-
-	ctx = clues.Add(ctx, "backup_id", backupID)
-
-	d, _, errs := r.GetBackupDetails(ctx, backupID)
-	// TODO: log/track recoverable errors
-	if errs.Failure() != nil {
-		if errors.Is(errs.Failure(), data.ErrNotFound) {
-			return nil, clues.New("No backup exists with the id " + backupID)
-		}
-
-		return nil, clues.Wrap(errs.Failure(), "Failed to get backup details in the repository")
-	}
-
-	ctx = clues.Add(ctx, "details_entries", len(d.Entries))
-
-	if !skipReduce {
-		sel := utils.IncludeExchangeRestoreDataSelectors(opts)
-		sel.Configure(selectors.Config{OnlyMatchItemNames: true})
-		utils.FilterExchangeRestoreInfoSelectors(sel, opts)
-		d = sel.Reduce(ctx, d, errs)
-	}
-
-	return d, nil
-}
-
 // ------------------------------------------------------------------------------------------------
 // backup delete
 // ------------------------------------------------------------------------------------------------
@@ -55,7 +55,7 @@ func (suite *NoBackupExchangeE2ESuite) SetupSuite() {
 	defer flush()

 	suite.its = newIntegrationTesterSetup(t)
-	suite.dpnd = prepM365Test(t, ctx)
+	suite.dpnd = prepM365Test(t, ctx, path.ExchangeService)
 }

 func (suite *NoBackupExchangeE2ESuite) TestExchangeBackupListCmd_noBackups() {
@@ -109,7 +109,7 @@ func (suite *BackupExchangeE2ESuite) SetupSuite() {
 	defer flush()

 	suite.its = newIntegrationTesterSetup(t)
-	suite.dpnd = prepM365Test(t, ctx)
+	suite.dpnd = prepM365Test(t, ctx, path.ExchangeService)
 }

 func (suite *BackupExchangeE2ESuite) TestExchangeBackupCmd_email() {
@@ -336,7 +336,7 @@ func (suite *PreparedBackupExchangeE2ESuite) SetupSuite() {
 	defer flush()

 	suite.its = newIntegrationTesterSetup(t)
-	suite.dpnd = prepM365Test(t, ctx)
+	suite.dpnd = prepM365Test(t, ctx, path.ExchangeService)
 	suite.backupOps = make(map[path.CategoryType]string)

 	var (
@@ -579,7 +579,7 @@ func (suite *BackupDeleteExchangeE2ESuite) SetupSuite() {
 	ctx, flush := tester.NewContext(t)
 	defer flush()

-	suite.dpnd = prepM365Test(t, ctx)
+	suite.dpnd = prepM365Test(t, ctx, path.ExchangeService)

 	m365UserID := tconfig.M365UserID(t)
 	users := []string{m365UserID}
@@ -1,7 +1,6 @@
 package backup

 import (
-	"fmt"
 	"strconv"
 	"testing"

@@ -15,10 +14,7 @@ import (
 	flagsTD "github.com/alcionai/corso/src/cli/flags/testdata"
 	cliTD "github.com/alcionai/corso/src/cli/testdata"
 	"github.com/alcionai/corso/src/cli/utils"
-	utilsTD "github.com/alcionai/corso/src/cli/utils/testdata"
 	"github.com/alcionai/corso/src/internal/tester"
-	"github.com/alcionai/corso/src/internal/version"
-	dtd "github.com/alcionai/corso/src/pkg/backup/details/testdata"
 	"github.com/alcionai/corso/src/pkg/control"
 )

@@ -368,51 +364,3 @@ func (suite *ExchangeUnitSuite) TestExchangeBackupCreateSelectors() {
 		})
 	}
 }
-
-func (suite *ExchangeUnitSuite) TestExchangeBackupDetailsSelectors() {
-	for v := 0; v <= version.Backup; v++ {
-		suite.Run(fmt.Sprintf("version%d", v), func() {
-			for _, test := range utilsTD.ExchangeOptionDetailLookups {
-				suite.Run(test.Name, func() {
-					t := suite.T()
-
-					ctx, flush := tester.NewContext(t)
-					defer flush()
-
-					bg := utilsTD.VersionedBackupGetter{
-						Details: dtd.GetDetailsSetForVersion(t, v),
-					}
-
-					output, err := runDetailsExchangeCmd(
-						ctx,
-						bg,
-						"backup-ID",
-						test.Opts(t, v),
-						false)
-					assert.NoError(t, err, clues.ToCore(err))
-					assert.ElementsMatch(t, test.Expected(t, v), output.Entries)
-				})
-			}
-		})
-	}
-}
-
-func (suite *ExchangeUnitSuite) TestExchangeBackupDetailsSelectorsBadFormats() {
-	for _, test := range utilsTD.BadExchangeOptionsFormats {
-		suite.Run(test.Name, func() {
-			t := suite.T()
-
-			ctx, flush := tester.NewContext(t)
-			defer flush()
-
-			output, err := runDetailsExchangeCmd(
-				ctx,
-				test.BackupGetter,
-				"backup-ID",
-				test.Opts(t, version.Backup),
-				false)
-			assert.Error(t, err, clues.ToCore(err))
-			assert.Empty(t, output)
-		})
-	}
-}
@@ -2,7 +2,6 @@ package backup

 import (
 	"context"
-	"errors"
 	"fmt"

 	"github.com/alcionai/clues"
@@ -14,12 +13,9 @@ import (
 	. "github.com/alcionai/corso/src/cli/print"
 	"github.com/alcionai/corso/src/cli/utils"
 	"github.com/alcionai/corso/src/internal/common/idname"
-	"github.com/alcionai/corso/src/internal/data"
-	"github.com/alcionai/corso/src/pkg/backup/details"
 	"github.com/alcionai/corso/src/pkg/fault"
 	"github.com/alcionai/corso/src/pkg/filters"
 	"github.com/alcionai/corso/src/pkg/path"
-	"github.com/alcionai/corso/src/pkg/repository"
 	"github.com/alcionai/corso/src/pkg/selectors"
 	"github.com/alcionai/corso/src/pkg/services/m365"
 )
@@ -174,7 +170,7 @@ func createGroupsCmd(cmd *cobra.Command, args []string) error {
 		selectorSet = append(selectorSet, discSel.Selector)
 	}

-	return runBackups(
+	return genericCreateCommand(
 		ctx,
 		r,
 		"Group",
@@ -225,74 +221,31 @@ func detailsGroupsCmd(cmd *cobra.Command, args []string) error {
 		return nil
 	}

+	return runDetailsGroupsCmd(cmd)
+}
+
+func runDetailsGroupsCmd(cmd *cobra.Command) error {
 	ctx := cmd.Context()
 	opts := utils.MakeGroupsOpts(cmd)

-	r, rdao, err := utils.GetAccountAndConnect(ctx, cmd, path.GroupsService)
+	sel := utils.IncludeGroupsRestoreDataSelectors(ctx, opts)
+	sel.Configure(selectors.Config{OnlyMatchItemNames: true})
+	utils.FilterGroupsRestoreInfoSelectors(sel, opts)
+
+	ds, err := genericDetailsCommand(cmd, flags.BackupIDFV, sel.Selector)
 	if err != nil {
 		return Only(ctx, err)
 	}

-	defer utils.CloseRepo(ctx, r)
-
-	ds, err := runDetailsGroupsCmd(
-		ctx,
-		r,
-		flags.BackupIDFV,
-		opts,
-		rdao.Opts.SkipReduce)
-	if err != nil {
-		return Only(ctx, err)
-	}
-
-	if len(ds.Entries) == 0 {
+	if len(ds.Entries) > 0 {
+		ds.PrintEntries(ctx)
+	} else {
 		Info(ctx, selectors.ErrorNoMatchingItems)
-		return nil
 	}
-
-	ds.PrintEntries(ctx)

 	return nil
 }

-// runDetailsGroupsCmd actually performs the lookup in backup details.
-// the fault.Errors return is always non-nil. Callers should check if
-// errs.Failure() == nil.
-func runDetailsGroupsCmd(
-	ctx context.Context,
-	r repository.BackupGetter,
-	backupID string,
-	opts utils.GroupsOpts,
-	skipReduce bool,
-) (*details.Details, error) {
-	if err := utils.ValidateGroupsRestoreFlags(backupID, opts); err != nil {
-		return nil, err
-	}
-
-	ctx = clues.Add(ctx, "backup_id", backupID)
-
-	d, _, errs := r.GetBackupDetails(ctx, backupID)
-	// TODO: log/track recoverable errors
-	if errs.Failure() != nil {
-		if errors.Is(errs.Failure(), data.ErrNotFound) {
-			return nil, clues.New("no backup exists with the id " + backupID)
-		}
-
-		return nil, clues.Wrap(errs.Failure(), "Failed to get backup details in the repository")
-	}
-
-	ctx = clues.Add(ctx, "details_entries", len(d.Entries))
-
-	if !skipReduce {
-		sel := utils.IncludeGroupsRestoreDataSelectors(ctx, opts)
-		sel.Configure(selectors.Config{OnlyMatchItemNames: true})
-		utils.FilterGroupsRestoreInfoSelectors(sel, opts)
-		d = sel.Reduce(ctx, d, errs)
-	}
-
-	return d, nil
-}
-
 // ------------------------------------------------------------------------------------------------
 // backup delete
 // ------------------------------------------------------------------------------------------------
@@ -56,7 +56,7 @@ func (suite *NoBackupGroupsE2ESuite) SetupSuite() {
 	defer flush()

 	suite.its = newIntegrationTesterSetup(t)
-	suite.dpnd = prepM365Test(t, ctx)
+	suite.dpnd = prepM365Test(t, ctx, path.GroupsService)
 }

 func (suite *NoBackupGroupsE2ESuite) TestGroupsBackupListCmd_noBackups() {
@@ -110,7 +110,7 @@ func (suite *BackupGroupsE2ESuite) SetupSuite() {
 	defer flush()

 	suite.its = newIntegrationTesterSetup(t)
-	suite.dpnd = prepM365Test(t, ctx)
+	suite.dpnd = prepM365Test(t, ctx, path.GroupsService)
 }

 func (suite *BackupGroupsE2ESuite) TestGroupsBackupCmd_channelMessages() {
@@ -287,7 +287,7 @@ func (suite *PreparedBackupGroupsE2ESuite) SetupSuite() {
 	defer flush()

 	suite.its = newIntegrationTesterSetup(t)
-	suite.dpnd = prepM365Test(t, ctx)
+	suite.dpnd = prepM365Test(t, ctx, path.GroupsService)
 	suite.backupOps = make(map[path.CategoryType]string)

 	var (
@@ -515,7 +515,7 @@ func (suite *BackupDeleteGroupsE2ESuite) SetupSuite() {
 	ctx, flush := tester.NewContext(t)
 	defer flush()

-	suite.dpnd = prepM365Test(t, ctx)
+	suite.dpnd = prepM365Test(t, ctx, path.GroupsService)

 	m365GroupID := tconfig.M365GroupID(t)
 	groups := []string{m365GroupID}
@@ -21,7 +21,7 @@ import (
 	"github.com/alcionai/corso/src/internal/tester/tconfig"
 	"github.com/alcionai/corso/src/pkg/account"
 	"github.com/alcionai/corso/src/pkg/control"
-	ctrlRepo "github.com/alcionai/corso/src/pkg/control/repository"
+	"github.com/alcionai/corso/src/pkg/path"
 	"github.com/alcionai/corso/src/pkg/repository"
 	"github.com/alcionai/corso/src/pkg/services/m365/api"
 	"github.com/alcionai/corso/src/pkg/services/m365/api/mock"
@@ -133,6 +133,7 @@ type dependencies struct {
 func prepM365Test(
 	t *testing.T,
 	ctx context.Context, //revive:disable-line:context-as-argument
+	pst path.ServiceType,
 ) dependencies {
 	var (
 		acct = tconfig.NewM365Account(t)
@@ -160,7 +161,9 @@ func prepM365Test(
 		repository.NewRepoID)
 	require.NoError(t, err, clues.ToCore(err))

-	err = repo.Initialize(ctx, ctrlRepo.Retention{})
+	err = repo.Initialize(ctx, repository.InitConfig{
+		Service: pst,
+	})
 	require.NoError(t, err, clues.ToCore(err))

 	return dependencies{
@@ -1,21 +1,15 @@
 package backup

 import (
-	"context"
-
 	"github.com/alcionai/clues"
-	"github.com/pkg/errors"
 	"github.com/spf13/cobra"
 	"github.com/spf13/pflag"

 	"github.com/alcionai/corso/src/cli/flags"
 	. "github.com/alcionai/corso/src/cli/print"
 	"github.com/alcionai/corso/src/cli/utils"
-	"github.com/alcionai/corso/src/internal/data"
-	"github.com/alcionai/corso/src/pkg/backup/details"
 	"github.com/alcionai/corso/src/pkg/fault"
 	"github.com/alcionai/corso/src/pkg/path"
-	"github.com/alcionai/corso/src/pkg/repository"
 	"github.com/alcionai/corso/src/pkg/selectors"
 )

@@ -162,7 +156,7 @@ func createOneDriveCmd(cmd *cobra.Command, args []string) error {
 		selectorSet = append(selectorSet, discSel.Selector)
 	}

-	return runBackups(
+	return genericCreateCommand(
 		ctx,
 		r,
 		"OneDrive",
@@ -229,74 +223,31 @@ func detailsOneDriveCmd(cmd *cobra.Command, args []string) error {
 		return nil
 	}

+	return runDetailsOneDriveCmd(cmd)
+}
+
+func runDetailsOneDriveCmd(cmd *cobra.Command) error {
 	ctx := cmd.Context()
 	opts := utils.MakeOneDriveOpts(cmd)

-	r, rdao, err := utils.GetAccountAndConnect(ctx, cmd, path.OneDriveService)
+	sel := utils.IncludeOneDriveRestoreDataSelectors(opts)
+	sel.Configure(selectors.Config{OnlyMatchItemNames: true})
+	utils.FilterOneDriveRestoreInfoSelectors(sel, opts)
+
+	ds, err := genericDetailsCommand(cmd, flags.BackupIDFV, sel.Selector)
 	if err != nil {
 		return Only(ctx, err)
 	}

-	defer utils.CloseRepo(ctx, r)
-
-	ds, err := runDetailsOneDriveCmd(
-		ctx,
-		r,
-		flags.BackupIDFV,
-		opts,
-		rdao.Opts.SkipReduce)
-	if err != nil {
-		return Only(ctx, err)
-	}
-
-	if len(ds.Entries) == 0 {
+	if len(ds.Entries) > 0 {
+		ds.PrintEntries(ctx)
+	} else {
 		Info(ctx, selectors.ErrorNoMatchingItems)
-		return nil
 	}
-
-	ds.PrintEntries(ctx)

 	return nil
 }

-// runDetailsOneDriveCmd actually performs the lookup in backup details.
-// the fault.Errors return is always non-nil. Callers should check if
-// errs.Failure() == nil.
-func runDetailsOneDriveCmd(
-	ctx context.Context,
-	r repository.BackupGetter,
-	backupID string,
-	opts utils.OneDriveOpts,
-	skipReduce bool,
-) (*details.Details, error) {
-	if err := utils.ValidateOneDriveRestoreFlags(backupID, opts); err != nil {
-		return nil, err
-	}
-
-	ctx = clues.Add(ctx, "backup_id", backupID)
-
-	d, _, errs := r.GetBackupDetails(ctx, backupID)
-	// TODO: log/track recoverable errors
-	if errs.Failure() != nil {
-		if errors.Is(errs.Failure(), data.ErrNotFound) {
-			return nil, clues.New("no backup exists with the id " + backupID)
-		}
-
-		return nil, clues.Wrap(errs.Failure(), "Failed to get backup details in the repository")
-	}
-
-	ctx = clues.Add(ctx, "details_entries", len(d.Entries))
-
-	if !skipReduce {
-		sel := utils.IncludeOneDriveRestoreDataSelectors(opts)
-		sel.Configure(selectors.Config{OnlyMatchItemNames: true})
-		utils.FilterOneDriveRestoreInfoSelectors(sel, opts)
-		d = sel.Reduce(ctx, d, errs)
-	}
-
-	return d, nil
-}
-
 // `corso backup delete onedrive [<flag>...]`
 func oneDriveDeleteCmd() *cobra.Command {
 	return &cobra.Command{
@@ -20,6 +20,7 @@ import (
 	"github.com/alcionai/corso/src/internal/operations"
 	"github.com/alcionai/corso/src/internal/tester"
 	"github.com/alcionai/corso/src/internal/tester/tconfig"
+	"github.com/alcionai/corso/src/pkg/path"
 	"github.com/alcionai/corso/src/pkg/selectors"
 	selTD "github.com/alcionai/corso/src/pkg/selectors/testdata"
 	storeTD "github.com/alcionai/corso/src/pkg/storage/testdata"
@@ -48,7 +49,7 @@ func (suite *NoBackupOneDriveE2ESuite) SetupSuite() {
 	ctx, flush := tester.NewContext(t)
 	defer flush()

-	suite.dpnd = prepM365Test(t, ctx)
+	suite.dpnd = prepM365Test(t, ctx, path.OneDriveService)
 }

 func (suite *NoBackupOneDriveE2ESuite) TestOneDriveBackupListCmd_empty() {
@@ -139,7 +140,7 @@ func (suite *BackupDeleteOneDriveE2ESuite) SetupSuite() {
 	ctx, flush := tester.NewContext(t)
 	defer flush()

-	suite.dpnd = prepM365Test(t, ctx)
+	suite.dpnd = prepM365Test(t, ctx, path.OneDriveService)

 	var (
 		m365UserID = tconfig.M365UserID(t)
@@ -1,7 +1,6 @@
 package backup

 import (
-	"fmt"
 	"testing"

 	"github.com/alcionai/clues"
@@ -14,10 +13,7 @@ import (
 	flagsTD "github.com/alcionai/corso/src/cli/flags/testdata"
 	cliTD "github.com/alcionai/corso/src/cli/testdata"
 	"github.com/alcionai/corso/src/cli/utils"
-	utilsTD "github.com/alcionai/corso/src/cli/utils/testdata"
 	"github.com/alcionai/corso/src/internal/tester"
-	"github.com/alcionai/corso/src/internal/version"
-	dtd "github.com/alcionai/corso/src/pkg/backup/details/testdata"
 	"github.com/alcionai/corso/src/pkg/control"
 )

@@ -227,51 +223,3 @@ func (suite *OneDriveUnitSuite) TestValidateOneDriveBackupCreateFlags() {
 		})
 	}
 }
-
-func (suite *OneDriveUnitSuite) TestOneDriveBackupDetailsSelectors() {
-	for v := 0; v <= version.Backup; v++ {
-		suite.Run(fmt.Sprintf("version%d", v), func() {
-			for _, test := range utilsTD.OneDriveOptionDetailLookups {
-				suite.Run(test.Name, func() {
-					t := suite.T()
-
-					ctx, flush := tester.NewContext(t)
-					defer flush()
-
-					bg := utilsTD.VersionedBackupGetter{
-						Details: dtd.GetDetailsSetForVersion(t, v),
-					}
-
-					output, err := runDetailsOneDriveCmd(
-						ctx,
-						bg,
-						"backup-ID",
-						test.Opts(t, v),
-						false)
-					assert.NoError(t, err, clues.ToCore(err))
-					assert.ElementsMatch(t, test.Expected(t, v), output.Entries)
-				})
-			}
-		})
-	}
-}
-
-func (suite *OneDriveUnitSuite) TestOneDriveBackupDetailsSelectorsBadFormats() {
-	for _, test := range utilsTD.BadOneDriveOptionsFormats {
-		suite.Run(test.Name, func() {
-			t := suite.T()
-
-			ctx, flush := tester.NewContext(t)
-			defer flush()
-
-			output, err := runDetailsOneDriveCmd(
-				ctx,
-				test.BackupGetter,
-				"backup-ID",
-				test.Opts(t, version.Backup),
-				false)
-			assert.Error(t, err, clues.ToCore(err))
-			assert.Empty(t, output)
-		})
-	}
-}
@@ -4,7 +4,6 @@ import (
 	"context"

 	"github.com/alcionai/clues"
-	"github.com/pkg/errors"
 	"github.com/spf13/cobra"
 	"github.com/spf13/pflag"
 	"golang.org/x/exp/slices"
@@ -13,12 +12,9 @@ import (
 	. "github.com/alcionai/corso/src/cli/print"
 	"github.com/alcionai/corso/src/cli/utils"
 	"github.com/alcionai/corso/src/internal/common/idname"
-	"github.com/alcionai/corso/src/internal/data"
-	"github.com/alcionai/corso/src/pkg/backup/details"
 	"github.com/alcionai/corso/src/pkg/fault"
 	"github.com/alcionai/corso/src/pkg/filters"
 	"github.com/alcionai/corso/src/pkg/path"
-	"github.com/alcionai/corso/src/pkg/repository"
 	"github.com/alcionai/corso/src/pkg/selectors"
 	"github.com/alcionai/corso/src/pkg/services/m365"
 )
@@ -179,7 +175,7 @@ func createSharePointCmd(cmd *cobra.Command, args []string) error {
 		selectorSet = append(selectorSet, discSel.Selector)
 	}

-	return runBackups(
+	return genericCreateCommand(
 		ctx,
 		r,
 		"SharePoint",
@@ -303,7 +299,7 @@ func deleteSharePointCmd(cmd *cobra.Command, args []string) error {
 // backup details
 // ------------------------------------------------------------------------------------------------

-// `corso backup details onedrive [<flag>...]`
+// `corso backup details SharePoint [<flag>...]`
 func sharePointDetailsCmd() *cobra.Command {
 	return &cobra.Command{
 		Use:   sharePointServiceCommand,
@@ -324,70 +320,27 @@ func detailsSharePointCmd(cmd *cobra.Command, args []string) error {
 		return nil
 	}

+	return runDetailsSharePointCmd(cmd)
+}
+
+func runDetailsSharePointCmd(cmd *cobra.Command) error {
 	ctx := cmd.Context()
 	opts := utils.MakeSharePointOpts(cmd)

-	r, rdao, err := utils.GetAccountAndConnect(ctx, cmd, path.SharePointService)
+	sel := utils.IncludeSharePointRestoreDataSelectors(ctx, opts)
+	sel.Configure(selectors.Config{OnlyMatchItemNames: true})
+	utils.FilterSharePointRestoreInfoSelectors(sel, opts)
+
+	ds, err := genericDetailsCommand(cmd, flags.BackupIDFV, sel.Selector)
 	if err != nil {
 		return Only(ctx, err)
 	}

-	defer utils.CloseRepo(ctx, r)
-
-	ds, err := runDetailsSharePointCmd(
-		ctx,
-		r,
-		flags.BackupIDFV,
-		opts,
-		rdao.Opts.SkipReduce)
-	if err != nil {
-		return Only(ctx, err)
-	}
-
-	if len(ds.Entries) == 0 {
+	if len(ds.Entries) > 0 {
+		ds.PrintEntries(ctx)
+	} else {
 		Info(ctx, selectors.ErrorNoMatchingItems)
-		return nil
 	}
-
-	ds.PrintEntries(ctx)

 	return nil
 }

-// runDetailsSharePointCmd actually performs the lookup in backup details.
-// the fault.Errors return is always non-nil. Callers should check if
-// errs.Failure() == nil.
-func runDetailsSharePointCmd(
-	ctx context.Context,
-	r repository.BackupGetter,
-	backupID string,
-	opts utils.SharePointOpts,
-	skipReduce bool,
-) (*details.Details, error) {
-	if err := utils.ValidateSharePointRestoreFlags(backupID, opts); err != nil {
-		return nil, err
-	}
-
-	ctx = clues.Add(ctx, "backup_id", backupID)
-
-	d, _, errs := r.GetBackupDetails(ctx, backupID)
-	// TODO: log/track recoverable errors
-	if errs.Failure() != nil {
-		if errors.Is(errs.Failure(), data.ErrNotFound) {
-			return nil, clues.New("no backup exists with the id " + backupID)
-		}
-
-		return nil, clues.Wrap(errs.Failure(), "Failed to get backup details in the repository")
-	}
-
-	ctx = clues.Add(ctx, "details_entries", len(d.Entries))
-
-	if !skipReduce {
-		sel := utils.IncludeSharePointRestoreDataSelectors(ctx, opts)
-		sel.Configure(selectors.Config{OnlyMatchItemNames: true})
-		utils.FilterSharePointRestoreInfoSelectors(sel, opts)
-		d = sel.Reduce(ctx, d, errs)
-	}
-
-	return d, nil
-}
@@ -20,6 +20,7 @@ import (
 	"github.com/alcionai/corso/src/internal/operations"
 	"github.com/alcionai/corso/src/internal/tester"
 	"github.com/alcionai/corso/src/internal/tester/tconfig"
+	"github.com/alcionai/corso/src/pkg/path"
 	"github.com/alcionai/corso/src/pkg/selectors"
 	"github.com/alcionai/corso/src/pkg/selectors/testdata"
 	storeTD "github.com/alcionai/corso/src/pkg/storage/testdata"
@@ -46,7 +47,7 @@ func (suite *NoBackupSharePointE2ESuite) SetupSuite() {
 	ctx, flush := tester.NewContext(t)
 	defer flush()

-	suite.dpnd = prepM365Test(t, ctx)
+	suite.dpnd = prepM365Test(t, ctx, path.SharePointService)
 }

 func (suite *NoBackupSharePointE2ESuite) TestSharePointBackupListCmd_empty() {
@@ -103,7 +104,7 @@ func (suite *BackupDeleteSharePointE2ESuite) SetupSuite() {
 	ctx, flush := tester.NewContext(t)
 	defer flush()

-	suite.dpnd = prepM365Test(t, ctx)
+	suite.dpnd = prepM365Test(t, ctx, path.SharePointService)

 	var (
 		m365SiteID = tconfig.M365SiteID(t)
@@ -1,7 +1,6 @@
 package backup

 import (
-	"fmt"
 	"strings"
 	"testing"

@@ -15,11 +14,8 @@ import (
 	flagsTD "github.com/alcionai/corso/src/cli/flags/testdata"
 	cliTD "github.com/alcionai/corso/src/cli/testdata"
 	"github.com/alcionai/corso/src/cli/utils"
-	utilsTD "github.com/alcionai/corso/src/cli/utils/testdata"
 	"github.com/alcionai/corso/src/internal/common/idname"
 	"github.com/alcionai/corso/src/internal/tester"
-	"github.com/alcionai/corso/src/internal/version"
-	dtd "github.com/alcionai/corso/src/pkg/backup/details/testdata"
 	"github.com/alcionai/corso/src/pkg/control"
 	"github.com/alcionai/corso/src/pkg/selectors"
 )
@@ -339,51 +335,3 @@ func (suite *SharePointUnitSuite) TestSharePointBackupCreateSelectors() {
 		})
 	}
 }
-
-func (suite *SharePointUnitSuite) TestSharePointBackupDetailsSelectors() {
-	for v := 0; v <= version.Backup; v++ {
-		suite.Run(fmt.Sprintf("version%d", v), func() {
-			for _, test := range utilsTD.SharePointOptionDetailLookups {
-				suite.Run(test.Name, func() {
-					t := suite.T()
-
-					ctx, flush := tester.NewContext(t)
-					defer flush()
-
-					bg := utilsTD.VersionedBackupGetter{
-						Details: dtd.GetDetailsSetForVersion(t, v),
-					}
-
-					output, err := runDetailsSharePointCmd(
-						ctx,
-						bg,
-						"backup-ID",
-						test.Opts(t, v),
-						false)
-					assert.NoError(t, err, clues.ToCore(err))
-					assert.ElementsMatch(t, test.Expected(t, v), output.Entries)
-				})
-			}
-		})
-	}
-}
-
-func (suite *SharePointUnitSuite) TestSharePointBackupDetailsSelectorsBadFormats() {
-	for _, test := range utilsTD.BadSharePointOptionsFormats {
-		suite.Run(test.Name, func() {
-			t := suite.T()
-
-			ctx, flush := tester.NewContext(t)
-			defer flush()
-
-			output, err := runDetailsSharePointCmd(
-				ctx,
-				test.BackupGetter,
-				"backup-ID",
-				test.Opts(t, version.Backup),
-				false)
-			assert.Error(t, err, clues.ToCore(err))
-			assert.Empty(t, output)
-		})
-	}
-}
src/cli/flags/testdata/backup_list.go (vendored, 3 changed lines)
@@ -3,9 +3,10 @@ package testdata
 import (
 	"testing"

-	"github.com/alcionai/corso/src/cli/flags"
 	"github.com/spf13/cobra"
 	"gotest.tools/v3/assert"
+
+	"github.com/alcionai/corso/src/cli/flags"
 )

 func PreparedBackupListFlags() []string {
@@ -85,7 +85,7 @@ func initFilesystemCmd(cmd *cobra.Command, args []string) error {

 	opt := utils.ControlWithConfig(cfg)
 	// Retention is not supported for filesystem repos.
-	retention := ctrlRepo.Retention{}
+	retentionOpts := ctrlRepo.Retention{}

 	// SendStartCorsoEvent uses distict ID as tenant ID because repoID is still not generated
 	utils.SendStartCorsoEvent(
@@ -116,7 +116,9 @@
 		return Only(ctx, clues.Wrap(err, "Failed to construct the repository controller"))
 	}

-	if err = r.Initialize(ctx, retention); err != nil {
+	ric := repository.InitConfig{RetentionOpts: retentionOpts}
+
+	if err = r.Initialize(ctx, ric); err != nil {
 		if flags.SucceedIfExistsFV && errors.Is(err, repository.ErrorRepoAlreadyExists) {
 			return nil
 		}
@@ -207,7 +209,7 @@ func connectFilesystemCmd(cmd *cobra.Command, args []string) error {
 		return Only(ctx, clues.Wrap(err, "Failed to create a repository controller"))
 	}

-	if err := r.Connect(ctx); err != nil {
+	if err := r.Connect(ctx, repository.ConnConfig{}); err != nil {
 		return Only(ctx, clues.Stack(ErrConnectingRepo, err))
 	}

@@ -16,7 +16,6 @@ import (
 	"github.com/alcionai/corso/src/internal/tester/tconfig"
 	"github.com/alcionai/corso/src/pkg/account"
 	"github.com/alcionai/corso/src/pkg/control"
-	ctrlRepo "github.com/alcionai/corso/src/pkg/control/repository"
 	"github.com/alcionai/corso/src/pkg/repository"
 	"github.com/alcionai/corso/src/pkg/storage"
 	storeTD "github.com/alcionai/corso/src/pkg/storage/testdata"
@@ -132,13 +131,13 @@ func (suite *FilesystemE2ESuite) TestConnectFilesystemCmd() {
 	// init the repo first
 	r, err := repository.New(
 		ctx,
-		account.Account{},
+		tconfig.NewM365Account(t),
 		st,
 		control.DefaultOptions(),
 		repository.NewRepoID)
 	require.NoError(t, err, clues.ToCore(err))

-	err = r.Initialize(ctx, ctrlRepo.Retention{})
+	err = r.Initialize(ctx, repository.InitConfig{})
 	require.NoError(t, err, clues.ToCore(err))

 	// then test it
@@ -143,7 +143,9 @@ func initS3Cmd(cmd *cobra.Command, args []string) error {
 		return Only(ctx, clues.Wrap(err, "Failed to construct the repository controller"))
 	}

-	if err = r.Initialize(ctx, retentionOpts); err != nil {
+	ric := repository.InitConfig{RetentionOpts: retentionOpts}
+
+	if err = r.Initialize(ctx, ric); err != nil {
 		if flags.SucceedIfExistsFV && errors.Is(err, repository.ErrorRepoAlreadyExists) {
 			return nil
 		}
@@ -226,7 +228,7 @@ func connectS3Cmd(cmd *cobra.Command, args []string) error {
 		return Only(ctx, clues.Wrap(err, "Failed to create a repository controller"))
 	}

-	if err := r.Connect(ctx); err != nil {
+	if err := r.Connect(ctx, repository.ConnConfig{}); err != nil {
 		return Only(ctx, clues.Stack(ErrConnectingRepo, err))
 	}

@@ -18,7 +18,6 @@ import (
 	"github.com/alcionai/corso/src/internal/tester/tconfig"
 	"github.com/alcionai/corso/src/pkg/account"
 	"github.com/alcionai/corso/src/pkg/control"
-	ctrlRepo "github.com/alcionai/corso/src/pkg/control/repository"
 	"github.com/alcionai/corso/src/pkg/repository"
 	"github.com/alcionai/corso/src/pkg/storage"
 	storeTD "github.com/alcionai/corso/src/pkg/storage/testdata"
@@ -208,13 +207,13 @@ func (suite *S3E2ESuite) TestConnectS3Cmd() {
 	// init the repo first
 	r, err := repository.New(
 		ctx,
-		account.Account{},
+		tconfig.NewM365Account(t),
 		st,
 		control.DefaultOptions(),
 		repository.NewRepoID)
 	require.NoError(t, err, clues.ToCore(err))

-	err = r.Initialize(ctx, ctrlRepo.Retention{})
+	err = r.Initialize(ctx, repository.InitConfig{})
 	require.NoError(t, err, clues.ToCore(err))

 	// then test it
@@ -20,7 +20,6 @@ import (
 	"github.com/alcionai/corso/src/internal/tester/tconfig"
 	"github.com/alcionai/corso/src/pkg/account"
 	"github.com/alcionai/corso/src/pkg/control"
-	ctrlRepo "github.com/alcionai/corso/src/pkg/control/repository"
 	"github.com/alcionai/corso/src/pkg/path"
 	"github.com/alcionai/corso/src/pkg/repository"
 	"github.com/alcionai/corso/src/pkg/selectors"
@@ -92,7 +91,7 @@ func (suite *RestoreExchangeE2ESuite) SetupSuite() {
 		repository.NewRepoID)
 	require.NoError(t, err, clues.ToCore(err))

-	err = suite.repo.Initialize(ctx, ctrlRepo.Retention{})
+	err = suite.repo.Initialize(ctx, repository.InitConfig{Service: path.ExchangeService})
 	require.NoError(t, err, clues.ToCore(err))

 	suite.backupOps = make(map[path.CategoryType]operations.BackupOperation)
@@ -78,16 +78,10 @@ func GetAccountAndConnectWithOverrides(
 		return nil, RepoDetailsAndOpts{}, clues.Wrap(err, "creating a repository controller")
 	}

-	if err := r.Connect(ctx); err != nil {
+	if err := r.Connect(ctx, repository.ConnConfig{Service: pst}); err != nil {
 		return nil, RepoDetailsAndOpts{}, clues.Wrap(err, "connecting to the "+cfg.Storage.Provider.String()+" repository")
 	}

-	// this initializes our graph api client configurations,
-	// including control options such as concurency limitations.
-	if _, err := r.ConnectToM365(ctx, pst); err != nil {
-		return nil, RepoDetailsAndOpts{}, clues.Wrap(err, "connecting to m365")
-	}
-
 	rdao := RepoDetailsAndOpts{
 		Repo: cfg,
 		Opts: opts,
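Taken together with the `Initialize` changes in the hunks above, repository setup now threads the service type through small config structs instead of a separate `ConnectToM365` call. The sketch below is assembled only from calls that appear in this diff; the wrapper function and its name are hypothetical, and imports are omitted (all identifiers come from packages already imported in the files above):

```go
// openRepo is a hypothetical wrapper; every repository call in it appears in this diff.
func openRepo(ctx context.Context, acct account.Account, st storage.Storage) error {
	r, err := repository.New(ctx, acct, st, control.DefaultOptions(), repository.NewRepoID)
	if err != nil {
		return clues.Wrap(err, "creating a repository controller")
	}

	// First-time setup: Initialize takes an InitConfig (service + retention)
	// rather than bare retention options.
	ic := repository.InitConfig{
		Service:       path.ExchangeService,
		RetentionOpts: ctrlRepo.Retention{},
	}

	if err := r.Initialize(ctx, ic); err != nil {
		if !errors.Is(err, repository.ErrorRepoAlreadyExists) {
			return clues.Wrap(err, "initializing the repository")
		}

		// Existing repo: Connect now carries the service type directly, replacing
		// the separate ConnectToM365 step removed in the hunk above.
		if err := r.Connect(ctx, repository.ConnConfig{Service: path.ExchangeService}); err != nil {
			return clues.Wrap(err, "connecting to the repository")
		}
	}

	return nil
}
```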
@@ -72,7 +72,7 @@ func deleteBackups(
 // Only supported for S3 repos currently.
 func pitrListBackups(
 	ctx context.Context,
-	service path.ServiceType,
+	pst path.ServiceType,
 	pitr time.Time,
 	backupIDs []string,
 ) error {
@@ -113,14 +113,14 @@ func pitrListBackups(
 		return clues.Wrap(err, "creating a repo")
 	}

-	err = r.Connect(ctx)
+	err = r.Connect(ctx, repository.ConnConfig{Service: pst})
 	if err != nil {
 		return clues.Wrap(err, "connecting to the repository")
 	}

 	defer r.Close(ctx)

-	backups, err := r.BackupsByTag(ctx, store.Service(service))
+	backups, err := r.BackupsByTag(ctx, store.Service(pst))
 	if err != nil {
 		return clues.Wrap(err, "listing backups").WithClues(ctx)
 	}
@@ -229,7 +229,7 @@ elseif (![string]::IsNullOrEmpty($Site)) {
     }
 }
 else {
-    Write-Host "User (for OneDrive) or Site (for Sharpeoint) is required"
+    Write-Host "User (for OneDrive) or Site (for SharePoint) is required"
     Exit
 }

@@ -8,7 +8,7 @@ require (
 	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.1
 	github.com/alcionai/clues v0.0.0-20230920212840-728ac1a1d8b8
 	github.com/armon/go-metrics v0.4.1
-	github.com/aws/aws-xray-sdk-go v1.8.1
+	github.com/aws/aws-xray-sdk-go v1.8.2
 	github.com/cenkalti/backoff/v4 v4.2.1
 	github.com/google/uuid v1.3.1
 	github.com/h2non/gock v1.2.0
|
|||||||
@ -71,8 +71,8 @@ github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJ
|
|||||||
github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
|
github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
|
||||||
github.com/aws/aws-sdk-go v1.45.0 h1:qoVOQHuLacxJMO71T49KeE70zm+Tk3vtrl7XO4VUPZc=
|
github.com/aws/aws-sdk-go v1.45.0 h1:qoVOQHuLacxJMO71T49KeE70zm+Tk3vtrl7XO4VUPZc=
|
||||||
github.com/aws/aws-sdk-go v1.45.0/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
|
github.com/aws/aws-sdk-go v1.45.0/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
|
||||||
github.com/aws/aws-xray-sdk-go v1.8.1 h1:O4pXV+hnCskaamGsZnFpzHyAmgPGusBMN6i7nnsy0Fo=
|
github.com/aws/aws-xray-sdk-go v1.8.2 h1:PVxNWnQG+rAYjxsmhEN97DTO57Dipg6VS0wsu6bXUB0=
|
||||||
github.com/aws/aws-xray-sdk-go v1.8.1/go.mod h1:wMmVYzej3sykAttNBkXQHK/+clAPWTOrPiajEk7Cp3A=
|
github.com/aws/aws-xray-sdk-go v1.8.2/go.mod h1:wMmVYzej3sykAttNBkXQHK/+clAPWTOrPiajEk7Cp3A=
|
||||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||||
|
|||||||
@ -27,7 +27,7 @@ func NewPrefixMap(m map[string]map[string]struct{}) *PrefixMap {
|
|||||||
|
|
||||||
func (pm PrefixMap) AssertEqual(t *testing.T, r prefixmatcher.StringSetReader) {
|
func (pm PrefixMap) AssertEqual(t *testing.T, r prefixmatcher.StringSetReader) {
|
||||||
if pm.Empty() {
|
if pm.Empty() {
|
||||||
require.True(t, r.Empty(), "result prefixMap should be empty but contains keys: %+v", r.Keys())
|
require.True(t, r.Empty(), "both prefix maps are empty")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
187 src/internal/common/readers/serialization_version.go (new file)
@ -0,0 +1,187 @@
package readers

import (
    "bytes"
    "encoding/binary"
    "io"
    "os"
    "unsafe"

    "github.com/alcionai/clues"
)

// persistedSerializationVersion is the size of the serialization version in
// storage.
//
// The current on-disk format of this field is written in big endian. The
// highest bit denotes if the item is empty because it was deleted between the
// time we told the storage about it and when we needed to get data for it. The
// lowest two bytes are the version number. All other bits are reserved for
// future use.
//
// MSB 31        30             16       8       0 LSB
//     +----------+----+---------+--------+------+
//     | del flag |    reserved  | version number |
//     +----------+----+---------+--------+------+
type persistedSerializationVersion = uint32

// SerializationVersion is the in-memory size of the version number that gets
// added to the persisted serialization version.
//
// Right now it's only a uint16 but we can expand it to be larger so long as the
// expanded size doesn't clash with the flags in the high-order bits.
type SerializationVersion uint16

// DefaultSerializationVersion is the current (default) version number for all
// services. As services evolve their storage format they should begin tracking
// their own version numbers separate from other services.
const DefaultSerializationVersion SerializationVersion = 1

const (
    VersionFormatSize = int(unsafe.Sizeof(persistedSerializationVersion(0)))

    delInFlightMask persistedSerializationVersion = 1 << ((VersionFormatSize * 8) - 1)
)

// SerializationFormat is a struct describing serialization format versions and
// flags to add for this item.
type SerializationFormat struct {
    Version     SerializationVersion
    DelInFlight bool
}

// NewVersionedBackupReader creates a reader that injects format into the first
// bytes of the returned data. After format has been returned, data is returned
// from baseReaders in the order they're passed in.
func NewVersionedBackupReader(
    format SerializationFormat,
    baseReaders ...io.ReadCloser,
) (io.ReadCloser, error) {
    if format.DelInFlight && len(baseReaders) > 0 {
        // This is a conservative check, but we can always loosen it later on if
        // needed. At the moment we really don't expect any data if the item was
        // deleted.
        return nil, clues.New("item marked deleted but has reader(s)")
    }

    formattedVersion := persistedSerializationVersion(format.Version)
    if format.DelInFlight {
        formattedVersion |= delInFlightMask
    }

    formattedBuf := make([]byte, VersionFormatSize)
    binary.BigEndian.PutUint32(formattedBuf, formattedVersion)

    versionReader := io.NopCloser(bytes.NewReader(formattedBuf))

    // Need to add readers individually because types differ.
    allReaders := make([]io.Reader, 0, len(baseReaders)+1)
    allReaders = append(allReaders, versionReader)

    for _, r := range baseReaders {
        allReaders = append(allReaders, r)
    }

    res := &versionedBackupReader{
        baseReaders: append([]io.ReadCloser{versionReader}, baseReaders...),
        combined:    io.MultiReader(allReaders...),
    }

    return res, nil
}

type versionedBackupReader struct {
    // baseReaders is a reference to the original readers so we can close them.
    baseReaders []io.ReadCloser
    // combined is the reader that will return all data.
    combined io.Reader
}

func (vbr *versionedBackupReader) Read(p []byte) (int, error) {
    if vbr.combined == nil {
        return 0, os.ErrClosed
    }

    n, err := vbr.combined.Read(p)
    if err == io.EOF {
        // Golang doesn't allow wrapping of EOF. If we wrap it other things start
        // thinking it's an actual error.
        return n, err
    }

    return n, clues.Stack(err).OrNil()
}

func (vbr *versionedBackupReader) Close() error {
    if vbr.combined == nil {
        return nil
    }

    vbr.combined = nil

    var errs *clues.Err

    for i, r := range vbr.baseReaders {
        if err := r.Close(); err != nil {
            errs = clues.Stack(
                errs,
                clues.Wrap(err, "closing reader").With("reader_index", i))
        }
    }

    vbr.baseReaders = nil

    return errs.OrNil()
}

// NewVersionedRestoreReader wraps baseReader and provides easy access to the
// SerializationFormat info in the first bytes of the data contained in
// baseReader.
func NewVersionedRestoreReader(
    baseReader io.ReadCloser,
) (*VersionedRestoreReader, error) {
    versionBuf := make([]byte, VersionFormatSize)

    // Loop to account for the unlikely case where we get a short read.
    for read := 0; read < VersionFormatSize; {
        n, err := baseReader.Read(versionBuf[read:])
        if err != nil {
            return nil, clues.Wrap(err, "reading serialization version")
        }

        read += n
    }

    formattedVersion := binary.BigEndian.Uint32(versionBuf)

    return &VersionedRestoreReader{
        baseReader: baseReader,
        format: SerializationFormat{
            Version:     SerializationVersion(formattedVersion),
            DelInFlight: (formattedVersion & delInFlightMask) != 0,
        },
    }, nil
}

type VersionedRestoreReader struct {
    baseReader io.ReadCloser
    format     SerializationFormat
}

func (vrr *VersionedRestoreReader) Read(p []byte) (int, error) {
    n, err := vrr.baseReader.Read(p)
    if err == io.EOF {
        // Golang doesn't allow wrapping of EOF. If we wrap it other things start
        // thinking it's an actual error.
        return n, err
    }

    return n, clues.Stack(err).OrNil()
}

func (vrr *VersionedRestoreReader) Close() error {
    return clues.Stack(vrr.baseReader.Close()).OrNil()
}

func (vrr VersionedRestoreReader) Format() SerializationFormat {
    return vrr.format
}
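As a quick illustration of the header layout documented above, the standalone sketch below packs and unpacks the 32-bit big-endian value by hand. It is not part of the readers package; packHeader and unpackHeader are hypothetical helper names, and the expected output matches the byte patterns the new tests compare against.

package main

import (
    "encoding/binary"
    "fmt"
)

// delFlag mirrors the high-order "deleted in flight" bit described above.
const delFlag uint32 = 1 << 31

// packHeader stores a 16-bit version and the deletion flag in a 4-byte
// big-endian header.
func packHeader(version uint16, deleted bool) []byte {
    v := uint32(version)
    if deleted {
        v |= delFlag
    }

    buf := make([]byte, 4)
    binary.BigEndian.PutUint32(buf, v)

    return buf
}

// unpackHeader reverses packHeader.
func unpackHeader(buf []byte) (version uint16, deleted bool) {
    v := binary.BigEndian.Uint32(buf)
    return uint16(v & 0xFFFF), v&delFlag != 0
}

func main() {
    h := packHeader(42, true)
    fmt.Printf("% x\n", h) // 80 00 00 2a

    v, d := unpackHeader(h)
    fmt.Println(v, d) // 42 true
}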
362 src/internal/common/readers/serialization_version_test.go (new file)
@ -0,0 +1,362 @@
|
|||||||
|
package readers_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"io"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/alcionai/clues"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
"github.com/stretchr/testify/suite"
|
||||||
|
"golang.org/x/exp/slices"
|
||||||
|
|
||||||
|
"github.com/alcionai/corso/src/internal/common/readers"
|
||||||
|
"github.com/alcionai/corso/src/internal/tester"
|
||||||
|
)
|
||||||
|
|
||||||
|
type shortReader struct {
|
||||||
|
maxReadLen int
|
||||||
|
io.ReadCloser
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *shortReader) Read(p []byte) (int, error) {
|
||||||
|
toRead := s.maxReadLen
|
||||||
|
if len(p) < toRead {
|
||||||
|
toRead = len(p)
|
||||||
|
}
|
||||||
|
|
||||||
|
return s.ReadCloser.Read(p[:toRead])
|
||||||
|
}
|
||||||
|
|
||||||
|
type SerializationReaderUnitSuite struct {
|
||||||
|
tester.Suite
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSerializationReaderUnitSuite(t *testing.T) {
|
||||||
|
suite.Run(t, &SerializationReaderUnitSuite{Suite: tester.NewUnitSuite(t)})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (suite *SerializationReaderUnitSuite) TestBackupSerializationReader() {
|
||||||
|
baseData := []byte("hello world")
|
||||||
|
|
||||||
|
table := []struct {
|
||||||
|
name string
|
||||||
|
format readers.SerializationFormat
|
||||||
|
inputReaders []io.ReadCloser
|
||||||
|
|
||||||
|
expectErr require.ErrorAssertionFunc
|
||||||
|
expectData []byte
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "DeletedInFlight NoVersion NoReaders",
|
||||||
|
format: readers.SerializationFormat{
|
||||||
|
DelInFlight: true,
|
||||||
|
},
|
||||||
|
expectErr: require.NoError,
|
||||||
|
expectData: []byte{0x80, 0x0, 0x0, 0x0},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "DeletedInFlight NoReaders",
|
||||||
|
format: readers.SerializationFormat{
|
||||||
|
Version: 42,
|
||||||
|
DelInFlight: true,
|
||||||
|
},
|
||||||
|
expectErr: require.NoError,
|
||||||
|
expectData: []byte{0x80, 0x0, 0x0, 42},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "NoVersion NoReaders",
|
||||||
|
expectErr: require.NoError,
|
||||||
|
expectData: []byte{0x00, 0x0, 0x0, 0x0},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "NoReaders",
|
||||||
|
format: readers.SerializationFormat{
|
||||||
|
Version: 42,
|
||||||
|
},
|
||||||
|
expectErr: require.NoError,
|
||||||
|
expectData: []byte{0x00, 0x0, 0x0, 42},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "SingleReader",
|
||||||
|
format: readers.SerializationFormat{
|
||||||
|
Version: 42,
|
||||||
|
},
|
||||||
|
inputReaders: []io.ReadCloser{io.NopCloser(bytes.NewReader(baseData))},
|
||||||
|
expectErr: require.NoError,
|
||||||
|
expectData: append([]byte{0x00, 0x0, 0x0, 42}, baseData...),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "MultipleReaders",
|
||||||
|
format: readers.SerializationFormat{
|
||||||
|
Version: 42,
|
||||||
|
},
|
||||||
|
inputReaders: []io.ReadCloser{
|
||||||
|
io.NopCloser(bytes.NewReader(baseData)),
|
||||||
|
io.NopCloser(bytes.NewReader(baseData)),
|
||||||
|
},
|
||||||
|
expectErr: require.NoError,
|
||||||
|
expectData: append(
|
||||||
|
append([]byte{0x00, 0x0, 0x0, 42}, baseData...),
|
||||||
|
baseData...),
|
||||||
|
},
|
||||||
|
// Uncomment if we expand the version to 32 bits.
|
||||||
|
//{
|
||||||
|
// name: "VersionWithHighBitSet NoReaders Errors",
|
||||||
|
// format: readers.SerializationFormat{
|
||||||
|
// Version: 0x80000000,
|
||||||
|
// },
|
||||||
|
// expectErr: require.Error,
|
||||||
|
//},
|
||||||
|
{
|
||||||
|
name: "DeletedInFlight SingleReader Errors",
|
||||||
|
format: readers.SerializationFormat{
|
||||||
|
DelInFlight: true,
|
||||||
|
},
|
||||||
|
inputReaders: []io.ReadCloser{io.NopCloser(bytes.NewReader(baseData))},
|
||||||
|
expectErr: require.Error,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range table {
|
||||||
|
suite.Run(test.name, func() {
|
||||||
|
t := suite.T()
|
||||||
|
|
||||||
|
r, err := readers.NewVersionedBackupReader(
|
||||||
|
test.format,
|
||||||
|
test.inputReaders...)
|
||||||
|
test.expectErr(t, err, "getting backup reader: %v", clues.ToCore(err))
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
err := r.Close()
|
||||||
|
assert.NoError(t, err, "closing reader: %v", clues.ToCore(err))
|
||||||
|
}()
|
||||||
|
|
||||||
|
buf, err := io.ReadAll(r)
|
||||||
|
require.NoError(
|
||||||
|
t,
|
||||||
|
err,
|
||||||
|
"reading serialized data: %v",
|
||||||
|
clues.ToCore(err))
|
||||||
|
|
||||||
|
// Need to use equal because output is order-sensitive.
|
||||||
|
assert.Equal(t, test.expectData, buf, "serialized data")
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (suite *SerializationReaderUnitSuite) TestBackupSerializationReader_ShortReads() {
|
||||||
|
t := suite.T()
|
||||||
|
|
||||||
|
baseData := []byte("hello world")
|
||||||
|
expectData := append(
|
||||||
|
append([]byte{0x00, 0x0, 0x0, 42}, baseData...),
|
||||||
|
baseData...)
|
||||||
|
|
||||||
|
r, err := readers.NewVersionedBackupReader(
|
||||||
|
readers.SerializationFormat{Version: 42},
|
||||||
|
io.NopCloser(bytes.NewReader(baseData)),
|
||||||
|
io.NopCloser(bytes.NewReader(baseData)))
|
||||||
|
require.NoError(t, err, "getting backup reader: %v", clues.ToCore(err))
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
err := r.Close()
|
||||||
|
assert.NoError(t, err, "closing reader: %v", clues.ToCore(err))
|
||||||
|
}()
|
||||||
|
|
||||||
|
buf := make([]byte, len(expectData))
|
||||||
|
r = &shortReader{
|
||||||
|
maxReadLen: 3,
|
||||||
|
ReadCloser: r,
|
||||||
|
}
|
||||||
|
|
||||||
|
for read := 0; ; {
|
||||||
|
n, err := r.Read(buf[read:])
|
||||||
|
|
||||||
|
read += n
|
||||||
|
if read >= len(buf) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
require.NoError(t, err, "reading data: %v", clues.ToCore(err))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Need to use equal because output is order-sensitive.
|
||||||
|
assert.Equal(t, expectData, buf, "serialized data")
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestRestoreSerializationReader checks that we can read previously serialized
|
||||||
|
// data. For simplicity, it uses the versionedBackupReader to generate the
|
||||||
|
// input. This should be relatively safe because the tests for
|
||||||
|
// versionedBackupReader do compare directly against serialized data.
|
||||||
|
func (suite *SerializationReaderUnitSuite) TestRestoreSerializationReader() {
|
||||||
|
baseData := []byte("hello world")
|
||||||
|
|
||||||
|
table := []struct {
|
||||||
|
name string
|
||||||
|
inputReader func(*testing.T) io.ReadCloser
|
||||||
|
|
||||||
|
expectErr require.ErrorAssertionFunc
|
||||||
|
expectVersion readers.SerializationVersion
|
||||||
|
expectDelInFlight bool
|
||||||
|
expectData []byte
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "NoVersion NoReaders",
|
||||||
|
inputReader: func(t *testing.T) io.ReadCloser {
|
||||||
|
r, err := readers.NewVersionedBackupReader(readers.SerializationFormat{})
|
||||||
|
require.NoError(t, err, "making reader: %v", clues.ToCore(err))
|
||||||
|
|
||||||
|
return r
|
||||||
|
},
|
||||||
|
expectErr: require.NoError,
|
||||||
|
expectData: []byte{},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "DeletedInFlight NoReaders",
|
||||||
|
inputReader: func(t *testing.T) io.ReadCloser {
|
||||||
|
r, err := readers.NewVersionedBackupReader(
|
||||||
|
readers.SerializationFormat{
|
||||||
|
Version: 42,
|
||||||
|
DelInFlight: true,
|
||||||
|
})
|
||||||
|
require.NoError(t, err, "making reader: %v", clues.ToCore(err))
|
||||||
|
|
||||||
|
return r
|
||||||
|
},
|
||||||
|
expectErr: require.NoError,
|
||||||
|
expectVersion: 42,
|
||||||
|
expectDelInFlight: true,
|
||||||
|
expectData: []byte{},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "DeletedInFlight SingleReader",
|
||||||
|
inputReader: func(t *testing.T) io.ReadCloser {
|
||||||
|
// Need to specify the bytes manually because the backup reader won't
|
||||||
|
// allow creating something with the deleted flag and data.
|
||||||
|
return io.NopCloser(bytes.NewReader(append(
|
||||||
|
[]byte{0x80, 0x0, 0x0, 42},
|
||||||
|
baseData...)))
|
||||||
|
},
|
||||||
|
expectErr: require.NoError,
|
||||||
|
expectVersion: 42,
|
||||||
|
expectDelInFlight: true,
|
||||||
|
expectData: baseData,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "NoVersion SingleReader",
|
||||||
|
inputReader: func(t *testing.T) io.ReadCloser {
|
||||||
|
r, err := readers.NewVersionedBackupReader(
|
||||||
|
readers.SerializationFormat{},
|
||||||
|
io.NopCloser(bytes.NewReader(baseData)))
|
||||||
|
require.NoError(t, err, "making reader: %v", clues.ToCore(err))
|
||||||
|
|
||||||
|
return r
|
||||||
|
},
|
||||||
|
expectErr: require.NoError,
|
||||||
|
expectData: baseData,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "SingleReader",
|
||||||
|
inputReader: func(t *testing.T) io.ReadCloser {
|
||||||
|
r, err := readers.NewVersionedBackupReader(
|
||||||
|
readers.SerializationFormat{Version: 42},
|
||||||
|
io.NopCloser(bytes.NewReader(baseData)))
|
||||||
|
require.NoError(t, err, "making reader: %v", clues.ToCore(err))
|
||||||
|
|
||||||
|
return r
|
||||||
|
},
|
||||||
|
expectErr: require.NoError,
|
||||||
|
expectVersion: 42,
|
||||||
|
expectData: baseData,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "ShortReads SingleReader",
|
||||||
|
inputReader: func(t *testing.T) io.ReadCloser {
|
||||||
|
r, err := readers.NewVersionedBackupReader(
|
||||||
|
readers.SerializationFormat{Version: 42},
|
||||||
|
io.NopCloser(bytes.NewReader(baseData)))
|
||||||
|
require.NoError(t, err, "making reader: %v", clues.ToCore(err))
|
||||||
|
|
||||||
|
r = &shortReader{
|
||||||
|
maxReadLen: 3,
|
||||||
|
ReadCloser: r,
|
||||||
|
}
|
||||||
|
|
||||||
|
return r
|
||||||
|
},
|
||||||
|
expectErr: require.NoError,
|
||||||
|
expectVersion: 42,
|
||||||
|
expectData: baseData,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "MultipleReaders",
|
||||||
|
inputReader: func(t *testing.T) io.ReadCloser {
|
||||||
|
r, err := readers.NewVersionedBackupReader(
|
||||||
|
readers.SerializationFormat{Version: 42},
|
||||||
|
io.NopCloser(bytes.NewReader(baseData)),
|
||||||
|
io.NopCloser(bytes.NewReader(baseData)))
|
||||||
|
require.NoError(t, err, "making reader: %v", clues.ToCore(err))
|
||||||
|
|
||||||
|
return r
|
||||||
|
},
|
||||||
|
expectErr: require.NoError,
|
||||||
|
expectVersion: 42,
|
||||||
|
expectData: append(slices.Clone(baseData), baseData...),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "EmptyReader Errors",
|
||||||
|
inputReader: func(t *testing.T) io.ReadCloser {
|
||||||
|
return io.NopCloser(bytes.NewReader([]byte{}))
|
||||||
|
},
|
||||||
|
expectErr: require.Error,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "TruncatedVersion Errors",
|
||||||
|
inputReader: func(t *testing.T) io.ReadCloser {
|
||||||
|
return io.NopCloser(bytes.NewReader([]byte{0x80, 0x0}))
|
||||||
|
},
|
||||||
|
expectErr: require.Error,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range table {
|
||||||
|
suite.Run(test.name, func() {
|
||||||
|
t := suite.T()
|
||||||
|
|
||||||
|
r, err := readers.NewVersionedRestoreReader(test.inputReader(t))
|
||||||
|
test.expectErr(t, err, "getting restore reader: %v", clues.ToCore(err))
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
err := r.Close()
|
||||||
|
assert.NoError(t, err, "closing reader: %v", clues.ToCore(err))
|
||||||
|
}()
|
||||||
|
|
||||||
|
assert.Equal(
|
||||||
|
t,
|
||||||
|
test.expectVersion,
|
||||||
|
r.Format().Version,
|
||||||
|
"version")
|
||||||
|
assert.Equal(
|
||||||
|
t,
|
||||||
|
test.expectDelInFlight,
|
||||||
|
r.Format().DelInFlight,
|
||||||
|
"deleted in flight")
|
||||||
|
|
||||||
|
buf, err := io.ReadAll(r)
|
||||||
|
require.NoError(t, err, "reading serialized data: %v", clues.ToCore(err))
|
||||||
|
|
||||||
|
// Need to use equal because output is order-sensitive.
|
||||||
|
assert.Equal(t, test.expectData, buf, "serialized data")
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
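For orientation before the service-side changes below, here is a minimal round-trip sketch of the exported API the tests above exercise. It assumes only the functions added in serialization_version.go and the import path shown in this commit.

package main

import (
    "bytes"
    "fmt"
    "io"

    "github.com/alcionai/corso/src/internal/common/readers"
)

func main() {
    item := io.NopCloser(bytes.NewReader([]byte("hello world")))

    // Backup side: prefix the item data with the serialization header.
    br, err := readers.NewVersionedBackupReader(
        readers.SerializationFormat{Version: readers.DefaultSerializationVersion},
        item)
    if err != nil {
        panic(err)
    }

    // Restore side: consume the header and expose it via Format().
    rr, err := readers.NewVersionedRestoreReader(br)
    if err != nil {
        panic(err)
    }

    body, err := io.ReadAll(rr)
    if err != nil {
        panic(err)
    }

    fmt.Println(rr.Format().Version, rr.Format().DelInFlight, string(body))
    // Output: 1 false hello world
}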
@ -1,7 +1,6 @@
|
|||||||
package data
|
package data
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
|
||||||
"context"
|
"context"
|
||||||
"io"
|
"io"
|
||||||
"sync"
|
"sync"
|
||||||
@ -10,6 +9,7 @@ import (
|
|||||||
"github.com/alcionai/clues"
|
"github.com/alcionai/clues"
|
||||||
"github.com/spatialcurrent/go-lazy/pkg/lazy"
|
"github.com/spatialcurrent/go-lazy/pkg/lazy"
|
||||||
|
|
||||||
|
"github.com/alcionai/corso/src/internal/common/readers"
|
||||||
"github.com/alcionai/corso/src/pkg/backup/details"
|
"github.com/alcionai/corso/src/pkg/backup/details"
|
||||||
"github.com/alcionai/corso/src/pkg/fault"
|
"github.com/alcionai/corso/src/pkg/fault"
|
||||||
"github.com/alcionai/corso/src/pkg/logger"
|
"github.com/alcionai/corso/src/pkg/logger"
|
||||||
@ -46,12 +46,19 @@ func NewUnindexedPrefetchedItem(
|
|||||||
reader io.ReadCloser,
|
reader io.ReadCloser,
|
||||||
itemID string,
|
itemID string,
|
||||||
modTime time.Time,
|
modTime time.Time,
|
||||||
) Item {
|
) (*unindexedPrefetchedItem, error) {
|
||||||
|
r, err := readers.NewVersionedBackupReader(
|
||||||
|
readers.SerializationFormat{Version: readers.DefaultSerializationVersion},
|
||||||
|
reader)
|
||||||
|
if err != nil {
|
||||||
|
return nil, clues.Stack(err)
|
||||||
|
}
|
||||||
|
|
||||||
return &unindexedPrefetchedItem{
|
return &unindexedPrefetchedItem{
|
||||||
id: itemID,
|
id: itemID,
|
||||||
reader: reader,
|
reader: r,
|
||||||
modTime: modTime,
|
modTime: modTime,
|
||||||
}
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// unindexedPrefetchedItem represents a single item retrieved from the remote
|
// unindexedPrefetchedItem represents a single item retrieved from the remote
|
||||||
@ -92,15 +99,16 @@ func NewPrefetchedItem(
|
|||||||
reader io.ReadCloser,
|
reader io.ReadCloser,
|
||||||
itemID string,
|
itemID string,
|
||||||
info details.ItemInfo,
|
info details.ItemInfo,
|
||||||
) Item {
|
) (*prefetchedItem, error) {
|
||||||
return &prefetchedItem{
|
inner, err := NewUnindexedPrefetchedItem(reader, itemID, info.Modified())
|
||||||
unindexedPrefetchedItem: unindexedPrefetchedItem{
|
if err != nil {
|
||||||
id: itemID,
|
return nil, clues.Stack(err)
|
||||||
reader: reader,
|
|
||||||
modTime: info.Modified(),
|
|
||||||
},
|
|
||||||
info: info,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
return &prefetchedItem{
|
||||||
|
unindexedPrefetchedItem: inner,
|
||||||
|
info: info,
|
||||||
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// prefetchedItem represents a single item retrieved from the remote service.
|
// prefetchedItem represents a single item retrieved from the remote service.
|
||||||
@ -108,7 +116,7 @@ func NewPrefetchedItem(
|
|||||||
// This item implements ItemInfo so it should be used for things that need to
|
// This item implements ItemInfo so it should be used for things that need to
|
||||||
// appear in backup details.
|
// appear in backup details.
|
||||||
type prefetchedItem struct {
|
type prefetchedItem struct {
|
||||||
unindexedPrefetchedItem
|
*unindexedPrefetchedItem
|
||||||
info details.ItemInfo
|
info details.ItemInfo
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -129,7 +137,7 @@ func NewUnindexedLazyItem(
|
|||||||
itemID string,
|
itemID string,
|
||||||
modTime time.Time,
|
modTime time.Time,
|
||||||
errs *fault.Bus,
|
errs *fault.Bus,
|
||||||
) Item {
|
) *unindexedLazyItem {
|
||||||
return &unindexedLazyItem{
|
return &unindexedLazyItem{
|
||||||
ctx: ctx,
|
ctx: ctx,
|
||||||
id: itemID,
|
id: itemID,
|
||||||
@ -182,6 +190,10 @@ func (i *unindexedLazyItem) ToReader() io.ReadCloser {
|
|||||||
return nil, clues.Stack(err)
|
return nil, clues.Stack(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
format := readers.SerializationFormat{
|
||||||
|
Version: readers.DefaultSerializationVersion,
|
||||||
|
}
|
||||||
|
|
||||||
// If an item was deleted then return an empty file so we don't fail the
|
// If an item was deleted then return an empty file so we don't fail the
|
||||||
// backup and return a sentinel error when asked for ItemInfo so we don't
|
// backup and return a sentinel error when asked for ItemInfo so we don't
|
||||||
// display the item in the backup.
|
// display the item in the backup.
|
||||||
@ -193,13 +205,17 @@ func (i *unindexedLazyItem) ToReader() io.ReadCloser {
|
|||||||
logger.Ctx(i.ctx).Info("item not found")
|
logger.Ctx(i.ctx).Info("item not found")
|
||||||
|
|
||||||
i.delInFlight = true
|
i.delInFlight = true
|
||||||
|
format.DelInFlight = true
|
||||||
|
r, err := readers.NewVersionedBackupReader(format)
|
||||||
|
|
||||||
return io.NopCloser(bytes.NewReader([]byte{})), nil
|
return r, clues.Stack(err).OrNil()
|
||||||
}
|
}
|
||||||
|
|
||||||
i.info = info
|
i.info = info
|
||||||
|
|
||||||
return reader, nil
|
r, err := readers.NewVersionedBackupReader(format, reader)
|
||||||
|
|
||||||
|
return r, clues.Stack(err).OrNil()
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -217,15 +233,14 @@ func NewLazyItem(
|
|||||||
itemID string,
|
itemID string,
|
||||||
modTime time.Time,
|
modTime time.Time,
|
||||||
errs *fault.Bus,
|
errs *fault.Bus,
|
||||||
) Item {
|
) *lazyItem {
|
||||||
return &lazyItem{
|
return &lazyItem{
|
||||||
unindexedLazyItem: unindexedLazyItem{
|
unindexedLazyItem: NewUnindexedLazyItem(
|
||||||
ctx: ctx,
|
ctx,
|
||||||
id: itemID,
|
itemGetter,
|
||||||
itemGetter: itemGetter,
|
itemID,
|
||||||
modTime: modTime,
|
modTime,
|
||||||
errs: errs,
|
errs),
|
||||||
},
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -236,7 +251,7 @@ func NewLazyItem(
|
|||||||
// This item implements ItemInfo so it should be used for things that need to
|
// This item implements ItemInfo so it should be used for things that need to
|
||||||
// appear in backup details.
|
// appear in backup details.
|
||||||
type lazyItem struct {
|
type lazyItem struct {
|
||||||
unindexedLazyItem
|
*unindexedLazyItem
|
||||||
}
|
}
|
||||||
|
|
||||||
func (i *lazyItem) Info() (details.ItemInfo, error) {
|
func (i *lazyItem) Info() (details.ItemInfo, error) {
|
||||||
|
|||||||
@ -12,6 +12,7 @@ import (
|
|||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
"github.com/stretchr/testify/suite"
|
"github.com/stretchr/testify/suite"
|
||||||
|
|
||||||
|
"github.com/alcionai/corso/src/internal/common/readers"
|
||||||
"github.com/alcionai/corso/src/internal/data"
|
"github.com/alcionai/corso/src/internal/data"
|
||||||
"github.com/alcionai/corso/src/internal/tester"
|
"github.com/alcionai/corso/src/internal/tester"
|
||||||
"github.com/alcionai/corso/src/pkg/backup/details"
|
"github.com/alcionai/corso/src/pkg/backup/details"
|
||||||
@ -50,11 +51,15 @@ func TestItemUnitSuite(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (suite *ItemUnitSuite) TestUnindexedPrefetchedItem() {
|
func (suite *ItemUnitSuite) TestUnindexedPrefetchedItem() {
|
||||||
prefetch := data.NewUnindexedPrefetchedItem(
|
prefetch, err := data.NewUnindexedPrefetchedItem(
|
||||||
io.NopCloser(bytes.NewReader([]byte{})),
|
io.NopCloser(bytes.NewReader([]byte{})),
|
||||||
"foo",
|
"foo",
|
||||||
time.Time{})
|
time.Time{})
|
||||||
_, ok := prefetch.(data.ItemInfo)
|
require.NoError(suite.T(), err, clues.ToCore(err))
|
||||||
|
|
||||||
|
var item data.Item = prefetch
|
||||||
|
|
||||||
|
_, ok := item.(data.ItemInfo)
|
||||||
assert.False(suite.T(), ok, "unindexedPrefetchedItem implements Info()")
|
assert.False(suite.T(), ok, "unindexedPrefetchedItem implements Info()")
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -70,7 +75,10 @@ func (suite *ItemUnitSuite) TestUnindexedLazyItem() {
|
|||||||
"foo",
|
"foo",
|
||||||
time.Time{},
|
time.Time{},
|
||||||
fault.New(true))
|
fault.New(true))
|
||||||
_, ok := lazy.(data.ItemInfo)
|
|
||||||
|
var item data.Item = lazy
|
||||||
|
|
||||||
|
_, ok := item.(data.ItemInfo)
|
||||||
assert.False(t, ok, "unindexedLazyItem implements Info()")
|
assert.False(t, ok, "unindexedLazyItem implements Info()")
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -140,18 +148,29 @@ func (suite *ItemUnitSuite) TestPrefetchedItem() {
|
|||||||
suite.Run(test.name, func() {
|
suite.Run(test.name, func() {
|
||||||
t := suite.T()
|
t := suite.T()
|
||||||
|
|
||||||
item := data.NewPrefetchedItem(test.reader, id, test.info)
|
item, err := data.NewPrefetchedItem(test.reader, id, test.info)
|
||||||
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
assert.Equal(t, id, item.ID(), "ID")
|
assert.Equal(t, id, item.ID(), "ID")
|
||||||
assert.False(t, item.Deleted(), "deleted")
|
assert.False(t, item.Deleted(), "deleted")
|
||||||
assert.Equal(
|
assert.Equal(
|
||||||
t,
|
t,
|
||||||
test.info.Modified(),
|
test.info.Modified(),
|
||||||
item.(data.ItemModTime).ModTime(),
|
item.ModTime(),
|
||||||
"mod time")
|
"mod time")
|
||||||
|
|
||||||
readData, err := io.ReadAll(item.ToReader())
|
r, err := readers.NewVersionedRestoreReader(item.ToReader())
|
||||||
test.readErr(t, err, clues.ToCore(err), "read error")
|
require.NoError(t, err, "version error: %v", clues.ToCore(err))
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
assert.Equal(t, readers.DefaultSerializationVersion, r.Format().Version)
|
||||||
|
assert.False(t, r.Format().DelInFlight)
|
||||||
|
|
||||||
|
readData, err := io.ReadAll(r)
|
||||||
|
test.readErr(t, err, "read error: %v", clues.ToCore(err))
|
||||||
assert.Equal(t, test.expectData, readData, "read data")
|
assert.Equal(t, test.expectData, readData, "read data")
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
@ -194,6 +213,7 @@ func (suite *ItemUnitSuite) TestLazyItem() {
|
|||||||
table := []struct {
|
table := []struct {
|
||||||
name string
|
name string
|
||||||
mid *mockItemDataGetter
|
mid *mockItemDataGetter
|
||||||
|
versionErr assert.ErrorAssertionFunc
|
||||||
readErr assert.ErrorAssertionFunc
|
readErr assert.ErrorAssertionFunc
|
||||||
infoErr assert.ErrorAssertionFunc
|
infoErr assert.ErrorAssertionFunc
|
||||||
expectData []byte
|
expectData []byte
|
||||||
@ -205,6 +225,7 @@ func (suite *ItemUnitSuite) TestLazyItem() {
|
|||||||
reader: io.NopCloser(bytes.NewReader([]byte{})),
|
reader: io.NopCloser(bytes.NewReader([]byte{})),
|
||||||
info: &details.ItemInfo{Exchange: &details.ExchangeInfo{Modified: now}},
|
info: &details.ItemInfo{Exchange: &details.ExchangeInfo{Modified: now}},
|
||||||
},
|
},
|
||||||
|
versionErr: assert.NoError,
|
||||||
readErr: assert.NoError,
|
readErr: assert.NoError,
|
||||||
infoErr: assert.NoError,
|
infoErr: assert.NoError,
|
||||||
expectData: []byte{},
|
expectData: []byte{},
|
||||||
@ -215,6 +236,7 @@ func (suite *ItemUnitSuite) TestLazyItem() {
|
|||||||
reader: io.NopCloser(bytes.NewReader(baseData)),
|
reader: io.NopCloser(bytes.NewReader(baseData)),
|
||||||
info: &details.ItemInfo{Exchange: &details.ExchangeInfo{Modified: now}},
|
info: &details.ItemInfo{Exchange: &details.ExchangeInfo{Modified: now}},
|
||||||
},
|
},
|
||||||
|
versionErr: assert.NoError,
|
||||||
readErr: assert.NoError,
|
readErr: assert.NoError,
|
||||||
infoErr: assert.NoError,
|
infoErr: assert.NoError,
|
||||||
expectData: baseData,
|
expectData: baseData,
|
||||||
@ -225,6 +247,7 @@ func (suite *ItemUnitSuite) TestLazyItem() {
|
|||||||
reader: io.NopCloser(bytes.NewReader(baseData)),
|
reader: io.NopCloser(bytes.NewReader(baseData)),
|
||||||
info: &details.ItemInfo{OneDrive: &details.OneDriveInfo{Modified: now}},
|
info: &details.ItemInfo{OneDrive: &details.OneDriveInfo{Modified: now}},
|
||||||
},
|
},
|
||||||
|
versionErr: assert.NoError,
|
||||||
readErr: assert.NoError,
|
readErr: assert.NoError,
|
||||||
infoErr: assert.NoError,
|
infoErr: assert.NoError,
|
||||||
expectData: baseData,
|
expectData: baseData,
|
||||||
@ -234,6 +257,7 @@ func (suite *ItemUnitSuite) TestLazyItem() {
|
|||||||
mid: &mockItemDataGetter{
|
mid: &mockItemDataGetter{
|
||||||
err: assert.AnError,
|
err: assert.AnError,
|
||||||
},
|
},
|
||||||
|
versionErr: assert.Error,
|
||||||
readErr: assert.Error,
|
readErr: assert.Error,
|
||||||
infoErr: assert.Error,
|
infoErr: assert.Error,
|
||||||
expectData: []byte{},
|
expectData: []byte{},
|
||||||
@ -249,6 +273,7 @@ func (suite *ItemUnitSuite) TestLazyItem() {
|
|||||||
},
|
},
|
||||||
info: &details.ItemInfo{OneDrive: &details.OneDriveInfo{Modified: now}},
|
info: &details.ItemInfo{OneDrive: &details.OneDriveInfo{Modified: now}},
|
||||||
},
|
},
|
||||||
|
versionErr: assert.NoError,
|
||||||
readErr: assert.Error,
|
readErr: assert.Error,
|
||||||
infoErr: assert.NoError,
|
infoErr: assert.NoError,
|
||||||
expectData: baseData[:5],
|
expectData: baseData[:5],
|
||||||
@ -278,15 +303,25 @@ func (suite *ItemUnitSuite) TestLazyItem() {
|
|||||||
assert.Equal(
|
assert.Equal(
|
||||||
t,
|
t,
|
||||||
now,
|
now,
|
||||||
item.(data.ItemModTime).ModTime(),
|
item.ModTime(),
|
||||||
"mod time")
|
"mod time")
|
||||||
|
|
||||||
// Read data to execute lazy reader.
|
// Read data to execute lazy reader.
|
||||||
readData, err := io.ReadAll(item.ToReader())
|
r, err := readers.NewVersionedRestoreReader(item.ToReader())
|
||||||
|
test.versionErr(t, err, "version error: %v", clues.ToCore(err))
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
assert.Equal(t, readers.DefaultSerializationVersion, r.Format().Version)
|
||||||
|
assert.False(t, r.Format().DelInFlight)
|
||||||
|
|
||||||
|
readData, err := io.ReadAll(r)
|
||||||
test.readErr(t, err, clues.ToCore(err), "read error")
|
test.readErr(t, err, clues.ToCore(err), "read error")
|
||||||
assert.Equal(t, test.expectData, readData, "read data")
|
assert.Equal(t, test.expectData, readData, "read data")
|
||||||
|
|
||||||
_, err = item.(data.ItemInfo).Info()
|
_, err = item.Info()
|
||||||
test.infoErr(t, err, "Info(): %v", clues.ToCore(err))
|
test.infoErr(t, err, "Info(): %v", clues.ToCore(err))
|
||||||
|
|
||||||
e := errs.Errors()
|
e := errs.Errors()
|
||||||
@ -326,15 +361,21 @@ func (suite *ItemUnitSuite) TestLazyItem_DeletedInFlight() {
|
|||||||
assert.Equal(
|
assert.Equal(
|
||||||
t,
|
t,
|
||||||
now,
|
now,
|
||||||
item.(data.ItemModTime).ModTime(),
|
item.ModTime(),
|
||||||
"mod time")
|
"mod time")
|
||||||
|
|
||||||
// Read data to execute lazy reader.
|
// Read data to execute lazy reader.
|
||||||
readData, err := io.ReadAll(item.ToReader())
|
r, err := readers.NewVersionedRestoreReader(item.ToReader())
|
||||||
|
require.NoError(t, err, "version error: %v", clues.ToCore(err))
|
||||||
|
|
||||||
|
assert.Equal(t, readers.DefaultSerializationVersion, r.Format().Version)
|
||||||
|
assert.True(t, r.Format().DelInFlight)
|
||||||
|
|
||||||
|
readData, err := io.ReadAll(r)
|
||||||
require.NoError(t, err, clues.ToCore(err), "read error")
|
require.NoError(t, err, clues.ToCore(err), "read error")
|
||||||
assert.Empty(t, readData, "read data")
|
assert.Empty(t, readData, "read data")
|
||||||
|
|
||||||
_, err = item.(data.ItemInfo).Info()
|
_, err = item.Info()
|
||||||
assert.ErrorIs(t, err, data.ErrNotFound, "Info() error")
|
assert.ErrorIs(t, err, data.ErrNotFound, "Info() error")
|
||||||
|
|
||||||
e := errs.Errors()
|
e := errs.Errors()
|
||||||
@ -366,9 +407,9 @@ func (suite *ItemUnitSuite) TestLazyItem_InfoBeforeReadErrors() {
|
|||||||
assert.Equal(
|
assert.Equal(
|
||||||
t,
|
t,
|
||||||
now,
|
now,
|
||||||
item.(data.ItemModTime).ModTime(),
|
item.ModTime(),
|
||||||
"mod time")
|
"mod time")
|
||||||
|
|
||||||
_, err := item.(data.ItemInfo).Info()
|
_, err := item.Info()
|
||||||
assert.Error(t, err, "Info() error")
|
assert.Error(t, err, "Info() error")
|
||||||
}
|
}
|
||||||
|
|||||||
@ -3,8 +3,13 @@ package mock
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"io"
|
"io"
|
||||||
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/alcionai/clues"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
|
"github.com/alcionai/corso/src/internal/common/readers"
|
||||||
"github.com/alcionai/corso/src/internal/data"
|
"github.com/alcionai/corso/src/internal/data"
|
||||||
"github.com/alcionai/corso/src/pkg/backup/details"
|
"github.com/alcionai/corso/src/pkg/backup/details"
|
||||||
"github.com/alcionai/corso/src/pkg/fault"
|
"github.com/alcionai/corso/src/pkg/fault"
|
||||||
@ -163,3 +168,106 @@ func (rc RestoreCollection) FetchItemByName(
|
|||||||
|
|
||||||
return res, nil
|
return res, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
_ data.BackupCollection = &versionedBackupCollection{}
|
||||||
|
_ data.RestoreCollection = &unversionedRestoreCollection{}
|
||||||
|
_ data.Item = &itemWrapper{}
|
||||||
|
)
|
||||||
|
|
||||||
|
type itemWrapper struct {
|
||||||
|
data.Item
|
||||||
|
reader io.ReadCloser
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i *itemWrapper) ToReader() io.ReadCloser {
|
||||||
|
return i.reader
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewUnversionedRestoreCollection(
|
||||||
|
t *testing.T,
|
||||||
|
col data.RestoreCollection,
|
||||||
|
) *unversionedRestoreCollection {
|
||||||
|
return &unversionedRestoreCollection{
|
||||||
|
RestoreCollection: col,
|
||||||
|
t: t,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// unversionedRestoreCollection strips out version format headers on all items.
|
||||||
|
//
|
||||||
|
// Wrap data.RestoreCollections in this type if you don't need access to the
|
||||||
|
// version format header during tests and you know the item readers can't return
|
||||||
|
// an error.
|
||||||
|
type unversionedRestoreCollection struct {
|
||||||
|
data.RestoreCollection
|
||||||
|
t *testing.T
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *unversionedRestoreCollection) Items(
|
||||||
|
ctx context.Context,
|
||||||
|
errs *fault.Bus,
|
||||||
|
) <-chan data.Item {
|
||||||
|
res := make(chan data.Item)
|
||||||
|
go func() {
|
||||||
|
defer close(res)
|
||||||
|
|
||||||
|
for item := range c.RestoreCollection.Items(ctx, errs) {
|
||||||
|
r, err := readers.NewVersionedRestoreReader(item.ToReader())
|
||||||
|
require.NoError(c.t, err, clues.ToCore(err))
|
||||||
|
|
||||||
|
res <- &itemWrapper{
|
||||||
|
Item: item,
|
||||||
|
reader: r,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
return res
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewVersionedBackupCollection(
|
||||||
|
t *testing.T,
|
||||||
|
col data.BackupCollection,
|
||||||
|
) *versionedBackupCollection {
|
||||||
|
return &versionedBackupCollection{
|
||||||
|
BackupCollection: col,
|
||||||
|
t: t,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// versionedBackupCollection injects basic version information on all items.
|
||||||
|
//
|
||||||
|
// Wrap data.BackupCollections in this type if you don't need to explicitly set
|
||||||
|
// the version format header during tests, aren't trying to check reader error
|
||||||
|
// cases, and aren't populating backup details.
|
||||||
|
type versionedBackupCollection struct {
|
||||||
|
data.BackupCollection
|
||||||
|
t *testing.T
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *versionedBackupCollection) Items(
|
||||||
|
ctx context.Context,
|
||||||
|
errs *fault.Bus,
|
||||||
|
) <-chan data.Item {
|
||||||
|
res := make(chan data.Item)
|
||||||
|
go func() {
|
||||||
|
defer close(res)
|
||||||
|
|
||||||
|
for item := range c.BackupCollection.Items(ctx, errs) {
|
||||||
|
r, err := readers.NewVersionedBackupReader(
|
||||||
|
readers.SerializationFormat{
|
||||||
|
Version: readers.DefaultSerializationVersion,
|
||||||
|
},
|
||||||
|
item.ToReader())
|
||||||
|
require.NoError(c.t, err, clues.ToCore(err))
|
||||||
|
|
||||||
|
res <- &itemWrapper{
|
||||||
|
Item: item,
|
||||||
|
reader: r,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
return res
|
||||||
|
}
|
||||||
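For reference, a hedged sketch of how these two wrappers are intended to pair up in a test; backupCol and restoreCol are placeholders for collections the test already constructs elsewhere, and the package name is hypothetical.

package mytest

import (
    "testing"

    "github.com/alcionai/corso/src/internal/data"
    dataMock "github.com/alcionai/corso/src/internal/data/mock"
)

// wrapForVersioning injects the default version header on items fed into a
// backup and strips it again from items coming out of a restore, so the rest
// of the test can ignore the header entirely.
func wrapForVersioning(
    t *testing.T,
    backupCol data.BackupCollection,
    restoreCol data.RestoreCollection,
) (data.BackupCollection, data.RestoreCollection) {
    versioned := dataMock.NewVersionedBackupCollection(t, backupCol)
    plain := dataMock.NewUnversionedRestoreCollection(t, restoreCol)

    return versioned, plain
}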
|
|||||||
@ -580,6 +580,10 @@ func (w *conn) SnapshotRoot(man *snapshot.Manifest) (fs.Entry, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (w *conn) UpdatePassword(ctx context.Context, password string, opts repository.Options) error {
|
func (w *conn) UpdatePassword(ctx context.Context, password string, opts repository.Options) error {
|
||||||
|
if len(password) <= 0 {
|
||||||
|
return clues.New("empty password provided")
|
||||||
|
}
|
||||||
|
|
||||||
kopiaRef := NewConn(w.storage)
|
kopiaRef := NewConn(w.storage)
|
||||||
if err := kopiaRef.Connect(ctx, opts); err != nil {
|
if err := kopiaRef.Connect(ctx, opts); err != nil {
|
||||||
return clues.Wrap(err, "connecting kopia client")
|
return clues.Wrap(err, "connecting kopia client")
|
||||||
@ -587,8 +591,10 @@ func (w *conn) UpdatePassword(ctx context.Context, password string, opts reposit
|
|||||||
|
|
||||||
defer kopiaRef.Close(ctx)
|
defer kopiaRef.Close(ctx)
|
||||||
|
|
||||||
repository := kopiaRef.Repository.(repo.DirectRepository)
|
kopiaRepo := kopiaRef.Repository.(repo.DirectRepository)
|
||||||
err := repository.FormatManager().ChangePassword(ctx, password)
|
if err := kopiaRepo.FormatManager().ChangePassword(ctx, password); err != nil {
|
||||||
|
return clues.Wrap(err, "unable to update password")
|
||||||
|
}
|
||||||
|
|
||||||
return errors.Wrap(err, "unable to update password")
|
return nil
|
||||||
}
|
}
|
||||||
|
|||||||
@ -22,6 +22,20 @@ import (
|
|||||||
storeTD "github.com/alcionai/corso/src/pkg/storage/testdata"
|
storeTD "github.com/alcionai/corso/src/pkg/storage/testdata"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
func openLocalKopiaRepo(
|
||||||
|
t tester.TestT,
|
||||||
|
ctx context.Context, //revive:disable-line:context-as-argument
|
||||||
|
) (*conn, error) {
|
||||||
|
st := storeTD.NewFilesystemStorage(t)
|
||||||
|
|
||||||
|
k := NewConn(st)
|
||||||
|
if err := k.Initialize(ctx, repository.Options{}, repository.Retention{}); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return k, nil
|
||||||
|
}
|
||||||
|
|
||||||
func openKopiaRepo(
|
func openKopiaRepo(
|
||||||
t tester.TestT,
|
t tester.TestT,
|
||||||
ctx context.Context, //revive:disable-line:context-as-argument
|
ctx context.Context, //revive:disable-line:context-as-argument
|
||||||
@ -81,7 +95,7 @@ func (suite *WrapperIntegrationSuite) TestRepoExistsError() {
|
|||||||
ctx, flush := tester.NewContext(t)
|
ctx, flush := tester.NewContext(t)
|
||||||
defer flush()
|
defer flush()
|
||||||
|
|
||||||
st := storeTD.NewPrefixedS3Storage(t)
|
st := storeTD.NewFilesystemStorage(t)
|
||||||
k := NewConn(st)
|
k := NewConn(st)
|
||||||
|
|
||||||
err := k.Initialize(ctx, repository.Options{}, repository.Retention{})
|
err := k.Initialize(ctx, repository.Options{}, repository.Retention{})
|
||||||
@ -101,7 +115,7 @@ func (suite *WrapperIntegrationSuite) TestBadProviderErrors() {
|
|||||||
ctx, flush := tester.NewContext(t)
|
ctx, flush := tester.NewContext(t)
|
||||||
defer flush()
|
defer flush()
|
||||||
|
|
||||||
st := storeTD.NewPrefixedS3Storage(t)
|
st := storeTD.NewFilesystemStorage(t)
|
||||||
st.Provider = storage.ProviderUnknown
|
st.Provider = storage.ProviderUnknown
|
||||||
k := NewConn(st)
|
k := NewConn(st)
|
||||||
|
|
||||||
@ -115,7 +129,7 @@ func (suite *WrapperIntegrationSuite) TestConnectWithoutInitErrors() {
|
|||||||
ctx, flush := tester.NewContext(t)
|
ctx, flush := tester.NewContext(t)
|
||||||
defer flush()
|
defer flush()
|
||||||
|
|
||||||
st := storeTD.NewPrefixedS3Storage(t)
|
st := storeTD.NewFilesystemStorage(t)
|
||||||
k := NewConn(st)
|
k := NewConn(st)
|
||||||
|
|
||||||
err := k.Connect(ctx, repository.Options{})
|
err := k.Connect(ctx, repository.Options{})
|
||||||
@ -408,7 +422,7 @@ func (suite *WrapperIntegrationSuite) TestSetUserAndHost() {
|
|||||||
Host: "bar",
|
Host: "bar",
|
||||||
}
|
}
|
||||||
|
|
||||||
st := storeTD.NewPrefixedS3Storage(t)
|
st := storeTD.NewFilesystemStorage(t)
|
||||||
k := NewConn(st)
|
k := NewConn(st)
|
||||||
|
|
||||||
err := k.Initialize(ctx, opts, repository.Retention{})
|
err := k.Initialize(ctx, opts, repository.Retention{})
|
||||||
|
|||||||
@ -7,6 +7,7 @@ import (
|
|||||||
"github.com/alcionai/clues"
|
"github.com/alcionai/clues"
|
||||||
"github.com/kopia/kopia/fs"
|
"github.com/kopia/kopia/fs"
|
||||||
|
|
||||||
|
"github.com/alcionai/corso/src/internal/common/readers"
|
||||||
"github.com/alcionai/corso/src/internal/data"
|
"github.com/alcionai/corso/src/internal/data"
|
||||||
"github.com/alcionai/corso/src/pkg/fault"
|
"github.com/alcionai/corso/src/pkg/fault"
|
||||||
"github.com/alcionai/corso/src/pkg/logger"
|
"github.com/alcionai/corso/src/pkg/logger"
|
||||||
@ -16,6 +17,7 @@ import (
|
|||||||
var (
|
var (
|
||||||
_ data.RestoreCollection = &kopiaDataCollection{}
|
_ data.RestoreCollection = &kopiaDataCollection{}
|
||||||
_ data.Item = &kopiaDataStream{}
|
_ data.Item = &kopiaDataStream{}
|
||||||
|
_ data.ItemSize = &kopiaDataStream{}
|
||||||
)
|
)
|
||||||
|
|
||||||
type kopiaDataCollection struct {
|
type kopiaDataCollection struct {
|
||||||
@ -23,7 +25,7 @@ type kopiaDataCollection struct {
|
|||||||
dir fs.Directory
|
dir fs.Directory
|
||||||
items []string
|
items []string
|
||||||
counter ByteCounter
|
counter ByteCounter
|
||||||
expectedVersion uint32
|
expectedVersion readers.SerializationVersion
|
||||||
}
|
}
|
||||||
|
|
||||||
func (kdc *kopiaDataCollection) Items(
|
func (kdc *kopiaDataCollection) Items(
|
||||||
@ -102,7 +104,7 @@ func (kdc kopiaDataCollection) FetchItemByName(
|
|||||||
return nil, clues.New("object is not a file").WithClues(ctx)
|
return nil, clues.New("object is not a file").WithClues(ctx)
|
||||||
}
|
}
|
||||||
|
|
||||||
size := f.Size() - int64(versionSize)
|
size := f.Size() - int64(readers.VersionFormatSize)
|
||||||
if size < 0 {
|
if size < 0 {
|
||||||
logger.Ctx(ctx).Infow("negative file size; resetting to 0", "file_size", size)
|
logger.Ctx(ctx).Infow("negative file size; resetting to 0", "file_size", size)
|
||||||
|
|
||||||
@ -118,13 +120,32 @@ func (kdc kopiaDataCollection) FetchItemByName(
|
|||||||
return nil, clues.Wrap(err, "opening file").WithClues(ctx)
|
return nil, clues.Wrap(err, "opening file").WithClues(ctx)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TODO(ashmrtn): Remove this when individual services implement checks for
|
||||||
|
// version and deleted items.
|
||||||
|
rr, err := readers.NewVersionedRestoreReader(r)
|
||||||
|
if err != nil {
|
||||||
|
return nil, clues.Stack(err).WithClues(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
if rr.Format().Version != kdc.expectedVersion {
|
||||||
|
return nil, clues.New("unexpected data format").
|
||||||
|
WithClues(ctx).
|
||||||
|
With(
|
||||||
|
"read_version", rr.Format().Version,
|
||||||
|
"expected_version", kdc.expectedVersion)
|
||||||
|
}
|
||||||
|
|
||||||
|
// This is a conservative check, but we shouldn't be seeing items that were
|
||||||
|
// deleted in flight during restores because there's no way to select them.
|
||||||
|
if rr.Format().DelInFlight {
|
||||||
|
return nil, clues.New("selected item marked as deleted in flight").
|
||||||
|
WithClues(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
return &kopiaDataStream{
|
return &kopiaDataStream{
|
||||||
id: name,
|
id: name,
|
||||||
reader: &restoreStreamReader{
|
reader: rr,
|
||||||
ReadCloser: r,
|
size: size,
|
||||||
expectedVersion: kdc.expectedVersion,
|
|
||||||
},
|
|
||||||
size: size,
|
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@ -13,6 +13,7 @@ import (
|
|||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
"github.com/stretchr/testify/suite"
|
"github.com/stretchr/testify/suite"
|
||||||
|
|
||||||
|
"github.com/alcionai/corso/src/internal/common/readers"
|
||||||
"github.com/alcionai/corso/src/internal/data"
|
"github.com/alcionai/corso/src/internal/data"
|
||||||
dataMock "github.com/alcionai/corso/src/internal/data/mock"
|
dataMock "github.com/alcionai/corso/src/internal/data/mock"
|
||||||
"github.com/alcionai/corso/src/internal/tester"
|
"github.com/alcionai/corso/src/internal/tester"
|
||||||
@ -121,25 +122,35 @@ func (suite *KopiaDataCollectionUnitSuite) TestReturnsStreams() {
|
|||||||
)
|
)
|
||||||
|
|
||||||
// Needs to be a function so the readers get refreshed each time.
|
// Needs to be a function so the readers get refreshed each time.
|
||||||
getLayout := func() fs.Directory {
|
getLayout := func(t *testing.T) fs.Directory {
|
||||||
|
format := readers.SerializationFormat{
|
||||||
|
Version: readers.DefaultSerializationVersion,
|
||||||
|
}
|
||||||
|
|
||||||
|
r1, err := readers.NewVersionedBackupReader(
|
||||||
|
format,
|
||||||
|
io.NopCloser(bytes.NewReader(files[0].data)))
|
||||||
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
|
r2, err := readers.NewVersionedBackupReader(
|
||||||
|
format,
|
||||||
|
io.NopCloser(bytes.NewReader(files[1].data)))
|
||||||
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
return virtualfs.NewStaticDirectory(encodeAsPath("foo"), []fs.Entry{
|
return virtualfs.NewStaticDirectory(encodeAsPath("foo"), []fs.Entry{
|
||||||
&mockFile{
|
&mockFile{
|
||||||
StreamingFile: virtualfs.StreamingFileFromReader(
|
StreamingFile: virtualfs.StreamingFileFromReader(
|
||||||
encodeAsPath(files[0].uuid),
|
encodeAsPath(files[0].uuid),
|
||||||
nil),
|
nil),
|
||||||
r: newBackupStreamReader(
|
r: r1,
|
||||||
serializationVersion,
|
size: int64(len(files[0].data) + readers.VersionFormatSize),
|
||||||
io.NopCloser(bytes.NewReader(files[0].data))),
|
|
||||||
size: int64(len(files[0].data) + versionSize),
|
|
||||||
},
|
},
|
||||||
&mockFile{
|
&mockFile{
|
||||||
StreamingFile: virtualfs.StreamingFileFromReader(
|
StreamingFile: virtualfs.StreamingFileFromReader(
|
||||||
encodeAsPath(files[1].uuid),
|
encodeAsPath(files[1].uuid),
|
||||||
nil),
|
nil),
|
||||||
r: newBackupStreamReader(
|
r: r2,
|
||||||
serializationVersion,
|
size: int64(len(files[1].data) + readers.VersionFormatSize),
|
||||||
io.NopCloser(bytes.NewReader(files[1].data))),
|
|
||||||
size: int64(len(files[1].data) + versionSize),
|
|
||||||
},
|
},
|
||||||
&mockFile{
|
&mockFile{
|
||||||
StreamingFile: virtualfs.StreamingFileFromReader(
|
StreamingFile: virtualfs.StreamingFileFromReader(
|
||||||
@ -224,10 +235,10 @@ func (suite *KopiaDataCollectionUnitSuite) TestReturnsStreams() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
c := kopiaDataCollection{
|
c := kopiaDataCollection{
|
||||||
dir: getLayout(),
|
dir: getLayout(t),
|
||||||
path: nil,
|
path: nil,
|
||||||
items: items,
|
items: items,
|
||||||
expectedVersion: serializationVersion,
|
expectedVersion: readers.DefaultSerializationVersion,
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@ -291,23 +302,34 @@ func (suite *KopiaDataCollectionUnitSuite) TestFetchItemByName() {
|
|||||||
|
|
||||||
// Needs to be a function so we can switch the serialization version as
|
// Needs to be a function so we can switch the serialization version as
|
||||||
// needed.
|
// needed.
|
||||||
getLayout := func(serVersion uint32) fs.Directory {
|
getLayout := func(
|
||||||
|
t *testing.T,
|
||||||
|
serVersion readers.SerializationVersion,
|
||||||
|
) fs.Directory {
|
||||||
|
format := readers.SerializationFormat{Version: serVersion}
|
||||||
|
|
||||||
|
r1, err := readers.NewVersionedBackupReader(
|
||||||
|
format,
|
||||||
|
io.NopCloser(bytes.NewReader([]byte(noErrFileData))))
|
||||||
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
|
r2, err := readers.NewVersionedBackupReader(
|
||||||
|
format,
|
||||||
|
errReader.ToReader())
|
||||||
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
return virtualfs.NewStaticDirectory(encodeAsPath(folder2), []fs.Entry{
|
return virtualfs.NewStaticDirectory(encodeAsPath(folder2), []fs.Entry{
|
||||||
&mockFile{
|
&mockFile{
|
||||||
StreamingFile: virtualfs.StreamingFileFromReader(
|
StreamingFile: virtualfs.StreamingFileFromReader(
|
||||||
encodeAsPath(noErrFileName),
|
encodeAsPath(noErrFileName),
|
||||||
nil),
|
nil),
|
||||||
r: newBackupStreamReader(
|
r: r1,
|
||||||
serVersion,
|
|
||||||
io.NopCloser(bytes.NewReader([]byte(noErrFileData)))),
|
|
||||||
},
|
},
|
||||||
&mockFile{
|
&mockFile{
|
||||||
StreamingFile: virtualfs.StreamingFileFromReader(
|
StreamingFile: virtualfs.StreamingFileFromReader(
|
||||||
encodeAsPath(errFileName),
|
encodeAsPath(errFileName),
|
||||||
nil),
|
nil),
|
||||||
r: newBackupStreamReader(
|
r: r2,
|
||||||
serVersion,
|
|
||||||
errReader.ToReader()),
|
|
||||||
},
|
},
|
||||||
&mockFile{
|
&mockFile{
|
||||||
StreamingFile: virtualfs.StreamingFileFromReader(
|
StreamingFile: virtualfs.StreamingFileFromReader(
|
||||||
@@ -330,7 +352,7 @@ func (suite *KopiaDataCollectionUnitSuite) TestFetchItemByName() {
 table := []struct {
 name string
 inputName string
-inputSerializationVersion uint32
+inputSerializationVersion readers.SerializationVersion
 expectedData []byte
 lookupErr assert.ErrorAssertionFunc
 readErr assert.ErrorAssertionFunc
@@ -339,7 +361,7 @@ func (suite *KopiaDataCollectionUnitSuite) TestFetchItemByName() {
 {
 name: "FileFound_NoError",
 inputName: noErrFileName,
-inputSerializationVersion: serializationVersion,
+inputSerializationVersion: readers.DefaultSerializationVersion,
 expectedData: []byte(noErrFileData),
 lookupErr: assert.NoError,
 readErr: assert.NoError,
@@ -347,21 +369,20 @@ func (suite *KopiaDataCollectionUnitSuite) TestFetchItemByName() {
 {
 name: "FileFound_ReadError",
 inputName: errFileName,
-inputSerializationVersion: serializationVersion,
+inputSerializationVersion: readers.DefaultSerializationVersion,
 lookupErr: assert.NoError,
 readErr: assert.Error,
 },
 {
 name: "FileFound_VersionError",
 inputName: noErrFileName,
-inputSerializationVersion: serializationVersion + 1,
-lookupErr: assert.NoError,
-readErr: assert.Error,
+inputSerializationVersion: readers.DefaultSerializationVersion + 1,
+lookupErr: assert.Error,
 },
 {
 name: "FileNotFound",
 inputName: "foo",
-inputSerializationVersion: serializationVersion + 1,
+inputSerializationVersion: readers.DefaultSerializationVersion + 1,
 lookupErr: assert.Error,
 notFoundErr: true,
 },
@@ -373,14 +394,14 @@ func (suite *KopiaDataCollectionUnitSuite) TestFetchItemByName() {
 ctx, flush := tester.NewContext(t)
 defer flush()

-root := getLayout(test.inputSerializationVersion)
+root := getLayout(t, test.inputSerializationVersion)
 c := &i64counter{}

 col := &kopiaDataCollection{
 path: pth,
 dir: root,
 counter: c,
-expectedVersion: serializationVersion,
+expectedVersion: readers.DefaultSerializationVersion,
 }

 s, err := col.FetchItemByName(ctx, test.inputName)
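Note: the test hunks above replace the package-local newBackupStreamReader with the shared readers package. The following is a minimal round-trip sketch, assuming only the API surface visible in these hunks (SerializationFormat, DefaultSerializationVersion, NewVersionedBackupReader, NewVersionedRestoreReader, Format()) and assuming the backup reader's output stream is exactly what the restore reader consumes; it is illustrative, not the canonical implementation.

// Sketch only: round-trips a payload through the versioned readers used above.
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/alcionai/corso/src/internal/common/readers"
)

func roundTrip(payload []byte) ([]byte, error) {
	format := readers.SerializationFormat{Version: readers.DefaultSerializationVersion}

	// Backup side: wrap the raw item stream; the reader prepends the
	// serialization format header to the bytes it emits.
	br, err := readers.NewVersionedBackupReader(
		format,
		io.NopCloser(bytes.NewReader(payload)))
	if err != nil {
		return nil, err
	}

	// Restore side: the reader validates and strips the header and exposes
	// it via Format(), which is what the updated tests assert against.
	rr, err := readers.NewVersionedRestoreReader(br)
	if err != nil {
		return nil, err
	}

	if rr.Format().Version != readers.DefaultSerializationVersion {
		return nil, fmt.Errorf("unexpected serialization version %v", rr.Format().Version)
	}

	return io.ReadAll(rr)
}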
@@ -12,6 +12,7 @@ import (
 "github.com/stretchr/testify/require"
 "github.com/stretchr/testify/suite"

+"github.com/alcionai/corso/src/internal/common/readers"
 "github.com/alcionai/corso/src/internal/data"
 "github.com/alcionai/corso/src/internal/m365/service/exchange/mock"
 "github.com/alcionai/corso/src/internal/tester"
@@ -150,20 +151,27 @@ func (suite *MergeCollectionUnitSuite) TestFetchItemByName() {
 require.NoError(suite.T(), err, clues.ToCore(err))

 // Needs to be a function so the readers get refreshed each time.
-layouts := []func() fs.Directory{
+layouts := []func(t *testing.T) fs.Directory{
 // Has the following;
 // - file1: data[0]
 // - errOpen: (error opening file)
-func() fs.Directory {
+func(t *testing.T) fs.Directory {
+format := readers.SerializationFormat{
+Version: readers.DefaultSerializationVersion,
+}
+
+r1, err := readers.NewVersionedBackupReader(
+format,
+io.NopCloser(bytes.NewReader(fileData1)))
+require.NoError(t, err, clues.ToCore(err))
+
 return virtualfs.NewStaticDirectory(encodeAsPath(colPaths[0]), []fs.Entry{
 &mockFile{
 StreamingFile: virtualfs.StreamingFileFromReader(
 encodeAsPath(fileName1),
 nil),
-r: newBackupStreamReader(
-serializationVersion,
-io.NopCloser(bytes.NewReader(fileData1))),
-size: int64(len(fileData1) + versionSize),
+r: r1,
+size: int64(len(fileData1) + readers.VersionFormatSize),
 },
 &mockFile{
 StreamingFile: virtualfs.StreamingFileFromReader(
@@ -178,34 +186,47 @@ func (suite *MergeCollectionUnitSuite) TestFetchItemByName() {
 // - file1: data[1]
 // - file2: data[0]
 // - errOpen: data[2]
-func() fs.Directory {
+func(t *testing.T) fs.Directory {
+format := readers.SerializationFormat{
+Version: readers.DefaultSerializationVersion,
+}
+
+r1, err := readers.NewVersionedBackupReader(
+format,
+io.NopCloser(bytes.NewReader(fileData2)))
+require.NoError(t, err, clues.ToCore(err))
+
+r2, err := readers.NewVersionedBackupReader(
+format,
+io.NopCloser(bytes.NewReader(fileData1)))
+require.NoError(t, err, clues.ToCore(err))
+
+r3, err := readers.NewVersionedBackupReader(
+format,
+io.NopCloser(bytes.NewReader(fileData3)))
+require.NoError(t, err, clues.ToCore(err))
+
 return virtualfs.NewStaticDirectory(encodeAsPath(colPaths[1]), []fs.Entry{
 &mockFile{
 StreamingFile: virtualfs.StreamingFileFromReader(
 encodeAsPath(fileName1),
 nil),
-r: newBackupStreamReader(
-serializationVersion,
-io.NopCloser(bytes.NewReader(fileData2))),
-size: int64(len(fileData2) + versionSize),
+r: r1,
+size: int64(len(fileData2) + readers.VersionFormatSize),
 },
 &mockFile{
 StreamingFile: virtualfs.StreamingFileFromReader(
 encodeAsPath(fileName2),
 nil),
-r: newBackupStreamReader(
-serializationVersion,
-io.NopCloser(bytes.NewReader(fileData1))),
-size: int64(len(fileData1) + versionSize),
+r: r2,
+size: int64(len(fileData1) + readers.VersionFormatSize),
 },
 &mockFile{
 StreamingFile: virtualfs.StreamingFileFromReader(
 encodeAsPath(fileOpenErrName),
 nil),
-r: newBackupStreamReader(
-serializationVersion,
-io.NopCloser(bytes.NewReader(fileData3))),
-size: int64(len(fileData3) + versionSize),
+r: r3,
+size: int64(len(fileData3) + readers.VersionFormatSize),
 },
 })
 },
@@ -257,9 +278,9 @@ func (suite *MergeCollectionUnitSuite) TestFetchItemByName() {
 for i, layout := range layouts {
 col := &kopiaDataCollection{
 path: pth,
-dir: layout(),
+dir: layout(t),
 counter: c,
-expectedVersion: serializationVersion,
+expectedVersion: readers.DefaultSerializationVersion,
 }

 err := dc.addCollection(colPaths[i], col)
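A side effect visible in the MergeCollection hunks: mock file sizes are now computed against readers.VersionFormatSize rather than the deleted versionSize variable. A tiny sketch of that bookkeeping, assuming VersionFormatSize is the byte length of the prepended header; the helper name is hypothetical.

// Hypothetical helper mirroring the size math in the mocks above: the stored
// stream is the raw payload plus the fixed-size serialization header.
func storedSize(payload []byte) int64 {
	return int64(len(payload) + readers.VersionFormatSize)
}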
@@ -29,7 +29,7 @@ type fooModel struct {

 //revive:disable-next-line:context-as-argument
 func getModelStore(t *testing.T, ctx context.Context) *ModelStore {
-c, err := openKopiaRepo(t, ctx)
+c, err := openLocalKopiaRepo(t, ctx)
 require.NoError(t, err, clues.ToCore(err))

 return &ModelStore{c: c, modelVersion: globalModelVersion}
@@ -856,7 +856,7 @@ func openConnAndModelStore(
 t *testing.T,
 ctx context.Context, //revive:disable-line:context-as-argument
 ) (*conn, *ModelStore) {
-st := storeTD.NewPrefixedS3Storage(t)
+st := storeTD.NewFilesystemStorage(t)
 c := NewConn(st)

 err := c.Initialize(ctx, repository.Options{}, repository.Retention{})
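The model-store tests now open a local, filesystem-backed kopia repo instead of a prefixed S3 bucket. A sketch of what such a helper could look like, using only the storeTD.NewFilesystemStorage, NewConn, and Initialize calls shown in the hunks above; openLocalKopiaRepo itself is defined elsewhere in the test package and the helper below is purely illustrative.

// Sketch only: a local-repo test helper in the spirit of openLocalKopiaRepo.
func newLocalTestConn(t *testing.T, ctx context.Context) *conn {
	// Filesystem-backed storage keeps integration tests self-contained.
	st := storeTD.NewFilesystemStorage(t)
	c := NewConn(st)

	err := c.Initialize(ctx, repository.Options{}, repository.Retention{})
	require.NoError(t, err, clues.ToCore(err))

	return c
}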
@@ -1,19 +1,14 @@
 package kopia

 import (
-"bytes"
 "context"
 "encoding/base64"
-"encoding/binary"
 "errors"
-"io"
-"os"
 "runtime/trace"
 "strings"
 "sync"
 "sync/atomic"
 "time"
-"unsafe"

 "github.com/alcionai/clues"
 "github.com/kopia/kopia/fs"
@@ -37,101 +32,6 @@ import (

 const maxInflateTraversalDepth = 500

-var versionSize = int(unsafe.Sizeof(serializationVersion))
-
-func newBackupStreamReader(version uint32, reader io.ReadCloser) *backupStreamReader {
-buf := make([]byte, versionSize)
-binary.BigEndian.PutUint32(buf, version)
-bufReader := io.NopCloser(bytes.NewReader(buf))
-
-return &backupStreamReader{
-readers: []io.ReadCloser{bufReader, reader},
-combined: io.NopCloser(io.MultiReader(bufReader, reader)),
-}
-}
-
-// backupStreamReader is a wrapper around the io.Reader that other Corso
-// components return when backing up information. It injects a version number at
-// the start of the data stream. Future versions of Corso may not need this if
-// they use more complex serialization logic as serialization/version injection
-// will be handled by other components.
-type backupStreamReader struct {
-readers []io.ReadCloser
-combined io.ReadCloser
-}
-
-func (rw *backupStreamReader) Read(p []byte) (n int, err error) {
-if rw.combined == nil {
-return 0, os.ErrClosed
-}
-
-return rw.combined.Read(p)
-}
-
-func (rw *backupStreamReader) Close() error {
-if rw.combined == nil {
-return nil
-}
-
-rw.combined = nil
-
-var errs *clues.Err
-
-for _, r := range rw.readers {
-err := r.Close()
-if err != nil {
-errs = clues.Stack(clues.Wrap(err, "closing reader"), errs)
-}
-}
-
-return errs.OrNil()
-}
-
-// restoreStreamReader is a wrapper around the io.Reader that kopia returns when
-// reading data from an item. It examines and strips off the version number of
-// the restored data. Future versions of Corso may not need this if they use
-// more complex serialization logic as version checking/deserialization will be
-// handled by other components. A reader that returns a version error is no
-// longer valid and should not be used once the version error is returned.
-type restoreStreamReader struct {
-io.ReadCloser
-expectedVersion uint32
-readVersion bool
-}
-
-func (rw *restoreStreamReader) checkVersion() error {
-versionBuf := make([]byte, versionSize)
-
-for newlyRead := 0; newlyRead < versionSize; {
-n, err := rw.ReadCloser.Read(versionBuf[newlyRead:])
-if err != nil {
-return clues.Wrap(err, "reading data format version")
-}
-
-newlyRead += n
-}
-
-version := binary.BigEndian.Uint32(versionBuf)
-
-if version != rw.expectedVersion {
-return clues.New("unexpected data format").With("read_version", version)
-}
-
-return nil
-}
-
-func (rw *restoreStreamReader) Read(p []byte) (n int, err error) {
-if !rw.readVersion {
-rw.readVersion = true
-
-if err := rw.checkVersion(); err != nil {
-return 0, err
-}
-}
-
-return rw.ReadCloser.Read(p)
-}
-
 type itemDetails struct {
 infoer data.ItemInfo
 repoPath path.Path
@@ -436,7 +336,7 @@ func collectionEntries(
 entry := virtualfs.StreamingFileWithModTimeFromReader(
 encodedName,
 modTime,
-newBackupStreamReader(serializationVersion, e.ToReader()))
+e.ToReader())

 err = ctr(ctx, entry)
 if err != nil {
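For reference, the framing that the deleted backupStreamReader/restoreStreamReader pair implemented was a fixed-size big-endian uint32 version prepended to the payload. A condensed sketch of that old behavior, derived from the removed code above (the function names here are illustrative, not part of the new readers package):

// Write side (old newBackupStreamReader): prefix the payload with the version.
func frame(version uint32, payload io.Reader) io.Reader {
	buf := make([]byte, 4)
	binary.BigEndian.PutUint32(buf, version)

	return io.MultiReader(bytes.NewReader(buf), payload)
}

// Read side (old restoreStreamReader.checkVersion): read the prefix fully,
// compare against the expected version, then hand back the remaining stream.
func unframe(expected uint32, framed io.Reader) (io.Reader, error) {
	buf := make([]byte, 4)
	if _, err := io.ReadFull(framed, buf); err != nil {
		return nil, fmt.Errorf("reading data format version: %w", err)
	}

	if v := binary.BigEndian.Uint32(buf); v != expected {
		return nil, fmt.Errorf("unexpected data format version %d", v)
	}

	return framed, nil
}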
@@ -14,7 +14,6 @@ import (
 "github.com/kopia/kopia/repo/manifest"
 "github.com/kopia/kopia/snapshot"
 "github.com/kopia/kopia/snapshot/snapshotfs"
-"github.com/pkg/errors"
 "github.com/stretchr/testify/assert"
 "github.com/stretchr/testify/require"
 "github.com/stretchr/testify/suite"
@@ -124,12 +123,6 @@ func expectFileData(
 return
 }

-// Need to wrap with a restore stream reader to remove the version.
-r = &restoreStreamReader{
-ReadCloser: io.NopCloser(r),
-expectedVersion: serializationVersion,
-}
-
 got, err := io.ReadAll(r)
 if !assert.NoError(t, err, "reading data in file", name, clues.ToCore(err)) {
 return
@@ -226,135 +219,6 @@ func getDirEntriesForEntry(
 // ---------------
 // unit tests
 // ---------------
-type limitedRangeReader struct {
-readLen int
-io.ReadCloser
-}
-
-func (lrr *limitedRangeReader) Read(p []byte) (int, error) {
-if len(p) == 0 {
-// Not well specified behavior, defer to underlying reader.
-return lrr.ReadCloser.Read(p)
-}
-
-toRead := lrr.readLen
-if len(p) < toRead {
-toRead = len(p)
-}
-
-return lrr.ReadCloser.Read(p[:toRead])
-}
-
-type VersionReadersUnitSuite struct {
-tester.Suite
-}
-
-func TestVersionReadersUnitSuite(t *testing.T) {
-suite.Run(t, &VersionReadersUnitSuite{Suite: tester.NewUnitSuite(t)})
-}
-
-func (suite *VersionReadersUnitSuite) TestWriteAndRead() {
-inputData := []byte("This is some data for the reader to test with")
-table := []struct {
-name string
-readVersion uint32
-writeVersion uint32
-check assert.ErrorAssertionFunc
-}{
-{
-name: "SameVersionSucceeds",
-readVersion: 42,
-writeVersion: 42,
-check: assert.NoError,
-},
-{
-name: "DifferentVersionsFail",
-readVersion: 7,
-writeVersion: 42,
-check: assert.Error,
-},
-}
-
-for _, test := range table {
-suite.Run(test.name, func() {
-t := suite.T()
-
-baseReader := bytes.NewReader(inputData)
-
-reversible := &restoreStreamReader{
-expectedVersion: test.readVersion,
-ReadCloser: newBackupStreamReader(
-test.writeVersion,
-io.NopCloser(baseReader)),
-}
-
-defer reversible.Close()
-
-allData, err := io.ReadAll(reversible)
-test.check(t, err, clues.ToCore(err))
-
-if err != nil {
-return
-}
-
-assert.Equal(t, inputData, allData)
-})
-}
-}
-
-func readAllInParts(
-t *testing.T,
-partLen int,
-reader io.ReadCloser,
-) ([]byte, int) {
-res := []byte{}
-read := 0
-tmp := make([]byte, partLen)
-
-for {
-n, err := reader.Read(tmp)
-if errors.Is(err, io.EOF) {
-break
-}
-
-require.NoError(t, err, clues.ToCore(err))
-
-read += n
-res = append(res, tmp[:n]...)
-}
-
-return res, read
-}
-
-func (suite *VersionReadersUnitSuite) TestWriteHandlesShortReads() {
-t := suite.T()
-inputData := []byte("This is some data for the reader to test with")
-version := uint32(42)
-baseReader := bytes.NewReader(inputData)
-versioner := newBackupStreamReader(version, io.NopCloser(baseReader))
-expectedToWrite := len(inputData) + int(versionSize)
-
-// "Write" all the data.
-versionedData, writtenLen := readAllInParts(t, 1, versioner)
-assert.Equal(t, expectedToWrite, writtenLen)
-
-// Read all of the data back.
-baseReader = bytes.NewReader(versionedData)
-reader := &restoreStreamReader{
-expectedVersion: version,
-// Be adversarial and only allow reads of length 1 from the byte reader.
-ReadCloser: &limitedRangeReader{
-readLen: 1,
-ReadCloser: io.NopCloser(baseReader),
-},
-}
-readData, readLen := readAllInParts(t, 1, reader)
-// This reports the bytes read and returned to the user, excluding the version
-// that is stripped off at the start.
-assert.Equal(t, len(inputData), readLen)
-assert.Equal(t, inputData, readData)
-}
-
 type CorsoProgressUnitSuite struct {
 tester.Suite
 targetFilePath path.Path
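The deleted suite also dropped limitedRangeReader, a small adversarial wrapper that forces short reads. If equivalent coverage is wanted for the new readers package, the same trick still applies; a sketch derived from the removed code (type and field names here are illustrative):

// Short-read wrapper in the style of the removed limitedRangeReader: caps
// every Read at readLen bytes so the code under test must handle short reads.
type shortReader struct {
	readLen int
	io.ReadCloser
}

func (sr *shortReader) Read(p []byte) (int, error) {
	if len(p) == 0 {
		// Not well specified behavior; defer to the underlying reader.
		return sr.ReadCloser.Read(p)
	}

	if len(p) > sr.readLen {
		p = p[:sr.readLen]
	}

	return sr.ReadCloser.Read(p)
}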
@@ -2420,9 +2284,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSelectsCorrectSubt
 encodeElements(inboxFileName1)[0],
 time.Time{},
 // Wrap with a backup reader so it gets the version injected.
-newBackupStreamReader(
-serializationVersion,
-io.NopCloser(bytes.NewReader(inboxFileData1v2)))),
+io.NopCloser(bytes.NewReader(inboxFileData1v2))),
 }),
 }),
 virtualfs.NewStaticDirectory(
@@ -2582,9 +2444,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSelectsMigrateSubt
 virtualfs.StreamingFileWithModTimeFromReader(
 encodeElements(inboxFileName1)[0],
 time.Time{},
-newBackupStreamReader(
-serializationVersion,
-io.NopCloser(bytes.NewReader(inboxFileData1)))),
+io.NopCloser(bytes.NewReader(inboxFileData1))),
 }),
 }),
 virtualfs.NewStaticDirectory(
@@ -2596,9 +2456,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSelectsMigrateSubt
 virtualfs.StreamingFileWithModTimeFromReader(
 encodeElements(contactsFileName1)[0],
 time.Time{},
-newBackupStreamReader(
-serializationVersion,
-io.NopCloser(bytes.NewReader(contactsFileData1)))),
+io.NopCloser(bytes.NewReader(contactsFileData1))),
 }),
 }),
 })
@@ -2817,15 +2675,11 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_SelectiveSubtreeP
 virtualfs.StreamingFileWithModTimeFromReader(
 encodeElements(fileName5)[0],
 time.Time{},
-newBackupStreamReader(
-serializationVersion,
-io.NopCloser(bytes.NewReader(fileData5)))),
+io.NopCloser(bytes.NewReader(fileData5))),
 virtualfs.StreamingFileWithModTimeFromReader(
 encodeElements(fileName6)[0],
 time.Time{},
-newBackupStreamReader(
-serializationVersion,
-io.NopCloser(bytes.NewReader(fileData6)))),
+io.NopCloser(bytes.NewReader(fileData6))),
 })
 counters[folderID3] = count

@@ -2835,15 +2689,11 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_SelectiveSubtreeP
 virtualfs.StreamingFileWithModTimeFromReader(
 encodeElements(fileName3)[0],
 time.Time{},
-newBackupStreamReader(
-serializationVersion,
-io.NopCloser(bytes.NewReader(fileData3)))),
+io.NopCloser(bytes.NewReader(fileData3))),
 virtualfs.StreamingFileWithModTimeFromReader(
 encodeElements(fileName4)[0],
 time.Time{},
-newBackupStreamReader(
-serializationVersion,
-io.NopCloser(bytes.NewReader(fileData4)))),
+io.NopCloser(bytes.NewReader(fileData4))),
 folder,
 })
 counters[folderID2] = count
@@ -2859,15 +2709,11 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_SelectiveSubtreeP
 virtualfs.StreamingFileWithModTimeFromReader(
 encodeElements(fileName1)[0],
 time.Time{},
-newBackupStreamReader(
-serializationVersion,
-io.NopCloser(bytes.NewReader(fileData1)))),
+io.NopCloser(bytes.NewReader(fileData1))),
 virtualfs.StreamingFileWithModTimeFromReader(
 encodeElements(fileName2)[0],
 time.Time{},
-newBackupStreamReader(
-serializationVersion,
-io.NopCloser(bytes.NewReader(fileData2)))),
+io.NopCloser(bytes.NewReader(fileData2))),
 folder,
 folder4,
 })
@@ -2879,15 +2725,11 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_SelectiveSubtreeP
 virtualfs.StreamingFileWithModTimeFromReader(
 encodeElements(fileName7)[0],
 time.Time{},
-newBackupStreamReader(
-serializationVersion,
-io.NopCloser(bytes.NewReader(fileData7)))),
+io.NopCloser(bytes.NewReader(fileData7))),
 virtualfs.StreamingFileWithModTimeFromReader(
 encodeElements(fileName8)[0],
 time.Time{},
-newBackupStreamReader(
-serializationVersion,
-io.NopCloser(bytes.NewReader(fileData8)))),
+io.NopCloser(bytes.NewReader(fileData8))),
 })
 counters[folderID5] = count

@@ -18,6 +18,7 @@ import (
 "golang.org/x/exp/maps"

 "github.com/alcionai/corso/src/internal/common/prefixmatcher"
+"github.com/alcionai/corso/src/internal/common/readers"
 "github.com/alcionai/corso/src/internal/data"
 "github.com/alcionai/corso/src/internal/diagnostics"
 "github.com/alcionai/corso/src/internal/observe"
@@ -36,8 +37,6 @@ const (
 // possibly corresponding to who is making the backup.
 corsoHost = "corso-host"
 corsoUser = "corso"
-
-serializationVersion uint32 = 1
 )

 // common manifest tags
@@ -447,7 +446,7 @@ func loadDirsAndItems(
 dir: dir,
 items: dirItems.items,
 counter: bcounter,
-expectedVersion: serializationVersion,
+expectedVersion: readers.DefaultSerializationVersion,
 }

 if err := mergeCol.addCollection(dirItems.dir.String(), dc); err != nil {
@@ -184,7 +184,7 @@ func (suite *BasicKopiaIntegrationSuite) TestMaintenance_FirstRun_NoChanges() {
 ctx, flush := tester.NewContext(t)
 defer flush()

-k, err := openKopiaRepo(t, ctx)
+k, err := openLocalKopiaRepo(t, ctx)
 require.NoError(t, err, clues.ToCore(err))

 w := &Wrapper{k}
@@ -204,7 +204,7 @@ func (suite *BasicKopiaIntegrationSuite) TestMaintenance_WrongUser_NoForce_Fails
 ctx, flush := tester.NewContext(t)
 defer flush()

-k, err := openKopiaRepo(t, ctx)
+k, err := openLocalKopiaRepo(t, ctx)
 require.NoError(t, err, clues.ToCore(err))

 w := &Wrapper{k}
@@ -241,7 +241,7 @@ func (suite *BasicKopiaIntegrationSuite) TestMaintenance_WrongUser_Force_Succeed
 ctx, flush := tester.NewContext(t)
 defer flush()

-k, err := openKopiaRepo(t, ctx)
+k, err := openLocalKopiaRepo(t, ctx)
 require.NoError(t, err, clues.ToCore(err))

 w := &Wrapper{k}
@@ -754,7 +754,7 @@ func (suite *KopiaIntegrationSuite) SetupTest() {
 t := suite.T()
 suite.ctx, suite.flush = tester.NewContext(t)

-c, err := openKopiaRepo(t, suite.ctx)
+c, err := openLocalKopiaRepo(t, suite.ctx)
 require.NoError(t, err, clues.ToCore(err))

 suite.w = &Wrapper{c}
@@ -1245,7 +1245,7 @@ func (suite *KopiaIntegrationSuite) TestRestoreAfterCompressionChange() {
 ctx, flush := tester.NewContext(t)
 defer flush()

-k, err := openKopiaRepo(t, ctx)
+k, err := openLocalKopiaRepo(t, ctx)
 require.NoError(t, err, clues.ToCore(err))

 err = k.Compression(ctx, "s2-default")
@@ -1268,7 +1268,10 @@ func (suite *KopiaIntegrationSuite) TestRestoreAfterCompressionChange() {
 ctx,
 []identity.Reasoner{r},
 nil,
-[]data.BackupCollection{dc1, dc2},
+[]data.BackupCollection{
+dataMock.NewVersionedBackupCollection(t, dc1),
+dataMock.NewVersionedBackupCollection(t, dc2),
+},
 nil,
 nil,
 true,
@@ -1556,7 +1559,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) SetupTest() {
 //nolint:forbidigo
 suite.ctx, _ = logger.CtxOrSeed(context.Background(), ls)

-c, err := openKopiaRepo(t, suite.ctx)
+c, err := openLocalKopiaRepo(t, suite.ctx)
 require.NoError(t, err, clues.ToCore(err))

 suite.w = &Wrapper{c}
@@ -1577,12 +1580,15 @@ func (suite *KopiaSimpleRepoIntegrationSuite) SetupTest() {
 })
 }

-collections = append(collections, collection)
+collections = append(
+collections,
+dataMock.NewVersionedBackupCollection(t, collection))
 }

 r := NewReason(testTenant, testUser, path.ExchangeService, path.EmailCategory)

-stats, deets, _, err := suite.w.ConsumeBackupCollections(
+// Other tests check basic things about deets so not doing that again here.
+stats, _, _, err := suite.w.ConsumeBackupCollections(
 suite.ctx,
 []identity.Reasoner{r},
 nil,
@@ -1597,8 +1603,6 @@ func (suite *KopiaSimpleRepoIntegrationSuite) SetupTest() {
 require.Equal(t, stats.TotalDirectoryCount, expectedDirs)
 require.Equal(t, stats.IgnoredErrorCount, 0)
 require.False(t, stats.Incomplete)
-// 6 file and 2 folder entries.
-assert.Len(t, deets.Details().Entries, expectedFiles+2)

 suite.snapshotID = manifest.ID(stats.SnapshotID)
 }
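The integration tests above now route collections through dataMock.NewVersionedBackupCollection before handing them to ConsumeBackupCollections. A sketch of that call pattern, assuming the helper (defined in internal/data/mock) wraps each item stream with the versioned backup reader; the function below is illustrative only.

// Sketch only: wrap plain test collections so their item streams carry the
// serialization header before they reach ConsumeBackupCollections.
func wrapForBackup(t *testing.T, cols []data.BackupCollection) []data.BackupCollection {
	wrapped := make([]data.BackupCollection, 0, len(cols))

	for _, c := range cols {
		wrapped = append(wrapped, dataMock.NewVersionedBackupCollection(t, c))
	}

	return wrapped
}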
@@ -1629,7 +1633,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() {
 excludePrefix bool
 expectedCachedItems int
 expectedUncachedItems int
-cols func() []data.BackupCollection
+cols func(t *testing.T) []data.BackupCollection
 backupIDCheck require.ValueAssertionFunc
 restoreCheck assert.ErrorAssertionFunc
 }{
@@ -1638,7 +1642,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() {
 excludeItem: true,
 expectedCachedItems: len(suite.filesByPath) - 1,
 expectedUncachedItems: 0,
-cols: func() []data.BackupCollection {
+cols: func(t *testing.T) []data.BackupCollection {
 return nil
 },
 backupIDCheck: require.NotEmpty,
@@ -1650,7 +1654,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() {
 excludePrefix: true,
 expectedCachedItems: len(suite.filesByPath) - 1,
 expectedUncachedItems: 0,
-cols: func() []data.BackupCollection {
+cols: func(t *testing.T) []data.BackupCollection {
 return nil
 },
 backupIDCheck: require.NotEmpty,
@@ -1661,7 +1665,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() {
 // No snapshot should be made since there were no changes.
 expectedCachedItems: 0,
 expectedUncachedItems: 0,
-cols: func() []data.BackupCollection {
+cols: func(t *testing.T) []data.BackupCollection {
 return nil
 },
 // Backup doesn't run.
@@ -1671,7 +1675,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() {
 name: "NoExcludeItemWithChanges",
 expectedCachedItems: len(suite.filesByPath),
 expectedUncachedItems: 1,
-cols: func() []data.BackupCollection {
+cols: func(t *testing.T) []data.BackupCollection {
 c := exchMock.NewCollection(
 suite.testPath1,
 suite.testPath1,
@@ -1679,7 +1683,9 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() {
 c.ColState = data.NotMovedState
 c.PrevPath = suite.testPath1

-return []data.BackupCollection{c}
+return []data.BackupCollection{
+dataMock.NewVersionedBackupCollection(t, c),
+}
 },
 backupIDCheck: require.NotEmpty,
 restoreCheck: assert.NoError,
@@ -1717,7 +1723,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() {
 Manifest: man,
 Reasons: []identity.Reasoner{r},
 }),
-test.cols(),
+test.cols(t),
 excluded,
 nil,
 true,
@@ -100,7 +100,7 @@ func (ctrl *Controller) ProduceBackupCollections(
 }

 case path.GroupsService:
-colls, ssmb, canUsePreviousBackup, err = groups.ProduceBackupCollections(
+colls, ssmb, err = groups.ProduceBackupCollections(
 ctx,
 bpc,
 ctrl.AC,
@@ -111,6 +111,10 @@ func (ctrl *Controller) ProduceBackupCollections(
 return nil, nil, false, err
 }

+// canUsePreviousBackup can always be returned true for groups as we
+// return a tombstone collection in case the metadata read fails
+canUsePreviousBackup = true
+
 default:
 return nil, nil, false, clues.Wrap(clues.New(service.String()), "service not supported").WithClues(ctx)
 }
@@ -11,6 +11,9 @@ import (
 "github.com/stretchr/testify/suite"

 inMock "github.com/alcionai/corso/src/internal/common/idname/mock"
+"github.com/alcionai/corso/src/internal/common/ptr"
+"github.com/alcionai/corso/src/internal/data"
+"github.com/alcionai/corso/src/internal/data/mock"
 "github.com/alcionai/corso/src/internal/m365/service/exchange"
 odConsts "github.com/alcionai/corso/src/internal/m365/service/onedrive/consts"
 "github.com/alcionai/corso/src/internal/m365/service/sharepoint"
@@ -458,9 +461,8 @@ func (suite *SPCollectionIntgSuite) TestCreateSharePointCollection_Lists() {
 for item := range collection.Items(ctx, fault.New(true)) {
 t.Log("File: " + item.ID())

-bs, err := io.ReadAll(item.ToReader())
+_, err := io.ReadAll(item.ToReader())
 require.NoError(t, err, clues.ToCore(err))
-t.Log(string(bs))
 }
 }
 }
@ -575,3 +577,123 @@ func (suite *GroupsCollectionIntgSuite) TestCreateGroupsCollection_SharePoint()
|
|||||||
assert.NotZero(t, status.Successes)
|
assert.NotZero(t, status.Successes)
|
||||||
t.Log(status.String())
|
t.Log(status.String())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (suite *GroupsCollectionIntgSuite) TestCreateGroupsCollection_SharePoint_InvalidMetadata() {
|
||||||
|
t := suite.T()
|
||||||
|
|
||||||
|
ctx, flush := tester.NewContext(t)
|
||||||
|
defer flush()
|
||||||
|
|
||||||
|
var (
|
||||||
|
groupID = tconfig.M365GroupID(t)
|
||||||
|
ctrl = newController(ctx, t, path.GroupsService)
|
||||||
|
groupIDs = []string{groupID}
|
||||||
|
)
|
||||||
|
|
||||||
|
id, name, err := ctrl.PopulateProtectedResourceIDAndName(ctx, groupID, nil)
|
||||||
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
|
sel := selectors.NewGroupsBackup(groupIDs)
|
||||||
|
sel.Include(sel.LibraryFolders([]string{"test"}, selectors.PrefixMatch()))
|
||||||
|
|
||||||
|
sel.SetDiscreteOwnerIDName(id, name)
|
||||||
|
|
||||||
|
site, err := suite.connector.AC.Groups().GetRootSite(ctx, groupID)
|
||||||
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
|
pth, err := path.Build(
|
||||||
|
suite.tenantID,
|
||||||
|
groupID,
|
||||||
|
path.GroupsService,
|
||||||
|
path.LibrariesCategory,
|
||||||
|
true,
|
||||||
|
odConsts.SitesPathDir,
|
||||||
|
ptr.Val(site.GetId()))
|
||||||
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
|
mmc := []data.RestoreCollection{
|
||||||
|
mock.Collection{
|
||||||
|
Path: pth,
|
||||||
|
ItemData: []data.Item{
|
||||||
|
&mock.Item{
|
||||||
|
ItemID: "previouspath",
|
||||||
|
Reader: io.NopCloser(bytes.NewReader([]byte("invalid"))),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
bpc := inject.BackupProducerConfig{
|
||||||
|
LastBackupVersion: version.NoBackup,
|
||||||
|
Options: control.DefaultOptions(),
|
||||||
|
ProtectedResource: inMock.NewProvider(id, name),
|
||||||
|
Selector: sel.Selector,
|
||||||
|
MetadataCollections: mmc,
|
||||||
|
}
|
||||||
|
|
||||||
|
collections, excludes, canUsePreviousBackup, err := ctrl.ProduceBackupCollections(
|
||||||
|
ctx,
|
||||||
|
bpc,
|
||||||
|
fault.New(true))
|
||||||
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
assert.True(t, canUsePreviousBackup, "can use previous backup")
|
||||||
|
// No excludes yet as this isn't an incremental backup.
|
||||||
|
assert.True(t, excludes.Empty())
|
||||||
|
|
||||||
|
// we don't know an exact count of drives this will produce,
|
||||||
|
// but it should be more than one.
|
||||||
|
assert.Greater(t, len(collections), 1)
|
||||||
|
|
||||||
|
p, err := path.BuildMetadata(
|
||||||
|
suite.tenantID,
|
||||||
|
groupID,
|
||||||
|
path.GroupsService,
|
||||||
|
path.LibrariesCategory,
|
||||||
|
false)
|
||||||
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
|
p, err = p.Append(false, odConsts.SitesPathDir)
|
||||||
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
|
foundSitesMetadata := false
|
||||||
|
foundRootTombstone := false
|
||||||
|
|
||||||
|
sp, err := path.BuildPrefix(
|
||||||
|
suite.tenantID,
|
||||||
|
groupID,
|
||||||
|
path.GroupsService,
|
||||||
|
path.LibrariesCategory)
|
||||||
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
|
sp, err = sp.Append(false, odConsts.SitesPathDir, ptr.Val(site.GetId()))
|
||||||
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
|
for _, coll := range collections {
|
||||||
|
if coll.State() == data.DeletedState {
|
||||||
|
if coll.PreviousPath() != nil && coll.PreviousPath().String() == sp.String() {
|
||||||
|
foundRootTombstone = true
|
||||||
|
}
|
||||||
|
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
sitesMetadataCollection := coll.FullPath().String() == p.String()
|
||||||
|
|
||||||
|
for object := range coll.Items(ctx, fault.New(true)) {
|
||||||
|
if object.ID() == "previouspath" && sitesMetadataCollection {
|
||||||
|
foundSitesMetadata = true
|
||||||
|
}
|
||||||
|
|
||||||
|
buf := &bytes.Buffer{}
|
||||||
|
_, err := buf.ReadFrom(object.ToReader())
|
||||||
|
assert.NoError(t, err, "reading item", clues.ToCore(err))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
assert.True(t, foundSitesMetadata, "missing sites metadata")
|
||||||
|
assert.True(t, foundRootTombstone, "missing root tombstone")
|
||||||
|
|
||||||
|
status := ctrl.Wait()
|
||||||
|
assert.NotZero(t, status.Successes)
|
||||||
|
t.Log(status.String())
|
||||||
|
}
|
||||||
|
|||||||
@ -584,15 +584,24 @@ func (oc *Collection) streamDriveItem(
|
|||||||
return progReader, nil
|
return progReader, nil
|
||||||
})
|
})
|
||||||
|
|
||||||
// We wrap the reader with a lazy reader so that the progress bar is only
|
storeItem, err := data.NewUnindexedPrefetchedItem(
|
||||||
// initialized if the file is read. Since we're not actually lazily reading
|
|
||||||
// data just use the eager item implementation.
|
|
||||||
oc.data <- data.NewUnindexedPrefetchedItem(
|
|
||||||
metaReader,
|
metaReader,
|
||||||
metaFileName+metaSuffix,
|
metaFileName+metaSuffix,
|
||||||
// Metadata file should always use the latest time as
|
// Metadata file should always use the latest time as
|
||||||
// permissions change does not update mod time.
|
// permissions change does not update mod time.
|
||||||
time.Now())
|
time.Now())
|
||||||
|
if err != nil {
|
||||||
|
errs.AddRecoverable(ctx, clues.Stack(err).
|
||||||
|
WithClues(ctx).
|
||||||
|
Label(fault.LabelForceNoBackupCreation))
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// We wrap the reader with a lazy reader so that the progress bar is only
|
||||||
|
// initialized if the file is read. Since we're not actually lazily reading
|
||||||
|
// data just use the eager item implementation.
|
||||||
|
oc.data <- storeItem
|
||||||
|
|
||||||
// Item read successfully, add to collection
|
// Item read successfully, add to collection
|
||||||
if isFile {
|
if isFile {
|
||||||
|
|||||||
@ -19,6 +19,7 @@ import (
|
|||||||
"github.com/stretchr/testify/suite"
|
"github.com/stretchr/testify/suite"
|
||||||
|
|
||||||
"github.com/alcionai/corso/src/internal/common/ptr"
|
"github.com/alcionai/corso/src/internal/common/ptr"
|
||||||
|
"github.com/alcionai/corso/src/internal/common/readers"
|
||||||
"github.com/alcionai/corso/src/internal/data"
|
"github.com/alcionai/corso/src/internal/data"
|
||||||
"github.com/alcionai/corso/src/internal/m365/collection/drive/metadata"
|
"github.com/alcionai/corso/src/internal/m365/collection/drive/metadata"
|
||||||
metaTD "github.com/alcionai/corso/src/internal/m365/collection/drive/metadata/testdata"
|
metaTD "github.com/alcionai/corso/src/internal/m365/collection/drive/metadata/testdata"
|
||||||
@ -256,7 +257,7 @@ func (suite *CollectionUnitSuite) TestCollection() {
|
|||||||
mt := readItem.(data.ItemModTime)
|
mt := readItem.(data.ItemModTime)
|
||||||
assert.Equal(t, now, mt.ModTime())
|
assert.Equal(t, now, mt.ModTime())
|
||||||
|
|
||||||
readData, err := io.ReadAll(readItem.ToReader())
|
rr, err := readers.NewVersionedRestoreReader(readItem.ToReader())
|
||||||
test.expectErr(t, err)
|
test.expectErr(t, err)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -267,13 +268,25 @@ func (suite *CollectionUnitSuite) TestCollection() {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
assert.Equal(t, readers.DefaultSerializationVersion, rr.Format().Version)
|
||||||
|
assert.False(t, rr.Format().DelInFlight)
|
||||||
|
|
||||||
|
readData, err := io.ReadAll(rr)
|
||||||
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
assert.Equal(t, stubItemContent, readData)
|
assert.Equal(t, stubItemContent, readData)
|
||||||
|
|
||||||
readItemMeta := readItems[1]
|
readItemMeta := readItems[1]
|
||||||
assert.Equal(t, stubItemID+metadata.MetaFileSuffix, readItemMeta.ID())
|
assert.Equal(t, stubItemID+metadata.MetaFileSuffix, readItemMeta.ID())
|
||||||
|
|
||||||
|
rr, err = readers.NewVersionedRestoreReader(readItemMeta.ToReader())
|
||||||
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
|
assert.Equal(t, readers.DefaultSerializationVersion, rr.Format().Version)
|
||||||
|
assert.False(t, rr.Format().DelInFlight)
|
||||||
|
|
||||||
readMeta := metadata.Metadata{}
|
readMeta := metadata.Metadata{}
|
||||||
err = json.NewDecoder(readItemMeta.ToReader()).Decode(&readMeta)
|
err = json.NewDecoder(rr).Decode(&readMeta)
|
||||||
require.NoError(t, err, clues.ToCore(err))
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
metaTD.AssertMetadataEqual(t, stubMeta, readMeta)
|
metaTD.AssertMetadataEqual(t, stubMeta, readMeta)
|
||||||
@ -485,12 +498,18 @@ func (suite *CollectionUnitSuite) TestCollectionPermissionBackupLatestModTime()
|
|||||||
|
|
||||||
for _, i := range readItems {
|
for _, i := range readItems {
|
||||||
if strings.HasSuffix(i.ID(), metadata.MetaFileSuffix) {
|
if strings.HasSuffix(i.ID(), metadata.MetaFileSuffix) {
|
||||||
content, err := io.ReadAll(i.ToReader())
|
rr, err := readers.NewVersionedRestoreReader(i.ToReader())
|
||||||
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
|
assert.Equal(t, readers.DefaultSerializationVersion, rr.Format().Version)
|
||||||
|
assert.False(t, rr.Format().DelInFlight)
|
||||||
|
|
||||||
|
content, err := io.ReadAll(rr)
|
||||||
require.NoError(t, err, clues.ToCore(err))
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
require.Equal(t, `{"filename":"Fake Item","permissionMode":1}`, string(content))
|
require.Equal(t, `{"filename":"Fake Item","permissionMode":1}`, string(content))
|
||||||
|
|
||||||
im, ok := i.(data.ItemModTime)
|
im, ok := i.(data.ItemModTime)
|
||||||
require.Equal(t, ok, true, "modtime interface")
|
require.True(t, ok, "modtime interface")
|
||||||
require.Greater(t, im.ModTime(), mtime, "permissions time greater than mod time")
|
require.Greater(t, im.ModTime(), mtime, "permissions time greater than mod time")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@ -135,11 +135,6 @@ func deserializeMetadata(
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
if err == nil {
|
|
||||||
// Successful decode.
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// This is conservative, but report an error if either any of the items
|
// This is conservative, but report an error if either any of the items
|
||||||
// for any of the deserialized maps have duplicate drive IDs or there's
|
// for any of the deserialized maps have duplicate drive IDs or there's
|
||||||
// some other problem deserializing things. This will cause the entire
|
// some other problem deserializing things. This will cause the entire
|
||||||
@ -147,7 +142,9 @@ func deserializeMetadata(
|
|||||||
// these cases. We can make the logic for deciding when to continue vs.
|
// these cases. We can make the logic for deciding when to continue vs.
|
||||||
// when to fail less strict in the future if needed.
|
// when to fail less strict in the future if needed.
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, false, clues.Stack(err).WithClues(ictx)
|
errs.Fail(clues.Stack(err).WithClues(ictx))
|
||||||
|
|
||||||
|
return map[string]string{}, map[string]map[string]string{}, false, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -230,16 +227,16 @@ func (c *Collections) Get(
|
|||||||
ssmb *prefixmatcher.StringSetMatchBuilder,
|
ssmb *prefixmatcher.StringSetMatchBuilder,
|
||||||
errs *fault.Bus,
|
errs *fault.Bus,
|
||||||
) ([]data.BackupCollection, bool, error) {
|
) ([]data.BackupCollection, bool, error) {
|
||||||
prevDriveIDToDelta, oldPrevPathsByDriveID, canUsePrevBackup, err := deserializeMetadata(ctx, prevMetadata)
|
prevDeltas, oldPathsByDriveID, canUsePreviousBackup, err := deserializeMetadata(ctx, prevMetadata)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, false, err
|
return nil, false, err
|
||||||
}
|
}
|
||||||
|
|
||||||
ctx = clues.Add(ctx, "can_use_previous_backup", canUsePrevBackup)
|
ctx = clues.Add(ctx, "can_use_previous_backup", canUsePreviousBackup)
|
||||||
|
|
||||||
driveTombstones := map[string]struct{}{}
|
driveTombstones := map[string]struct{}{}
|
||||||
|
|
||||||
for driveID := range oldPrevPathsByDriveID {
|
for driveID := range oldPathsByDriveID {
|
||||||
driveTombstones[driveID] = struct{}{}
|
driveTombstones[driveID] = struct{}{}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -257,88 +254,76 @@ func (c *Collections) Get(
|
|||||||
}
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
driveIDToDeltaLink = map[string]string{}
|
// Drive ID -> delta URL for drive
|
||||||
driveIDToPrevPaths = map[string]map[string]string{}
|
deltaURLs = map[string]string{}
|
||||||
numPrevItems = 0
|
// Drive ID -> folder ID -> folder path
|
||||||
|
folderPaths = map[string]map[string]string{}
|
||||||
|
numPrevItems = 0
|
||||||
)
|
)
|
||||||
|
|
||||||
for _, d := range drives {
|
for _, d := range drives {
|
||||||
var (
|
var (
|
||||||
driveID = ptr.Val(d.GetId())
|
driveID = ptr.Val(d.GetId())
|
||||||
driveName = ptr.Val(d.GetName())
|
driveName = ptr.Val(d.GetName())
|
||||||
ictx = clues.Add(
|
prevDelta = prevDeltas[driveID]
|
||||||
ctx,
|
oldPaths = oldPathsByDriveID[driveID]
|
||||||
"drive_id", driveID,
|
numOldDelta = 0
|
||||||
"drive_name", clues.Hide(driveName))
|
ictx = clues.Add(ctx, "drive_id", driveID, "drive_name", driveName)
|
||||||
|
|
||||||
excludedItemIDs = map[string]struct{}{}
|
|
||||||
oldPrevPaths = oldPrevPathsByDriveID[driveID]
|
|
||||||
prevDeltaLink = prevDriveIDToDelta[driveID]
|
|
||||||
|
|
||||||
// itemCollection is used to identify which collection a
|
|
||||||
// file belongs to. This is useful to delete a file from the
|
|
||||||
// collection it was previously in, in case it was moved to a
|
|
||||||
// different collection within the same delta query
|
|
||||||
// item ID -> item ID
|
|
||||||
itemCollection = map[string]string{}
|
|
||||||
)
|
)
|
||||||
|
|
||||||
delete(driveTombstones, driveID)
|
delete(driveTombstones, driveID)
|
||||||
|
|
||||||
if _, ok := driveIDToPrevPaths[driveID]; !ok {
|
|
||||||
driveIDToPrevPaths[driveID] = map[string]string{}
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, ok := c.CollectionMap[driveID]; !ok {
|
if _, ok := c.CollectionMap[driveID]; !ok {
|
||||||
c.CollectionMap[driveID] = map[string]*Collection{}
|
c.CollectionMap[driveID] = map[string]*Collection{}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if len(prevDelta) > 0 {
|
||||||
|
numOldDelta++
|
||||||
|
}
|
||||||
|
|
||||||
logger.Ctx(ictx).Infow(
|
logger.Ctx(ictx).Infow(
|
||||||
"previous metadata for drive",
|
"previous metadata for drive",
|
||||||
"num_paths_entries", len(oldPrevPaths))
|
"num_paths_entries", len(oldPaths),
|
||||||
|
"num_deltas_entries", numOldDelta)
|
||||||
|
|
||||||
items, du, err := c.handler.EnumerateDriveItemsDelta(
|
delta, paths, excluded, err := collectItems(
|
||||||
ictx,
|
ictx,
|
||||||
|
c.handler.NewItemPager(driveID, "", api.DriveItemSelectDefault()),
|
||||||
driveID,
|
driveID,
|
||||||
prevDeltaLink)
|
driveName,
|
||||||
|
c.UpdateCollections,
|
||||||
|
oldPaths,
|
||||||
|
prevDelta,
|
||||||
|
errs)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, false, err
|
return nil, false, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Used for logging below.
|
||||||
|
numDeltas := 0
|
||||||
|
|
||||||
// It's alright to have an empty folders map (i.e. no folders found) but not
|
// It's alright to have an empty folders map (i.e. no folders found) but not
|
||||||
// an empty delta token. This is because when deserializing the metadata we
|
// an empty delta token. This is because when deserializing the metadata we
|
||||||
// remove entries for which there is no corresponding delta token/folder. If
|
// remove entries for which there is no corresponding delta token/folder. If
|
||||||
// we leave empty delta tokens then we may end up setting the State field
|
// we leave empty delta tokens then we may end up setting the State field
|
||||||
// for collections when not actually getting delta results.
|
// for collections when not actually getting delta results.
|
||||||
if len(du.URL) > 0 {
|
if len(delta.URL) > 0 {
|
||||||
driveIDToDeltaLink[driveID] = du.URL
|
deltaURLs[driveID] = delta.URL
|
||||||
}
|
numDeltas++
|
||||||
|
|
||||||
newPrevPaths, err := c.UpdateCollections(
|
|
||||||
ctx,
|
|
||||||
driveID,
|
|
||||||
driveName,
|
|
||||||
items,
|
|
||||||
oldPrevPaths,
|
|
||||||
itemCollection,
|
|
||||||
excludedItemIDs,
|
|
||||||
du.Reset,
|
|
||||||
errs)
|
|
||||||
if err != nil {
|
|
||||||
return nil, false, clues.Stack(err)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Avoid the edge case where there's no paths but we do have a valid delta
|
// Avoid the edge case where there's no paths but we do have a valid delta
|
||||||
// token. We can accomplish this by adding an empty paths map for this
|
// token. We can accomplish this by adding an empty paths map for this
|
||||||
// drive. If we don't have this then the next backup won't use the delta
|
// drive. If we don't have this then the next backup won't use the delta
|
||||||
// token because it thinks the folder paths weren't persisted.
|
// token because it thinks the folder paths weren't persisted.
|
||||||
driveIDToPrevPaths[driveID] = map[string]string{}
|
folderPaths[driveID] = map[string]string{}
|
||||||
maps.Copy(driveIDToPrevPaths[driveID], newPrevPaths)
|
maps.Copy(folderPaths[driveID], paths)
|
||||||
|
|
||||||
logger.Ctx(ictx).Infow(
|
logger.Ctx(ictx).Infow(
|
||||||
"persisted metadata for drive",
|
"persisted metadata for drive",
|
||||||
"num_new_paths_entries", len(newPrevPaths),
|
"num_paths_entries", len(paths),
|
||||||
"delta_reset", du.Reset)
|
"num_deltas_entries", numDeltas,
|
||||||
|
"delta_reset", delta.Reset)
|
||||||
|
|
||||||
numDriveItems := c.NumItems - numPrevItems
|
numDriveItems := c.NumItems - numPrevItems
|
||||||
numPrevItems = c.NumItems
|
numPrevItems = c.NumItems
|
||||||
@ -350,7 +335,7 @@ func (c *Collections) Get(
|
|||||||
err = c.addURLCacheToDriveCollections(
|
err = c.addURLCacheToDriveCollections(
|
||||||
ictx,
|
ictx,
|
||||||
driveID,
|
driveID,
|
||||||
prevDeltaLink,
|
prevDelta,
|
||||||
errs)
|
errs)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, false, err
|
return nil, false, err
|
||||||
@ -359,8 +344,8 @@ func (c *Collections) Get(
|
|||||||
|
|
||||||
// For both cases we don't need to do set difference on folder map if the
|
// For both cases we don't need to do set difference on folder map if the
|
||||||
// delta token was valid because we should see all the changes.
|
// delta token was valid because we should see all the changes.
|
||||||
if !du.Reset {
|
if !delta.Reset {
|
||||||
if len(excludedItemIDs) == 0 {
|
if len(excluded) == 0 {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -369,7 +354,7 @@ func (c *Collections) Get(
|
|||||||
return nil, false, clues.Wrap(err, "making exclude prefix").WithClues(ictx)
|
return nil, false, clues.Wrap(err, "making exclude prefix").WithClues(ictx)
|
||||||
}
|
}
|
||||||
|
|
||||||
ssmb.Add(p.String(), excludedItemIDs)
|
ssmb.Add(p.String(), excluded)
|
||||||
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
@@ -384,11 +369,13 @@ func (c *Collections) Get(
			foundFolders[id] = struct{}{}
		}

-		for fldID, p := range oldPrevPaths {
+		for fldID, p := range oldPaths {
			if _, ok := foundFolders[fldID]; ok {
				continue
			}

+			delete(paths, fldID)

			prevPath, err := path.FromDataLayerPath(p, false)
			if err != nil {
				err = clues.Wrap(err, "invalid previous path").WithClues(ictx).With("deleted_path", p)
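A compact sketch of the set-difference step this hunk touches: folders present in the previous backup's paths but never seen in the current enumeration are treated as deleted. Per the comment earlier in the function, this only runs when the delta token was reset; names below are simplified and hypothetical.

package main

import "fmt"

// staleFolderIDs returns folder IDs recorded by the previous backup that did
// not appear in the current, post-reset enumeration.
func staleFolderIDs(prevPaths map[string]string, found map[string]struct{}) []string {
	stale := []string{}

	for folderID := range prevPaths {
		if _, ok := found[folderID]; !ok {
			stale = append(stale, folderID)
		}
	}

	return stale
}

func main() {
	prev := map[string]string{"root": "/", "folder": "/folder", "removed": "/removed"}
	found := map[string]struct{}{"root": {}, "folder": {}}

	fmt.Println(staleFolderIDs(prev, found)) // [removed]
}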
@@ -456,14 +443,14 @@ func (c *Collections) Get(
		// empty/missing and default to a full backup.
		logger.CtxErr(ctx, err).Info("making metadata collection path prefixes")

-		return collections, canUsePrevBackup, nil
+		return collections, canUsePreviousBackup, nil
	}

	md, err := graph.MakeMetadataCollection(
		pathPrefix,
		[]graph.MetadataCollectionEntry{
-			graph.NewMetadataEntry(bupMD.PreviousPathFileName, driveIDToPrevPaths),
-			graph.NewMetadataEntry(bupMD.DeltaURLsFileName, driveIDToDeltaLink),
+			graph.NewMetadataEntry(bupMD.PreviousPathFileName, folderPaths),
+			graph.NewMetadataEntry(bupMD.DeltaURLsFileName, deltaURLs),
		},
		c.statusUpdater)

@@ -476,7 +463,7 @@ func (c *Collections) Get(
		collections = append(collections, md)
	}

-	return collections, canUsePrevBackup, nil
+	return collections, canUsePreviousBackup, nil
}

// addURLCacheToDriveCollections adds an URL cache to all collections belonging to
@@ -490,7 +477,7 @@ func (c *Collections) addURLCacheToDriveCollections(
		driveID,
		prevDelta,
		urlCacheRefreshInterval,
-		c.handler,
+		c.handler.NewItemPager(driveID, "", api.DriveItemSelectURLCache()),
		errs)
	if err != nil {
		return err
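The URL cache wired up above exists because drive item download URLs expire. A minimal sketch of the refresh-on-interval idea; the structure and names are assumptions, not the actual cache implementation:

package main

import (
	"fmt"
	"time"
)

type urlCache struct {
	urls        map[string]string
	refreshedAt time.Time
	interval    time.Duration
	refresh     func() map[string]string // e.g. backed by a drive item pager
}

// get refreshes the whole cache when it is older than the interval, then
// serves the per-item download URL from memory.
func (c *urlCache) get(itemID string) (string, bool) {
	if time.Since(c.refreshedAt) > c.interval {
		c.urls = c.refresh()
		c.refreshedAt = time.Now()
	}

	u, ok := c.urls[itemID]

	return u, ok
}

func main() {
	cache := &urlCache{
		interval: time.Hour,
		refresh: func() map[string]string {
			return map[string]string{"item-1": "https://example.invalid/download/item-1"}
		},
	}

	fmt.Println(cache.get("item-1"))
}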
@@ -546,21 +533,22 @@ func updateCollectionPaths(

func (c *Collections) handleDelete(
	itemID, driveID string,
-	oldPrevPaths, currPrevPaths, newPrevPaths map[string]string,
+	oldPaths, newPaths map[string]string,
	isFolder bool,
	excluded map[string]struct{},
+	itemCollection map[string]map[string]string,
	invalidPrevDelta bool,
) error {
	if !isFolder {
		// Try to remove the item from the Collection if an entry exists for this
		// item. This handles cases where an item was created and deleted during the
		// same delta query.
-		if parentID, ok := currPrevPaths[itemID]; ok {
+		if parentID, ok := itemCollection[driveID][itemID]; ok {
			if col := c.CollectionMap[driveID][parentID]; col != nil {
				col.Remove(itemID)
			}

-			delete(currPrevPaths, itemID)
+			delete(itemCollection[driveID], itemID)
		}

		// Don't need to add to exclude list if the delta is invalid since the
@@ -581,7 +569,7 @@ func (c *Collections) handleDelete(

	var prevPath path.Path

-	prevPathStr, ok := oldPrevPaths[itemID]
+	prevPathStr, ok := oldPaths[itemID]
	if ok {
		var err error

@@ -598,7 +586,7 @@ func (c *Collections) handleDelete(
	// Nested folders also return deleted delta results so we don't have to
	// worry about doing a prefix search in the map to remove the subtree of
	// the deleted folder/package.
-	delete(newPrevPaths, itemID)
+	delete(newPaths, itemID)

	if prevPath == nil || invalidPrevDelta {
		// It is possible that an item was created and deleted between two delta
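handleDelete's first branch covers an item that was both created and deleted inside a single delta query. A stripped-down sketch of that bookkeeping, using hypothetical types:

package main

import "fmt"

type collection struct{ items map[string]struct{} }

func (c *collection) add(id string)    { c.items[id] = struct{}{} }
func (c *collection) remove(id string) { delete(c.items, id) }

func main() {
	cols := map[string]*collection{"parent": {items: map[string]struct{}{}}}
	itemToParent := map[string]string{} // which collection saw the item this pass

	// Earlier in the same delta query: the item is created under "parent".
	cols["parent"].add("item-1")
	itemToParent["item-1"] = "parent"

	// Later in the same query: a delete arrives for the item, so it is pulled
	// back out of the collection it was added to and forgotten.
	if parentID, ok := itemToParent["item-1"]; ok {
		cols[parentID].remove("item-1")
		delete(itemToParent, "item-1")
	}

	fmt.Println(len(cols["parent"].items)) // 0; the item never reaches the backup
}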
@@ -688,29 +676,21 @@ func (c *Collections) getCollectionPath(

// UpdateCollections initializes and adds the provided drive items to Collections
// A new collection is created for every drive folder (or package).
-// oldPrevPaths is the unchanged data that was loaded from the metadata file.
-// This map is not modified during the call.
-// currPrevPaths starts as a copy of oldPaths and is updated as changes are found in
-// the returned results. Items are added to this collection throughout the call.
-// newPrevPaths, ie: the items added during this call, get returned as a map.
+// oldPaths is the unchanged data that was loaded from the metadata file.
+// newPaths starts as a copy of oldPaths and is updated as changes are found in
+// the returned results.
func (c *Collections) UpdateCollections(
	ctx context.Context,
	driveID, driveName string,
	items []models.DriveItemable,
-	oldPrevPaths map[string]string,
-	currPrevPaths map[string]string,
+	oldPaths map[string]string,
+	newPaths map[string]string,
	excluded map[string]struct{},
+	itemCollection map[string]map[string]string,
	invalidPrevDelta bool,
	errs *fault.Bus,
-) (map[string]string, error) {
-	var (
-		el           = errs.Local()
-		newPrevPaths = map[string]string{}
-	)
-
-	if !invalidPrevDelta {
-		maps.Copy(newPrevPaths, oldPrevPaths)
-	}
+) error {
+	el := errs.Local()

	for _, item := range items {
		if el.Failure() != nil {
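The signature change in this hunk is really a change of contract: one variant hands back a freshly built previous-paths map, the other mutates a caller-supplied one. A small sketch of the returned-map style and why it leaves the input untouched (simplified, hypothetical names; requires the Go 1.21 maps package):

package main

import (
	"fmt"
	"maps"
)

// updatedPaths builds the next backup's folder paths without touching the map
// loaded from metadata; the copy is skipped when the previous delta token was
// invalid, forcing a from-scratch state.
func updatedPaths(oldPaths map[string]string, invalidPrevDelta bool) map[string]string {
	newPaths := map[string]string{}

	if !invalidPrevDelta {
		maps.Copy(newPaths, oldPaths)
	}

	newPaths["folder"] = "/folder-renamed"

	return newPaths
}

func main() {
	old := map[string]string{"folder": "/folder"}

	fmt.Println(updatedPaths(old, false)) // map[folder:/folder-renamed]
	fmt.Println(old)                      // map[folder:/folder] (unchanged)
}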
@@ -720,12 +700,8 @@ func (c *Collections) UpdateCollections(
		var (
			itemID   = ptr.Val(item.GetId())
			itemName = ptr.Val(item.GetName())
+			ictx     = clues.Add(ctx, "item_id", itemID, "item_name", clues.Hide(itemName))
			isFolder = item.GetFolder() != nil || item.GetPackageEscaped() != nil
-			ictx     = clues.Add(
-				ctx,
-				"item_id", itemID,
-				"item_name", clues.Hide(itemName),
-				"item_is_folder", isFolder)
		)

		if item.GetMalware() != nil {
@@ -747,13 +723,13 @@ func (c *Collections) UpdateCollections(
			if err := c.handleDelete(
				itemID,
				driveID,
-				oldPrevPaths,
-				currPrevPaths,
-				newPrevPaths,
+				oldPaths,
+				newPaths,
				isFolder,
				excluded,
+				itemCollection,
				invalidPrevDelta); err != nil {
-				return nil, clues.Stack(err).WithClues(ictx)
+				return clues.Stack(err).WithClues(ictx)
			}

			continue
@@ -779,13 +755,13 @@ func (c *Collections) UpdateCollections(
			// Deletions are handled above so this is just moves/renames.
			var prevPath path.Path

-			prevPathStr, ok := oldPrevPaths[itemID]
+			prevPathStr, ok := oldPaths[itemID]
			if ok {
				prevPath, err = path.FromDataLayerPath(prevPathStr, false)
				if err != nil {
					el.AddRecoverable(ctx, clues.Wrap(err, "invalid previous path").
						WithClues(ictx).
-						With("prev_path_string", path.LoggableDir(prevPathStr)))
+						With("path_string", prevPathStr))
				}
			} else if item.GetRoot() != nil {
				// Root doesn't move or get renamed.
@@ -795,11 +771,11 @@ func (c *Collections) UpdateCollections(
			// Moved folders don't cause delta results for any subfolders nested in
			// them. We need to go through and update paths to handle that. We only
			// update newPaths so we don't accidentally clobber previous deletes.
-			updatePath(newPrevPaths, itemID, collectionPath.String())
+			updatePath(newPaths, itemID, collectionPath.String())

			found, err := updateCollectionPaths(driveID, itemID, c.CollectionMap, collectionPath)
			if err != nil {
-				return nil, clues.Stack(err).WithClues(ictx)
+				return clues.Stack(err).WithClues(ictx)
			}

			if found {
@@ -822,7 +798,7 @@ func (c *Collections) UpdateCollections(
			invalidPrevDelta,
			nil)
		if err != nil {
-			return nil, clues.Stack(err).WithClues(ictx)
+			return clues.Stack(err).WithClues(ictx)
		}

		col.driveName = driveName
|
|||||||
case item.GetFile() != nil:
|
case item.GetFile() != nil:
|
||||||
// Deletions are handled above so this is just moves/renames.
|
// Deletions are handled above so this is just moves/renames.
|
||||||
if len(ptr.Val(item.GetParentReference().GetId())) == 0 {
|
if len(ptr.Val(item.GetParentReference().GetId())) == 0 {
|
||||||
return nil, clues.New("file without parent ID").WithClues(ictx)
|
return clues.New("file without parent ID").WithClues(ictx)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get the collection for this item.
|
// Get the collection for this item.
|
||||||
parentID := ptr.Val(item.GetParentReference().GetId())
|
parentID := ptr.Val(item.GetParentReference().GetId())
|
||||||
ictx = clues.Add(ictx, "parent_id", parentID)
|
ictx = clues.Add(ictx, "parent_id", parentID)
|
||||||
|
|
||||||
collection, ok := c.CollectionMap[driveID][parentID]
|
collection, found := c.CollectionMap[driveID][parentID]
|
||||||
if !ok {
|
if !found {
|
||||||
return nil, clues.New("item seen before parent folder").WithClues(ictx)
|
return clues.New("item seen before parent folder").WithClues(ictx)
|
||||||
}
|
}
|
||||||
|
|
||||||
// This will only kick in if the file was moved multiple times
|
// Delete the file from previous collection. This will
|
||||||
// within a single delta query. We delete the file from the previous
|
// only kick in if the file was moved multiple times
|
||||||
// collection so that it doesn't appear in two places.
|
// within a single delta query
|
||||||
prevParentContainerID, ok := currPrevPaths[itemID]
|
icID, found := itemCollection[driveID][itemID]
|
||||||
if ok {
|
if found {
|
||||||
prevColl, found := c.CollectionMap[driveID][prevParentContainerID]
|
pcollection, found := c.CollectionMap[driveID][icID]
|
||||||
if !found {
|
if !found {
|
||||||
return nil, clues.New("previous collection not found").
|
return clues.New("previous collection not found").WithClues(ictx)
|
||||||
With("prev_parent_container_id", prevParentContainerID).
|
|
||||||
WithClues(ictx)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if ok := prevColl.Remove(itemID); !ok {
|
removed := pcollection.Remove(itemID)
|
||||||
return nil, clues.New("removing item from prev collection").
|
if !removed {
|
||||||
With("prev_parent_container_id", prevParentContainerID).
|
return clues.New("removing from prev collection").WithClues(ictx)
|
||||||
WithClues(ictx)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
currPrevPaths[itemID] = parentID
|
itemCollection[driveID][itemID] = parentID
|
||||||
|
|
||||||
if collection.Add(item) {
|
if collection.Add(item) {
|
||||||
c.NumItems++
|
c.NumItems++
|
||||||
@ -896,13 +869,11 @@ func (c *Collections) UpdateCollections(
|
|||||||
}
|
}
|
||||||
|
|
||||||
default:
|
default:
|
||||||
el.AddRecoverable(ictx, clues.New("item is neither folder nor file").
|
return clues.New("item type not supported").WithClues(ictx)
|
||||||
WithClues(ictx).
|
|
||||||
Label(fault.LabelForceNoBackupCreation))
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return newPrevPaths, el.Failure()
|
return el.Failure()
|
||||||
}
|
}
|
||||||
|
|
||||||
type dirScopeChecker interface {
|
type dirScopeChecker interface {
|
||||||
|
@@ -8,6 +8,7 @@ import (
	"github.com/alcionai/clues"
	"github.com/google/uuid"
	"github.com/microsoftgraph/msgraph-sdk-go/models"
+	"github.com/microsoftgraph/msgraph-sdk-go/models/odataerrors"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/stretchr/testify/suite"
@@ -16,6 +17,7 @@ import (
	"github.com/alcionai/corso/src/internal/common/prefixmatcher"
	pmMock "github.com/alcionai/corso/src/internal/common/prefixmatcher/mock"
	"github.com/alcionai/corso/src/internal/data"
+	dataMock "github.com/alcionai/corso/src/internal/data/mock"
	"github.com/alcionai/corso/src/internal/m365/collection/drive/metadata"
	"github.com/alcionai/corso/src/internal/m365/graph"
	odConsts "github.com/alcionai/corso/src/internal/m365/service/onedrive/consts"
@@ -135,7 +137,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
	expectedStatePath := getExpectedStatePathGenerator(suite.T(), bh, tenant, testBaseDrivePath)

	tests := []struct {
-		name           string
+		testCase       string
		items          []models.DriveItemable
		inputFolderMap map[string]string
		scope          selectors.OneDriveScope
@@ -145,11 +147,11 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
		expectedContainerCount int
		expectedFileCount      int
		expectedSkippedCount   int
-		expectedPrevPaths      map[string]string
+		expectedMetadataPaths  map[string]string
		expectedExcludes       map[string]struct{}
	}{
		{
name: "Invalid item",
|
testCase: "Invalid item",
|
||||||
items: []models.DriveItemable{
|
items: []models.DriveItemable{
|
||||||
driveRootItem("root"),
|
driveRootItem("root"),
|
||||||
driveItem("item", "item", testBaseDrivePath, "root", false, false, false),
|
driveItem("item", "item", testBaseDrivePath, "root", false, false, false),
|
||||||
@ -161,13 +163,13 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
|||||||
"root": expectedStatePath(data.NotMovedState, ""),
|
"root": expectedStatePath(data.NotMovedState, ""),
|
||||||
},
|
},
|
||||||
expectedContainerCount: 1,
|
expectedContainerCount: 1,
|
||||||
expectedPrevPaths: map[string]string{
|
expectedMetadataPaths: map[string]string{
|
||||||
"root": expectedPath(""),
|
"root": expectedPath(""),
|
||||||
},
|
},
|
||||||
expectedExcludes: map[string]struct{}{},
|
expectedExcludes: map[string]struct{}{},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "Single File",
|
testCase: "Single File",
|
||||||
items: []models.DriveItemable{
|
items: []models.DriveItemable{
|
||||||
driveRootItem("root"),
|
driveRootItem("root"),
|
||||||
driveItem("file", "file", testBaseDrivePath, "root", true, false, false),
|
driveItem("file", "file", testBaseDrivePath, "root", true, false, false),
|
||||||
@ -182,13 +184,13 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
|||||||
expectedFileCount: 1,
|
expectedFileCount: 1,
|
||||||
expectedContainerCount: 1,
|
expectedContainerCount: 1,
|
||||||
// Root folder is skipped since it's always present.
|
// Root folder is skipped since it's always present.
|
||||||
expectedPrevPaths: map[string]string{
|
expectedMetadataPaths: map[string]string{
|
||||||
"root": expectedPath(""),
|
"root": expectedPath(""),
|
||||||
},
|
},
|
||||||
expectedExcludes: getDelList("file"),
|
expectedExcludes: getDelList("file"),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "Single Folder",
|
testCase: "Single Folder",
|
||||||
items: []models.DriveItemable{
|
items: []models.DriveItemable{
|
||||||
driveRootItem("root"),
|
driveRootItem("root"),
|
||||||
driveItem("folder", "folder", testBaseDrivePath, "root", false, true, false),
|
driveItem("folder", "folder", testBaseDrivePath, "root", false, true, false),
|
||||||
@ -200,7 +202,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
|||||||
"root": expectedStatePath(data.NotMovedState, ""),
|
"root": expectedStatePath(data.NotMovedState, ""),
|
||||||
"folder": expectedStatePath(data.NewState, folder),
|
"folder": expectedStatePath(data.NewState, folder),
|
||||||
},
|
},
|
||||||
expectedPrevPaths: map[string]string{
|
expectedMetadataPaths: map[string]string{
|
||||||
"root": expectedPath(""),
|
"root": expectedPath(""),
|
||||||
"folder": expectedPath("/folder"),
|
"folder": expectedPath("/folder"),
|
||||||
},
|
},
|
||||||
@ -209,7 +211,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
|||||||
expectedExcludes: map[string]struct{}{},
|
expectedExcludes: map[string]struct{}{},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "Single Package",
|
testCase: "Single Package",
|
||||||
items: []models.DriveItemable{
|
items: []models.DriveItemable{
|
||||||
driveRootItem("root"),
|
driveRootItem("root"),
|
||||||
driveItem("package", "package", testBaseDrivePath, "root", false, false, true),
|
driveItem("package", "package", testBaseDrivePath, "root", false, false, true),
|
||||||
@ -221,7 +223,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
|||||||
"root": expectedStatePath(data.NotMovedState, ""),
|
"root": expectedStatePath(data.NotMovedState, ""),
|
||||||
"package": expectedStatePath(data.NewState, pkg),
|
"package": expectedStatePath(data.NewState, pkg),
|
||||||
},
|
},
|
||||||
expectedPrevPaths: map[string]string{
|
expectedMetadataPaths: map[string]string{
|
||||||
"root": expectedPath(""),
|
"root": expectedPath(""),
|
||||||
"package": expectedPath("/package"),
|
"package": expectedPath("/package"),
|
||||||
},
|
},
|
||||||
@ -230,7 +232,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
|||||||
expectedExcludes: map[string]struct{}{},
|
expectedExcludes: map[string]struct{}{},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "1 root file, 1 folder, 1 package, 2 files, 3 collections",
|
testCase: "1 root file, 1 folder, 1 package, 2 files, 3 collections",
|
||||||
items: []models.DriveItemable{
|
items: []models.DriveItemable{
|
||||||
driveRootItem("root"),
|
driveRootItem("root"),
|
||||||
driveItem("fileInRoot", "fileInRoot", testBaseDrivePath, "root", true, false, false),
|
driveItem("fileInRoot", "fileInRoot", testBaseDrivePath, "root", true, false, false),
|
||||||
@ -250,7 +252,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
|||||||
expectedItemCount: 5,
|
expectedItemCount: 5,
|
||||||
expectedFileCount: 3,
|
expectedFileCount: 3,
|
||||||
expectedContainerCount: 3,
|
expectedContainerCount: 3,
|
||||||
expectedPrevPaths: map[string]string{
|
expectedMetadataPaths: map[string]string{
|
||||||
"root": expectedPath(""),
|
"root": expectedPath(""),
|
||||||
"folder": expectedPath("/folder"),
|
"folder": expectedPath("/folder"),
|
||||||
"package": expectedPath("/package"),
|
"package": expectedPath("/package"),
|
||||||
@ -258,7 +260,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
|||||||
expectedExcludes: getDelList("fileInRoot", "fileInFolder", "fileInPackage"),
|
expectedExcludes: getDelList("fileInRoot", "fileInFolder", "fileInPackage"),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "contains folder selector",
|
testCase: "contains folder selector",
|
||||||
items: []models.DriveItemable{
|
items: []models.DriveItemable{
|
||||||
driveRootItem("root"),
|
driveRootItem("root"),
|
||||||
driveItem("fileInRoot", "fileInRoot", testBaseDrivePath, "root", true, false, false),
|
driveItem("fileInRoot", "fileInRoot", testBaseDrivePath, "root", true, false, false),
|
||||||
@ -283,7 +285,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
|||||||
expectedContainerCount: 3,
|
expectedContainerCount: 3,
|
||||||
// just "folder" isn't added here because the include check is done on the
|
// just "folder" isn't added here because the include check is done on the
|
||||||
// parent path since we only check later if something is a folder or not.
|
// parent path since we only check later if something is a folder or not.
|
||||||
expectedPrevPaths: map[string]string{
|
expectedMetadataPaths: map[string]string{
|
||||||
"folder": expectedPath(folder),
|
"folder": expectedPath(folder),
|
||||||
"subfolder": expectedPath(folderSub),
|
"subfolder": expectedPath(folderSub),
|
||||||
"folder2": expectedPath(folderSub + folder),
|
"folder2": expectedPath(folderSub + folder),
|
||||||
@ -291,7 +293,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
|||||||
expectedExcludes: getDelList("fileInFolder", "fileInFolder2"),
|
expectedExcludes: getDelList("fileInFolder", "fileInFolder2"),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "prefix subfolder selector",
|
testCase: "prefix subfolder selector",
|
||||||
items: []models.DriveItemable{
|
items: []models.DriveItemable{
|
||||||
driveRootItem("root"),
|
driveRootItem("root"),
|
||||||
driveItem("fileInRoot", "fileInRoot", testBaseDrivePath, "root", true, false, false),
|
driveItem("fileInRoot", "fileInRoot", testBaseDrivePath, "root", true, false, false),
|
||||||
@ -314,14 +316,14 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
|||||||
expectedItemCount: 3,
|
expectedItemCount: 3,
|
||||||
expectedFileCount: 1,
|
expectedFileCount: 1,
|
||||||
expectedContainerCount: 2,
|
expectedContainerCount: 2,
|
||||||
expectedPrevPaths: map[string]string{
|
expectedMetadataPaths: map[string]string{
|
||||||
"subfolder": expectedPath(folderSub),
|
"subfolder": expectedPath(folderSub),
|
||||||
"folder2": expectedPath(folderSub + folder),
|
"folder2": expectedPath(folderSub + folder),
|
||||||
},
|
},
|
||||||
expectedExcludes: getDelList("fileInFolder2"),
|
expectedExcludes: getDelList("fileInFolder2"),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "match subfolder selector",
|
testCase: "match subfolder selector",
|
||||||
items: []models.DriveItemable{
|
items: []models.DriveItemable{
|
||||||
driveRootItem("root"),
|
driveRootItem("root"),
|
||||||
driveItem("fileInRoot", "fileInRoot", testBaseDrivePath, "root", true, false, false),
|
driveItem("fileInRoot", "fileInRoot", testBaseDrivePath, "root", true, false, false),
|
||||||
@ -342,13 +344,13 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
|||||||
expectedFileCount: 1,
|
expectedFileCount: 1,
|
||||||
expectedContainerCount: 1,
|
expectedContainerCount: 1,
|
||||||
// No child folders for subfolder so nothing here.
|
// No child folders for subfolder so nothing here.
|
||||||
expectedPrevPaths: map[string]string{
|
expectedMetadataPaths: map[string]string{
|
||||||
"subfolder": expectedPath(folderSub),
|
"subfolder": expectedPath(folderSub),
|
||||||
},
|
},
|
||||||
expectedExcludes: getDelList("fileInSubfolder"),
|
expectedExcludes: getDelList("fileInSubfolder"),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "not moved folder tree",
|
testCase: "not moved folder tree",
|
||||||
items: []models.DriveItemable{
|
items: []models.DriveItemable{
|
||||||
driveRootItem("root"),
|
driveRootItem("root"),
|
||||||
driveItem("folder", "folder", testBaseDrivePath, "root", false, true, false),
|
driveItem("folder", "folder", testBaseDrivePath, "root", false, true, false),
|
||||||
@ -366,7 +368,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
|||||||
expectedItemCount: 1,
|
expectedItemCount: 1,
|
||||||
expectedFileCount: 0,
|
expectedFileCount: 0,
|
||||||
expectedContainerCount: 2,
|
expectedContainerCount: 2,
|
||||||
expectedPrevPaths: map[string]string{
|
expectedMetadataPaths: map[string]string{
|
||||||
"root": expectedPath(""),
|
"root": expectedPath(""),
|
||||||
"folder": expectedPath(folder),
|
"folder": expectedPath(folder),
|
||||||
"subfolder": expectedPath(folderSub),
|
"subfolder": expectedPath(folderSub),
|
||||||
@ -374,7 +376,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
|||||||
expectedExcludes: map[string]struct{}{},
|
expectedExcludes: map[string]struct{}{},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "moved folder tree",
|
testCase: "moved folder tree",
|
||||||
items: []models.DriveItemable{
|
items: []models.DriveItemable{
|
||||||
driveRootItem("root"),
|
driveRootItem("root"),
|
||||||
driveItem("folder", "folder", testBaseDrivePath, "root", false, true, false),
|
driveItem("folder", "folder", testBaseDrivePath, "root", false, true, false),
|
||||||
@ -392,7 +394,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
|||||||
expectedItemCount: 1,
|
expectedItemCount: 1,
|
||||||
expectedFileCount: 0,
|
expectedFileCount: 0,
|
||||||
expectedContainerCount: 2,
|
expectedContainerCount: 2,
|
||||||
expectedPrevPaths: map[string]string{
|
expectedMetadataPaths: map[string]string{
|
||||||
"root": expectedPath(""),
|
"root": expectedPath(""),
|
||||||
"folder": expectedPath(folder),
|
"folder": expectedPath(folder),
|
||||||
"subfolder": expectedPath(folderSub),
|
"subfolder": expectedPath(folderSub),
|
||||||
@ -400,7 +402,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
|||||||
expectedExcludes: map[string]struct{}{},
|
expectedExcludes: map[string]struct{}{},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "moved folder tree with file no previous",
|
testCase: "moved folder tree with file no previous",
|
||||||
items: []models.DriveItemable{
|
items: []models.DriveItemable{
|
||||||
driveRootItem("root"),
|
driveRootItem("root"),
|
||||||
driveItem("folder", "folder", testBaseDrivePath, "root", false, true, false),
|
driveItem("folder", "folder", testBaseDrivePath, "root", false, true, false),
|
||||||
@ -417,14 +419,14 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
|||||||
expectedItemCount: 2,
|
expectedItemCount: 2,
|
||||||
expectedFileCount: 1,
|
expectedFileCount: 1,
|
||||||
expectedContainerCount: 2,
|
expectedContainerCount: 2,
|
||||||
expectedPrevPaths: map[string]string{
|
expectedMetadataPaths: map[string]string{
|
||||||
"root": expectedPath(""),
|
"root": expectedPath(""),
|
||||||
"folder": expectedPath("/folder2"),
|
"folder": expectedPath("/folder2"),
|
||||||
},
|
},
|
||||||
expectedExcludes: getDelList("file"),
|
expectedExcludes: getDelList("file"),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "moved folder tree with file no previous 1",
|
testCase: "moved folder tree with file no previous 1",
|
||||||
items: []models.DriveItemable{
|
items: []models.DriveItemable{
|
||||||
driveRootItem("root"),
|
driveRootItem("root"),
|
||||||
driveItem("folder", "folder", testBaseDrivePath, "root", false, true, false),
|
driveItem("folder", "folder", testBaseDrivePath, "root", false, true, false),
|
||||||
@ -440,14 +442,14 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
|||||||
expectedItemCount: 2,
|
expectedItemCount: 2,
|
||||||
expectedFileCount: 1,
|
expectedFileCount: 1,
|
||||||
expectedContainerCount: 2,
|
expectedContainerCount: 2,
|
||||||
expectedPrevPaths: map[string]string{
|
expectedMetadataPaths: map[string]string{
|
||||||
"root": expectedPath(""),
|
"root": expectedPath(""),
|
||||||
"folder": expectedPath(folder),
|
"folder": expectedPath(folder),
|
||||||
},
|
},
|
||||||
expectedExcludes: getDelList("file"),
|
expectedExcludes: getDelList("file"),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "moved folder tree and subfolder 1",
|
testCase: "moved folder tree and subfolder 1",
|
||||||
items: []models.DriveItemable{
|
items: []models.DriveItemable{
|
||||||
driveRootItem("root"),
|
driveRootItem("root"),
|
||||||
driveItem("folder", "folder", testBaseDrivePath, "root", false, true, false),
|
driveItem("folder", "folder", testBaseDrivePath, "root", false, true, false),
|
||||||
@ -467,7 +469,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
|||||||
expectedItemCount: 2,
|
expectedItemCount: 2,
|
||||||
expectedFileCount: 0,
|
expectedFileCount: 0,
|
||||||
expectedContainerCount: 3,
|
expectedContainerCount: 3,
|
||||||
expectedPrevPaths: map[string]string{
|
expectedMetadataPaths: map[string]string{
|
||||||
"root": expectedPath(""),
|
"root": expectedPath(""),
|
||||||
"folder": expectedPath(folder),
|
"folder": expectedPath(folder),
|
||||||
"subfolder": expectedPath("/subfolder"),
|
"subfolder": expectedPath("/subfolder"),
|
||||||
@ -475,7 +477,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
|||||||
expectedExcludes: map[string]struct{}{},
|
expectedExcludes: map[string]struct{}{},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "moved folder tree and subfolder 2",
|
testCase: "moved folder tree and subfolder 2",
|
||||||
items: []models.DriveItemable{
|
items: []models.DriveItemable{
|
||||||
driveRootItem("root"),
|
driveRootItem("root"),
|
||||||
driveItem("subfolder", "subfolder", testBaseDrivePath, "root", false, true, false),
|
driveItem("subfolder", "subfolder", testBaseDrivePath, "root", false, true, false),
|
||||||
@ -495,7 +497,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
|||||||
expectedItemCount: 2,
|
expectedItemCount: 2,
|
||||||
expectedFileCount: 0,
|
expectedFileCount: 0,
|
||||||
expectedContainerCount: 3,
|
expectedContainerCount: 3,
|
||||||
expectedPrevPaths: map[string]string{
|
expectedMetadataPaths: map[string]string{
|
||||||
"root": expectedPath(""),
|
"root": expectedPath(""),
|
||||||
"folder": expectedPath(folder),
|
"folder": expectedPath(folder),
|
||||||
"subfolder": expectedPath("/subfolder"),
|
"subfolder": expectedPath("/subfolder"),
|
||||||
@ -503,7 +505,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
|||||||
expectedExcludes: map[string]struct{}{},
|
expectedExcludes: map[string]struct{}{},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "move subfolder when moving parent",
|
testCase: "move subfolder when moving parent",
|
||||||
items: []models.DriveItemable{
|
items: []models.DriveItemable{
|
||||||
driveRootItem("root"),
|
driveRootItem("root"),
|
||||||
driveItem("folder2", "folder2", testBaseDrivePath, "root", false, true, false),
|
driveItem("folder2", "folder2", testBaseDrivePath, "root", false, true, false),
|
||||||
@ -537,7 +539,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
|||||||
expectedItemCount: 5,
|
expectedItemCount: 5,
|
||||||
expectedFileCount: 2,
|
expectedFileCount: 2,
|
||||||
expectedContainerCount: 4,
|
expectedContainerCount: 4,
|
||||||
expectedPrevPaths: map[string]string{
|
expectedMetadataPaths: map[string]string{
|
||||||
"root": expectedPath(""),
|
"root": expectedPath(""),
|
||||||
"folder": expectedPath("/folder"),
|
"folder": expectedPath("/folder"),
|
||||||
"folder2": expectedPath("/folder2"),
|
"folder2": expectedPath("/folder2"),
|
||||||
@ -546,7 +548,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
|||||||
expectedExcludes: getDelList("itemInSubfolder", "itemInFolder2"),
|
expectedExcludes: getDelList("itemInSubfolder", "itemInFolder2"),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "moved folder tree multiple times",
|
testCase: "moved folder tree multiple times",
|
||||||
items: []models.DriveItemable{
|
items: []models.DriveItemable{
|
||||||
driveRootItem("root"),
|
driveRootItem("root"),
|
||||||
driveItem("folder", "folder", testBaseDrivePath, "root", false, true, false),
|
driveItem("folder", "folder", testBaseDrivePath, "root", false, true, false),
|
||||||
@ -566,7 +568,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
|||||||
expectedItemCount: 2,
|
expectedItemCount: 2,
|
||||||
expectedFileCount: 1,
|
expectedFileCount: 1,
|
||||||
expectedContainerCount: 2,
|
expectedContainerCount: 2,
|
||||||
expectedPrevPaths: map[string]string{
|
expectedMetadataPaths: map[string]string{
|
||||||
"root": expectedPath(""),
|
"root": expectedPath(""),
|
||||||
"folder": expectedPath("/folder2"),
|
"folder": expectedPath("/folder2"),
|
||||||
"subfolder": expectedPath("/folder2/subfolder"),
|
"subfolder": expectedPath("/folder2/subfolder"),
|
||||||
@ -574,7 +576,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
|||||||
expectedExcludes: getDelList("file"),
|
expectedExcludes: getDelList("file"),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "deleted folder and package",
|
testCase: "deleted folder and package",
|
||||||
items: []models.DriveItemable{
|
items: []models.DriveItemable{
|
||||||
driveRootItem("root"), // root is always present, but not necessary here
|
driveRootItem("root"), // root is always present, but not necessary here
|
||||||
delItem("folder", testBaseDrivePath, "root", false, true, false),
|
delItem("folder", testBaseDrivePath, "root", false, true, false),
|
||||||
@ -595,13 +597,13 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
|||||||
expectedItemCount: 0,
|
expectedItemCount: 0,
|
||||||
expectedFileCount: 0,
|
expectedFileCount: 0,
|
||||||
expectedContainerCount: 1,
|
expectedContainerCount: 1,
|
||||||
expectedPrevPaths: map[string]string{
|
expectedMetadataPaths: map[string]string{
|
||||||
"root": expectedPath(""),
|
"root": expectedPath(""),
|
||||||
},
|
},
|
||||||
expectedExcludes: map[string]struct{}{},
|
expectedExcludes: map[string]struct{}{},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "delete folder without previous",
|
testCase: "delete folder without previous",
|
||||||
items: []models.DriveItemable{
|
items: []models.DriveItemable{
|
||||||
driveRootItem("root"),
|
driveRootItem("root"),
|
||||||
delItem("folder", testBaseDrivePath, "root", false, true, false),
|
delItem("folder", testBaseDrivePath, "root", false, true, false),
|
||||||
@ -617,13 +619,13 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
|||||||
expectedItemCount: 0,
|
expectedItemCount: 0,
|
||||||
expectedFileCount: 0,
|
expectedFileCount: 0,
|
||||||
expectedContainerCount: 1,
|
expectedContainerCount: 1,
|
||||||
expectedPrevPaths: map[string]string{
|
expectedMetadataPaths: map[string]string{
|
||||||
"root": expectedPath(""),
|
"root": expectedPath(""),
|
||||||
},
|
},
|
||||||
expectedExcludes: map[string]struct{}{},
|
expectedExcludes: map[string]struct{}{},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "delete folder tree move subfolder",
|
testCase: "delete folder tree move subfolder",
|
||||||
items: []models.DriveItemable{
|
items: []models.DriveItemable{
|
||||||
driveRootItem("root"),
|
driveRootItem("root"),
|
||||||
delItem("folder", testBaseDrivePath, "root", false, true, false),
|
delItem("folder", testBaseDrivePath, "root", false, true, false),
|
||||||
@ -644,14 +646,14 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
|||||||
expectedItemCount: 1,
|
expectedItemCount: 1,
|
||||||
expectedFileCount: 0,
|
expectedFileCount: 0,
|
||||||
expectedContainerCount: 2,
|
expectedContainerCount: 2,
|
||||||
expectedPrevPaths: map[string]string{
|
expectedMetadataPaths: map[string]string{
|
||||||
"root": expectedPath(""),
|
"root": expectedPath(""),
|
||||||
"subfolder": expectedPath("/subfolder"),
|
"subfolder": expectedPath("/subfolder"),
|
||||||
},
|
},
|
||||||
expectedExcludes: map[string]struct{}{},
|
expectedExcludes: map[string]struct{}{},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "delete file",
|
testCase: "delete file",
|
||||||
items: []models.DriveItemable{
|
items: []models.DriveItemable{
|
||||||
driveRootItem("root"),
|
driveRootItem("root"),
|
||||||
delItem("item", testBaseDrivePath, "root", true, false, false),
|
delItem("item", testBaseDrivePath, "root", true, false, false),
|
||||||
@ -667,13 +669,13 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
|||||||
expectedItemCount: 1,
|
expectedItemCount: 1,
|
||||||
expectedFileCount: 1,
|
expectedFileCount: 1,
|
||||||
expectedContainerCount: 1,
|
expectedContainerCount: 1,
|
||||||
expectedPrevPaths: map[string]string{
|
expectedMetadataPaths: map[string]string{
|
||||||
"root": expectedPath(""),
|
"root": expectedPath(""),
|
||||||
},
|
},
|
||||||
expectedExcludes: getDelList("item"),
|
expectedExcludes: getDelList("item"),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "item before parent errors",
|
testCase: "item before parent errors",
|
||||||
items: []models.DriveItemable{
|
items: []models.DriveItemable{
|
||||||
driveRootItem("root"),
|
driveRootItem("root"),
|
||||||
driveItem("file", "file", testBaseDrivePath+"/folder", "folder", true, false, false),
|
driveItem("file", "file", testBaseDrivePath+"/folder", "folder", true, false, false),
|
||||||
@ -688,11 +690,13 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
|||||||
expectedItemCount: 0,
|
expectedItemCount: 0,
|
||||||
expectedFileCount: 0,
|
expectedFileCount: 0,
|
||||||
expectedContainerCount: 1,
|
expectedContainerCount: 1,
|
||||||
expectedPrevPaths: nil,
|
expectedMetadataPaths: map[string]string{
|
||||||
expectedExcludes: map[string]struct{}{},
|
"root": expectedPath(""),
|
||||||
|
},
|
||||||
|
expectedExcludes: map[string]struct{}{},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "1 root file, 1 folder, 1 package, 1 good file, 1 malware",
|
testCase: "1 root file, 1 folder, 1 package, 1 good file, 1 malware",
|
||||||
items: []models.DriveItemable{
|
items: []models.DriveItemable{
|
||||||
driveRootItem("root"),
|
driveRootItem("root"),
|
||||||
driveItem("fileInRoot", "fileInRoot", testBaseDrivePath, "root", true, false, false),
|
driveItem("fileInRoot", "fileInRoot", testBaseDrivePath, "root", true, false, false),
|
||||||
@ -713,7 +717,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
|
|||||||
expectedFileCount: 2,
|
expectedFileCount: 2,
|
||||||
expectedContainerCount: 3,
|
expectedContainerCount: 3,
|
||||||
expectedSkippedCount: 1,
|
expectedSkippedCount: 1,
|
||||||
expectedPrevPaths: map[string]string{
|
expectedMetadataPaths: map[string]string{
|
||||||
"root": expectedPath(""),
|
"root": expectedPath(""),
|
||||||
"folder": expectedPath("/folder"),
|
"folder": expectedPath("/folder"),
|
||||||
"package": expectedPath("/package"),
|
"package": expectedPath("/package"),
|
||||||
@@ -722,23 +726,26 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
			},
		},
	}

-	for _, test := range tests {
-		suite.Run(test.name, func() {
+	for _, tt := range tests {
+		suite.Run(tt.testCase, func() {
			t := suite.T()

			ctx, flush := tester.NewContext(t)
			defer flush()

			var (
				excludes        = map[string]struct{}{}
-				currPrevPaths   = map[string]string{}
-				errs            = fault.New(true)
+				outputFolderMap = map[string]string{}
+				itemCollection  = map[string]map[string]string{
+					driveID: {},
+				}
+				errs = fault.New(true)
			)

-			maps.Copy(currPrevPaths, test.inputFolderMap)
+			maps.Copy(outputFolderMap, tt.inputFolderMap)

			c := NewCollections(
-				&itemBackupHandler{api.Drives{}, user, test.scope},
+				&itemBackupHandler{api.Drives{}, user, tt.scope},
				tenant,
				user,
				nil,
@@ -746,24 +753,25 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {

			c.CollectionMap[driveID] = map[string]*Collection{}

-			newPrevPaths, err := c.UpdateCollections(
+			err := c.UpdateCollections(
				ctx,
				driveID,
				"General",
-				test.items,
-				test.inputFolderMap,
-				currPrevPaths,
+				tt.items,
+				tt.inputFolderMap,
+				outputFolderMap,
				excludes,
+				itemCollection,
				false,
				errs)
-			test.expect(t, err, clues.ToCore(err))
-			assert.Equal(t, len(test.expectedCollectionIDs), len(c.CollectionMap[driveID]), "total collections")
-			assert.Equal(t, test.expectedItemCount, c.NumItems, "item count")
-			assert.Equal(t, test.expectedFileCount, c.NumFiles, "file count")
-			assert.Equal(t, test.expectedContainerCount, c.NumContainers, "container count")
-			assert.Equal(t, test.expectedSkippedCount, len(errs.Skipped()), "skipped items")
+			tt.expect(t, err, clues.ToCore(err))
+			assert.Equal(t, len(tt.expectedCollectionIDs), len(c.CollectionMap[driveID]), "total collections")
+			assert.Equal(t, tt.expectedItemCount, c.NumItems, "item count")
+			assert.Equal(t, tt.expectedFileCount, c.NumFiles, "file count")
+			assert.Equal(t, tt.expectedContainerCount, c.NumContainers, "container count")
+			assert.Equal(t, tt.expectedSkippedCount, len(errs.Skipped()), "skipped items")

-			for id, sp := range test.expectedCollectionIDs {
+			for id, sp := range tt.expectedCollectionIDs {
				if !assert.Containsf(t, c.CollectionMap[driveID], id, "missing collection with id %s", id) {
					// Skip collections we don't find so we don't get an NPE.
					continue
@@ -774,8 +782,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
				assert.Equalf(t, sp.prevPath, c.CollectionMap[driveID][id].PreviousPath(), "prev path for collection %s", id)
			}

-			assert.Equal(t, test.expectedPrevPaths, newPrevPaths, "metadata paths")
-			assert.Equal(t, test.expectedExcludes, excludes, "exclude list")
+			assert.Equal(t, tt.expectedMetadataPaths, outputFolderMap, "metadata paths")
+			assert.Equal(t, tt.expectedExcludes, excludes, "exclude list")
		})
	}
}
@@ -977,7 +985,9 @@ func (suite *OneDriveCollectionsUnitSuite) TestDeserializeMetadata() {
		{
			// Bad formats are logged but skip adding entries to the maps and don't
			// return an error.
			name:           "BadFormat",
+			expectedDeltas: map[string]string{},
+			expectedPaths:  map[string]map[string]string{},
			cols: []func() []graph.MetadataCollectionEntry{
				func() []graph.MetadataCollectionEntry {
					return []graph.MetadataCollectionEntry{
@@ -988,7 +998,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestDeserializeMetadata() {
				},
			},
			canUsePreviousBackup: false,
-			errCheck:             assert.Error,
+			errCheck:             assert.NoError,
		},
		{
			// Unexpected files are logged and skipped. They don't cause an error to
@@ -1053,10 +1063,10 @@ func (suite *OneDriveCollectionsUnitSuite) TestDeserializeMetadata() {
					}
				},
			},
-			expectedDeltas:       nil,
-			expectedPaths:        nil,
+			expectedDeltas:       map[string]string{},
+			expectedPaths:        map[string]map[string]string{},
			canUsePreviousBackup: false,
-			errCheck:             assert.Error,
+			errCheck:             assert.NoError,
		},
		{
			name: "DriveAlreadyFound_Deltas",
@@ -1083,10 +1093,10 @@ func (suite *OneDriveCollectionsUnitSuite) TestDeserializeMetadata() {
					}
				},
			},
-			expectedDeltas:       nil,
-			expectedPaths:        nil,
+			expectedDeltas:       map[string]string{},
+			expectedPaths:        map[string]map[string]string{},
			canUsePreviousBackup: false,
-			errCheck:             assert.Error,
+			errCheck:             assert.NoError,
		},
	}
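The flipped expectations above (assert.Error to assert.NoError, nil to empty maps) encode a degrade-gracefully policy: unreadable metadata should trigger a full backup rather than a failed one. A hypothetical sketch of that behavior, not the real deserializeMetadata:

package main

import (
	"encoding/json"
	"fmt"
)

// loadPrevPaths returns an empty, usable state instead of an error when the
// persisted metadata cannot be parsed; the boolean mirrors canUsePreviousBackup.
func loadPrevPaths(raw []byte) (map[string]string, bool) {
	prev := map[string]string{}

	if err := json.Unmarshal(raw, &prev); err != nil {
		return map[string]string{}, false
	}

	return prev, true
}

func main() {
	paths, ok := loadPrevPaths([]byte(`{"root":"/"}`))
	fmt.Println(paths, ok) // map[root:/] true

	paths, ok = loadPrevPaths([]byte(`{not json`))
	fmt.Println(paths, ok) // map[] false; caller falls back to a full enumeration
}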

@@ -1114,7 +1124,9 @@ func (suite *OneDriveCollectionsUnitSuite) TestDeserializeMetadata() {
				func(*support.ControllerOperationStatus) {})
			require.NoError(t, err, clues.ToCore(err))

-			cols = append(cols, data.NoFetchRestoreCollection{Collection: mc})
+			cols = append(cols, dataMock.NewUnversionedRestoreCollection(
+				t,
+				data.NoFetchRestoreCollection{Collection: mc}))
		}

		deltas, paths, canUsePreviousBackup, err := deserializeMetadata(ctx, cols)
@ -1293,8 +1305,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
|
|||||||
driveItem("folder", "folder", driveBasePath1, "root", false, true, false),
|
driveItem("folder", "folder", driveBasePath1, "root", false, true, false),
|
||||||
driveItem("file", "file", driveBasePath1+"/folder", "folder", true, false, false),
|
driveItem("file", "file", driveBasePath1+"/folder", "folder", true, false, false),
|
||||||
},
|
},
|
||||||
DeltaLink: &delta,
|
DeltaLink: &delta,
|
||||||
ResetDelta: true,
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@ -1332,8 +1343,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
|
|||||||
driveItem("file", "file", driveBasePath1+"/folder", "folder", true, false, false),
|
driveItem("file", "file", driveBasePath1+"/folder", "folder", true, false, false),
|
||||||
driveItem("file", "file2", driveBasePath1+"/folder", "folder", true, false, false),
|
driveItem("file", "file2", driveBasePath1+"/folder", "folder", true, false, false),
|
||||||
},
|
},
|
||||||
DeltaLink: &delta,
|
DeltaLink: &delta,
|
||||||
ResetDelta: true,
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@ -1410,8 +1420,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
|
|||||||
driveItem("folder", "folder", driveBasePath1, "root", false, true, false),
|
driveItem("folder", "folder", driveBasePath1, "root", false, true, false),
|
||||||
driveItem("file", "file", driveBasePath1+"/folder", "folder", true, false, false),
|
driveItem("file", "file", driveBasePath1+"/folder", "folder", true, false, false),
|
||||||
},
|
},
|
||||||
DeltaLink: &empty, // probably will never happen with graph
|
DeltaLink: &empty, // probably will never happen with graph
|
||||||
ResetDelta: true,
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@ -1448,8 +1457,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
|
|||||||
driveItem("folder", "folder", driveBasePath1, "root", false, true, false),
|
driveItem("folder", "folder", driveBasePath1, "root", false, true, false),
|
||||||
driveItem("file", "file", driveBasePath1+"/folder", "folder", true, false, false),
|
driveItem("file", "file", driveBasePath1+"/folder", "folder", true, false, false),
|
||||||
},
|
},
|
||||||
NextLink: &next,
|
NextLink: &next,
|
||||||
ResetDelta: true,
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Values: []models.DriveItemable{
|
Values: []models.DriveItemable{
|
||||||
@ -1457,8 +1465,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
|
|||||||
driveItem("folder", "folder", driveBasePath1, "root", false, true, false),
|
driveItem("folder", "folder", driveBasePath1, "root", false, true, false),
|
||||||
driveItem("file2", "file2", driveBasePath1+"/folder", "folder", true, false, false),
|
driveItem("file2", "file2", driveBasePath1+"/folder", "folder", true, false, false),
|
||||||
},
|
},
|
||||||
DeltaLink: &delta,
|
DeltaLink: &delta,
|
||||||
ResetDelta: true,
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@ -1500,8 +1507,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
|
|||||||
driveItem("folder", "folder", driveBasePath1, "root", false, true, false),
|
driveItem("folder", "folder", driveBasePath1, "root", false, true, false),
|
||||||
driveItem("file", "file", driveBasePath1+"/folder", "folder", true, false, false),
|
driveItem("file", "file", driveBasePath1+"/folder", "folder", true, false, false),
|
||||||
},
|
},
|
||||||
DeltaLink: &delta,
|
DeltaLink: &delta,
|
||||||
ResetDelta: true,
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
driveID2: {
|
driveID2: {
|
||||||
@ -1511,8 +1517,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
|
|||||||
driveItem("folder2", "folder", driveBasePath2, "root2", false, true, false),
|
driveItem("folder2", "folder", driveBasePath2, "root2", false, true, false),
|
||||||
driveItem("file2", "file", driveBasePath2+"/folder", "folder2", true, false, false),
|
driveItem("file2", "file", driveBasePath2+"/folder", "folder2", true, false, false),
|
||||||
},
|
},
|
||||||
DeltaLink: &delta2,
|
DeltaLink: &delta2,
|
||||||
ResetDelta: true,
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@ -1564,8 +1569,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
|
|||||||
driveItem("folder", "folder", driveBasePath1, "root", false, true, false),
|
driveItem("folder", "folder", driveBasePath1, "root", false, true, false),
|
||||||
driveItem("file", "file", driveBasePath1+"/folder", "folder", true, false, false),
|
driveItem("file", "file", driveBasePath1+"/folder", "folder", true, false, false),
|
||||||
},
|
},
|
||||||
DeltaLink: &delta,
|
DeltaLink: &delta,
|
||||||
ResetDelta: true,
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
driveID2: {
|
driveID2: {
|
||||||
@ -1575,8 +1579,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
|
|||||||
driveItem("folder", "folder", driveBasePath2, "root", false, true, false),
|
driveItem("folder", "folder", driveBasePath2, "root", false, true, false),
|
||||||
driveItem("file2", "file", driveBasePath2+"/folder", "folder", true, false, false),
|
driveItem("file2", "file", driveBasePath2+"/folder", "folder", true, false, false),
|
||||||
},
|
},
|
||||||
DeltaLink: &delta2,
|
DeltaLink: &delta2,
|
||||||
ResetDelta: true,
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@ -1634,6 +1637,87 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
|
|||||||
expectedFolderPaths: nil,
|
expectedFolderPaths: nil,
|
||||||
expectedDelList: nil,
|
expectedDelList: nil,
|
||||||
},
|
},
|
||||||
+		{
+			name:   "OneDrive_OneItemPage_DeltaError",
+			drives: []models.Driveable{drive1},
+			items: map[string][]apiMock.PagerResult[models.DriveItemable]{
+				driveID1: {
+					{
+						Err: getDeltaError(),
+					},
+					{
+						Values: []models.DriveItemable{
+							driveRootItem("root"),
+							driveItem("file", "file", driveBasePath1, "root", true, false, false),
+						},
+						DeltaLink: &delta,
+					},
+				},
+			},
+			canUsePreviousBackup: true,
+			errCheck:             assert.NoError,
+			expectedCollections: map[string]map[data.CollectionState][]string{
+				rootFolderPath1: {data.NotMovedState: {"file"}},
+			},
+			expectedDeltaURLs: map[string]string{
+				driveID1: delta,
+			},
+			expectedFolderPaths: map[string]map[string]string{
+				driveID1: {
+					"root": rootFolderPath1,
+				},
+			},
+			expectedDelList: pmMock.NewPrefixMap(map[string]map[string]struct{}{}),
+			doNotMergeItems: map[string]bool{
+				rootFolderPath1: true,
+			},
+		},
+		{
+			name:   "OneDrive_TwoItemPage_DeltaError",
+			drives: []models.Driveable{drive1},
+			items: map[string][]apiMock.PagerResult[models.DriveItemable]{
+				driveID1: {
+					{
+						Err: getDeltaError(),
+					},
+					{
+						Values: []models.DriveItemable{
+							driveRootItem("root"),
+							driveItem("file", "file", driveBasePath1, "root", true, false, false),
+						},
+						NextLink: &next,
+					},
+					{
+						Values: []models.DriveItemable{
+							driveRootItem("root"),
+							driveItem("folder", "folder", driveBasePath1, "root", false, true, false),
+							driveItem("file2", "file", driveBasePath1+"/folder", "folder", true, false, false),
+						},
+						DeltaLink: &delta,
+					},
+				},
+			},
+			canUsePreviousBackup: true,
+			errCheck:             assert.NoError,
+			expectedCollections: map[string]map[data.CollectionState][]string{
+				rootFolderPath1:          {data.NotMovedState: {"file"}},
+				expectedPath1("/folder"): {data.NewState: {"folder", "file2"}},
+			},
+			expectedDeltaURLs: map[string]string{
+				driveID1: delta,
+			},
+			expectedFolderPaths: map[string]map[string]string{
+				driveID1: {
+					"root":   rootFolderPath1,
+					"folder": folderPath1,
+				},
+			},
+			expectedDelList: pmMock.NewPrefixMap(map[string]map[string]struct{}{}),
+			doNotMergeItems: map[string]bool{
+				rootFolderPath1: true,
+				folderPath1:     true,
+			},
+		},
{
|
{
|
||||||
name: "OneDrive_TwoItemPage_NoDeltaError",
|
name: "OneDrive_TwoItemPage_NoDeltaError",
|
||||||
drives: []models.Driveable{drive1},
|
drives: []models.Driveable{drive1},
|
||||||
@ -1686,14 +1770,16 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
|
|||||||
drives: []models.Driveable{drive1},
|
drives: []models.Driveable{drive1},
|
||||||
items: map[string][]apiMock.PagerResult[models.DriveItemable]{
|
items: map[string][]apiMock.PagerResult[models.DriveItemable]{
|
||||||
driveID1: {
|
driveID1: {
|
||||||
|
{
|
||||||
|
Err: getDeltaError(),
|
||||||
|
},
|
||||||
{
|
{
|
||||||
Values: []models.DriveItemable{
|
Values: []models.DriveItemable{
|
||||||
driveRootItem("root"),
|
driveRootItem("root"),
|
||||||
driveItem("folder2", "folder2", driveBasePath1, "root", false, true, false),
|
driveItem("folder2", "folder2", driveBasePath1, "root", false, true, false),
|
||||||
driveItem("file", "file", driveBasePath1+"/folder2", "folder2", true, false, false),
|
driveItem("file", "file", driveBasePath1+"/folder2", "folder2", true, false, false),
|
||||||
},
|
},
|
||||||
DeltaLink: &delta,
|
DeltaLink: &delta,
|
||||||
ResetDelta: true,
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@ -1731,14 +1817,16 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
|
|||||||
drives: []models.Driveable{drive1},
|
drives: []models.Driveable{drive1},
|
||||||
items: map[string][]apiMock.PagerResult[models.DriveItemable]{
|
items: map[string][]apiMock.PagerResult[models.DriveItemable]{
|
||||||
driveID1: {
|
driveID1: {
|
||||||
|
{
|
||||||
|
Err: getDeltaError(),
|
||||||
|
},
|
||||||
{
|
{
|
||||||
Values: []models.DriveItemable{
|
Values: []models.DriveItemable{
|
||||||
driveRootItem("root"),
|
driveRootItem("root"),
|
||||||
driveItem("folder2", "folder", driveBasePath1, "root", false, true, false),
|
driveItem("folder2", "folder", driveBasePath1, "root", false, true, false),
|
||||||
driveItem("file", "file", driveBasePath1+"/folder", "folder2", true, false, false),
|
driveItem("file", "file", driveBasePath1+"/folder", "folder2", true, false, false),
|
||||||
},
|
},
|
||||||
DeltaLink: &delta,
|
DeltaLink: &delta,
|
||||||
ResetDelta: true,
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@ -1795,8 +1883,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
|
|||||||
driveItem("file2", "file2", driveBasePath1+"/folder", "folder", true, false, false),
|
driveItem("file2", "file2", driveBasePath1+"/folder", "folder", true, false, false),
|
||||||
malwareItem("malware2", "malware2", driveBasePath1+"/folder", "folder", true, false, false),
|
malwareItem("malware2", "malware2", driveBasePath1+"/folder", "folder", true, false, false),
|
||||||
},
|
},
|
||||||
DeltaLink: &delta,
|
DeltaLink: &delta,
|
||||||
ResetDelta: true,
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@ -1826,10 +1913,13 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
|
|||||||
expectedSkippedCount: 2,
|
expectedSkippedCount: 2,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "One Drive Deleted Folder In New Results",
|
name: "One Drive Delta Error Deleted Folder In New Results",
|
||||||
drives: []models.Driveable{drive1},
|
drives: []models.Driveable{drive1},
|
||||||
items: map[string][]apiMock.PagerResult[models.DriveItemable]{
|
items: map[string][]apiMock.PagerResult[models.DriveItemable]{
|
||||||
driveID1: {
|
driveID1: {
|
||||||
|
{
|
||||||
|
Err: getDeltaError(),
|
||||||
|
},
|
||||||
{
|
{
|
||||||
Values: []models.DriveItemable{
|
Values: []models.DriveItemable{
|
||||||
driveRootItem("root"),
|
driveRootItem("root"),
|
||||||
@ -1846,8 +1936,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
|
|||||||
delItem("folder2", driveBasePath1, "root", false, true, false),
|
delItem("folder2", driveBasePath1, "root", false, true, false),
|
||||||
delItem("file2", driveBasePath1, "root", true, false, false),
|
delItem("file2", driveBasePath1, "root", true, false, false),
|
||||||
},
|
},
|
||||||
DeltaLink: &delta2,
|
DeltaLink: &delta2,
|
||||||
ResetDelta: true,
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@ -1882,17 +1971,19 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "One Drive Random Folder Delete",
|
name: "One Drive Delta Error Random Folder Delete",
|
||||||
drives: []models.Driveable{drive1},
|
drives: []models.Driveable{drive1},
|
||||||
items: map[string][]apiMock.PagerResult[models.DriveItemable]{
|
items: map[string][]apiMock.PagerResult[models.DriveItemable]{
|
||||||
driveID1: {
|
driveID1: {
|
||||||
|
{
|
||||||
|
Err: getDeltaError(),
|
||||||
|
},
|
||||||
{
|
{
|
||||||
Values: []models.DriveItemable{
|
Values: []models.DriveItemable{
|
||||||
driveRootItem("root"),
|
driveRootItem("root"),
|
||||||
delItem("folder", driveBasePath1, "root", false, true, false),
|
delItem("folder", driveBasePath1, "root", false, true, false),
|
||||||
},
|
},
|
||||||
DeltaLink: &delta,
|
DeltaLink: &delta,
|
||||||
ResetDelta: true,
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@ -1923,17 +2014,19 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "One Drive Random Item Delete",
|
name: "One Drive Delta Error Random Item Delete",
|
||||||
drives: []models.Driveable{drive1},
|
drives: []models.Driveable{drive1},
|
||||||
items: map[string][]apiMock.PagerResult[models.DriveItemable]{
|
items: map[string][]apiMock.PagerResult[models.DriveItemable]{
|
||||||
driveID1: {
|
driveID1: {
|
||||||
|
{
|
||||||
|
Err: getDeltaError(),
|
||||||
|
},
|
||||||
{
|
{
|
||||||
Values: []models.DriveItemable{
|
Values: []models.DriveItemable{
|
||||||
driveRootItem("root"),
|
driveRootItem("root"),
|
||||||
delItem("file", driveBasePath1, "root", true, false, false),
|
delItem("file", driveBasePath1, "root", true, false, false),
|
||||||
},
|
},
|
||||||
DeltaLink: &delta,
|
DeltaLink: &delta,
|
||||||
ResetDelta: true,
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@ -1979,8 +2072,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
|
|||||||
delItem("folder", driveBasePath1, "root", false, true, false),
|
delItem("folder", driveBasePath1, "root", false, true, false),
|
||||||
delItem("file", driveBasePath1, "root", true, false, false),
|
delItem("file", driveBasePath1, "root", true, false, false),
|
||||||
},
|
},
|
||||||
DeltaLink: &delta2,
|
DeltaLink: &delta2,
|
||||||
ResetDelta: true,
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@ -2023,8 +2115,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
|
|||||||
driveRootItem("root"),
|
driveRootItem("root"),
|
||||||
delItem("file", driveBasePath1, "root", true, false, false),
|
delItem("file", driveBasePath1, "root", true, false, false),
|
||||||
},
|
},
|
||||||
DeltaLink: &delta,
|
DeltaLink: &delta,
|
||||||
ResetDelta: true,
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@ -2062,8 +2153,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
|
|||||||
driveRootItem("root"),
|
driveRootItem("root"),
|
||||||
delItem("folder", driveBasePath1, "root", false, true, false),
|
delItem("folder", driveBasePath1, "root", false, true, false),
|
||||||
},
|
},
|
||||||
DeltaLink: &delta,
|
DeltaLink: &delta,
|
||||||
ResetDelta: true,
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@ -2098,8 +2188,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
|
|||||||
driveRootItem("root"),
|
driveRootItem("root"),
|
||||||
delItem("file", driveBasePath1, "root", true, false, false),
|
delItem("file", driveBasePath1, "root", true, false, false),
|
||||||
},
|
},
|
||||||
DeltaLink: &delta,
|
DeltaLink: &delta,
|
||||||
ResetDelta: true,
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@ -2181,7 +2270,6 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
|
|||||||
mbh := mock.DefaultOneDriveBH("a-user")
|
mbh := mock.DefaultOneDriveBH("a-user")
|
||||||
mbh.DrivePagerV = mockDrivePager
|
mbh.DrivePagerV = mockDrivePager
|
||||||
mbh.ItemPagerV = itemPagers
|
mbh.ItemPagerV = itemPagers
|
||||||
mbh.DriveItemEnumeration = mock.PagerResultToEDID(test.items)
|
|
||||||
|
|
||||||
c := NewCollections(
|
c := NewCollections(
|
||||||
mbh,
|
mbh,
|
||||||
@ -2211,7 +2299,9 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
|
|||||||
func(*support.ControllerOperationStatus) {})
|
func(*support.ControllerOperationStatus) {})
|
||||||
assert.NoError(t, err, "creating metadata collection", clues.ToCore(err))
|
assert.NoError(t, err, "creating metadata collection", clues.ToCore(err))
|
||||||
|
|
||||||
prevMetadata := []data.RestoreCollection{data.NoFetchRestoreCollection{Collection: mc}}
|
prevMetadata := []data.RestoreCollection{
|
||||||
|
dataMock.NewUnversionedRestoreCollection(t, data.NoFetchRestoreCollection{Collection: mc}),
|
||||||
|
}
|
||||||
errs := fault.New(true)
|
errs := fault.New(true)
|
||||||
|
|
||||||
delList := prefixmatcher.NewStringSetBuilder()
|
delList := prefixmatcher.NewStringSetBuilder()
|
||||||
@ -2238,7 +2328,9 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
|
|||||||
deltas, paths, _, err := deserializeMetadata(
|
deltas, paths, _, err := deserializeMetadata(
|
||||||
ctx,
|
ctx,
|
||||||
[]data.RestoreCollection{
|
[]data.RestoreCollection{
|
||||||
data.NoFetchRestoreCollection{Collection: baseCol},
|
dataMock.NewUnversionedRestoreCollection(
|
||||||
|
t,
|
||||||
|
data.NoFetchRestoreCollection{Collection: baseCol}),
|
||||||
})
|
})
|
||||||
if !assert.NoError(t, err, "deserializing metadata", clues.ToCore(err)) {
|
if !assert.NoError(t, err, "deserializing metadata", clues.ToCore(err)) {
|
||||||
continue
|
continue
|
||||||
@ -2408,6 +2500,121 @@ func delItem(
|
|||||||
return item
|
return item
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func getDeltaError() error {
|
||||||
|
syncStateNotFound := "SyncStateNotFound"
|
||||||
|
me := odataerrors.NewMainError()
|
||||||
|
me.SetCode(&syncStateNotFound)
|
||||||
|
|
||||||
|
deltaError := odataerrors.NewODataError()
|
||||||
|
deltaError.SetErrorEscaped(me)
|
||||||
|
|
||||||
|
return deltaError
|
||||||
|
}
|
||||||
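
The helper above fabricates the Graph OData error that the tests use to simulate an expired delta token. As a hedged sketch (assuming it sits alongside the drive package, and that graph.IsErrInvalidDelta keys off the SyncStateNotFound code, which is what the enumeration loop in collectItems relies on), the same error can be built and detected like this:

package drive

import (
	"fmt"

	"github.com/microsoftgraph/msgraph-sdk-go/models/odataerrors"

	"github.com/alcionai/corso/src/internal/m365/graph"
)

// buildSyncStateNotFoundErr mirrors getDeltaError above: an ODataError whose
// main error code is SyncStateNotFound, which Graph returns for stale delta links.
func buildSyncStateNotFoundErr() error {
	code := "SyncStateNotFound"

	me := odataerrors.NewMainError()
	me.SetCode(&code)

	oerr := odataerrors.NewODataError()
	oerr.SetErrorEscaped(me)

	return oerr
}

// exampleInvalidDeltaCheck shows the detection side: when the pager returns
// this error, the enumeration drops the previous delta link and starts over.
func exampleInvalidDeltaCheck() {
	if graph.IsErrInvalidDelta(buildSyncStateNotFoundErr()) {
		fmt.Println("previous delta link rejected; falling back to a full enumeration")
	}
}
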
|
|
||||||
|
func (suite *OneDriveCollectionsUnitSuite) TestCollectItems() {
|
||||||
|
next := "next"
|
||||||
|
delta := "delta"
|
||||||
|
prevDelta := "prev-delta"
|
||||||
|
|
||||||
|
table := []struct {
|
||||||
|
name string
|
||||||
|
items []apiMock.PagerResult[models.DriveItemable]
|
||||||
|
deltaURL string
|
||||||
|
prevDeltaSuccess bool
|
||||||
|
prevDelta string
|
||||||
|
err error
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "delta on first run",
|
||||||
|
deltaURL: delta,
|
||||||
|
items: []apiMock.PagerResult[models.DriveItemable]{
|
||||||
|
{DeltaLink: &delta},
|
||||||
|
},
|
||||||
|
prevDeltaSuccess: true,
|
||||||
|
prevDelta: prevDelta,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "empty prev delta",
|
||||||
|
deltaURL: delta,
|
||||||
|
items: []apiMock.PagerResult[models.DriveItemable]{
|
||||||
|
{DeltaLink: &delta},
|
||||||
|
},
|
||||||
|
prevDeltaSuccess: false,
|
||||||
|
prevDelta: "",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "next then delta",
|
||||||
|
deltaURL: delta,
|
||||||
|
items: []apiMock.PagerResult[models.DriveItemable]{
|
||||||
|
{NextLink: &next},
|
||||||
|
{DeltaLink: &delta},
|
||||||
|
},
|
||||||
|
prevDeltaSuccess: true,
|
||||||
|
prevDelta: prevDelta,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "invalid prev delta",
|
||||||
|
deltaURL: delta,
|
||||||
|
items: []apiMock.PagerResult[models.DriveItemable]{
|
||||||
|
{Err: getDeltaError()},
|
||||||
|
{DeltaLink: &delta}, // works on retry
|
||||||
|
},
|
||||||
|
prevDelta: prevDelta,
|
||||||
|
prevDeltaSuccess: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "fail a normal delta query",
|
||||||
|
items: []apiMock.PagerResult[models.DriveItemable]{
|
||||||
|
{NextLink: &next},
|
||||||
|
{Err: assert.AnError},
|
||||||
|
},
|
||||||
|
prevDelta: prevDelta,
|
||||||
|
prevDeltaSuccess: true,
|
||||||
|
err: assert.AnError,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, test := range table {
|
||||||
|
suite.Run(test.name, func() {
|
||||||
|
t := suite.T()
|
||||||
|
|
||||||
|
ctx, flush := tester.NewContext(t)
|
||||||
|
defer flush()
|
||||||
|
|
||||||
|
itemPager := &apiMock.DeltaPager[models.DriveItemable]{
|
||||||
|
ToReturn: test.items,
|
||||||
|
}
|
||||||
|
|
||||||
|
collectorFunc := func(
|
||||||
|
ctx context.Context,
|
||||||
|
driveID, driveName string,
|
||||||
|
driveItems []models.DriveItemable,
|
||||||
|
oldPaths map[string]string,
|
||||||
|
newPaths map[string]string,
|
||||||
|
excluded map[string]struct{},
|
||||||
|
itemCollection map[string]map[string]string,
|
||||||
|
doNotMergeItems bool,
|
||||||
|
errs *fault.Bus,
|
||||||
|
) error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
delta, _, _, err := collectItems(
|
||||||
|
ctx,
|
||||||
|
itemPager,
|
||||||
|
"",
|
||||||
|
"General",
|
||||||
|
collectorFunc,
|
||||||
|
map[string]string{},
|
||||||
|
test.prevDelta,
|
||||||
|
fault.New(true))
|
||||||
|
|
||||||
|
require.ErrorIs(t, err, test.err, "delta fetch err", clues.ToCore(err))
|
||||||
|
require.Equal(t, test.deltaURL, delta.URL, "delta url")
|
||||||
|
require.Equal(t, !test.prevDeltaSuccess, delta.Reset, "delta reset")
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func (suite *OneDriveCollectionsUnitSuite) TestAddURLCacheToDriveCollections() {
|
func (suite *OneDriveCollectionsUnitSuite) TestAddURLCacheToDriveCollections() {
|
||||||
driveID := "test-drive"
|
driveID := "test-drive"
|
||||||
collCount := 3
|
collCount := 3
|
||||||
|
|||||||
@ -36,7 +36,6 @@ type BackupHandler interface {
|
|||||||
GetItemPermissioner
|
GetItemPermissioner
|
||||||
GetItemer
|
GetItemer
|
||||||
NewDrivePagerer
|
NewDrivePagerer
|
||||||
EnumerateDriveItemsDeltaer
|
|
||||||
|
|
||||||
// PathPrefix constructs the service and category specific path prefix for
|
// PathPrefix constructs the service and category specific path prefix for
|
||||||
// the given values.
|
// the given values.
|
||||||
@ -51,7 +50,7 @@ type BackupHandler interface {
|
|||||||
|
|
||||||
// ServiceCat returns the service and category used by this implementation.
|
// ServiceCat returns the service and category used by this implementation.
|
||||||
ServiceCat() (path.ServiceType, path.CategoryType)
|
ServiceCat() (path.ServiceType, path.CategoryType)
|
||||||
|
NewItemPager(driveID, link string, fields []string) api.DeltaPager[models.DriveItemable]
|
||||||
// FormatDisplayPath creates a human-readable string to represent the
|
// FormatDisplayPath creates a human-readable string to represent the
|
||||||
// provided path.
|
// provided path.
|
||||||
FormatDisplayPath(driveName string, parentPath *path.Builder) string
|
FormatDisplayPath(driveName string, parentPath *path.Builder) string
|
||||||
@ -80,17 +79,6 @@ type GetItemer interface {
|
|||||||
) (models.DriveItemable, error)
|
) (models.DriveItemable, error)
|
||||||
}
|
}
|
||||||
|
|
||||||
type EnumerateDriveItemsDeltaer interface {
|
|
||||||
EnumerateDriveItemsDelta(
|
|
||||||
ctx context.Context,
|
|
||||||
driveID, prevDeltaLink string,
|
|
||||||
) (
|
|
||||||
[]models.DriveItemable,
|
|
||||||
api.DeltaUpdate,
|
|
||||||
error,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ---------------------------------------------------------------------------
|
// ---------------------------------------------------------------------------
|
||||||
// restore
|
// restore
|
||||||
// ---------------------------------------------------------------------------
|
// ---------------------------------------------------------------------------
|
||||||
|
|||||||
142
src/internal/m365/collection/drive/item_collector.go
Normal file
@ -0,0 +1,142 @@
|
|||||||
|
package drive
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
|
"github.com/microsoftgraph/msgraph-sdk-go/models"
|
||||||
|
"golang.org/x/exp/maps"
|
||||||
|
|
||||||
|
"github.com/alcionai/corso/src/internal/m365/graph"
|
||||||
|
"github.com/alcionai/corso/src/pkg/fault"
|
||||||
|
"github.com/alcionai/corso/src/pkg/logger"
|
||||||
|
"github.com/alcionai/corso/src/pkg/services/m365/api"
|
||||||
|
)
|
||||||
|
|
||||||
|
// DeltaUpdate holds the results of the current delta token query. It normally
|
||||||
|
// gets produced when aggregating the addition and removal of items in
|
||||||
|
// a delta-queryable folder.
|
||||||
|
// FIXME: This is the same as exchange.api.DeltaUpdate
|
||||||
|
type DeltaUpdate struct {
|
||||||
|
// the deltaLink itself
|
||||||
|
URL string
|
||||||
|
// true if the old delta was marked as invalid
|
||||||
|
Reset bool
|
||||||
|
}
|
||||||
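
A hedged, hypothetical caller-side sketch of how the two fields are meant to be read (the function name is illustrative, not part of this change): the URL is persisted for the next incremental pass, and Reset signals that previously cached state should be discarded rather than merged.

// applyDeltaUpdate is a hypothetical example of consuming a DeltaUpdate.
func applyDeltaUpdate(du DeltaUpdate, cachedPaths map[string]string) (string, map[string]string) {
	if du.Reset {
		// The old delta token was rejected (e.g. SyncStateNotFound), so the
		// enumeration restarted from scratch and old path state is stale.
		cachedPaths = map[string]string{}
	}

	// du.URL is the delta link to store for the next incremental query.
	return du.URL, cachedPaths
}
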
|
|
||||||
|
// itemCollector functions collect the items found in a drive
|
||||||
|
type itemCollector func(
|
||||||
|
ctx context.Context,
|
||||||
|
driveID, driveName string,
|
||||||
|
driveItems []models.DriveItemable,
|
||||||
|
oldPaths map[string]string,
|
||||||
|
newPaths map[string]string,
|
||||||
|
excluded map[string]struct{},
|
||||||
|
itemCollections map[string]map[string]string,
|
||||||
|
validPrevDelta bool,
|
||||||
|
errs *fault.Bus,
|
||||||
|
) error
|
||||||
|
|
||||||
|
// collectItems will enumerate all items in the specified drive and hand them to the
|
||||||
|
// provided `collector` method
|
||||||
|
func collectItems(
|
||||||
|
ctx context.Context,
|
||||||
|
pager api.DeltaPager[models.DriveItemable],
|
||||||
|
driveID, driveName string,
|
||||||
|
collector itemCollector,
|
||||||
|
oldPaths map[string]string,
|
||||||
|
prevDelta string,
|
||||||
|
errs *fault.Bus,
|
||||||
|
) (
|
||||||
|
DeltaUpdate,
|
||||||
|
map[string]string, // newPaths
|
||||||
|
map[string]struct{}, // excluded
|
||||||
|
error,
|
||||||
|
) {
|
||||||
|
var (
|
||||||
|
newDeltaURL = ""
|
||||||
|
newPaths = map[string]string{}
|
||||||
|
excluded = map[string]struct{}{}
|
||||||
|
invalidPrevDelta = len(prevDelta) == 0
|
||||||
|
|
||||||
|
// itemCollection is used to identify which collection a
|
||||||
|
// file belongs to. This is useful to delete a file from the
|
||||||
|
// collection it was previously in, in case it was moved to a
|
||||||
|
// different collection within the same delta query
|
||||||
|
// drive ID -> item ID -> item ID
|
||||||
|
itemCollection = map[string]map[string]string{
|
||||||
|
driveID: {},
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
if !invalidPrevDelta {
|
||||||
|
maps.Copy(newPaths, oldPaths)
|
||||||
|
pager.SetNextLink(prevDelta)
|
||||||
|
}
|
||||||
|
|
||||||
|
for {
|
||||||
|
// assume delta urls here, which allows single-token consumption
|
||||||
|
page, err := pager.GetPage(graph.ConsumeNTokens(ctx, graph.SingleGetOrDeltaLC))
|
||||||
|
|
||||||
|
if graph.IsErrInvalidDelta(err) {
|
||||||
|
logger.Ctx(ctx).Infow("Invalid previous delta link", "link", prevDelta)
|
||||||
|
|
||||||
|
invalidPrevDelta = true
|
||||||
|
newPaths = map[string]string{}
|
||||||
|
|
||||||
|
pager.Reset(ctx)
|
||||||
|
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return DeltaUpdate{}, nil, nil, graph.Wrap(ctx, err, "getting page")
|
||||||
|
}
|
||||||
|
|
||||||
|
vals := page.GetValue()
|
||||||
|
|
||||||
|
err = collector(
|
||||||
|
ctx,
|
||||||
|
driveID,
|
||||||
|
driveName,
|
||||||
|
vals,
|
||||||
|
oldPaths,
|
||||||
|
newPaths,
|
||||||
|
excluded,
|
||||||
|
itemCollection,
|
||||||
|
invalidPrevDelta,
|
||||||
|
errs)
|
||||||
|
if err != nil {
|
||||||
|
return DeltaUpdate{}, nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
nextLink, deltaLink := api.NextAndDeltaLink(page)
|
||||||
|
|
||||||
|
if len(deltaLink) > 0 {
|
||||||
|
newDeltaURL = deltaLink
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if there are more items
|
||||||
|
if len(nextLink) == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.Ctx(ctx).Debugw("Found nextLink", "link", nextLink)
|
||||||
|
pager.SetNextLink(nextLink)
|
||||||
|
}
|
||||||
|
|
||||||
|
return DeltaUpdate{URL: newDeltaURL, Reset: invalidPrevDelta}, newPaths, excluded, nil
|
||||||
|
}
|
||||||
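
For orientation, a minimal usage sketch of collectItems, wired up the same way the unit tests do it: a mock delta pager that returns one page ending in a delta link, and a collector that only counts items. The counter and literal values here are hypothetical; only the signatures come from this change.

package drive

import (
	"context"
	"fmt"

	"github.com/microsoftgraph/msgraph-sdk-go/models"

	"github.com/alcionai/corso/src/pkg/fault"
	apiMock "github.com/alcionai/corso/src/pkg/services/m365/api/mock"
)

// exampleCollect enumerates a drive through collectItems using a mock pager.
func exampleCollect(ctx context.Context) error {
	deltaLink := "delta-link"

	pager := &apiMock.DeltaPager[models.DriveItemable]{
		ToReturn: []apiMock.PagerResult[models.DriveItemable]{
			{DeltaLink: &deltaLink}, // a single, final page
		},
	}

	seen := 0

	// counter satisfies the itemCollector signature and just tallies items.
	counter := func(
		_ context.Context,
		_, _ string,
		items []models.DriveItemable,
		_ map[string]string,
		_ map[string]string,
		_ map[string]struct{},
		_ map[string]map[string]string,
		_ bool,
		_ *fault.Bus,
	) error {
		seen += len(items)
		return nil
	}

	du, newPaths, excluded, err := collectItems(
		ctx,
		pager,
		"drive-id",
		"drive-name",
		counter,
		map[string]string{}, // no previously known folder paths
		"",                  // empty prevDelta forces a full enumeration
		fault.New(true))
	if err != nil {
		return err
	}

	fmt.Println(seen, du.URL, du.Reset, len(newPaths), len(excluded))

	return nil
}
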
|
|
||||||
|
// newItem initializes a `models.DriveItemable` that can be used as input to `createItem`
|
||||||
|
func newItem(name string, folder bool) *models.DriveItem {
|
||||||
|
itemToCreate := models.NewDriveItem()
|
||||||
|
itemToCreate.SetName(&name)
|
||||||
|
|
||||||
|
if folder {
|
||||||
|
itemToCreate.SetFolder(models.NewFolder())
|
||||||
|
} else {
|
||||||
|
itemToCreate.SetFile(models.NewFile())
|
||||||
|
}
|
||||||
|
|
||||||
|
return itemToCreate
|
||||||
|
}
|
||||||
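
A short hypothetical sketch of how the restore code pairs newItem with the collision-key helper used elsewhere in this change; the wrapper name is illustrative only, and the models and api imports are assumed from the surrounding file.

// buildRestorePayload builds the item payload handed to PostItemInContainer
// and derives the key used for collision detection during restore.
func buildRestorePayload(name string, isFolder bool) (models.DriveItemable, string) {
	item := newItem(name, isFolder)

	return item, api.DriveItemCollisionKey(item)
}
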
@ -87,6 +87,13 @@ func (h itemBackupHandler) NewDrivePager(
|
|||||||
return h.ac.NewUserDrivePager(resourceOwner, fields)
|
return h.ac.NewUserDrivePager(resourceOwner, fields)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (h itemBackupHandler) NewItemPager(
|
||||||
|
driveID, link string,
|
||||||
|
fields []string,
|
||||||
|
) api.DeltaPager[models.DriveItemable] {
|
||||||
|
return h.ac.NewDriveItemDeltaPager(driveID, link, fields)
|
||||||
|
}
|
||||||
|
|
||||||
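
The addition above replaces the handler-level EnumerateDriveItemsDelta method removed further down: callers now ask the handler for a delta pager and drive the enumeration themselves. A hedged sketch of that wiring (the function and its use of api.DriveItemSelectDefault are illustrative, not taken from this diff):

// enumerateDriveSketch is hypothetical glue code showing the new call chain:
// handler -> delta pager -> collectItems.
func enumerateDriveSketch(
	ctx context.Context,
	h itemBackupHandler,
	driveID, driveName, prevDelta string,
	collector itemCollector,
	errs *fault.Bus,
) (DeltaUpdate, error) {
	pager := h.NewItemPager(driveID, "", api.DriveItemSelectDefault())

	du, _, _, err := collectItems(
		ctx,
		pager,
		driveID,
		driveName,
		collector,
		map[string]string{},
		prevDelta,
		errs)

	return du, err
}
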
func (h itemBackupHandler) AugmentItemInfo(
|
func (h itemBackupHandler) AugmentItemInfo(
|
||||||
dii details.ItemInfo,
|
dii details.ItemInfo,
|
||||||
item models.DriveItemable,
|
item models.DriveItemable,
|
||||||
@ -132,13 +139,6 @@ func (h itemBackupHandler) IncludesDir(dir string) bool {
|
|||||||
return h.scope.Matches(selectors.OneDriveFolder, dir)
|
return h.scope.Matches(selectors.OneDriveFolder, dir)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h itemBackupHandler) EnumerateDriveItemsDelta(
|
|
||||||
ctx context.Context,
|
|
||||||
driveID, prevDeltaLink string,
|
|
||||||
) ([]models.DriveItemable, api.DeltaUpdate, error) {
|
|
||||||
return h.ac.EnumerateDriveItemsDelta(ctx, driveID, prevDeltaLink)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ---------------------------------------------------------------------------
|
// ---------------------------------------------------------------------------
|
||||||
// Restore
|
// Restore
|
||||||
// ---------------------------------------------------------------------------
|
// ---------------------------------------------------------------------------
|
||||||
|
|||||||
@ -20,6 +20,8 @@ import (
|
|||||||
"github.com/alcionai/corso/src/internal/tester/tconfig"
|
"github.com/alcionai/corso/src/internal/tester/tconfig"
|
||||||
"github.com/alcionai/corso/src/pkg/control"
|
"github.com/alcionai/corso/src/pkg/control"
|
||||||
"github.com/alcionai/corso/src/pkg/control/testdata"
|
"github.com/alcionai/corso/src/pkg/control/testdata"
|
||||||
|
"github.com/alcionai/corso/src/pkg/fault"
|
||||||
|
"github.com/alcionai/corso/src/pkg/selectors"
|
||||||
"github.com/alcionai/corso/src/pkg/services/m365/api"
|
"github.com/alcionai/corso/src/pkg/services/m365/api"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -58,6 +60,83 @@ func (suite *ItemIntegrationSuite) SetupSuite() {
|
|||||||
suite.userDriveID = ptr.Val(odDrives[0].GetId())
|
suite.userDriveID = ptr.Val(odDrives[0].GetId())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TestItemReader is an integration test that makes a few assumptions
|
||||||
|
// about the test environment
|
||||||
|
// 1) It assumes the test user has a drive
|
||||||
|
// 2) It assumes the drive has a file it can use to test `driveItemReader`
|
||||||
|
// The test checks these below
|
||||||
|
func (suite *ItemIntegrationSuite) TestItemReader_oneDrive() {
|
||||||
|
t := suite.T()
|
||||||
|
|
||||||
|
ctx, flush := tester.NewContext(t)
|
||||||
|
defer flush()
|
||||||
|
|
||||||
|
var driveItem models.DriveItemable
|
||||||
|
// This item collector tries to find "a" drive item that is a non-empty
|
||||||
|
// file to test the reader function
|
||||||
|
itemCollector := func(
|
||||||
|
_ context.Context,
|
||||||
|
_, _ string,
|
||||||
|
items []models.DriveItemable,
|
||||||
|
_ map[string]string,
|
||||||
|
_ map[string]string,
|
||||||
|
_ map[string]struct{},
|
||||||
|
_ map[string]map[string]string,
|
||||||
|
_ bool,
|
||||||
|
_ *fault.Bus,
|
||||||
|
) error {
|
||||||
|
if driveItem != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, item := range items {
|
||||||
|
if item.GetFile() != nil && ptr.Val(item.GetSize()) > 0 {
|
||||||
|
driveItem = item
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
ip := suite.service.ac.
|
||||||
|
Drives().
|
||||||
|
NewDriveItemDeltaPager(suite.userDriveID, "", api.DriveItemSelectDefault())
|
||||||
|
|
||||||
|
_, _, _, err := collectItems(
|
||||||
|
ctx,
|
||||||
|
ip,
|
||||||
|
suite.userDriveID,
|
||||||
|
"General",
|
||||||
|
itemCollector,
|
||||||
|
map[string]string{},
|
||||||
|
"",
|
||||||
|
fault.New(true))
|
||||||
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
|
// Test Requirement 2: Need a file
|
||||||
|
require.NotEmpty(
|
||||||
|
t,
|
||||||
|
driveItem,
|
||||||
|
"no file item found for user %s drive %s",
|
||||||
|
suite.user,
|
||||||
|
suite.userDriveID)
|
||||||
|
|
||||||
|
bh := itemBackupHandler{
|
||||||
|
suite.service.ac.Drives(),
|
||||||
|
suite.user,
|
||||||
|
(&selectors.OneDriveBackup{}).Folders(selectors.Any())[0],
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read data for the file
|
||||||
|
itemData, err := downloadItem(ctx, bh, driveItem)
|
||||||
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
|
size, err := io.Copy(io.Discard, itemData)
|
||||||
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
require.NotZero(t, size)
|
||||||
|
}
|
||||||
|
|
||||||
// TestItemWriter is an integration test for uploading data to OneDrive
|
// TestItemWriter is an integration test for uploading data to OneDrive
|
||||||
// It creates a new folder with a new item and writes data to it
|
// It creates a new folder with a new item and writes data to it
|
||||||
func (suite *ItemIntegrationSuite) TestItemWriter() {
|
func (suite *ItemIntegrationSuite) TestItemWriter() {
|
||||||
@ -92,7 +171,7 @@ func (suite *ItemIntegrationSuite) TestItemWriter() {
|
|||||||
ctx,
|
ctx,
|
||||||
test.driveID,
|
test.driveID,
|
||||||
ptr.Val(root.GetId()),
|
ptr.Val(root.GetId()),
|
||||||
api.NewDriveItem(newFolderName, true),
|
newItem(newFolderName, true),
|
||||||
control.Copy)
|
control.Copy)
|
||||||
require.NoError(t, err, clues.ToCore(err))
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
require.NotNil(t, newFolder.GetId())
|
require.NotNil(t, newFolder.GetId())
|
||||||
@ -104,7 +183,7 @@ func (suite *ItemIntegrationSuite) TestItemWriter() {
|
|||||||
ctx,
|
ctx,
|
||||||
test.driveID,
|
test.driveID,
|
||||||
ptr.Val(newFolder.GetId()),
|
ptr.Val(newFolder.GetId()),
|
||||||
api.NewDriveItem(newItemName, false),
|
newItem(newItemName, false),
|
||||||
control.Copy)
|
control.Copy)
|
||||||
require.NoError(t, err, clues.ToCore(err))
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
require.NotNil(t, newItem.GetId())
|
require.NotNil(t, newItem.GetId())
|
||||||
@ -238,7 +317,7 @@ func (suite *ItemUnitTestSuite) TestDownloadItem() {
|
|||||||
{
|
{
|
||||||
name: "success",
|
name: "success",
|
||||||
itemFunc: func() models.DriveItemable {
|
itemFunc: func() models.DriveItemable {
|
||||||
di := api.NewDriveItem("test", false)
|
di := newItem("test", false)
|
||||||
di.SetAdditionalData(map[string]any{
|
di.SetAdditionalData(map[string]any{
|
||||||
"@microsoft.graph.downloadUrl": url,
|
"@microsoft.graph.downloadUrl": url,
|
||||||
})
|
})
|
||||||
@ -257,7 +336,7 @@ func (suite *ItemUnitTestSuite) TestDownloadItem() {
|
|||||||
{
|
{
|
||||||
name: "success, content url set instead of download url",
|
name: "success, content url set instead of download url",
|
||||||
itemFunc: func() models.DriveItemable {
|
itemFunc: func() models.DriveItemable {
|
||||||
di := api.NewDriveItem("test", false)
|
di := newItem("test", false)
|
||||||
di.SetAdditionalData(map[string]any{
|
di.SetAdditionalData(map[string]any{
|
||||||
"@content.downloadUrl": url,
|
"@content.downloadUrl": url,
|
||||||
})
|
})
|
||||||
@ -276,7 +355,7 @@ func (suite *ItemUnitTestSuite) TestDownloadItem() {
|
|||||||
{
|
{
|
||||||
name: "api getter returns error",
|
name: "api getter returns error",
|
||||||
itemFunc: func() models.DriveItemable {
|
itemFunc: func() models.DriveItemable {
|
||||||
di := api.NewDriveItem("test", false)
|
di := newItem("test", false)
|
||||||
di.SetAdditionalData(map[string]any{
|
di.SetAdditionalData(map[string]any{
|
||||||
"@microsoft.graph.downloadUrl": url,
|
"@microsoft.graph.downloadUrl": url,
|
||||||
})
|
})
|
||||||
@ -292,7 +371,7 @@ func (suite *ItemUnitTestSuite) TestDownloadItem() {
|
|||||||
{
|
{
|
||||||
name: "download url is empty",
|
name: "download url is empty",
|
||||||
itemFunc: func() models.DriveItemable {
|
itemFunc: func() models.DriveItemable {
|
||||||
di := api.NewDriveItem("test", false)
|
di := newItem("test", false)
|
||||||
return di
|
return di
|
||||||
},
|
},
|
||||||
GetFunc: func(ctx context.Context, url string) (*http.Response, error) {
|
GetFunc: func(ctx context.Context, url string) (*http.Response, error) {
|
||||||
@ -307,7 +386,7 @@ func (suite *ItemUnitTestSuite) TestDownloadItem() {
|
|||||||
{
|
{
|
||||||
name: "malware",
|
name: "malware",
|
||||||
itemFunc: func() models.DriveItemable {
|
itemFunc: func() models.DriveItemable {
|
||||||
di := api.NewDriveItem("test", false)
|
di := newItem("test", false)
|
||||||
di.SetAdditionalData(map[string]any{
|
di.SetAdditionalData(map[string]any{
|
||||||
"@microsoft.graph.downloadUrl": url,
|
"@microsoft.graph.downloadUrl": url,
|
||||||
})
|
})
|
||||||
@ -329,7 +408,7 @@ func (suite *ItemUnitTestSuite) TestDownloadItem() {
|
|||||||
{
|
{
|
||||||
name: "non-2xx http response",
|
name: "non-2xx http response",
|
||||||
itemFunc: func() models.DriveItemable {
|
itemFunc: func() models.DriveItemable {
|
||||||
di := api.NewDriveItem("test", false)
|
di := newItem("test", false)
|
||||||
di.SetAdditionalData(map[string]any{
|
di.SetAdditionalData(map[string]any{
|
||||||
"@microsoft.graph.downloadUrl": url,
|
"@microsoft.graph.downloadUrl": url,
|
||||||
})
|
})
|
||||||
@ -378,7 +457,7 @@ func (suite *ItemUnitTestSuite) TestDownloadItem_ConnectionResetErrorOnFirstRead
|
|||||||
url = "https://example.com"
|
url = "https://example.com"
|
||||||
|
|
||||||
itemFunc = func() models.DriveItemable {
|
itemFunc = func() models.DriveItemable {
|
||||||
di := api.NewDriveItem("test", false)
|
di := newItem("test", false)
|
||||||
di.SetAdditionalData(map[string]any{
|
di.SetAdditionalData(map[string]any{
|
||||||
"@microsoft.graph.downloadUrl": url,
|
"@microsoft.graph.downloadUrl": url,
|
||||||
})
|
})
|
||||||
|
|||||||
@ -92,6 +92,13 @@ func (h libraryBackupHandler) NewDrivePager(
|
|||||||
return h.ac.NewSiteDrivePager(resourceOwner, fields)
|
return h.ac.NewSiteDrivePager(resourceOwner, fields)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (h libraryBackupHandler) NewItemPager(
|
||||||
|
driveID, link string,
|
||||||
|
fields []string,
|
||||||
|
) api.DeltaPager[models.DriveItemable] {
|
||||||
|
return h.ac.NewDriveItemDeltaPager(driveID, link, fields)
|
||||||
|
}
|
||||||
|
|
||||||
func (h libraryBackupHandler) AugmentItemInfo(
|
func (h libraryBackupHandler) AugmentItemInfo(
|
||||||
dii details.ItemInfo,
|
dii details.ItemInfo,
|
||||||
item models.DriveItemable,
|
item models.DriveItemable,
|
||||||
@ -170,13 +177,6 @@ func (h libraryBackupHandler) IncludesDir(dir string) bool {
|
|||||||
return h.scope.Matches(selectors.SharePointLibraryFolder, dir)
|
return h.scope.Matches(selectors.SharePointLibraryFolder, dir)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h libraryBackupHandler) EnumerateDriveItemsDelta(
|
|
||||||
ctx context.Context,
|
|
||||||
driveID, prevDeltaLink string,
|
|
||||||
) ([]models.DriveItemable, api.DeltaUpdate, error) {
|
|
||||||
return h.ac.EnumerateDriveItemsDelta(ctx, driveID, prevDeltaLink)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ---------------------------------------------------------------------------
|
// ---------------------------------------------------------------------------
|
||||||
// Restore
|
// Restore
|
||||||
// ---------------------------------------------------------------------------
|
// ---------------------------------------------------------------------------
|
||||||
|
|||||||
@ -671,7 +671,7 @@ func createFolder(
|
|||||||
ctx,
|
ctx,
|
||||||
driveID,
|
driveID,
|
||||||
parentFolderID,
|
parentFolderID,
|
||||||
api.NewDriveItem(folderName, true),
|
newItem(folderName, true),
|
||||||
control.Replace)
|
control.Replace)
|
||||||
|
|
||||||
// ErrItemAlreadyExistsConflict can only occur for folders if the
|
// ErrItemAlreadyExistsConflict can only occur for folders if the
|
||||||
@ -692,7 +692,7 @@ func createFolder(
|
|||||||
ctx,
|
ctx,
|
||||||
driveID,
|
driveID,
|
||||||
parentFolderID,
|
parentFolderID,
|
||||||
api.NewDriveItem(folderName, true),
|
newItem(folderName, true),
|
||||||
control.Copy)
|
control.Copy)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, clues.Wrap(err, "creating folder")
|
return nil, clues.Wrap(err, "creating folder")
|
||||||
@ -733,7 +733,7 @@ func restoreFile(
|
|||||||
}
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
item = api.NewDriveItem(name, false)
|
item = newItem(name, false)
|
||||||
collisionKey = api.DriveItemCollisionKey(item)
|
collisionKey = api.DriveItemCollisionKey(item)
|
||||||
collision api.DriveItemIDType
|
collision api.DriveItemIDType
|
||||||
shouldDeleteOriginal bool
|
shouldDeleteOriginal bool
|
||||||
|
|||||||
@ -12,6 +12,7 @@ import (
|
|||||||
"github.com/alcionai/corso/src/internal/common/str"
|
"github.com/alcionai/corso/src/internal/common/str"
|
||||||
"github.com/alcionai/corso/src/pkg/fault"
|
"github.com/alcionai/corso/src/pkg/fault"
|
||||||
"github.com/alcionai/corso/src/pkg/logger"
|
"github.com/alcionai/corso/src/pkg/logger"
|
||||||
|
"github.com/alcionai/corso/src/pkg/services/m365/api"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@ -46,7 +47,7 @@ type urlCache struct {
|
|||||||
refreshMu sync.Mutex
|
refreshMu sync.Mutex
|
||||||
deltaQueryCount int
|
deltaQueryCount int
|
||||||
|
|
||||||
edid EnumerateDriveItemsDeltaer
|
itemPager api.DeltaPager[models.DriveItemable]
|
||||||
|
|
||||||
errs *fault.Bus
|
errs *fault.Bus
|
||||||
}
|
}
|
||||||
@ -55,10 +56,13 @@ type urlCache struct {
|
|||||||
func newURLCache(
|
func newURLCache(
|
||||||
driveID, prevDelta string,
|
driveID, prevDelta string,
|
||||||
refreshInterval time.Duration,
|
refreshInterval time.Duration,
|
||||||
edid EnumerateDriveItemsDeltaer,
|
itemPager api.DeltaPager[models.DriveItemable],
|
||||||
errs *fault.Bus,
|
errs *fault.Bus,
|
||||||
) (*urlCache, error) {
|
) (*urlCache, error) {
|
||||||
err := validateCacheParams(driveID, refreshInterval, edid)
|
err := validateCacheParams(
|
||||||
|
driveID,
|
||||||
|
refreshInterval,
|
||||||
|
itemPager)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, clues.Wrap(err, "cache params")
|
return nil, clues.Wrap(err, "cache params")
|
||||||
}
|
}
|
||||||
@ -67,9 +71,9 @@ func newURLCache(
|
|||||||
idToProps: make(map[string]itemProps),
|
idToProps: make(map[string]itemProps),
|
||||||
lastRefreshTime: time.Time{},
|
lastRefreshTime: time.Time{},
|
||||||
driveID: driveID,
|
driveID: driveID,
|
||||||
edid: edid,
|
|
||||||
prevDelta: prevDelta,
|
prevDelta: prevDelta,
|
||||||
refreshInterval: refreshInterval,
|
refreshInterval: refreshInterval,
|
||||||
|
itemPager: itemPager,
|
||||||
errs: errs,
|
errs: errs,
|
||||||
},
|
},
|
||||||
nil
|
nil
|
||||||
@ -79,7 +83,7 @@ func newURLCache(
|
|||||||
func validateCacheParams(
|
func validateCacheParams(
|
||||||
driveID string,
|
driveID string,
|
||||||
refreshInterval time.Duration,
|
refreshInterval time.Duration,
|
||||||
edid EnumerateDriveItemsDeltaer,
|
itemPager api.DeltaPager[models.DriveItemable],
|
||||||
) error {
|
) error {
|
||||||
if len(driveID) == 0 {
|
if len(driveID) == 0 {
|
||||||
return clues.New("drive id is empty")
|
return clues.New("drive id is empty")
|
||||||
@ -89,8 +93,8 @@ func validateCacheParams(
|
|||||||
return clues.New("invalid refresh interval")
|
return clues.New("invalid refresh interval")
|
||||||
}
|
}
|
||||||
|
|
||||||
if edid == nil {
|
if itemPager == nil {
|
||||||
return clues.New("nil item enumerator")
|
return clues.New("nil item pager")
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
@ -156,23 +160,44 @@ func (uc *urlCache) refreshCache(
|
|||||||
// Issue a delta query to graph
|
// Issue a delta query to graph
|
||||||
logger.Ctx(ctx).Info("refreshing url cache")
|
logger.Ctx(ctx).Info("refreshing url cache")
|
||||||
|
|
||||||
items, du, err := uc.edid.EnumerateDriveItemsDelta(ctx, uc.driveID, uc.prevDelta)
|
err := uc.deltaQuery(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
// clear cache
|
||||||
uc.idToProps = make(map[string]itemProps)
|
uc.idToProps = make(map[string]itemProps)
|
||||||
return clues.Stack(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
uc.deltaQueryCount++
|
return err
|
||||||
|
|
||||||
if err := uc.updateCache(ctx, items, uc.errs); err != nil {
|
|
||||||
return clues.Stack(err)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
logger.Ctx(ctx).Info("url cache refreshed")
|
logger.Ctx(ctx).Info("url cache refreshed")
|
||||||
|
|
||||||
// Update last refresh time
|
// Update last refresh time
|
||||||
uc.lastRefreshTime = time.Now()
|
uc.lastRefreshTime = time.Now()
|
||||||
uc.prevDelta = du.URL
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// deltaQuery performs a delta query on the drive and updates the cache
|
||||||
|
func (uc *urlCache) deltaQuery(
|
||||||
|
ctx context.Context,
|
||||||
|
) error {
|
||||||
|
logger.Ctx(ctx).Debug("starting delta query")
|
||||||
|
// Reset item pager to remove any previous state
|
||||||
|
uc.itemPager.Reset(ctx)
|
||||||
|
|
||||||
|
_, _, _, err := collectItems(
|
||||||
|
ctx,
|
||||||
|
uc.itemPager,
|
||||||
|
uc.driveID,
|
||||||
|
"",
|
||||||
|
uc.updateCache,
|
||||||
|
map[string]string{},
|
||||||
|
uc.prevDelta,
|
||||||
|
uc.errs)
|
||||||
|
if err != nil {
|
||||||
|
return clues.Wrap(err, "delta query")
|
||||||
|
}
|
||||||
|
|
||||||
|
uc.deltaQueryCount++
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
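
To summarize the refresh path introduced above in one place, here is a simplified, hypothetical restatement (assuming the clues and time imports already present in this file); it is not the production code, only the wiring: reset the pager, let collectItems feed every page into updateCache, and drop the whole cache if anything fails.

// refreshFlowSketch condenses refreshCache + deltaQuery for illustration.
func (uc *urlCache) refreshFlowSketch(ctx context.Context) error {
	// Start the enumeration from uc.prevDelta with no leftover pager state.
	uc.itemPager.Reset(ctx)

	// updateCache satisfies the itemCollector signature, so collectItems can
	// push every page of drive items straight into the URL cache.
	_, _, _, err := collectItems(
		ctx,
		uc.itemPager,
		uc.driveID,
		"", // drive name is not needed by updateCache
		uc.updateCache,
		map[string]string{},
		uc.prevDelta,
		uc.errs)
	if err != nil {
		// A failed refresh clears the cache rather than serving stale URLs.
		uc.idToProps = make(map[string]itemProps)
		return clues.Wrap(err, "refreshing url cache")
	}

	uc.deltaQueryCount++
	uc.lastRefreshTime = time.Now()

	return nil
}
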
@ -199,7 +224,13 @@ func (uc *urlCache) readCache(
|
|||||||
// It assumes that cacheMu is held by caller in write mode
|
// It assumes that cacheMu is held by caller in write mode
|
||||||
func (uc *urlCache) updateCache(
|
func (uc *urlCache) updateCache(
|
||||||
ctx context.Context,
|
ctx context.Context,
|
||||||
|
_, _ string,
|
||||||
items []models.DriveItemable,
|
items []models.DriveItemable,
|
||||||
|
_ map[string]string,
|
||||||
|
_ map[string]string,
|
||||||
|
_ map[string]struct{},
|
||||||
|
_ map[string]map[string]string,
|
||||||
|
_ bool,
|
||||||
errs *fault.Bus,
|
errs *fault.Bus,
|
||||||
) error {
|
) error {
|
||||||
el := errs.Local()
|
el := errs.Local()
|
||||||
|
|||||||
@ -1,6 +1,7 @@
|
|||||||
package drive
|
package drive
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
"io"
|
"io"
|
||||||
"math/rand"
|
"math/rand"
|
||||||
@ -17,19 +18,15 @@ import (
|
|||||||
"github.com/alcionai/corso/src/internal/common/dttm"
|
"github.com/alcionai/corso/src/internal/common/dttm"
|
||||||
"github.com/alcionai/corso/src/internal/common/ptr"
|
"github.com/alcionai/corso/src/internal/common/ptr"
|
||||||
"github.com/alcionai/corso/src/internal/m365/graph"
|
"github.com/alcionai/corso/src/internal/m365/graph"
|
||||||
"github.com/alcionai/corso/src/internal/m365/service/onedrive/mock"
|
|
||||||
"github.com/alcionai/corso/src/internal/tester"
|
"github.com/alcionai/corso/src/internal/tester"
|
||||||
"github.com/alcionai/corso/src/internal/tester/tconfig"
|
"github.com/alcionai/corso/src/internal/tester/tconfig"
|
||||||
"github.com/alcionai/corso/src/pkg/control"
|
"github.com/alcionai/corso/src/pkg/control"
|
||||||
"github.com/alcionai/corso/src/pkg/control/testdata"
|
"github.com/alcionai/corso/src/pkg/control/testdata"
|
||||||
"github.com/alcionai/corso/src/pkg/fault"
|
"github.com/alcionai/corso/src/pkg/fault"
|
||||||
"github.com/alcionai/corso/src/pkg/services/m365/api"
|
"github.com/alcionai/corso/src/pkg/services/m365/api"
|
||||||
|
apiMock "github.com/alcionai/corso/src/pkg/services/m365/api/mock"
|
||||||
)
|
)
|
||||||
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
// integration
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
type URLCacheIntegrationSuite struct {
|
type URLCacheIntegrationSuite struct {
|
||||||
tester.Suite
|
tester.Suite
|
||||||
ac api.Client
|
ac api.Client
|
||||||
@ -71,10 +68,11 @@ func (suite *URLCacheIntegrationSuite) SetupSuite() {
|
|||||||
// url cache
|
// url cache
|
||||||
func (suite *URLCacheIntegrationSuite) TestURLCacheBasic() {
|
func (suite *URLCacheIntegrationSuite) TestURLCacheBasic() {
|
||||||
var (
|
var (
|
||||||
t = suite.T()
|
t = suite.T()
|
||||||
ac = suite.ac.Drives()
|
ac = suite.ac.Drives()
|
||||||
driveID = suite.driveID
|
driveID = suite.driveID
|
||||||
newFolderName = testdata.DefaultRestoreConfig("folder").Location
|
newFolderName = testdata.DefaultRestoreConfig("folder").Location
|
||||||
|
driveItemPager = suite.ac.Drives().NewDriveItemDeltaPager(driveID, "", api.DriveItemSelectDefault())
|
||||||
)
|
)
|
||||||
|
|
||||||
ctx, flush := tester.NewContext(t)
|
ctx, flush := tester.NewContext(t)
|
||||||
@ -84,11 +82,11 @@ func (suite *URLCacheIntegrationSuite) TestURLCacheBasic() {
|
|||||||
root, err := ac.GetRootFolder(ctx, driveID)
|
root, err := ac.GetRootFolder(ctx, driveID)
|
||||||
require.NoError(t, err, clues.ToCore(err))
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
newFolder, err := ac.PostItemInContainer(
|
newFolder, err := ac.Drives().PostItemInContainer(
|
||||||
ctx,
|
ctx,
|
||||||
driveID,
|
driveID,
|
||||||
ptr.Val(root.GetId()),
|
ptr.Val(root.GetId()),
|
||||||
api.NewDriveItem(newFolderName, true),
|
newItem(newFolderName, true),
|
||||||
control.Copy)
|
control.Copy)
|
||||||
require.NoError(t, err, clues.ToCore(err))
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
@ -96,10 +94,33 @@ func (suite *URLCacheIntegrationSuite) TestURLCacheBasic() {
|
|||||||
|
|
||||||
nfid := ptr.Val(newFolder.GetId())
|
nfid := ptr.Val(newFolder.GetId())
|
||||||
|
|
||||||
|
collectorFunc := func(
|
||||||
|
context.Context,
|
||||||
|
string,
|
||||||
|
string,
|
||||||
|
[]models.DriveItemable,
|
||||||
|
map[string]string,
|
||||||
|
map[string]string,
|
||||||
|
map[string]struct{},
|
||||||
|
map[string]map[string]string,
|
||||||
|
bool,
|
||||||
|
*fault.Bus,
|
||||||
|
) error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
// Get the previous delta to feed into url cache
|
// Get the previous delta to feed into url cache
|
||||||
_, du, err := ac.EnumerateDriveItemsDelta(ctx, suite.driveID, "")
|
prevDelta, _, _, err := collectItems(
|
||||||
|
ctx,
|
||||||
|
suite.ac.Drives().NewDriveItemDeltaPager(driveID, "", api.DriveItemSelectURLCache()),
|
||||||
|
suite.driveID,
|
||||||
|
"drive-name",
|
||||||
|
collectorFunc,
|
||||||
|
map[string]string{},
|
||||||
|
"",
|
||||||
|
fault.New(true))
|
||||||
require.NoError(t, err, clues.ToCore(err))
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
require.NotEmpty(t, du.URL)
|
require.NotNil(t, prevDelta.URL)
|
||||||
|
|
||||||
// Create a bunch of files in the new folder
|
// Create a bunch of files in the new folder
|
||||||
var items []models.DriveItemable
|
var items []models.DriveItemable
|
||||||
@ -107,11 +128,11 @@ func (suite *URLCacheIntegrationSuite) TestURLCacheBasic() {
|
|||||||
for i := 0; i < 5; i++ {
|
for i := 0; i < 5; i++ {
|
||||||
newItemName := "test_url_cache_basic_" + dttm.FormatNow(dttm.SafeForTesting)
|
newItemName := "test_url_cache_basic_" + dttm.FormatNow(dttm.SafeForTesting)
|
||||||
|
|
||||||
item, err := ac.PostItemInContainer(
|
item, err := ac.Drives().PostItemInContainer(
|
||||||
ctx,
|
ctx,
|
||||||
driveID,
|
driveID,
|
||||||
nfid,
|
nfid,
|
||||||
api.NewDriveItem(newItemName, false),
|
newItem(newItemName, false),
|
||||||
control.Copy)
|
control.Copy)
|
||||||
require.NoError(t, err, clues.ToCore(err))
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
@ -121,9 +142,9 @@ func (suite *URLCacheIntegrationSuite) TestURLCacheBasic() {
|
|||||||
// Create a new URL cache with a long TTL
|
// Create a new URL cache with a long TTL
|
||||||
uc, err := newURLCache(
|
uc, err := newURLCache(
|
||||||
suite.driveID,
|
suite.driveID,
|
||||||
du.URL,
|
prevDelta.URL,
|
||||||
1*time.Hour,
|
1*time.Hour,
|
||||||
suite.ac.Drives(),
|
driveItemPager,
|
||||||
fault.New(true))
|
fault.New(true))
|
||||||
require.NoError(t, err, clues.ToCore(err))
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
@ -174,10 +195,6 @@ func (suite *URLCacheIntegrationSuite) TestURLCacheBasic() {
|
|||||||
require.Equal(t, 1, uc.deltaQueryCount)
|
require.Equal(t, 1, uc.deltaQueryCount)
|
||||||
}
|
}
|
||||||
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
// unit
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
type URLCacheUnitSuite struct {
|
type URLCacheUnitSuite struct {
|
||||||
tester.Suite
|
tester.Suite
|
||||||
}
|
}
|
||||||
@ -188,20 +205,27 @@ func TestURLCacheUnitSuite(t *testing.T) {
|
|||||||
|
|
||||||
func (suite *URLCacheUnitSuite) TestGetItemProperties() {
|
func (suite *URLCacheUnitSuite) TestGetItemProperties() {
|
||||||
deltaString := "delta"
|
deltaString := "delta"
|
||||||
|
next := "next"
|
||||||
driveID := "drive1"
|
driveID := "drive1"
|
||||||
|
|
||||||
table := []struct {
|
table := []struct {
|
||||||
name string
|
name string
|
||||||
pagerItems map[string][]models.DriveItemable
|
pagerResult map[string][]apiMock.PagerResult[models.DriveItemable]
|
||||||
pagerErr map[string]error
|
|
||||||
expectedItemProps map[string]itemProps
|
expectedItemProps map[string]itemProps
|
||||||
expectedErr require.ErrorAssertionFunc
|
expectedErr require.ErrorAssertionFunc
|
||||||
cacheAssert func(*urlCache, time.Time)
|
cacheAssert func(*urlCache, time.Time)
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
name: "single item in cache",
|
name: "single item in cache",
|
||||||
pagerItems: map[string][]models.DriveItemable{
|
pagerResult: map[string][]apiMock.PagerResult[models.DriveItemable]{
|
||||||
driveID: {fileItem("1", "file1", "root", "root", "https://dummy1.com", false)},
|
driveID: {
|
||||||
|
{
|
||||||
|
Values: []models.DriveItemable{
|
||||||
|
fileItem("1", "file1", "root", "root", "https://dummy1.com", false),
|
||||||
|
},
|
||||||
|
DeltaLink: &deltaString,
|
||||||
|
},
|
||||||
|
},
|
||||||
},
|
},
|
||||||
expectedItemProps: map[string]itemProps{
|
expectedItemProps: map[string]itemProps{
|
||||||
"1": {
|
"1": {
|
||||||
@ -218,13 +242,18 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() {
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "multiple items in cache",
|
name: "multiple items in cache",
|
||||||
pagerItems: map[string][]models.DriveItemable{
|
pagerResult: map[string][]apiMock.PagerResult[models.DriveItemable]{
|
||||||
driveID: {
|
driveID: {
|
||||||
fileItem("1", "file1", "root", "root", "https://dummy1.com", false),
|
{
|
||||||
fileItem("2", "file2", "root", "root", "https://dummy2.com", false),
|
Values: []models.DriveItemable{
|
||||||
fileItem("3", "file3", "root", "root", "https://dummy3.com", false),
|
fileItem("1", "file1", "root", "root", "https://dummy1.com", false),
|
||||||
fileItem("4", "file4", "root", "root", "https://dummy4.com", false),
|
fileItem("2", "file2", "root", "root", "https://dummy2.com", false),
|
||||||
fileItem("5", "file5", "root", "root", "https://dummy5.com", false),
|
fileItem("3", "file3", "root", "root", "https://dummy3.com", false),
|
||||||
|
fileItem("4", "file4", "root", "root", "https://dummy4.com", false),
|
||||||
|
fileItem("5", "file5", "root", "root", "https://dummy5.com", false),
|
||||||
|
},
|
||||||
|
DeltaLink: &deltaString,
|
||||||
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
expectedItemProps: map[string]itemProps{
|
expectedItemProps: map[string]itemProps{
|
||||||
@ -258,13 +287,18 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() {
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "duplicate items with potentially new urls",
|
name: "duplicate items with potentially new urls",
|
||||||
pagerItems: map[string][]models.DriveItemable{
|
pagerResult: map[string][]apiMock.PagerResult[models.DriveItemable]{
|
||||||
driveID: {
|
driveID: {
|
||||||
fileItem("1", "file1", "root", "root", "https://dummy1.com", false),
|
{
|
||||||
fileItem("2", "file2", "root", "root", "https://dummy2.com", false),
|
Values: []models.DriveItemable{
|
||||||
fileItem("3", "file3", "root", "root", "https://dummy3.com", false),
|
fileItem("1", "file1", "root", "root", "https://dummy1.com", false),
|
||||||
fileItem("1", "file1", "root", "root", "https://test1.com", false),
|
fileItem("2", "file2", "root", "root", "https://dummy2.com", false),
|
||||||
fileItem("2", "file2", "root", "root", "https://test2.com", false),
|
fileItem("3", "file3", "root", "root", "https://dummy3.com", false),
|
||||||
|
fileItem("1", "file1", "root", "root", "https://test1.com", false),
|
||||||
|
fileItem("2", "file2", "root", "root", "https://test2.com", false),
|
||||||
|
},
|
||||||
|
DeltaLink: &deltaString,
|
||||||
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
expectedItemProps: map[string]itemProps{
|
expectedItemProps: map[string]itemProps{
|
||||||
@ -290,11 +324,16 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() {
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "deleted items",
|
name: "deleted items",
|
||||||
pagerItems: map[string][]models.DriveItemable{
|
pagerResult: map[string][]apiMock.PagerResult[models.DriveItemable]{
|
||||||
driveID: {
|
driveID: {
|
||||||
fileItem("1", "file1", "root", "root", "https://dummy1.com", false),
|
{
|
||||||
fileItem("2", "file2", "root", "root", "https://dummy2.com", false),
|
Values: []models.DriveItemable{
|
||||||
fileItem("1", "file1", "root", "root", "https://dummy1.com", true),
|
fileItem("1", "file1", "root", "root", "https://dummy1.com", false),
|
||||||
|
fileItem("2", "file2", "root", "root", "https://dummy2.com", false),
|
||||||
|
fileItem("1", "file1", "root", "root", "https://dummy1.com", true),
|
||||||
|
},
|
||||||
|
DeltaLink: &deltaString,
|
||||||
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
expectedItemProps: map[string]itemProps{
|
expectedItemProps: map[string]itemProps{
|
||||||
@ -316,8 +355,15 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() {
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "item not found in cache",
|
name: "item not found in cache",
|
||||||
pagerItems: map[string][]models.DriveItemable{
|
pagerResult: map[string][]apiMock.PagerResult[models.DriveItemable]{
|
||||||
driveID: {fileItem("1", "file1", "root", "root", "https://dummy1.com", false)},
|
driveID: {
|
||||||
|
{
|
||||||
|
Values: []models.DriveItemable{
|
||||||
|
fileItem("1", "file1", "root", "root", "https://dummy1.com", false),
|
||||||
|
},
|
||||||
|
DeltaLink: &deltaString,
|
||||||
|
},
|
||||||
|
},
|
||||||
},
|
},
|
||||||
expectedItemProps: map[string]itemProps{
|
expectedItemProps: map[string]itemProps{
|
||||||
"2": {},
|
"2": {},
|
||||||
@ -330,10 +376,23 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "delta query error",
|
name: "multi-page delta query error",
|
||||||
pagerItems: map[string][]models.DriveItemable{},
|
pagerResult: map[string][]apiMock.PagerResult[models.DriveItemable]{
|
||||||
pagerErr: map[string]error{
|
driveID: {
|
||||||
driveID: errors.New("delta query error"),
|
{
|
||||||
|
Values: []models.DriveItemable{
|
||||||
|
fileItem("1", "file1", "root", "root", "https://dummy1.com", false),
|
||||||
|
},
|
||||||
|
NextLink: &next,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Values: []models.DriveItemable{
|
||||||
|
fileItem("2", "file2", "root", "root", "https://dummy2.com", false),
|
||||||
|
},
|
||||||
|
DeltaLink: &deltaString,
|
||||||
|
Err: errors.New("delta query error"),
|
||||||
|
},
|
||||||
|
},
|
||||||
},
|
},
|
||||||
expectedItemProps: map[string]itemProps{
|
expectedItemProps: map[string]itemProps{
|
||||||
"1": {},
|
"1": {},
|
||||||
@ -349,10 +408,15 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() {
|
|||||||
|
|
||||||
{
|
{
|
||||||
name: "folder item",
|
name: "folder item",
|
||||||
pagerItems: map[string][]models.DriveItemable{
|
pagerResult: map[string][]apiMock.PagerResult[models.DriveItemable]{
|
||||||
driveID: {
|
driveID: {
|
||||||
fileItem("1", "file1", "root", "root", "https://dummy1.com", false),
|
{
|
||||||
driveItem("2", "folder2", "root", "root", false, true, false),
|
Values: []models.DriveItemable{
|
||||||
|
fileItem("1", "file1", "root", "root", "https://dummy1.com", false),
|
||||||
|
driveItem("2", "folder2", "root", "root", false, true, false),
|
||||||
|
},
|
||||||
|
DeltaLink: &deltaString,
|
||||||
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
 expectedItemProps: map[string]itemProps{

@@ -373,17 +437,15 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() {
 ctx, flush := tester.NewContext(t)
 defer flush()

-medi := mock.EnumeratesDriveItemsDelta{
-Items: test.pagerItems,
-Err: test.pagerErr,
-DeltaUpdate: map[string]api.DeltaUpdate{driveID: {URL: deltaString}},
+itemPager := &apiMock.DeltaPager[models.DriveItemable]{
+ToReturn: test.pagerResult[driveID],
 }

 cache, err := newURLCache(
 driveID,
 "",
 1*time.Hour,
-&medi,
+itemPager,
 fault.New(true))

 require.NoError(suite.T(), err, clues.ToCore(err))

@@ -418,17 +480,15 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() {

 // Test needsRefresh
 func (suite *URLCacheUnitSuite) TestNeedsRefresh() {
-var (
-t = suite.T()
-driveID = "drive1"
-refreshInterval = 1 * time.Second
-)
+driveID := "drive1"
+t := suite.T()
+refreshInterval := 1 * time.Second

 cache, err := newURLCache(
 driveID,
 "",
 refreshInterval,
-&mock.EnumeratesDriveItemsDelta{},
+&apiMock.DeltaPager[models.DriveItemable]{},
 fault.New(true))

 require.NoError(t, err, clues.ToCore(err))

@@ -450,12 +510,14 @@ func (suite *URLCacheUnitSuite) TestNeedsRefresh() {
 require.False(t, cache.needsRefresh())
 }

+// Test newURLCache
 func (suite *URLCacheUnitSuite) TestNewURLCache() {
+// table driven tests
 table := []struct {
 name string
 driveID string
 refreshInt time.Duration
-itemPager EnumerateDriveItemsDeltaer
+itemPager api.DeltaPager[models.DriveItemable]
 errors *fault.Bus
 expectedErr require.ErrorAssertionFunc
 }{

@@ -463,7 +525,7 @@ func (suite *URLCacheUnitSuite) TestNewURLCache() {
 name: "invalid driveID",
 driveID: "",
 refreshInt: 1 * time.Hour,
-itemPager: &mock.EnumeratesDriveItemsDelta{},
+itemPager: &apiMock.DeltaPager[models.DriveItemable]{},
 errors: fault.New(true),
 expectedErr: require.Error,
 },

@@ -471,12 +533,12 @@ func (suite *URLCacheUnitSuite) TestNewURLCache() {
 name: "invalid refresh interval",
 driveID: "drive1",
 refreshInt: 100 * time.Millisecond,
-itemPager: &mock.EnumeratesDriveItemsDelta{},
+itemPager: &apiMock.DeltaPager[models.DriveItemable]{},
 errors: fault.New(true),
 expectedErr: require.Error,
 },
 {
-name: "invalid item enumerator",
+name: "invalid itemPager",
 driveID: "drive1",
 refreshInt: 1 * time.Hour,
 itemPager: nil,

@@ -487,7 +549,7 @@ func (suite *URLCacheUnitSuite) TestNewURLCache() {
 name: "valid",
 driveID: "drive1",
 refreshInt: 1 * time.Hour,
-itemPager: &mock.EnumeratesDriveItemsDelta{},
+itemPager: &apiMock.DeltaPager[models.DriveItemable]{},
 errors: fault.New(true),
 expectedErr: require.NoError,
 },

@@ -15,7 +15,9 @@ import (

 inMock "github.com/alcionai/corso/src/internal/common/idname/mock"
 "github.com/alcionai/corso/src/internal/common/ptr"
+"github.com/alcionai/corso/src/internal/common/readers"
 "github.com/alcionai/corso/src/internal/data"
+dataMock "github.com/alcionai/corso/src/internal/data/mock"
 "github.com/alcionai/corso/src/internal/m365/graph"
 "github.com/alcionai/corso/src/internal/m365/support"
 "github.com/alcionai/corso/src/internal/operations/inject"

@@ -322,7 +324,7 @@ func (suite *DataCollectionsUnitSuite) TestParseMetadataCollections() {
 require.NoError(t, err, clues.ToCore(err))

 cdps, canUsePreviousBackup, err := ParseMetadataCollections(ctx, []data.RestoreCollection{
-data.NoFetchRestoreCollection{Collection: coll},
+dataMock.NewUnversionedRestoreCollection(t, data.NoFetchRestoreCollection{Collection: coll}),
 })
 test.expectError(t, err, clues.ToCore(err))

@@ -591,7 +593,7 @@ func (suite *BackupIntgSuite) TestDelta() {
 require.NotNil(t, metadata, "collections contains a metadata collection")

 cdps, canUsePreviousBackup, err := ParseMetadataCollections(ctx, []data.RestoreCollection{
-data.NoFetchRestoreCollection{Collection: metadata},
+dataMock.NewUnversionedRestoreCollection(t, data.NoFetchRestoreCollection{Collection: metadata}),
 })
 require.NoError(t, err, clues.ToCore(err))
 assert.True(t, canUsePreviousBackup, "can use previous backup")

@@ -666,7 +668,12 @@ func (suite *BackupIntgSuite) TestMailSerializationRegression() {
 for stream := range streamChannel {
 buf := &bytes.Buffer{}

-read, err := buf.ReadFrom(stream.ToReader())
+rr, err := readers.NewVersionedRestoreReader(stream.ToReader())
+require.NoError(t, err, clues.ToCore(err))
+
+assert.Equal(t, readers.DefaultSerializationVersion, rr.Format().Version)
+
+read, err := buf.ReadFrom(rr)
 assert.NoError(t, err, clues.ToCore(err))
 assert.NotZero(t, read)

@@ -744,7 +751,13 @@ func (suite *BackupIntgSuite) TestContactSerializationRegression() {

 for stream := range edc.Items(ctx, fault.New(true)) {
 buf := &bytes.Buffer{}
-read, err := buf.ReadFrom(stream.ToReader())
+
+rr, err := readers.NewVersionedRestoreReader(stream.ToReader())
+require.NoError(t, err, clues.ToCore(err))
+
+assert.Equal(t, readers.DefaultSerializationVersion, rr.Format().Version)
+
+read, err := buf.ReadFrom(rr)
 assert.NoError(t, err, clues.ToCore(err))
 assert.NotZero(t, read)

@@ -878,7 +891,12 @@ func (suite *BackupIntgSuite) TestEventsSerializationRegression() {
 for item := range edc.Items(ctx, fault.New(true)) {
 buf := &bytes.Buffer{}

-read, err := buf.ReadFrom(item.ToReader())
+rr, err := readers.NewVersionedRestoreReader(item.ToReader())
+require.NoError(t, err, clues.ToCore(err))
+
+assert.Equal(t, readers.DefaultSerializationVersion, rr.Format().Version)
+
+read, err := buf.ReadFrom(rr)
 assert.NoError(t, err, clues.ToCore(err))
 assert.NotZero(t, read)

@@ -1198,7 +1216,9 @@ func checkMetadata(
 ) {
 catPaths, _, err := ParseMetadataCollections(
 ctx,
-[]data.RestoreCollection{data.NoFetchRestoreCollection{Collection: c}})
+[]data.RestoreCollection{
+dataMock.NewUnversionedRestoreCollection(t, data.NoFetchRestoreCollection{Collection: c}),
+})
 if !assert.NoError(t, err, "getting metadata", clues.ToCore(err)) {
 return
 }

@@ -278,10 +278,21 @@ func (col *prefetchCollection) streamItems(
 return
 }

-stream <- data.NewPrefetchedItem(
+item, err := data.NewPrefetchedItem(
 io.NopCloser(bytes.NewReader(itemData)),
 id,
 details.ItemInfo{Exchange: info})
+if err != nil {
+el.AddRecoverable(
+ctx,
+clues.Stack(err).
+WithClues(ctx).
+Label(fault.LabelForceNoBackupCreation))
+
+return
+}
+
+stream <- item

 atomic.AddInt64(&success, 1)
 atomic.AddInt64(&totalBytes, info.Size)
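The hunk above changes data.NewPrefetchedItem to a two-value return and routes any constructor failure through the fault bus instead of streaming a possibly invalid item. A minimal sketch of that producer pattern, assuming the corso-internal data, details, clues, and fault packages shown in this diff; payload and itemID are hypothetical placeholders:

// Sketch only: mirrors the construct-then-check flow used in streamItems.
item, err := data.NewPrefetchedItem(
    io.NopCloser(bytes.NewReader(payload)), // payload: hypothetical serialized bytes
    itemID,
    details.ItemInfo{Exchange: info})
if err != nil {
    // Recoverable failure: record it and force the run to skip backup creation
    // rather than persisting a half-built item.
    el.AddRecoverable(ctx, clues.Stack(err).WithClues(ctx).Label(fault.LabelForceNoBackupCreation))
    return
}

stream <- item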
@@ -17,6 +17,7 @@ import (
 "golang.org/x/exp/slices"

 "github.com/alcionai/corso/src/internal/common/ptr"
+"github.com/alcionai/corso/src/internal/common/readers"
 "github.com/alcionai/corso/src/internal/data"
 "github.com/alcionai/corso/src/internal/m365/collection/exchange/mock"
 "github.com/alcionai/corso/src/internal/m365/graph"

@@ -55,13 +56,20 @@ func (suite *CollectionUnitSuite) TestPrefetchedItem_Reader() {
 suite.Run(test.name, func() {
 t := suite.T()

-ed := data.NewPrefetchedItem(
+ed, err := data.NewPrefetchedItem(
 io.NopCloser(bytes.NewReader(test.readData)),
 "itemID",
 details.ItemInfo{})
+require.NoError(t, err, clues.ToCore(err))
+
+r, err := readers.NewVersionedRestoreReader(ed.ToReader())
+require.NoError(t, err, clues.ToCore(err))
+
+assert.Equal(t, readers.DefaultSerializationVersion, r.Format().Version)
+assert.False(t, r.Format().DelInFlight)

 buf := &bytes.Buffer{}
-_, err := buf.ReadFrom(ed.ToReader())
+_, err = buf.ReadFrom(r)
 assert.NoError(t, err, "reading data: %v", clues.ToCore(err))
 assert.Equal(t, test.readData, buf.Bytes(), "read data")
 assert.Equal(t, "itemID", ed.ID(), "item ID")

@@ -493,11 +501,11 @@ func (suite *CollectionUnitSuite) TestLazyItem_NoRead_GetInfo_Errors() {
 time.Now(),
 fault.New(true))

-_, err := li.(data.ItemInfo).Info()
+_, err := li.Info()
 assert.Error(suite.T(), err, "Info without reading data should error")
 }

-func (suite *CollectionUnitSuite) TestLazyItem() {
+func (suite *CollectionUnitSuite) TestLazyItem_GetDataErrors() {
 var (
 parentPath = "inbox/private/silly cats"
 now = time.Now()

@@ -505,44 +513,19 @@ func (suite *CollectionUnitSuite) TestLazyItem() {

 table := []struct {
 name string
-modTime time.Time
 getErr error
 serializeErr error
-expectModTime time.Time
 expectReadErrType error
-dataCheck assert.ValueAssertionFunc
-expectInfoErr bool
-expectInfoErrType error
 }{
-{
-name: "ReturnsEmptyReaderOnDeletedInFlight",
-modTime: now,
-getErr: graph.ErrDeletedInFlight,
-dataCheck: assert.Empty,
-expectInfoErr: true,
-expectInfoErrType: data.ErrNotFound,
-},
-{
-name: "ReturnsValidReaderAndInfo",
-modTime: now,
-dataCheck: assert.NotEmpty,
-expectModTime: now,
-},
 {
 name: "ReturnsErrorOnGenericGetError",
-modTime: now,
 getErr: assert.AnError,
 expectReadErrType: assert.AnError,
-dataCheck: assert.Empty,
-expectInfoErr: true,
 },
 {
 name: "ReturnsErrorOnGenericSerializeError",
-modTime: now,
 serializeErr: assert.AnError,
 expectReadErrType: assert.AnError,
-dataCheck: assert.Empty,
-expectInfoErr: true,
 },
 }

@@ -575,47 +558,128 @@ func (suite *CollectionUnitSuite) TestLazyItem() {
 userID: "userID",
 itemID: "itemID",
 getter: getter,
-modTime: test.modTime,
+modTime: now,
 immutableIDs: false,
 parentPath: parentPath,
 },
 "itemID",
-test.modTime,
+now,
 fault.New(true))

 assert.False(t, li.Deleted(), "item shouldn't be marked deleted")
-assert.Equal(
-t,
-test.modTime,
-li.(data.ItemModTime).ModTime(),
-"item mod time")
+assert.Equal(t, now, li.ModTime(), "item mod time")

-readData, err := io.ReadAll(li.ToReader())
-if test.expectReadErrType == nil {
-assert.NoError(t, err, "reading item data: %v", clues.ToCore(err))
-} else {
-assert.ErrorIs(t, err, test.expectReadErrType, "read error")
-}
-
-test.dataCheck(t, readData, "read item data")
-
-info, err := li.(data.ItemInfo).Info()
-
-// Didn't expect an error getting info, it should be valid.
-if !test.expectInfoErr {
-assert.NoError(t, err, "getting item info: %v", clues.ToCore(err))
-assert.Equal(t, parentPath, info.Exchange.ParentPath)
-assert.Equal(t, test.expectModTime, info.Modified())
-
-return
-}
+_, err := readers.NewVersionedRestoreReader(li.ToReader())
+assert.ErrorIs(t, err, test.expectReadErrType)

 // Should get some form of error when trying to get info.
+_, err = li.Info()
 assert.Error(t, err, "Info()")
-
-if test.expectInfoErrType != nil {
-assert.ErrorIs(t, err, test.expectInfoErrType, "Info() error")
-}
 })
 }
 }
+
+func (suite *CollectionUnitSuite) TestLazyItem_ReturnsEmptyReaderOnDeletedInFlight() {
+var (
+t = suite.T()
+
+parentPath = "inbox/private/silly cats"
+now = time.Now()
+)
+
+ctx, flush := tester.NewContext(t)
+defer flush()
+
+getter := &mock.ItemGetSerialize{GetErr: graph.ErrDeletedInFlight}
+
+li := data.NewLazyItem(
+ctx,
+&lazyItemGetter{
+userID: "userID",
+itemID: "itemID",
+getter: getter,
+modTime: now,
+immutableIDs: false,
+parentPath: parentPath,
+},
+"itemID",
+now,
+fault.New(true))
+
+assert.False(t, li.Deleted(), "item shouldn't be marked deleted")
+assert.Equal(
+t,
+now,
+li.ModTime(),
+"item mod time")
+
+r, err := readers.NewVersionedRestoreReader(li.ToReader())
+require.NoError(t, err, clues.ToCore(err))
+
+assert.Equal(t, readers.DefaultSerializationVersion, r.Format().Version)
+assert.True(t, r.Format().DelInFlight)
+
+readData, err := io.ReadAll(r)
+assert.NoError(t, err, "reading item data: %v", clues.ToCore(err))
+
+assert.Empty(t, readData, "read item data")
+
+_, err = li.Info()
+assert.ErrorIs(t, err, data.ErrNotFound, "Info() error")
+}
+
+func (suite *CollectionUnitSuite) TestLazyItem() {
+var (
+t = suite.T()
+
+parentPath = "inbox/private/silly cats"
+now = time.Now()
+)
+
+ctx, flush := tester.NewContext(t)
+defer flush()
+
+// Exact data type doesn't really matter.
+testData := models.NewMessage()
+testData.SetSubject(ptr.To("hello world"))
+
+getter := &mock.ItemGetSerialize{GetData: testData}
+
+li := data.NewLazyItem(
+ctx,
+&lazyItemGetter{
+userID: "userID",
+itemID: "itemID",
+getter: getter,
+modTime: now,
+immutableIDs: false,
+parentPath: parentPath,
+},
+"itemID",
+now,
+fault.New(true))
+
+assert.False(t, li.Deleted(), "item shouldn't be marked deleted")
+assert.Equal(
+t,
+now,
+li.ModTime(),
+"item mod time")
+
+r, err := readers.NewVersionedRestoreReader(li.ToReader())
+require.NoError(t, err, clues.ToCore(err))
+
+assert.Equal(t, readers.DefaultSerializationVersion, r.Format().Version)
+assert.False(t, r.Format().DelInFlight)
+
+readData, err := io.ReadAll(r)
+assert.NoError(t, err, "reading item data: %v", clues.ToCore(err))
+
+assert.NotEmpty(t, readData, "read item data")
+
+info, err := li.Info()
+assert.NoError(t, err, "getting item info: %v", clues.ToCore(err))
+
+assert.Equal(t, parentPath, info.Exchange.ParentPath)
+assert.Equal(t, now, info.Modified())
+}
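The test changes above all follow the same consumption pattern: wrap the raw item reader in readers.NewVersionedRestoreReader, assert the serialization format header, then read the remaining bytes. A condensed sketch of that flow, assuming the corso-internal readers package used throughout this diff:

// Sketch: how an item's bytes are consumed after the version-header change.
rr, err := readers.NewVersionedRestoreReader(item.ToReader())
require.NoError(t, err, clues.ToCore(err))

// The header carries the serialization version and a deleted-in-flight flag.
assert.Equal(t, readers.DefaultSerializationVersion, rr.Format().Version)
assert.False(t, rr.Format().DelInFlight)

// rr now yields only the item payload, with the header already stripped.
body, err := io.ReadAll(rr)
require.NoError(t, err, clues.ToCore(err))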
@@ -2,6 +2,7 @@ package groups

 import (
 "context"
+"fmt"
 "testing"
 "time"

@@ -526,6 +527,8 @@ func (suite *BackupIntgSuite) TestCreateCollections() {

 require.NotEmpty(t, c.FullPath().Folder(false))

+fmt.Printf("\n-----\nfolder %+v\n-----\n", c.FullPath().Folder(false))
+
 // TODO(ashmrtn): Remove when LocationPath is made part of BackupCollection
 // interface.
 if !assert.Implements(t, (*data.LocationPather)(nil), c) {

@@ -534,6 +537,8 @@ func (suite *BackupIntgSuite) TestCreateCollections() {

 loc := c.(data.LocationPather).LocationPath().String()

+fmt.Printf("\n-----\nloc %+v\n-----\n", c.(data.LocationPather).LocationPath().String())
+
 require.NotEmpty(t, loc)

 delete(test.channelNames, loc)

@@ -67,6 +67,15 @@ func (bh channelsBackupHandler) canonicalPath(
 false)
 }

+func (bh channelsBackupHandler) PathPrefix(tenantID string) (path.Path, error) {
+return path.Build(
+tenantID,
+bh.protectedResource,
+path.GroupsService,
+path.ChannelMessagesCategory,
+false)
+}
+
 func (bh channelsBackupHandler) GetChannelMessage(
 ctx context.Context,
 teamID, channelID, itemID string,
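PathPrefix, added above, gives the channel-messages category a stable repository prefix; the same builder output is what the tombstone logic later in this diff hands to data.NewTombstoneCollection. A small sketch of the call, with placeholder tenant and group IDs that are illustrative only:

// Hypothetical IDs for illustration; the real values come from the backup config.
prefix, err := path.Build(
    "tenant-id",
    "group-id",
    path.GroupsService,
    path.ChannelMessagesCategory,
    false) // false: build a folder prefix, not an item path
if err != nil {
    return clues.Stack(err)
}

// prefix is later used, for example, as the tombstone path when a previous
// backup can't be reused incrementally.
_ = prefix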
@@ -150,27 +150,47 @@ func (col *Collection) streamItems(ctx context.Context, errs *fault.Bus) {
 parentFolderID,
 id)
 if err != nil {
-el.AddRecoverable(ctx, clues.Wrap(err, "writing channel message to serializer"))
+el.AddRecoverable(
+ctx,
+clues.Wrap(err, "writing channel message to serializer").Label(fault.LabelForceNoBackupCreation))
+
 return
 }

 if err := writer.WriteObjectValue("", item); err != nil {
-el.AddRecoverable(ctx, clues.Wrap(err, "writing channel message to serializer"))
+el.AddRecoverable(
+ctx,
+clues.Wrap(err, "writing channel message to serializer").Label(fault.LabelForceNoBackupCreation))
+
 return
 }

 itemData, err := writer.GetSerializedContent()
 if err != nil {
-el.AddRecoverable(ctx, clues.Wrap(err, "serializing channel message"))
+el.AddRecoverable(
+ctx,
+clues.Wrap(err, "serializing channel message").Label(fault.LabelForceNoBackupCreation))
+
 return
 }

 info.ParentPath = col.LocationPath().String()

-col.stream <- data.NewPrefetchedItem(
+storeItem, err := data.NewPrefetchedItem(
 io.NopCloser(bytes.NewReader(itemData)),
 id,
 details.ItemInfo{Groups: info})
+if err != nil {
+el.AddRecoverable(
+ctx,
+clues.Stack(err).
+WithClues(ctx).
+Label(fault.LabelForceNoBackupCreation))
+
+return
+}
+
+col.stream <- storeItem

 atomic.AddInt64(&streamedItems, 1)
 atomic.AddInt64(&totalBytes, info.Size)

@@ -11,6 +11,7 @@ import (
 "github.com/stretchr/testify/require"
 "github.com/stretchr/testify/suite"

+"github.com/alcionai/corso/src/internal/common/readers"
 "github.com/alcionai/corso/src/internal/data"
 "github.com/alcionai/corso/src/internal/m365/collection/groups/mock"
 "github.com/alcionai/corso/src/internal/m365/support"

@@ -48,13 +49,20 @@ func (suite *CollectionUnitSuite) TestPrefetchedItem_Reader() {
 suite.Run(test.name, func() {
 t := suite.T()

-ed := data.NewPrefetchedItem(
+ed, err := data.NewPrefetchedItem(
 io.NopCloser(bytes.NewReader(test.readData)),
 "itemID",
 details.ItemInfo{})
+require.NoError(t, err, clues.ToCore(err))
+
+r, err := readers.NewVersionedRestoreReader(ed.ToReader())
+require.NoError(t, err, clues.ToCore(err))
+
+assert.Equal(t, readers.DefaultSerializationVersion, r.Format().Version)
+assert.False(t, r.Format().DelInFlight)

 buf := &bytes.Buffer{}
-_, err := buf.ReadFrom(ed.ToReader())
+_, err = buf.ReadFrom(r)
 assert.NoError(t, err, "reading data: %v", clues.ToCore(err))
 assert.Equal(t, test.readData, buf.Bytes(), "read data")
 assert.Equal(t, "itemID", ed.ID(), "item ID")

@@ -211,11 +211,17 @@ func (sc *Collection) retrieveLists(
 metrics.Bytes += size

 metrics.Successes++
-sc.data <- data.NewPrefetchedItem(
+
+item, err := data.NewPrefetchedItem(
 io.NopCloser(bytes.NewReader(byteArray)),
 ptr.Val(lst.GetId()),
 details.ItemInfo{SharePoint: ListToSPInfo(lst, size)})
+if err != nil {
+el.AddRecoverable(ctx, clues.Stack(err).WithClues(ctx).Label(fault.LabelForceNoBackupCreation))
+continue
+}
+
+sc.data <- item
 progress <- struct{}{}
 }
 }

@@ -272,11 +278,17 @@ func (sc *Collection) retrievePages(
 if size > 0 {
 metrics.Bytes += size
 metrics.Successes++
-sc.data <- data.NewPrefetchedItem(
+
+item, err := data.NewPrefetchedItem(
 io.NopCloser(bytes.NewReader(byteArray)),
 ptr.Val(pg.GetId()),
 details.ItemInfo{SharePoint: pageToSPInfo(pg, root, size)})
+if err != nil {
+el.AddRecoverable(ctx, clues.Stack(err).WithClues(ctx).Label(fault.LabelForceNoBackupCreation))
+continue
+}
+
+sc.data <- item
 progress <- struct{}{}
 }
 }

@@ -103,10 +103,11 @@ func (suite *SharePointCollectionSuite) TestCollection_Items() {
 byteArray, err := ow.GetSerializedContent()
 require.NoError(t, err, clues.ToCore(err))

-data := data.NewPrefetchedItem(
+data, err := data.NewPrefetchedItem(
 io.NopCloser(bytes.NewReader(byteArray)),
 name,
 details.ItemInfo{SharePoint: ListToSPInfo(listing, int64(len(byteArray)))})
+require.NoError(t, err, clues.ToCore(err))

 return data
 },

@@ -132,10 +133,11 @@ func (suite *SharePointCollectionSuite) TestCollection_Items() {
 page, err := betaAPI.CreatePageFromBytes(byteArray)
 require.NoError(t, err, clues.ToCore(err))

-data := data.NewPrefetchedItem(
+data, err := data.NewPrefetchedItem(
 io.NopCloser(bytes.NewReader(byteArray)),
 itemName,
 details.ItemInfo{SharePoint: betaAPI.PageInfo(page, int64(len(byteArray)))})
+require.NoError(t, err, clues.ToCore(err))

 return data
 },

@@ -194,10 +196,11 @@ func (suite *SharePointCollectionSuite) TestListCollection_Restore() {
 byteArray, err := service.Serialize(listing)
 require.NoError(t, err, clues.ToCore(err))

-listData := data.NewPrefetchedItem(
+listData, err := data.NewPrefetchedItem(
 io.NopCloser(bytes.NewReader(byteArray)),
 testName,
 details.ItemInfo{SharePoint: ListToSPInfo(listing, int64(len(byteArray)))})
+require.NoError(t, err, clues.ToCore(err))

 destName := testdata.DefaultRestoreConfig("").Location

@@ -79,20 +79,29 @@ func NewController(
 return nil, clues.Wrap(err, "creating api client").WithClues(ctx)
 }

-rc := resource.UnknownResource
+var rCli *resourceClient

-switch pst {
-case path.ExchangeService, path.OneDriveService:
-rc = resource.Users
-case path.GroupsService:
-rc = resource.Groups
-case path.SharePointService:
-rc = resource.Sites
-}
-
-rCli, err := getResourceClient(rc, ac)
-if err != nil {
-return nil, clues.Wrap(err, "creating resource client").WithClues(ctx)
+// no failure for unknown service.
+// In that case we create a controller that doesn't attempt to look up any resource
+// data. This case helps avoid unnecessary service calls when the end user is running
+// repo init and connect commands via the CLI. All other callers should be expected
+// to pass in a known service, or else expect downstream failures.
+if pst != path.UnknownService {
+rc := resource.UnknownResource
+
+switch pst {
+case path.ExchangeService, path.OneDriveService:
+rc = resource.Users
+case path.GroupsService:
+rc = resource.Groups
+case path.SharePointService:
+rc = resource.Sites
+}
+
+rCli, err = getResourceClient(rc, ac)
+if err != nil {
+return nil, clues.Wrap(err, "creating resource client").WithClues(ctx)
+}
 }

 ctrl := Controller{

@@ -110,6 +119,10 @@ func NewController(
 return &ctrl, nil
 }

+func (ctrl *Controller) VerifyAccess(ctx context.Context) error {
+return ctrl.AC.Access().GetToken(ctx)
+}
+
 // ---------------------------------------------------------------------------
 // Processing Status
 // ---------------------------------------------------------------------------

@@ -195,7 +208,7 @@ func getResourceClient(rc resource.Category, ac api.Client) (*resourceClient, er
 case resource.Groups:
 return &resourceClient{enum: rc, getter: ac.Groups()}, nil
 default:
-return nil, clues.New("unrecognized owner resource enum").With("resource_enum", rc)
+return nil, clues.New("unrecognized owner resource type").With("resource_enum", rc)
 }
 }
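The controller changes above let repo-level commands build a Controller without resolving any resource, and add VerifyAccess as a cheap credential check. A hedged sketch of how a connect-style caller might use the two together; the constructor arguments other than ctx and the service type are assumptions, not taken from this diff:

// Sketch only: acct and opts stand in for whatever account/options values the
// caller already holds.
ctrl, err := NewController(ctx, acct, path.UnknownService, opts)
if err != nil {
    return clues.Wrap(err, "creating m365 controller")
}

// VerifyAccess only requests a token, so it validates credentials without
// resolving any users, sites, or groups.
if err := ctrl.VerifyAccess(ctx); err != nil {
    return clues.Wrap(err, "verifying m365 access")
}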
@@ -861,7 +861,7 @@ func (suite *ControllerIntegrationSuite) TestRestoreAndBackup_core() {
 },
 },
 {
-name: "MultipleContactsSingleFolder",
+name: "MultipleContactsInRestoreFolder",
 service: path.ExchangeService,
 collections: []stub.ColInfo{
 {

@@ -887,49 +887,77 @@ func (suite *ControllerIntegrationSuite) TestRestoreAndBackup_core() {
 },
 },
 },
-{
-name: "MultipleContactsMultipleFolders",
-service: path.ExchangeService,
-collections: []stub.ColInfo{
-{
-PathElements: []string{"Work"},
-Category: path.ContactsCategory,
-Items: []stub.ItemInfo{
-{
-Name: "someencodeditemID",
-Data: exchMock.ContactBytes("Ghimley"),
-LookupKey: "Ghimley",
-},
-{
-Name: "someencodeditemID2",
-Data: exchMock.ContactBytes("Irgot"),
-LookupKey: "Irgot",
-},
-{
-Name: "someencodeditemID3",
-Data: exchMock.ContactBytes("Jannes"),
-LookupKey: "Jannes",
-},
-},
-},
-{
-PathElements: []string{"Personal"},
-Category: path.ContactsCategory,
-Items: []stub.ItemInfo{
-{
-Name: "someencodeditemID4",
-Data: exchMock.ContactBytes("Argon"),
-LookupKey: "Argon",
-},
-{
-Name: "someencodeditemID5",
-Data: exchMock.ContactBytes("Bernard"),
-LookupKey: "Bernard",
-},
-},
-},
-},
-},
+// TODO(ashmrtn): Re-enable when we can restore contacts to nested folders.
+//{
+// name: "MultipleContactsSingleFolder",
+// service: path.ExchangeService,
+// collections: []stub.ColInfo{
+// {
+// PathElements: []string{"Contacts"},
+// Category: path.ContactsCategory,
+// Items: []stub.ItemInfo{
+// {
+// Name: "someencodeditemID",
+// Data: exchMock.ContactBytes("Ghimley"),
+// LookupKey: "Ghimley",
+// },
+// {
+// Name: "someencodeditemID2",
+// Data: exchMock.ContactBytes("Irgot"),
+// LookupKey: "Irgot",
+// },
+// {
+// Name: "someencodeditemID3",
+// Data: exchMock.ContactBytes("Jannes"),
+// LookupKey: "Jannes",
+// },
+// },
+// },
+//},
+//{
+// name: "MultipleContactsMultipleFolders",
+// service: path.ExchangeService,
+// collections: []stub.ColInfo{
+// {
+// PathElements: []string{"Work"},
+// Category: path.ContactsCategory,
+// Items: []stub.ItemInfo{
+// {
+// Name: "someencodeditemID",
+// Data: exchMock.ContactBytes("Ghimley"),
+// LookupKey: "Ghimley",
+// },
+// {
+// Name: "someencodeditemID2",
+// Data: exchMock.ContactBytes("Irgot"),
+// LookupKey: "Irgot",
+// },
+// {
+// Name: "someencodeditemID3",
+// Data: exchMock.ContactBytes("Jannes"),
+// LookupKey: "Jannes",
+// },
+// },
+// },
+// {
+// PathElements: []string{"Personal"},
+// Category: path.ContactsCategory,
+// Items: []stub.ItemInfo{
+// {
+// Name: "someencodeditemID4",
+// Data: exchMock.ContactBytes("Argon"),
+// LookupKey: "Argon",
+// },
+// {
+// Name: "someencodeditemID5",
+// Data: exchMock.ContactBytes("Bernard"),
+// LookupKey: "Bernard",
+// },
+// },
+// },
+// },
+// },
+//},
 // {
 // name: "MultipleEventsSingleCalendar",
 // service: path.ExchangeService,

@@ -1017,34 +1045,35 @@ func (suite *ControllerIntegrationSuite) TestRestoreAndBackup_core() {

 func (suite *ControllerIntegrationSuite) TestMultiFolderBackupDifferentNames() {
 table := []restoreBackupInfo{
-{
-name: "Contacts",
-service: path.ExchangeService,
-collections: []stub.ColInfo{
-{
-PathElements: []string{"Work"},
-Category: path.ContactsCategory,
-Items: []stub.ItemInfo{
-{
-Name: "someencodeditemID",
-Data: exchMock.ContactBytes("Ghimley"),
-LookupKey: "Ghimley",
-},
-},
-},
-{
-PathElements: []string{"Personal"},
-Category: path.ContactsCategory,
-Items: []stub.ItemInfo{
-{
-Name: "someencodeditemID2",
-Data: exchMock.ContactBytes("Irgot"),
-LookupKey: "Irgot",
-},
-},
-},
-},
-},
+// TODO(ashmrtn): Re-enable when we can restore contacts to nested folders.
+//{
+// name: "Contacts",
+// service: path.ExchangeService,
+// collections: []stub.ColInfo{
+// {
+// PathElements: []string{"Work"},
+// Category: path.ContactsCategory,
+// Items: []stub.ItemInfo{
+// {
+// Name: "someencodeditemID",
+// Data: exchMock.ContactBytes("Ghimley"),
+// LookupKey: "Ghimley",
+// },
+// },
+// },
+// {
+// PathElements: []string{"Personal"},
+// Category: path.ContactsCategory,
+// Items: []stub.ItemInfo{
+// {
+// Name: "someencodeditemID2",
+// Data: exchMock.ContactBytes("Irgot"),
+// LookupKey: "Irgot",
+// },
+// },
+// },
+// },
+//},
 // {
 // name: "Events",
 // service: path.ExchangeService,

@@ -70,6 +70,7 @@ const (
 NoSPLicense errorMessage = "Tenant does not have a SPO license"
 parameterDeltaTokenNotSupported errorMessage = "Parameter 'DeltaToken' not supported for this request"
 usersCannotBeResolved errorMessage = "One or more users could not be resolved"
+requestedSiteCouldNotBeFound errorMessage = "Requested site could not be found"
 )

 const (

@@ -259,6 +260,10 @@ func IsErrUsersCannotBeResolved(err error) bool {
 return hasErrorCode(err, noResolvedUsers) || hasErrorMessage(err, usersCannotBeResolved)
 }

+func IsErrSiteNotFound(err error) bool {
+return hasErrorMessage(err, requestedSiteCouldNotBeFound)
+}
+
 // ---------------------------------------------------------------------------
 // error parsers
 // ---------------------------------------------------------------------------

@@ -628,6 +628,51 @@ func (suite *GraphErrorsUnitSuite) TestIsErrUsersCannotBeResolved() {
 }
 }

+func (suite *GraphErrorsUnitSuite) TestIsErrSiteCouldNotBeFound() {
+table := []struct {
+name string
+err error
+expect assert.BoolAssertionFunc
+}{
+{
+name: "nil",
+err: nil,
+expect: assert.False,
+},
+{
+name: "non-matching",
+err: assert.AnError,
+expect: assert.False,
+},
+{
+name: "non-matching oDataErr",
+err: odErrMsg("InvalidRequest", "cant resolve sites"),
+expect: assert.False,
+},
+{
+name: "matching oDataErr msg",
+err: odErrMsg("InvalidRequest", string(requestedSiteCouldNotBeFound)),
+expect: assert.True,
+},
+// next two tests are to make sure the checks are case insensitive
+{
+name: "oDataErr uppercase",
+err: odErrMsg("InvalidRequest", strings.ToUpper(string(requestedSiteCouldNotBeFound))),
+expect: assert.True,
+},
+{
+name: "oDataErr lowercase",
+err: odErrMsg("InvalidRequest", strings.ToLower(string(requestedSiteCouldNotBeFound))),
+expect: assert.True,
+},
+}
+for _, test := range table {
+suite.Run(test.name, func() {
+test.expect(suite.T(), IsErrSiteNotFound(test.err))
+})
+}
+}
+
 func (suite *GraphErrorsUnitSuite) TestIsErrCannotOpenFileAttachment() {
 table := []struct {
 name string
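IsErrSiteNotFound, added above, matches on the Graph error message rather than an error code, which is why the new test exercises upper- and lower-case variants. A short sketch of the kind of call-site check it enables; the surrounding lookup is illustrative and not taken from this diff:

// Sketch: tolerate a group whose SharePoint site Graph reports as missing.
site, err := ac.Groups().GetRootSite(ctx, groupID)
if err != nil {
    if graph.IsErrSiteNotFound(err) {
        // No provisioned site for this group; skip it instead of failing the backup.
        return nil
    }

    return clues.Stack(err)
}

_ = site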
@@ -57,11 +57,16 @@ func (mce MetadataCollectionEntry) toMetadataItem() (metadataItem, error) {
 return metadataItem{}, clues.Wrap(err, "serializing metadata")
 }

+item, err := data.NewUnindexedPrefetchedItem(
+io.NopCloser(buf),
+mce.fileName,
+time.Now())
+if err != nil {
+return metadataItem{}, clues.Stack(err)
+}
+
 return metadataItem{
-Item: data.NewUnindexedPrefetchedItem(
-io.NopCloser(buf),
-mce.fileName,
-time.Now()),
+Item: item,
 size: int64(buf.Len()),
 }, nil
 }
@@ -13,6 +13,7 @@ import (
 "github.com/stretchr/testify/require"
 "github.com/stretchr/testify/suite"

+"github.com/alcionai/corso/src/internal/common/readers"
 "github.com/alcionai/corso/src/internal/data"
 "github.com/alcionai/corso/src/internal/m365/support"
 "github.com/alcionai/corso/src/internal/tester"

@@ -69,13 +70,16 @@ func (suite *MetadataCollectionUnitSuite) TestItems() {
 items := []metadataItem{}

 for i := 0; i < len(itemNames); i++ {
+item, err := data.NewUnindexedPrefetchedItem(
+io.NopCloser(bytes.NewReader(itemData[i])),
+itemNames[i],
+time.Time{})
+require.NoError(t, err, clues.ToCore(err))
+
 items = append(
 items,
 metadataItem{
-Item: data.NewUnindexedPrefetchedItem(
-io.NopCloser(bytes.NewReader(itemData[i])),
-itemNames[i],
-time.Time{}),
+Item: item,
 size: int64(len(itemData[i])),
 })
 }

@@ -103,7 +107,13 @@ func (suite *MetadataCollectionUnitSuite) TestItems() {
 for s := range c.Items(ctx, fault.New(true)) {
 gotNames = append(gotNames, s.ID())

-buf, err := io.ReadAll(s.ToReader())
+rr, err := readers.NewVersionedRestoreReader(s.ToReader())
+require.NoError(t, err, clues.ToCore(err))
+
+assert.Equal(t, readers.DefaultSerializationVersion, rr.Format().Version)
+assert.False(t, rr.Format().DelInFlight)
+
+buf, err := io.ReadAll(rr)
 if !assert.NoError(t, err, clues.ToCore(err)) {
 continue
 }

@@ -204,11 +214,17 @@ func (suite *MetadataCollectionUnitSuite) TestMakeMetadataCollection() {
 for item := range col.Items(ctx, fault.New(true)) {
 assert.Equal(t, test.metadata.fileName, item.ID())

+rr, err := readers.NewVersionedRestoreReader(item.ToReader())
+require.NoError(t, err, clues.ToCore(err))
+
+assert.Equal(t, readers.DefaultSerializationVersion, rr.Format().Version)
+assert.False(t, rr.Format().DelInFlight)
+
 gotMap := map[string]string{}
-decoder := json.NewDecoder(item.ToReader())
+decoder := json.NewDecoder(rr)
 itemCount++

-err := decoder.Decode(&gotMap)
+err = decoder.Decode(&gotMap)
 if !assert.NoError(t, err, clues.ToCore(err)) {
 continue
 }

@@ -16,6 +16,7 @@ import (
 "golang.org/x/exp/slices"

 "github.com/alcionai/corso/src/internal/common/ptr"
+"github.com/alcionai/corso/src/internal/common/readers"
 "github.com/alcionai/corso/src/internal/data"
 "github.com/alcionai/corso/src/internal/m365/collection/drive/metadata"
 odStub "github.com/alcionai/corso/src/internal/m365/service/onedrive/stub"

@@ -573,7 +574,12 @@ func compareExchangeEmail(
 expected map[string][]byte,
 item data.Item,
 ) {
-itemData, err := io.ReadAll(item.ToReader())
+rr := versionedReadWrapper(t, item.ToReader())
+if rr == nil {
+return
+}
+
+itemData, err := io.ReadAll(rr)
 if !assert.NoError(t, err, "reading collection item", item.ID(), clues.ToCore(err)) {
 return
 }

@@ -600,7 +606,12 @@ func compareExchangeContact(
 expected map[string][]byte,
 item data.Item,
 ) {
-itemData, err := io.ReadAll(item.ToReader())
+rr := versionedReadWrapper(t, item.ToReader())
+if rr == nil {
+return
+}
+
+itemData, err := io.ReadAll(rr)
 if !assert.NoError(t, err, "reading collection item", item.ID(), clues.ToCore(err)) {
 return
 }

@@ -628,7 +639,12 @@ func compareExchangeEvent(
 expected map[string][]byte,
 item data.Item,
 ) {
-itemData, err := io.ReadAll(item.ToReader())
+rr := versionedReadWrapper(t, item.ToReader())
+if rr == nil {
+return
+}
+
+itemData, err := io.ReadAll(rr)
 if !assert.NoError(t, err, "reading collection item", item.ID(), clues.ToCore(err)) {
 return
 }

@@ -718,7 +734,12 @@ func compareDriveItem(
 return false
 }

-buf, err := io.ReadAll(item.ToReader())
+rr := versionedReadWrapper(t, item.ToReader())
+if rr == nil {
+return true
+}
+
+buf, err := io.ReadAll(rr)
 if !assert.NoError(t, err, clues.ToCore(err)) {
 return true
 }

@@ -850,6 +871,29 @@ func compareDriveItem(
 return true
 }

+// versionedReaderWrapper strips out the version format header and checks it
+// meets the current standard for all service types. If it doesn't meet the
+// standard, returns nil. Else returns the versionedRestoreReader.
+func versionedReadWrapper(
+t *testing.T,
+reader io.ReadCloser,
+) io.ReadCloser {
+rr, err := readers.NewVersionedRestoreReader(reader)
+if !assert.NoError(t, err, clues.ToCore(err)) {
+return nil
+}
+
+if !assert.Equal(t, readers.DefaultSerializationVersion, rr.Format().Version) {
+return nil
+}
+
+if !assert.False(t, rr.Format().DelInFlight) {
+return nil
+}
+
+return rr
+}
+
 // compareItem compares the data returned by backup with the expected data.
 // Returns true if a comparison was done else false. Bool return is mostly used
 // to exclude OneDrive permissions for the root right now.

@@ -919,30 +963,9 @@ func checkHasCollections(
 continue
 }

-fp := g.FullPath()
 loc := g.(data.LocationPather).LocationPath()

-if fp.Service() == path.OneDriveService ||
-(fp.Service() == path.SharePointService && fp.Category() == path.LibrariesCategory) {
-dp, err := path.ToDrivePath(fp)
-if !assert.NoError(t, err, clues.ToCore(err)) {
-continue
-}
-
-loc = path.BuildDriveLocation(dp.DriveID, loc.Elements()...)
-}
-
-p, err := loc.ToDataLayerPath(
-fp.Tenant(),
-fp.ProtectedResource(),
-fp.Service(),
-fp.Category(),
-false)
-if !assert.NoError(t, err, clues.ToCore(err)) {
-continue
-}
-
-gotNames = append(gotNames, p.String())
+gotNames = append(gotNames, loc.String())
 }

 assert.ElementsMatch(t, expectedNames, gotNames, "returned collections")

@@ -963,14 +986,18 @@ func checkCollections(

 for _, returned := range got {
 var (
-hasItems bool
-service = returned.FullPath().Service()
-category = returned.FullPath().Category()
-expectedColData = expected[returned.FullPath().String()]
+expectedColDataByLoc map[string][]byte
+hasItems bool
+service = returned.FullPath().Service()
+category = returned.FullPath().Category()
 folders = returned.FullPath().Elements()
 rootDir = folders[len(folders)-1] == mci.RestoreCfg.Location
 )

+if p, ok := returned.(data.LocationPather); ok {
+expectedColDataByLoc = expected[p.LocationPath().String()]
+}
+
 // Need to iterate through all items even if we don't expect to find a match
 // because otherwise we'll deadlock waiting for the status. Unexpected or
 // missing collection paths will be reported by checkHasCollections.

@@ -990,14 +1017,14 @@ func checkCollections(
 hasItems = true
 gotItems++

-if expectedColData == nil {
+if expectedColDataByLoc == nil {
 continue
 }

 if !compareItem(
 t,
 returned.FullPath(),
-expectedColData,
+expectedColDataByLoc,
 service,
 category,
 item,

@@ -84,6 +84,7 @@ func (ctrl *Controller) ConsumeRestoreCollections(
 rcc,
 ctrl.AC,
 ctrl.backupDriveIDNames,
+ctrl.backupSiteIDWebURL,
 dcs,
 deets,
 errs,

@@ -22,6 +22,7 @@ import (
 "github.com/alcionai/corso/src/pkg/account"
 "github.com/alcionai/corso/src/pkg/backup/identity"
 "github.com/alcionai/corso/src/pkg/backup/metadata"
+"github.com/alcionai/corso/src/pkg/control"
 "github.com/alcionai/corso/src/pkg/fault"
 "github.com/alcionai/corso/src/pkg/logger"
 "github.com/alcionai/corso/src/pkg/path"

@@ -35,19 +36,18 @@ func ProduceBackupCollections(
 creds account.M365Config,
 su support.StatusUpdater,
 errs *fault.Bus,
-) ([]data.BackupCollection, *prefixmatcher.StringSetMatcher, bool, error) {
+) ([]data.BackupCollection, *prefixmatcher.StringSetMatcher, error) {
 b, err := bpc.Selector.ToGroupsBackup()
 if err != nil {
-return nil, nil, false, clues.Wrap(err, "groupsDataCollection: parsing selector")
+return nil, nil, clues.Wrap(err, "groupsDataCollection: parsing selector")
 }

 var (
 el = errs.Local()
 collections = []data.BackupCollection{}
 categories = map[path.CategoryType]struct{}{}
 ssmb = prefixmatcher.NewStringSetBuilder()
-canUsePreviousBackup bool
 sitesPreviousPaths = map[string]string{}
 )

 ctx = clues.Add(

@@ -60,7 +60,7 @@ func ProduceBackupCollections(
 bpc.ProtectedResource.ID(),
 api.CallConfig{})
 if err != nil {
-return nil, nil, false, clues.Wrap(err, "getting group").WithClues(ctx)
+return nil, nil, clues.Wrap(err, "getting group").WithClues(ctx)
 }

 isTeam := api.IsTeam(ctx, group)

@@ -79,12 +79,9 @@ func ProduceBackupCollections(

 switch scope.Category().PathType() {
 case path.LibrariesCategory:
-// TODO(meain): Private channels get a separate SharePoint
-// site. We should also back those up and not just the
-// default one.
-resp, err := ac.Groups().GetRootSite(ctx, bpc.ProtectedResource.ID())
+sites, err := ac.Groups().GetAllSites(ctx, bpc.ProtectedResource.ID(), errs)
 if err != nil {
-return nil, nil, false, err
+return nil, nil, err
 }

 siteMetadataCollection := map[string][]data.RestoreCollection{}

@@ -95,39 +92,47 @@ func ProduceBackupCollections(
 siteMetadataCollection[siteID] = append(siteMetadataCollection[siteID], c)
 }

-pr := idname.NewProvider(ptr.Val(resp.GetId()), ptr.Val(resp.GetName()))
-sbpc := inject.BackupProducerConfig{
-LastBackupVersion: bpc.LastBackupVersion,
-Options: bpc.Options,
-ProtectedResource: pr,
-Selector: bpc.Selector,
-MetadataCollections: siteMetadataCollection[ptr.Val(resp.GetId())],
-}
+for _, s := range sites {
+pr := idname.NewProvider(ptr.Val(s.GetId()), ptr.Val(s.GetName()))
+sbpc := inject.BackupProducerConfig{
+LastBackupVersion: bpc.LastBackupVersion,
+Options: bpc.Options,
+ProtectedResource: pr,
+Selector: bpc.Selector,
+MetadataCollections: siteMetadataCollection[ptr.Val(s.GetId())],
+}

 bh := drive.NewGroupBackupHandler(
 bpc.ProtectedResource.ID(),
-ptr.Val(resp.GetId()),
+ptr.Val(s.GetId()),
 ac.Drives(),
 scope)

-cp, err := bh.SitePathPrefix(creds.AzureTenantID)
+sp, err := bh.SitePathPrefix(creds.AzureTenantID)
 if err != nil {
-return nil, nil, false, clues.Wrap(err, "getting canonical path")
+return nil, nil, clues.Wrap(err, "getting site path")
 }

-sitesPreviousPaths[ptr.Val(resp.GetId())] = cp.String()
+sitesPreviousPaths[ptr.Val(s.GetId())] = sp.String()

-dbcs, canUsePreviousBackup, err = site.CollectLibraries(
+cs, canUsePreviousBackup, err := site.CollectLibraries(
 ctx,
 sbpc,
 bh,
 creds.AzureTenantID,
 ssmb,
 su,
 errs)
 if err != nil {
 el.AddRecoverable(ctx, err)
 continue
+}
+
+if !canUsePreviousBackup {
+dbcs = append(dbcs, data.NewTombstoneCollection(sp, control.Options{}))
+}
+
+dbcs = append(dbcs, cs...)
 }

 case path.ChannelMessagesCategory:

@@ -135,10 +140,12 @@ func ProduceBackupCollections(
 continue
 }

-dbcs, canUsePreviousBackup, err = groups.CreateCollections(
+bh := groups.NewChannelBackupHandler(bpc.ProtectedResource.ID(), ac.Channels())
+
+cs, canUsePreviousBackup, err := groups.CreateCollections(
 ctx,
 bpc,
-groups.NewChannelBackupHandler(bpc.ProtectedResource.ID(), ac.Channels()),
+bh,
 creds.AzureTenantID,
 scope,
 su,

@@ -147,6 +154,17 @@ func ProduceBackupCollections(
 el.AddRecoverable(ctx, err)
 continue
 }
+
+if !canUsePreviousBackup {
+tp, err := bh.PathPrefix(creds.AzureTenantID)
+if err != nil {
+return nil, nil, clues.Wrap(err, "getting message path")
+}
+
+dbcs = append(dbcs, data.NewTombstoneCollection(tp, control.Options{}))
+}
+
+dbcs = append(dbcs, cs...)
 }

 collections = append(collections, dbcs...)

@@ -165,7 +183,7 @@ func ProduceBackupCollections(
 su,
 errs)
 if err != nil {
-return nil, nil, false, err
+return nil, nil, err
 }

 collections = append(collections, baseCols...)

@@ -178,12 +196,12 @@ func ProduceBackupCollections(
 sitesPreviousPaths,
 su)
 if err != nil {
-return nil, nil, false, err
+return nil, nil, err
 }

 collections = append(collections, md)

-return collections, ssmb.ToReader(), canUsePreviousBackup, el.Failure()
+return collections, ssmb.ToReader(), el.Failure()
 }

 func getSitesMetadataCollection(
@ -12,6 +12,7 @@ import (
|
|||||||
"github.com/alcionai/corso/src/internal/common/ptr"
|
"github.com/alcionai/corso/src/internal/common/ptr"
|
||||||
"github.com/alcionai/corso/src/internal/data"
|
"github.com/alcionai/corso/src/internal/data"
|
||||||
"github.com/alcionai/corso/src/internal/m365/collection/drive"
|
"github.com/alcionai/corso/src/internal/m365/collection/drive"
|
||||||
|
"github.com/alcionai/corso/src/internal/m365/graph"
|
||||||
"github.com/alcionai/corso/src/internal/m365/support"
|
"github.com/alcionai/corso/src/internal/m365/support"
|
||||||
"github.com/alcionai/corso/src/internal/operations/inject"
|
"github.com/alcionai/corso/src/internal/operations/inject"
|
||||||
"github.com/alcionai/corso/src/pkg/backup/details"
|
"github.com/alcionai/corso/src/pkg/backup/details"
|
||||||
@ -29,24 +30,20 @@ func ConsumeRestoreCollections(
|
|||||||
rcc inject.RestoreConsumerConfig,
|
rcc inject.RestoreConsumerConfig,
|
||||||
ac api.Client,
|
ac api.Client,
|
||||||
backupDriveIDNames idname.Cacher,
|
backupDriveIDNames idname.Cacher,
|
||||||
|
backupSiteIDWebURL idname.Cacher,
|
||||||
dcs []data.RestoreCollection,
|
dcs []data.RestoreCollection,
|
||||||
deets *details.Builder,
|
deets *details.Builder,
|
||||||
errs *fault.Bus,
|
errs *fault.Bus,
|
||||||
ctr *count.Bus,
|
ctr *count.Bus,
|
||||||
) (*support.ControllerOperationStatus, error) {
|
) (*support.ControllerOperationStatus, error) {
|
||||||
var (
|
var (
|
||||||
restoreMetrics support.CollectionMetrics
|
restoreMetrics support.CollectionMetrics
|
||||||
caches = drive.NewRestoreCaches(backupDriveIDNames)
|
caches = drive.NewRestoreCaches(backupDriveIDNames)
|
||||||
lrh = drive.NewLibraryRestoreHandler(ac, rcc.Selector.PathService())
|
lrh = drive.NewLibraryRestoreHandler(ac, rcc.Selector.PathService())
|
||||||
el = errs.Local()
|
el = errs.Local()
|
||||||
|
webURLToSiteNames = map[string]string{}
|
||||||
)
|
)
|
||||||
|
|
||||||
// TODO: uncomment when a handler is available
|
|
||||||
// err := caches.Populate(ctx, lrh, rcc.ProtectedResource.ID())
|
|
||||||
// if err != nil {
|
|
||||||
// return nil, clues.Wrap(err, "initializing restore caches")
|
|
||||||
// }
|
|
||||||
|
|
||||||
// Reorder collections so that the parents directories are created
|
// Reorder collections so that the parents directories are created
|
||||||
// before the child directories; a requirement for permissions.
|
// before the child directories; a requirement for permissions.
|
||||||
data.SortRestoreCollections(dcs)
|
data.SortRestoreCollections(dcs)
|
||||||
@ -59,7 +56,7 @@ func ConsumeRestoreCollections(
|
|||||||
|
|
||||||
var (
|
var (
|
||||||
err error
|
err error
|
||||||
resp models.Siteable
|
siteName string
|
||||||
category = dc.FullPath().Category()
|
category = dc.FullPath().Category()
|
||||||
metrics support.CollectionMetrics
|
metrics support.CollectionMetrics
|
||||||
ictx = clues.Add(ctx,
|
ictx = clues.Add(ctx,
|
||||||
@ -71,16 +68,25 @@ func ConsumeRestoreCollections(
|
|||||||
|
|
||||||
switch dc.FullPath().Category() {
|
switch dc.FullPath().Category() {
|
||||||
case path.LibrariesCategory:
|
case path.LibrariesCategory:
|
||||||
// TODO(meain): As of now we only restore the root site
|
siteID := dc.FullPath().Folders()[1]
|
||||||
// and that too to whatever is currently the root site of the
|
|
||||||
// group and not the original one. Not sure if the
|
webURL, ok := backupSiteIDWebURL.NameOf(siteID)
|
||||||
// original can be changed.
|
if !ok {
|
||||||
resp, err = ac.Groups().GetRootSite(ctx, rcc.ProtectedResource.ID())
|
// This should not happen, but just in case
|
||||||
if err != nil {
|
logger.Ctx(ctx).With("site_id", siteID).Info("site weburl not found, using site id")
|
||||||
return nil, err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
pr := idname.NewProvider(ptr.Val(resp.GetId()), ptr.Val(resp.GetName()))
|
siteName, err = getSiteName(ctx, siteID, webURL, ac.Sites(), webURLToSiteNames)
|
||||||
|
if err != nil {
|
||||||
|
el.AddRecoverable(ctx, clues.Wrap(err, "getting site").
|
||||||
|
With("web_url", webURL, "site_id", siteID))
|
||||||
|
} else if len(siteName) == 0 {
|
||||||
|
// Site was deleted in between backup and restore and is not
|
||||||
|
// available anymore.
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
pr := idname.NewProvider(siteID, siteName)
|
||||||
srcc := inject.RestoreConsumerConfig{
|
srcc := inject.RestoreConsumerConfig{
|
||||||
BackupVersion: rcc.BackupVersion,
|
BackupVersion: rcc.BackupVersion,
|
||||||
Options: rcc.Options,
|
Options: rcc.Options,
|
||||||
@ -133,3 +139,38 @@ func ConsumeRestoreCollections(
|
|||||||
|
|
||||||
return status, el.Failure()
|
return status, el.Failure()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func getSiteName(
|
||||||
|
ctx context.Context,
|
||||||
|
siteID string,
|
||||||
|
webURL string,
|
||||||
|
ac api.GetByIDer[models.Siteable],
|
||||||
|
webURLToSiteNames map[string]string,
|
||||||
|
) (string, error) {
|
||||||
|
siteName, ok := webURLToSiteNames[webURL]
|
||||||
|
if ok {
|
||||||
|
return siteName, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
site, err := ac.GetByID(ctx, siteID, api.CallConfig{})
|
||||||
|
if err != nil {
|
||||||
|
webURLToSiteNames[webURL] = ""
|
||||||
|
|
||||||
|
if graph.IsErrSiteNotFound(err) {
|
||||||
|
// TODO(meain): Should we surface this to the user somehow?
|
||||||
|
// In case a site that we had previously backed up was
|
||||||
|
// deleted, skip that site with a warning.
|
||||||
|
logger.Ctx(ctx).With("web_url", webURL, "site_id", siteID).
|
||||||
|
Info("Site does not exist, skipping restore.")
|
||||||
|
|
||||||
|
return "", nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
siteName = ptr.Val(site.GetDisplayName())
|
||||||
|
webURLToSiteNames[webURL] = siteName
|
||||||
|
|
||||||
|
return siteName, nil
|
||||||
|
}
|
||||||
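A minimal usage sketch of getSiteName (illustrative only, not part of this change; the helper name and its inputs are assumptions): the webURL-keyed map memoizes Graph lookups across collections, and an empty name marks a site deleted after the backup, which callers skip rather than fail on.

// resolveSiteNames is a hypothetical helper showing the intended use of getSiteName.
func resolveSiteNames(
	ctx context.Context,
	siteIDToWebURL map[string]string, // assumed input: site IDs mapped to their backed-up web URLs
	sites api.GetByIDer[models.Siteable],
) (map[string]string, error) {
	// shared across lookups so repeated web URLs never hit the Graph API twice
	cache := map[string]string{}
	resolved := map[string]string{}

	for siteID, webURL := range siteIDToWebURL {
		name, err := getSiteName(ctx, siteID, webURL, sites, cache)
		if err != nil {
			return nil, clues.Wrap(err, "getting site name")
		}

		// an empty name means the site no longer exists; skip it instead of failing
		if len(name) == 0 {
			continue
		}

		resolved[siteID] = name
	}

	return resolved, nil
}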
|
|||||||
@ -7,12 +7,17 @@ import (
|
|||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
"github.com/stretchr/testify/suite"
|
"github.com/stretchr/testify/suite"
|
||||||
|
"golang.org/x/exp/slices"
|
||||||
|
|
||||||
"github.com/alcionai/corso/src/internal/common/idname"
|
"github.com/alcionai/corso/src/internal/common/idname"
|
||||||
|
"github.com/alcionai/corso/src/internal/common/ptr"
|
||||||
"github.com/alcionai/corso/src/internal/data"
|
"github.com/alcionai/corso/src/internal/data"
|
||||||
"github.com/alcionai/corso/src/internal/data/mock"
|
"github.com/alcionai/corso/src/internal/data/mock"
|
||||||
|
"github.com/alcionai/corso/src/internal/m365/graph"
|
||||||
"github.com/alcionai/corso/src/internal/operations/inject"
|
"github.com/alcionai/corso/src/internal/operations/inject"
|
||||||
"github.com/alcionai/corso/src/internal/tester"
|
"github.com/alcionai/corso/src/internal/tester"
|
||||||
|
"github.com/alcionai/corso/src/internal/tester/tconfig"
|
||||||
|
"github.com/alcionai/corso/src/pkg/control"
|
||||||
"github.com/alcionai/corso/src/pkg/fault"
|
"github.com/alcionai/corso/src/pkg/fault"
|
||||||
"github.com/alcionai/corso/src/pkg/path"
|
"github.com/alcionai/corso/src/pkg/path"
|
||||||
"github.com/alcionai/corso/src/pkg/services/m365/api"
|
"github.com/alcionai/corso/src/pkg/services/m365/api"
|
||||||
@ -52,9 +57,118 @@ func (suite *GroupsUnitSuite) TestConsumeRestoreCollections_noErrorOnGroups() {
|
|||||||
rcc,
|
rcc,
|
||||||
api.Client{},
|
api.Client{},
|
||||||
idname.NewCache(map[string]string{}),
|
idname.NewCache(map[string]string{}),
|
||||||
|
idname.NewCache(map[string]string{}),
|
||||||
dcs,
|
dcs,
|
||||||
nil,
|
nil,
|
||||||
fault.New(false),
|
fault.New(false),
|
||||||
nil)
|
nil)
|
||||||
assert.NoError(t, err, "Groups Channels restore")
|
assert.NoError(t, err, "Groups Channels restore")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type groupsIntegrationSuite struct {
|
||||||
|
tester.Suite
|
||||||
|
resource string
|
||||||
|
tenantID string
|
||||||
|
ac api.Client
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGroupsIntegrationSuite(t *testing.T) {
|
||||||
|
suite.Run(t, &groupsIntegrationSuite{
|
||||||
|
Suite: tester.NewIntegrationSuite(
|
||||||
|
t,
|
||||||
|
[][]string{tconfig.M365AcctCredEnvs}),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (suite *groupsIntegrationSuite) SetupSuite() {
|
||||||
|
t := suite.T()
|
||||||
|
|
||||||
|
ctx, flush := tester.NewContext(t)
|
||||||
|
defer flush()
|
||||||
|
|
||||||
|
graph.InitializeConcurrencyLimiter(ctx, true, 4)
|
||||||
|
|
||||||
|
suite.resource = tconfig.M365TeamID(t)
|
||||||
|
|
||||||
|
acct := tconfig.NewM365Account(t)
|
||||||
|
creds, err := acct.M365Config()
|
||||||
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
|
suite.ac, err = api.NewClient(creds, control.DefaultOptions())
|
||||||
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
|
suite.tenantID = creds.AzureTenantID
|
||||||
|
}
|
||||||
|
|
||||||
|
// test for getSiteName
|
||||||
|
func (suite *groupsIntegrationSuite) TestGetSiteName() {
|
||||||
|
t := suite.T()
|
||||||
|
|
||||||
|
ctx, flush := tester.NewContext(t)
|
||||||
|
defer flush()
|
||||||
|
|
||||||
|
rootSite, err := suite.ac.Groups().GetRootSite(ctx, suite.resource)
|
||||||
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
|
// Generate a fake site ID that appears valid to the Graph API but doesn't actually exist.
|
||||||
|
// This could be flaky in theory, but it's highly unlikely.
|
||||||
|
unavailableSiteID := []rune(ptr.Val(rootSite.GetId()))
|
||||||
|
firstIDChar := slices.Index(unavailableSiteID, ',') + 1
|
||||||
|
|
||||||
|
if unavailableSiteID[firstIDChar] != '2' {
|
||||||
|
unavailableSiteID[firstIDChar] = '2'
|
||||||
|
} else {
|
||||||
|
unavailableSiteID[firstIDChar] = '1'
|
||||||
|
}
|
||||||
|
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
siteID string
|
||||||
|
webURL string
|
||||||
|
siteName string
|
||||||
|
webURLToSiteNames map[string]string
|
||||||
|
expectErr assert.ErrorAssertionFunc
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "valid",
|
||||||
|
siteID: ptr.Val(rootSite.GetId()),
|
||||||
|
webURL: ptr.Val(rootSite.GetWebUrl()),
|
||||||
|
siteName: *rootSite.GetDisplayName(),
|
||||||
|
webURLToSiteNames: map[string]string{},
|
||||||
|
expectErr: assert.NoError,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "unavailable",
|
||||||
|
siteID: string(unavailableSiteID),
|
||||||
|
webURL: "https://does-not-matter",
|
||||||
|
siteName: "",
|
||||||
|
webURLToSiteNames: map[string]string{},
|
||||||
|
expectErr: assert.NoError,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "previously found",
|
||||||
|
siteID: "random-id",
|
||||||
|
webURL: "https://random-url",
|
||||||
|
siteName: "random-name",
|
||||||
|
webURLToSiteNames: map[string]string{"https://random-url": "random-name"},
|
||||||
|
expectErr: assert.NoError,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range tests {
|
||||||
|
suite.Run(test.name, func() {
|
||||||
|
t := suite.T()
|
||||||
|
|
||||||
|
siteName, err := getSiteName(
|
||||||
|
ctx,
|
||||||
|
test.siteID,
|
||||||
|
test.webURL,
|
||||||
|
suite.ac.Sites(),
|
||||||
|
test.webURLToSiteNames)
|
||||||
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
|
test.expectErr(t, err)
|
||||||
|
assert.Equal(t, test.siteName, siteName)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@ -8,13 +8,11 @@ import (
|
|||||||
"github.com/microsoftgraph/msgraph-sdk-go/drives"
|
"github.com/microsoftgraph/msgraph-sdk-go/drives"
|
||||||
"github.com/microsoftgraph/msgraph-sdk-go/models"
|
"github.com/microsoftgraph/msgraph-sdk-go/models"
|
||||||
|
|
||||||
"github.com/alcionai/corso/src/internal/common/ptr"
|
|
||||||
odConsts "github.com/alcionai/corso/src/internal/m365/service/onedrive/consts"
|
odConsts "github.com/alcionai/corso/src/internal/m365/service/onedrive/consts"
|
||||||
"github.com/alcionai/corso/src/pkg/backup/details"
|
"github.com/alcionai/corso/src/pkg/backup/details"
|
||||||
"github.com/alcionai/corso/src/pkg/control"
|
"github.com/alcionai/corso/src/pkg/control"
|
||||||
"github.com/alcionai/corso/src/pkg/path"
|
"github.com/alcionai/corso/src/pkg/path"
|
||||||
"github.com/alcionai/corso/src/pkg/services/m365/api"
|
"github.com/alcionai/corso/src/pkg/services/m365/api"
|
||||||
apiMock "github.com/alcionai/corso/src/pkg/services/m365/api/mock"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// ---------------------------------------------------------------------------
|
// ---------------------------------------------------------------------------
|
||||||
@ -24,8 +22,6 @@ import (
|
|||||||
type BackupHandler struct {
|
type BackupHandler struct {
|
||||||
ItemInfo details.ItemInfo
|
ItemInfo details.ItemInfo
|
||||||
|
|
||||||
DriveItemEnumeration EnumeratesDriveItemsDelta
|
|
||||||
|
|
||||||
GI GetsItem
|
GI GetsItem
|
||||||
GIP GetsItemPermission
|
GIP GetsItemPermission
|
||||||
|
|
||||||
@ -59,7 +55,6 @@ func DefaultOneDriveBH(resourceOwner string) *BackupHandler {
|
|||||||
OneDrive: &details.OneDriveInfo{},
|
OneDrive: &details.OneDriveInfo{},
|
||||||
Extension: &details.ExtensionData{},
|
Extension: &details.ExtensionData{},
|
||||||
},
|
},
|
||||||
DriveItemEnumeration: EnumeratesDriveItemsDelta{},
|
|
||||||
GI: GetsItem{Err: clues.New("not defined")},
|
GI: GetsItem{Err: clues.New("not defined")},
|
||||||
GIP: GetsItemPermission{Err: clues.New("not defined")},
|
GIP: GetsItemPermission{Err: clues.New("not defined")},
|
||||||
PathPrefixFn: defaultOneDrivePathPrefixer,
|
PathPrefixFn: defaultOneDrivePathPrefixer,
|
||||||
@ -129,6 +124,10 @@ func (h BackupHandler) NewDrivePager(string, []string) api.Pager[models.Driveabl
|
|||||||
return h.DrivePagerV
|
return h.DrivePagerV
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (h BackupHandler) NewItemPager(driveID string, _ string, _ []string) api.DeltaPager[models.DriveItemable] {
|
||||||
|
return h.ItemPagerV[driveID]
|
||||||
|
}
|
||||||
|
|
||||||
func (h BackupHandler) FormatDisplayPath(_ string, pb *path.Builder) string {
|
func (h BackupHandler) FormatDisplayPath(_ string, pb *path.Builder) string {
|
||||||
return "/" + pb.String()
|
return "/" + pb.String()
|
||||||
}
|
}
|
||||||
@ -153,13 +152,6 @@ func (h *BackupHandler) Get(context.Context, string, map[string]string) (*http.R
|
|||||||
return h.GetResps[c], h.GetErrs[c]
|
return h.GetResps[c], h.GetErrs[c]
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h BackupHandler) EnumerateDriveItemsDelta(
|
|
||||||
ctx context.Context,
|
|
||||||
driveID, prevDeltaLink string,
|
|
||||||
) ([]models.DriveItemable, api.DeltaUpdate, error) {
|
|
||||||
return h.DriveItemEnumeration.EnumerateDriveItemsDelta(ctx, driveID, prevDeltaLink)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h BackupHandler) GetItem(ctx context.Context, _, _ string) (models.DriveItemable, error) {
|
func (h BackupHandler) GetItem(ctx context.Context, _, _ string) (models.DriveItemable, error) {
|
||||||
return h.GI.GetItem(ctx, "", "")
|
return h.GI.GetItem(ctx, "", "")
|
||||||
}
|
}
|
||||||
@ -262,65 +254,6 @@ func (m GetsItem) GetItem(
|
|||||||
return m.Item, m.Err
|
return m.Item, m.Err
|
||||||
}
|
}
|
||||||
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
// Enumerates Drive Items
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
type EnumeratesDriveItemsDelta struct {
|
|
||||||
Items map[string][]models.DriveItemable
|
|
||||||
DeltaUpdate map[string]api.DeltaUpdate
|
|
||||||
Err map[string]error
|
|
||||||
}
|
|
||||||
|
|
||||||
func (edi EnumeratesDriveItemsDelta) EnumerateDriveItemsDelta(
|
|
||||||
_ context.Context,
|
|
||||||
driveID, _ string,
|
|
||||||
) (
|
|
||||||
[]models.DriveItemable,
|
|
||||||
api.DeltaUpdate,
|
|
||||||
error,
|
|
||||||
) {
|
|
||||||
return edi.Items[driveID], edi.DeltaUpdate[driveID], edi.Err[driveID]
|
|
||||||
}
|
|
||||||
|
|
||||||
func PagerResultToEDID(
|
|
||||||
m map[string][]apiMock.PagerResult[models.DriveItemable],
|
|
||||||
) EnumeratesDriveItemsDelta {
|
|
||||||
edi := EnumeratesDriveItemsDelta{
|
|
||||||
Items: map[string][]models.DriveItemable{},
|
|
||||||
DeltaUpdate: map[string]api.DeltaUpdate{},
|
|
||||||
Err: map[string]error{},
|
|
||||||
}
|
|
||||||
|
|
||||||
for driveID, results := range m {
|
|
||||||
var (
|
|
||||||
err error
|
|
||||||
items = []models.DriveItemable{}
|
|
||||||
deltaUpdate api.DeltaUpdate
|
|
||||||
)
|
|
||||||
|
|
||||||
for _, pr := range results {
|
|
||||||
items = append(items, pr.Values...)
|
|
||||||
|
|
||||||
if pr.DeltaLink != nil {
|
|
||||||
deltaUpdate = api.DeltaUpdate{URL: ptr.Val(pr.DeltaLink)}
|
|
||||||
}
|
|
||||||
|
|
||||||
if pr.Err != nil {
|
|
||||||
err = pr.Err
|
|
||||||
}
|
|
||||||
|
|
||||||
deltaUpdate.Reset = deltaUpdate.Reset || pr.ResetDelta
|
|
||||||
}
|
|
||||||
|
|
||||||
edi.Items[driveID] = items
|
|
||||||
edi.Err[driveID] = err
|
|
||||||
edi.DeltaUpdate[driveID] = deltaUpdate
|
|
||||||
}
|
|
||||||
|
|
||||||
return edi
|
|
||||||
}
|
|
||||||
|
|
||||||
// ---------------------------------------------------------------------------
|
// ---------------------------------------------------------------------------
|
||||||
// Get Item Permissioner
|
// Get Item Permissioner
|
||||||
// ---------------------------------------------------------------------------
|
// ---------------------------------------------------------------------------
|
||||||
|
|||||||
@ -109,10 +109,11 @@ func (suite *SharePointPageSuite) TestRestoreSinglePage() {
|
|||||||
//nolint:lll
|
//nolint:lll
|
||||||
byteArray := spMock.Page("Byte Test")
|
byteArray := spMock.Page("Byte Test")
|
||||||
|
|
||||||
pageData := data.NewUnindexedPrefetchedItem(
|
pageData, err := data.NewUnindexedPrefetchedItem(
|
||||||
io.NopCloser(bytes.NewReader(byteArray)),
|
io.NopCloser(bytes.NewReader(byteArray)),
|
||||||
testName,
|
testName,
|
||||||
time.Now())
|
time.Now())
|
||||||
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
info, err := api.RestoreSitePage(
|
info, err := api.RestoreSitePage(
|
||||||
ctx,
|
ctx,
|
||||||
|
|||||||
@ -90,9 +90,12 @@ func (suite *LibrariesBackupUnitSuite) TestUpdateCollections() {
|
|||||||
|
|
||||||
var (
|
var (
|
||||||
paths = map[string]string{}
|
paths = map[string]string{}
|
||||||
currPaths = map[string]string{}
|
newPaths = map[string]string{}
|
||||||
excluded = map[string]struct{}{}
|
excluded = map[string]struct{}{}
|
||||||
collMap = map[string]map[string]*drive.Collection{
|
itemColls = map[string]map[string]string{
|
||||||
|
driveID: {},
|
||||||
|
}
|
||||||
|
collMap = map[string]map[string]*drive.Collection{
|
||||||
driveID: {},
|
driveID: {},
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
@ -106,14 +109,15 @@ func (suite *LibrariesBackupUnitSuite) TestUpdateCollections() {
|
|||||||
|
|
||||||
c.CollectionMap = collMap
|
c.CollectionMap = collMap
|
||||||
|
|
||||||
_, err := c.UpdateCollections(
|
err := c.UpdateCollections(
|
||||||
ctx,
|
ctx,
|
||||||
driveID,
|
driveID,
|
||||||
"General",
|
"General",
|
||||||
test.items,
|
test.items,
|
||||||
paths,
|
paths,
|
||||||
currPaths,
|
newPaths,
|
||||||
excluded,
|
excluded,
|
||||||
|
itemColls,
|
||||||
true,
|
true,
|
||||||
fault.New(true))
|
fault.New(true))
|
||||||
|
|
||||||
|
|||||||
@ -4,6 +4,7 @@ import (
|
|||||||
"bytes"
|
"bytes"
|
||||||
"io"
|
"io"
|
||||||
|
|
||||||
|
"github.com/alcionai/clues"
|
||||||
"golang.org/x/exp/maps"
|
"golang.org/x/exp/maps"
|
||||||
|
|
||||||
"github.com/alcionai/corso/src/internal/data"
|
"github.com/alcionai/corso/src/internal/data"
|
||||||
@ -163,28 +164,29 @@ func CollectionsForInfo(
|
|||||||
func backupOutputPathFromRestore(
|
func backupOutputPathFromRestore(
|
||||||
restoreCfg control.RestoreConfig,
|
restoreCfg control.RestoreConfig,
|
||||||
inputPath path.Path,
|
inputPath path.Path,
|
||||||
) (path.Path, error) {
|
) (*path.Builder, error) {
|
||||||
base := []string{restoreCfg.Location}
|
base := []string{restoreCfg.Location}
|
||||||
|
folders := inputPath.Folders()
|
||||||
|
|
||||||
|
switch inputPath.Service() {
|
||||||
// OneDrive has leading information like the drive ID.
|
// OneDrive has leading information like the drive ID.
|
||||||
if inputPath.Service() == path.OneDriveService || inputPath.Service() == path.SharePointService {
|
case path.OneDriveService, path.SharePointService:
|
||||||
folders := inputPath.Folders()
|
p, err := path.ToDrivePath(inputPath)
|
||||||
base = append(append([]string{}, folders[:3]...), restoreCfg.Location)
|
if err != nil {
|
||||||
|
return nil, clues.Stack(err)
|
||||||
|
}
|
||||||
|
|
||||||
if len(folders) > 3 {
|
// Remove driveID, root, etc.
|
||||||
base = append(base, folders[3:]...)
|
folders = p.Folders
|
||||||
|
// Re-add root, but it needs to be in front of the restore folder.
|
||||||
|
base = append([]string{p.Root}, base...)
|
||||||
|
|
||||||
|
// Currently contacts restore doesn't have nested folders.
|
||||||
|
case path.ExchangeService:
|
||||||
|
if inputPath.Category() == path.ContactsCategory {
|
||||||
|
folders = nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if inputPath.Service() == path.ExchangeService && inputPath.Category() == path.EmailCategory {
|
return path.Builder{}.Append(append(base, folders...)...), nil
|
||||||
base = append(base, inputPath.Folders()...)
|
|
||||||
}
|
|
||||||
|
|
||||||
return path.Build(
|
|
||||||
inputPath.Tenant(),
|
|
||||||
inputPath.ProtectedResource(),
|
|
||||||
inputPath.Service(),
|
|
||||||
inputPath.Category(),
|
|
||||||
false,
|
|
||||||
base...)
|
|
||||||
}
|
}
|
||||||
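A worked example of the path rewrite above (the paths, folder names, and restore location are assumed for illustration):

// OneDrive/SharePoint input:  .../drives/<driveID>/root:/folderA/folderB
// restoreCfg.Location:        "Corso_Restore"
// ToDrivePath strips the drive prefix, leaving Root="root:" and Folders=[folderA folderB];
// the root is re-added ahead of the restore folder, so the returned builder is
//   root:/Corso_Restore/folderA/folderB
// For Exchange contacts the folder list is dropped, leaving only "Corso_Restore".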
|
|||||||
@ -762,11 +762,10 @@ func runDriveIncrementalTest(
|
|||||||
true)
|
true)
|
||||||
|
|
||||||
// do some additional checks to ensure the incremental dealt with fewer items.
|
// do some additional checks to ensure the incremental dealt with fewer items.
|
||||||
// +2 on read/writes to account for metadata: 1 delta and 1 path.
|
|
||||||
var (
|
var (
|
||||||
expectWrites = test.itemsWritten + 2
|
expectWrites = test.itemsWritten
|
||||||
expectNonMetaWrites = test.nonMetaItemsWritten
|
expectNonMetaWrites = test.nonMetaItemsWritten
|
||||||
expectReads = test.itemsRead + 2
|
expectReads = test.itemsRead
|
||||||
assertReadWrite = assert.Equal
|
assertReadWrite = assert.Equal
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -775,6 +774,17 @@ func runDriveIncrementalTest(
|
|||||||
// /libraries/sites/previouspath
|
// /libraries/sites/previouspath
|
||||||
expectWrites++
|
expectWrites++
|
||||||
expectReads++
|
expectReads++
|
||||||
|
|
||||||
|
// +2 on read/writes to account for metadata: 1 delta and 1 path (for each site)
|
||||||
|
sites, err := ac.Groups().GetAllSites(ctx, owner, fault.New(true))
|
||||||
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
|
expectWrites += len(sites) * 2
|
||||||
|
expectReads += len(sites) * 2
|
||||||
|
} else {
|
||||||
|
// +2 on read/writes to account for metadata: 1 delta and 1 path.
|
||||||
|
expectWrites += 2
|
||||||
|
expectReads += 2
|
||||||
}
|
}
|
||||||
|
|
||||||
// Sharepoint can produce a superset of permissions by nature of
|
// Sharepoint can produce a superset of permissions by nature of
|
||||||
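A quick arithmetic check of the metadata accounting above (counts assumed for illustration, and assuming the Groups-specific branch shown here applies): for a Groups backup covering 3 sites with test.itemsWritten = 10, the write expectation works out as follows; reads follow the same pattern.

// expectWrites = 10      // items written by the test
//              + 1       // /libraries/sites/previouspath collection
//              + 3*2     // 1 delta + 1 path metadata file per site
//              = 17
// Non-Groups services instead add a flat +2 for the single delta/path pair.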
|
|||||||
@ -182,12 +182,17 @@ func collect(
|
|||||||
return nil, clues.Wrap(err, "marshalling body").WithClues(ctx)
|
return nil, clues.Wrap(err, "marshalling body").WithClues(ctx)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
item, err := data.NewUnindexedPrefetchedItem(
|
||||||
|
io.NopCloser(bytes.NewReader(bs)),
|
||||||
|
col.itemName,
|
||||||
|
time.Now())
|
||||||
|
if err != nil {
|
||||||
|
return nil, clues.Stack(err).WithClues(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
dc := streamCollection{
|
dc := streamCollection{
|
||||||
folderPath: p,
|
folderPath: p,
|
||||||
item: data.NewUnindexedPrefetchedItem(
|
item: item,
|
||||||
io.NopCloser(bytes.NewReader(bs)),
|
|
||||||
col.itemName,
|
|
||||||
time.Now()),
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return &dc, nil
|
return &dc, nil
|
||||||
|
|||||||
@ -384,20 +384,20 @@ func (pec printableErrCore) Values() []string {
|
|||||||
// funcs, and the function that spawned the local bus should always
|
// funcs, and the function that spawned the local bus should always
|
||||||
// return `local.Failure()` to ensure that hard failures are propagated
|
// return `local.Failure()` to ensure that hard failures are propagated
|
||||||
// back upstream.
|
// back upstream.
|
||||||
func (e *Bus) Local() *LocalBus {
|
func (e *Bus) Local() *localBus {
|
||||||
return &LocalBus{
|
return &localBus{
|
||||||
mu: &sync.Mutex{},
|
mu: &sync.Mutex{},
|
||||||
bus: e,
|
bus: e,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
type LocalBus struct {
|
type localBus struct {
|
||||||
mu *sync.Mutex
|
mu *sync.Mutex
|
||||||
bus *Bus
|
bus *Bus
|
||||||
current error
|
current error
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e *LocalBus) AddRecoverable(ctx context.Context, err error) {
|
func (e *localBus) AddRecoverable(ctx context.Context, err error) {
|
||||||
if err == nil {
|
if err == nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@ -422,7 +422,7 @@ func (e *LocalBus) AddRecoverable(ctx context.Context, err error) {
|
|||||||
// 2. Skipping avoids a permanent and consistent failure. If
|
// 2. Skipping avoids a permanent and consistent failure. If
|
||||||
// the underlying reason is transient or otherwise recoverable,
|
// the underlying reason is transient or otherwise recoverable,
|
||||||
// the item should not be skipped.
|
// the item should not be skipped.
|
||||||
func (e *LocalBus) AddSkip(ctx context.Context, s *Skipped) {
|
func (e *localBus) AddSkip(ctx context.Context, s *Skipped) {
|
||||||
if s == nil {
|
if s == nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@ -437,7 +437,7 @@ func (e *LocalBus) AddSkip(ctx context.Context, s *Skipped) {
|
|||||||
// It does not return the underlying bus.Failure(), only the failure
|
// It does not return the underlying bus.Failure(), only the failure
|
||||||
// that was recorded within the local bus instance. This error should
|
// that was recorded within the local bus instance. This error should
|
||||||
// get returned by any func which created a local bus.
|
// get returned by any func which created a local bus.
|
||||||
func (e *LocalBus) Failure() error {
|
func (e *localBus) Failure() error {
|
||||||
return e.current
|
return e.current
|
||||||
}
|
}
|
||||||
|
|
||||||
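A short sketch of the local bus pattern described in the comments above (illustrative; processOne is a hypothetical worker): recoverable errors are added to the local bus while iteration continues, and the spawning function returns el.Failure() so hard failures still propagate upstream.

func processAll(ctx context.Context, items []string, errs *fault.Bus) error {
	el := errs.Local()

	for _, item := range items {
		// processOne stands in for whatever per-item work the caller does
		if err := processOne(ctx, item); err != nil {
			el.AddRecoverable(ctx, err)
			continue
		}
	}

	return el.Failure()
}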
|
|||||||
@ -96,14 +96,10 @@ var serviceCategories = map[ServiceType]map[CategoryType]struct{}{
|
|||||||
ChannelMessagesCategory: {},
|
ChannelMessagesCategory: {},
|
||||||
LibrariesCategory: {},
|
LibrariesCategory: {},
|
||||||
},
|
},
|
||||||
TeamsService: {
|
|
||||||
ChannelMessagesCategory: {},
|
|
||||||
LibrariesCategory: {},
|
|
||||||
},
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func validateServiceAndCategoryStrings(s, c string) (ServiceType, CategoryType, error) {
|
func validateServiceAndCategoryStrings(s, c string) (ServiceType, CategoryType, error) {
|
||||||
service := toServiceType(s)
|
service := ToServiceType(s)
|
||||||
if service == UnknownService {
|
if service == UnknownService {
|
||||||
return UnknownService, UnknownCategory, clues.Stack(ErrorUnknownService).With("service", fmt.Sprintf("%q", s))
|
return UnknownService, UnknownCategory, clues.Stack(ErrorUnknownService).With("service", fmt.Sprintf("%q", s))
|
||||||
}
|
}
|
||||||
|
|||||||
@ -157,7 +157,7 @@ func (suite *ServiceCategoryUnitSuite) TestToServiceType() {
|
|||||||
suite.Run(test.name, func() {
|
suite.Run(test.name, func() {
|
||||||
t := suite.T()
|
t := suite.T()
|
||||||
|
|
||||||
assert.Equal(t, test.expected, toServiceType(test.service))
|
assert.Equal(t, test.expected, ToServiceType(test.service))
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@ -15,27 +15,25 @@ var ErrorUnknownService = clues.New("unknown service string")
|
|||||||
// Metadata services are not considered valid service types for resource paths
|
// Metadata services are not considered valid service types for resource paths
|
||||||
// though they can be used for metadata paths.
|
// though they can be used for metadata paths.
|
||||||
//
|
//
|
||||||
// The order of the enums below can be changed, but the string representation of
|
// The string representation of each enum _must remain the same_. In case of
|
||||||
// each enum must remain the same or migration code needs to be added to handle
|
// changes to those values, we'll need migration code to handle transitions
|
||||||
// changes to the string format.
|
// across states, else we'll get marshalling/unmarshalling errors.
|
||||||
type ServiceType int
|
type ServiceType int
|
||||||
|
|
||||||
//go:generate stringer -type=ServiceType -linecomment
|
//go:generate stringer -type=ServiceType -linecomment
|
||||||
const (
|
const (
|
||||||
UnknownService ServiceType = 0
|
UnknownService ServiceType = 0
|
||||||
ExchangeService ServiceType = 1 // exchange
|
ExchangeService ServiceType = 1 // exchange
|
||||||
OneDriveService ServiceType = 2 // onedrive
|
OneDriveService ServiceType = 2 // onedrive
|
||||||
SharePointService ServiceType = 3 // sharepoint
|
SharePointService ServiceType = 3 // sharepoint
|
||||||
ExchangeMetadataService ServiceType = 4 // exchangeMetadata
|
ExchangeMetadataService ServiceType = 4 // exchangeMetadata
|
||||||
OneDriveMetadataService ServiceType = 5 // onedriveMetadata
|
OneDriveMetadataService ServiceType = 5 // onedriveMetadata
|
||||||
SharePointMetadataService ServiceType = 6 // sharepointMetadata
|
SharePointMetadataService ServiceType = 6 // sharepointMetadata
|
||||||
GroupsService ServiceType = 7 // groups
|
GroupsService ServiceType = 7 // groups
|
||||||
GroupsMetadataService ServiceType = 8 // groupsMetadata
|
GroupsMetadataService ServiceType = 8 // groupsMetadata
|
||||||
TeamsService ServiceType = 9 // teams
|
|
||||||
TeamsMetadataService ServiceType = 10 // teamsMetadata
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func toServiceType(service string) ServiceType {
|
func ToServiceType(service string) ServiceType {
|
||||||
s := strings.ToLower(service)
|
s := strings.ToLower(service)
|
||||||
|
|
||||||
switch s {
|
switch s {
|
||||||
@ -47,8 +45,6 @@ func toServiceType(service string) ServiceType {
|
|||||||
return SharePointService
|
return SharePointService
|
||||||
case strings.ToLower(GroupsService.String()):
|
case strings.ToLower(GroupsService.String()):
|
||||||
return GroupsService
|
return GroupsService
|
||||||
case strings.ToLower(TeamsService.String()):
|
|
||||||
return TeamsService
|
|
||||||
case strings.ToLower(ExchangeMetadataService.String()):
|
case strings.ToLower(ExchangeMetadataService.String()):
|
||||||
return ExchangeMetadataService
|
return ExchangeMetadataService
|
||||||
case strings.ToLower(OneDriveMetadataService.String()):
|
case strings.ToLower(OneDriveMetadataService.String()):
|
||||||
@ -57,8 +53,6 @@ func toServiceType(service string) ServiceType {
|
|||||||
return SharePointMetadataService
|
return SharePointMetadataService
|
||||||
case strings.ToLower(GroupsMetadataService.String()):
|
case strings.ToLower(GroupsMetadataService.String()):
|
||||||
return GroupsMetadataService
|
return GroupsMetadataService
|
||||||
case strings.ToLower(TeamsMetadataService.String()):
|
|
||||||
return TeamsMetadataService
|
|
||||||
default:
|
default:
|
||||||
return UnknownService
|
return UnknownService
|
||||||
}
|
}
|
||||||
|
|||||||
@ -17,13 +17,11 @@ func _() {
|
|||||||
_ = x[SharePointMetadataService-6]
|
_ = x[SharePointMetadataService-6]
|
||||||
_ = x[GroupsService-7]
|
_ = x[GroupsService-7]
|
||||||
_ = x[GroupsMetadataService-8]
|
_ = x[GroupsMetadataService-8]
|
||||||
_ = x[TeamsService-9]
|
|
||||||
_ = x[TeamsMetadataService-10]
|
|
||||||
}
|
}
|
||||||
|
|
||||||
const _ServiceType_name = "UnknownServiceexchangeonedrivesharepointexchangeMetadataonedriveMetadatasharepointMetadatagroupsgroupsMetadatateamsteamsMetadata"
|
const _ServiceType_name = "UnknownServiceexchangeonedrivesharepointexchangeMetadataonedriveMetadatasharepointMetadatagroupsgroupsMetadata"
|
||||||
|
|
||||||
var _ServiceType_index = [...]uint8{0, 14, 22, 30, 40, 56, 72, 90, 96, 110, 115, 128}
|
var _ServiceType_index = [...]uint8{0, 14, 22, 30, 40, 56, 72, 90, 96, 110}
|
||||||
|
|
||||||
func (i ServiceType) String() string {
|
func (i ServiceType) String() string {
|
||||||
if i < 0 || i >= ServiceType(len(_ServiceType_index)-1) {
|
if i < 0 || i >= ServiceType(len(_ServiceType_index)-1) {
|
||||||
|
|||||||
359
src/pkg/repository/backups.go
Normal file
@ -0,0 +1,359 @@
|
|||||||
|
package repository
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
|
"github.com/alcionai/clues"
|
||||||
|
"github.com/kopia/kopia/repo/manifest"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
|
||||||
|
"github.com/alcionai/corso/src/internal/common/idname"
|
||||||
|
"github.com/alcionai/corso/src/internal/data"
|
||||||
|
"github.com/alcionai/corso/src/internal/kopia"
|
||||||
|
"github.com/alcionai/corso/src/internal/m365/collection/drive/metadata"
|
||||||
|
"github.com/alcionai/corso/src/internal/model"
|
||||||
|
"github.com/alcionai/corso/src/internal/operations"
|
||||||
|
"github.com/alcionai/corso/src/internal/streamstore"
|
||||||
|
"github.com/alcionai/corso/src/internal/version"
|
||||||
|
"github.com/alcionai/corso/src/pkg/backup"
|
||||||
|
"github.com/alcionai/corso/src/pkg/backup/details"
|
||||||
|
"github.com/alcionai/corso/src/pkg/fault"
|
||||||
|
"github.com/alcionai/corso/src/pkg/selectors"
|
||||||
|
"github.com/alcionai/corso/src/pkg/store"
|
||||||
|
)
|
||||||
|
|
||||||
|
// BackupGetter deals with retrieving metadata about backups from the
|
||||||
|
// repository.
|
||||||
|
type BackupGetter interface {
|
||||||
|
Backup(ctx context.Context, id string) (*backup.Backup, error)
|
||||||
|
Backups(ctx context.Context, ids []string) ([]*backup.Backup, *fault.Bus)
|
||||||
|
BackupsByTag(ctx context.Context, fs ...store.FilterOption) ([]*backup.Backup, error)
|
||||||
|
GetBackupDetails(
|
||||||
|
ctx context.Context,
|
||||||
|
backupID string,
|
||||||
|
) (*details.Details, *backup.Backup, *fault.Bus)
|
||||||
|
GetBackupErrors(
|
||||||
|
ctx context.Context,
|
||||||
|
backupID string,
|
||||||
|
) (*fault.Errors, *backup.Backup, *fault.Bus)
|
||||||
|
}
|
||||||
|
|
||||||
|
type Backuper interface {
|
||||||
|
NewBackup(
|
||||||
|
ctx context.Context,
|
||||||
|
self selectors.Selector,
|
||||||
|
) (operations.BackupOperation, error)
|
||||||
|
NewBackupWithLookup(
|
||||||
|
ctx context.Context,
|
||||||
|
self selectors.Selector,
|
||||||
|
ins idname.Cacher,
|
||||||
|
) (operations.BackupOperation, error)
|
||||||
|
DeleteBackups(
|
||||||
|
ctx context.Context,
|
||||||
|
failOnMissing bool,
|
||||||
|
ids ...string,
|
||||||
|
) error
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewBackup generates a BackupOperation runner.
|
||||||
|
func (r repository) NewBackup(
|
||||||
|
ctx context.Context,
|
||||||
|
sel selectors.Selector,
|
||||||
|
) (operations.BackupOperation, error) {
|
||||||
|
return r.NewBackupWithLookup(ctx, sel, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewBackupWithLookup generates a BackupOperation runner.
|
||||||
|
// ins is an optional idname cache population, in case the caller has
|
||||||
|
// already generated those values.
|
||||||
|
func (r repository) NewBackupWithLookup(
|
||||||
|
ctx context.Context,
|
||||||
|
sel selectors.Selector,
|
||||||
|
ins idname.Cacher,
|
||||||
|
) (operations.BackupOperation, error) {
|
||||||
|
err := r.ConnectDataProvider(ctx, sel.PathService())
|
||||||
|
if err != nil {
|
||||||
|
return operations.BackupOperation{}, clues.Wrap(err, "connecting to m365")
|
||||||
|
}
|
||||||
|
|
||||||
|
ownerID, ownerName, err := r.Provider.PopulateProtectedResourceIDAndName(ctx, sel.DiscreteOwner, ins)
|
||||||
|
if err != nil {
|
||||||
|
return operations.BackupOperation{}, clues.Wrap(err, "resolving resource owner details")
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: retrieve display name from gc
|
||||||
|
sel = sel.SetDiscreteOwnerIDName(ownerID, ownerName)
|
||||||
|
|
||||||
|
return operations.NewBackupOperation(
|
||||||
|
ctx,
|
||||||
|
r.Opts,
|
||||||
|
r.dataLayer,
|
||||||
|
store.NewWrapper(r.modelStore),
|
||||||
|
r.Provider,
|
||||||
|
r.Account,
|
||||||
|
sel,
|
||||||
|
sel, // the selector acts as an IDNamer for its discrete resource owner.
|
||||||
|
r.Bus)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Backup retrieves a backup by id.
|
||||||
|
func (r repository) Backup(ctx context.Context, id string) (*backup.Backup, error) {
|
||||||
|
return getBackup(ctx, id, store.NewWrapper(r.modelStore))
|
||||||
|
}
|
||||||
|
|
||||||
|
// getBackup handles the processing for Backup.
|
||||||
|
func getBackup(
|
||||||
|
ctx context.Context,
|
||||||
|
id string,
|
||||||
|
sw store.BackupGetter,
|
||||||
|
) (*backup.Backup, error) {
|
||||||
|
b, err := sw.GetBackup(ctx, model.StableID(id))
|
||||||
|
if err != nil {
|
||||||
|
return nil, errWrapper(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return b, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Backups lists backups by ID. Returns as many backups as possible with
|
||||||
|
// errors for the backups it was unable to retrieve.
|
||||||
|
func (r repository) Backups(ctx context.Context, ids []string) ([]*backup.Backup, *fault.Bus) {
|
||||||
|
var (
|
||||||
|
bups []*backup.Backup
|
||||||
|
errs = fault.New(false)
|
||||||
|
sw = store.NewWrapper(r.modelStore)
|
||||||
|
)
|
||||||
|
|
||||||
|
for _, id := range ids {
|
||||||
|
ictx := clues.Add(ctx, "backup_id", id)
|
||||||
|
|
||||||
|
b, err := sw.GetBackup(ictx, model.StableID(id))
|
||||||
|
if err != nil {
|
||||||
|
errs.AddRecoverable(ctx, errWrapper(err))
|
||||||
|
}
|
||||||
|
|
||||||
|
bups = append(bups, b)
|
||||||
|
}
|
||||||
|
|
||||||
|
return bups, errs
|
||||||
|
}
|
||||||
|
|
||||||
|
// BackupsByTag lists all backups in a repository that contain all the tags
|
||||||
|
// specified.
|
||||||
|
func (r repository) BackupsByTag(ctx context.Context, fs ...store.FilterOption) ([]*backup.Backup, error) {
|
||||||
|
sw := store.NewWrapper(r.modelStore)
|
||||||
|
return backupsByTag(ctx, sw, fs)
|
||||||
|
}
|
||||||
|
|
||||||
|
// backupsByTag returns all backups matching all provided tags.
|
||||||
|
//
|
||||||
|
// TODO(ashmrtn): This exists mostly for testing, but we could restructure the
|
||||||
|
// code in this file so there's a more elegant mocking solution.
|
||||||
|
func backupsByTag(
|
||||||
|
ctx context.Context,
|
||||||
|
sw store.BackupWrapper,
|
||||||
|
fs []store.FilterOption,
|
||||||
|
) ([]*backup.Backup, error) {
|
||||||
|
bs, err := sw.GetBackups(ctx, fs...)
|
||||||
|
if err != nil {
|
||||||
|
return nil, clues.Stack(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Filter out assist backup bases as they're considered incomplete and we
|
||||||
|
// haven't been displaying them before now.
|
||||||
|
res := make([]*backup.Backup, 0, len(bs))
|
||||||
|
|
||||||
|
for _, b := range bs {
|
||||||
|
if t := b.Tags[model.BackupTypeTag]; t != model.AssistBackup {
|
||||||
|
res = append(res, b)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return res, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetBackupDetails returns the specified backup.Details.
|
||||||
|
func (r repository) GetBackupDetails(
|
||||||
|
ctx context.Context,
|
||||||
|
backupID string,
|
||||||
|
) (*details.Details, *backup.Backup, *fault.Bus) {
|
||||||
|
errs := fault.New(false)
|
||||||
|
|
||||||
|
deets, bup, err := getBackupDetails(
|
||||||
|
ctx,
|
||||||
|
backupID,
|
||||||
|
r.Account.ID(),
|
||||||
|
r.dataLayer,
|
||||||
|
store.NewWrapper(r.modelStore),
|
||||||
|
errs)
|
||||||
|
|
||||||
|
return deets, bup, errs.Fail(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// getBackupDetails handles the processing for GetBackupDetails.
|
||||||
|
func getBackupDetails(
|
||||||
|
ctx context.Context,
|
||||||
|
backupID, tenantID string,
|
||||||
|
kw *kopia.Wrapper,
|
||||||
|
sw store.BackupGetter,
|
||||||
|
errs *fault.Bus,
|
||||||
|
) (*details.Details, *backup.Backup, error) {
|
||||||
|
b, err := sw.GetBackup(ctx, model.StableID(backupID))
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, errWrapper(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
ssid := b.StreamStoreID
|
||||||
|
if len(ssid) == 0 {
|
||||||
|
ssid = b.DetailsID
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(ssid) == 0 {
|
||||||
|
return nil, b, clues.New("no streamstore id in backup").WithClues(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
sstore = streamstore.NewStreamer(kw, tenantID, b.Selector.PathService())
|
||||||
|
deets details.Details
|
||||||
|
)
|
||||||
|
|
||||||
|
err = sstore.Read(
|
||||||
|
ctx,
|
||||||
|
ssid,
|
||||||
|
streamstore.DetailsReader(details.UnmarshalTo(&deets)),
|
||||||
|
errs)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Retroactively fill in isMeta information for items in older
|
||||||
|
// backup versions without that info
|
||||||
|
// version.Restore2 introduces the IsMeta flag, so only v1 needs a check.
|
||||||
|
if b.Version >= version.OneDrive1DataAndMetaFiles && b.Version < version.OneDrive3IsMetaMarker {
|
||||||
|
for _, d := range deets.Entries {
|
||||||
|
if d.OneDrive != nil {
|
||||||
|
d.OneDrive.IsMeta = metadata.HasMetaSuffix(d.RepoRef)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
deets.DetailsModel = deets.FilterMetaFiles()
|
||||||
|
|
||||||
|
return &deets, b, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetBackupErrors returns the specified backup's fault.Errors.
|
||||||
|
func (r repository) GetBackupErrors(
|
||||||
|
ctx context.Context,
|
||||||
|
backupID string,
|
||||||
|
) (*fault.Errors, *backup.Backup, *fault.Bus) {
|
||||||
|
errs := fault.New(false)
|
||||||
|
|
||||||
|
fe, bup, err := getBackupErrors(
|
||||||
|
ctx,
|
||||||
|
backupID,
|
||||||
|
r.Account.ID(),
|
||||||
|
r.dataLayer,
|
||||||
|
store.NewWrapper(r.modelStore),
|
||||||
|
errs)
|
||||||
|
|
||||||
|
return fe, bup, errs.Fail(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// getBackupErrors handles the processing for GetBackupErrors.
|
||||||
|
func getBackupErrors(
|
||||||
|
ctx context.Context,
|
||||||
|
backupID, tenantID string,
|
||||||
|
kw *kopia.Wrapper,
|
||||||
|
sw store.BackupGetter,
|
||||||
|
errs *fault.Bus,
|
||||||
|
) (*fault.Errors, *backup.Backup, error) {
|
||||||
|
b, err := sw.GetBackup(ctx, model.StableID(backupID))
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, errWrapper(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
ssid := b.StreamStoreID
|
||||||
|
if len(ssid) == 0 {
|
||||||
|
return nil, b, clues.New("missing streamstore id in backup").WithClues(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
sstore = streamstore.NewStreamer(kw, tenantID, b.Selector.PathService())
|
||||||
|
fe fault.Errors
|
||||||
|
)
|
||||||
|
|
||||||
|
err = sstore.Read(
|
||||||
|
ctx,
|
||||||
|
ssid,
|
||||||
|
streamstore.FaultErrorsReader(fault.UnmarshalErrorsTo(&fe)),
|
||||||
|
errs)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &fe, b, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteBackups removes the backups from both the model store and the backup
|
||||||
|
// storage.
|
||||||
|
//
|
||||||
|
// If failOnMissing is true then returns an error if a backup model can't be
|
||||||
|
// found. Otherwise ignores missing backup models.
|
||||||
|
//
|
||||||
|
// Missing models or snapshots during the actual deletion do not cause errors.
|
||||||
|
//
|
||||||
|
// All backups are deleted as an atomic unit, so any failures will result in no
|
||||||
|
// deletions.
|
||||||
|
func (r repository) DeleteBackups(
|
||||||
|
ctx context.Context,
|
||||||
|
failOnMissing bool,
|
||||||
|
ids ...string,
|
||||||
|
) error {
|
||||||
|
return deleteBackups(ctx, store.NewWrapper(r.modelStore), failOnMissing, ids...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// deleteBackups handles the processing for backup deletion.
|
||||||
|
func deleteBackups(
|
||||||
|
ctx context.Context,
|
||||||
|
sw store.BackupGetterModelDeleter,
|
||||||
|
failOnMissing bool,
|
||||||
|
ids ...string,
|
||||||
|
) error {
|
||||||
|
// Although we haven't explicitly stated it, snapshots are technically
|
||||||
|
// manifests in kopia. This means we can use the same delete API to remove
|
||||||
|
// them and backup models. Deleting all of them together gives us both
|
||||||
|
// atomicity guarantees (around when data will be flushed) and helps reduce
|
||||||
|
// the number of manifest blobs that kopia will create.
|
||||||
|
var toDelete []manifest.ID
|
||||||
|
|
||||||
|
for _, id := range ids {
|
||||||
|
b, err := sw.GetBackup(ctx, model.StableID(id))
|
||||||
|
if err != nil {
|
||||||
|
if !failOnMissing && errors.Is(err, data.ErrNotFound) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
return clues.Stack(errWrapper(err)).
|
||||||
|
WithClues(ctx).
|
||||||
|
With("delete_backup_id", id)
|
||||||
|
}
|
||||||
|
|
||||||
|
toDelete = append(toDelete, b.ModelStoreID)
|
||||||
|
|
||||||
|
if len(b.SnapshotID) > 0 {
|
||||||
|
toDelete = append(toDelete, manifest.ID(b.SnapshotID))
|
||||||
|
}
|
||||||
|
|
||||||
|
ssid := b.StreamStoreID
|
||||||
|
if len(ssid) == 0 {
|
||||||
|
ssid = b.DetailsID
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(ssid) > 0 {
|
||||||
|
toDelete = append(toDelete, manifest.ID(ssid))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return sw.DeleteWithModelStoreIDs(ctx, toDelete...)
|
||||||
|
}
|
||||||
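A hedged example of calling the new deletion API (the backup IDs are placeholders): with failOnMissing set to false, backups already absent from the model store are skipped, and everything that is found is removed as a single atomic unit.

func deleteStaleBackups(ctx context.Context, r Backuper) error {
	// the IDs here are placeholders; failOnMissing=false tolerates already-deleted backups
	if err := r.DeleteBackups(ctx, false, "backup-id-1", "backup-id-2"); err != nil {
		return clues.Wrap(err, "deleting backups")
	}

	return nil
}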
88
src/pkg/repository/data_providers.go
Normal file
@ -0,0 +1,88 @@
|
|||||||
|
package repository
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/alcionai/clues"
|
||||||
|
|
||||||
|
"github.com/alcionai/corso/src/internal/m365"
|
||||||
|
"github.com/alcionai/corso/src/internal/observe"
|
||||||
|
"github.com/alcionai/corso/src/internal/operations/inject"
|
||||||
|
"github.com/alcionai/corso/src/pkg/account"
|
||||||
|
"github.com/alcionai/corso/src/pkg/path"
|
||||||
|
)
|
||||||
|
|
||||||
|
type DataProvider interface {
|
||||||
|
inject.BackupProducer
|
||||||
|
inject.ExportConsumer
|
||||||
|
inject.RestoreConsumer
|
||||||
|
|
||||||
|
VerifyAccess(ctx context.Context) error
|
||||||
|
}
|
||||||
|
|
||||||
|
type DataProviderConnector interface {
|
||||||
|
// ConnectDataProvider initializes configurations
|
||||||
|
// and establishes the client connection with the
|
||||||
|
// data provider for this operation.
|
||||||
|
ConnectDataProvider(
|
||||||
|
ctx context.Context,
|
||||||
|
pst path.ServiceType,
|
||||||
|
) error
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *repository) ConnectDataProvider(
|
||||||
|
ctx context.Context,
|
||||||
|
pst path.ServiceType,
|
||||||
|
) error {
|
||||||
|
var (
|
||||||
|
provider DataProvider
|
||||||
|
err error
|
||||||
|
)
|
||||||
|
|
||||||
|
switch r.Account.Provider {
|
||||||
|
case account.ProviderM365:
|
||||||
|
provider, err = connectToM365(ctx, *r, pst)
|
||||||
|
default:
|
||||||
|
err = clues.New("unrecognized provider").WithClues(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return clues.Wrap(err, "connecting data provider")
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := provider.VerifyAccess(ctx); err != nil {
|
||||||
|
return clues.Wrap(err, fmt.Sprintf("verifying %s account connection", r.Account.Provider))
|
||||||
|
}
|
||||||
|
|
||||||
|
r.Provider = provider
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func connectToM365(
|
||||||
|
ctx context.Context,
|
||||||
|
r repository,
|
||||||
|
pst path.ServiceType,
|
||||||
|
) (*m365.Controller, error) {
|
||||||
|
if r.Provider != nil {
|
||||||
|
ctrl, ok := r.Provider.(*m365.Controller)
|
||||||
|
if !ok {
|
||||||
|
// if the provider is initialized to a non-m365 controller, we should not
|
||||||
|
// attempt to connect to m365 afterward.
|
||||||
|
return nil, clues.New("Attempted to connect to multiple data providers")
|
||||||
|
}
|
||||||
|
|
||||||
|
return ctrl, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
progressBar := observe.MessageWithCompletion(ctx, "Connecting to M365")
|
||||||
|
defer close(progressBar)
|
||||||
|
|
||||||
|
ctrl, err := m365.NewController(ctx, r.Account, pst, r.Opts)
|
||||||
|
if err != nil {
|
||||||
|
return nil, clues.Wrap(err, "creating m365 client controller")
|
||||||
|
}
|
||||||
|
|
||||||
|
return ctrl, nil
|
||||||
|
}
|
||||||
40
src/pkg/repository/exports.go
Normal file
@ -0,0 +1,40 @@
|
|||||||
|
package repository
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
|
"github.com/alcionai/corso/src/internal/model"
|
||||||
|
"github.com/alcionai/corso/src/internal/operations"
|
||||||
|
"github.com/alcionai/corso/src/pkg/control"
|
||||||
|
"github.com/alcionai/corso/src/pkg/selectors"
|
||||||
|
"github.com/alcionai/corso/src/pkg/store"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Exporter interface {
|
||||||
|
NewExport(
|
||||||
|
ctx context.Context,
|
||||||
|
backupID string,
|
||||||
|
sel selectors.Selector,
|
||||||
|
exportCfg control.ExportConfig,
|
||||||
|
) (operations.ExportOperation, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewExport generates an ExportOperation runner.
|
||||||
|
func (r repository) NewExport(
|
||||||
|
ctx context.Context,
|
||||||
|
backupID string,
|
||||||
|
sel selectors.Selector,
|
||||||
|
exportCfg control.ExportConfig,
|
||||||
|
) (operations.ExportOperation, error) {
|
||||||
|
return operations.NewExportOperation(
|
||||||
|
ctx,
|
||||||
|
r.Opts,
|
||||||
|
r.dataLayer,
|
||||||
|
store.NewWrapper(r.modelStore),
|
||||||
|
r.Provider,
|
||||||
|
r.Account,
|
||||||
|
model.StableID(backupID),
|
||||||
|
sel,
|
||||||
|
exportCfg,
|
||||||
|
r.Bus)
|
||||||
|
}
|
||||||
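A rough sketch tying the new repository interfaces together (illustrative; the backup ID, selector, and zero-value export config are assumptions): the data provider has to be connected for the selector's service before NewExport is called, since the export operation is built around r.Provider.

func newExportOp(
	ctx context.Context,
	r Repositoryer,
	backupID string,
	sel selectors.Selector,
) (operations.ExportOperation, error) {
	if err := r.ConnectDataProvider(ctx, sel.PathService()); err != nil {
		return operations.ExportOperation{}, clues.Wrap(err, "connecting data provider")
	}

	// a zero-value ExportConfig is used here purely for illustration
	return r.NewExport(ctx, backupID, sel, control.ExportConfig{})
}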
@ -21,7 +21,6 @@ import (
|
|||||||
"github.com/alcionai/corso/src/pkg/backup"
|
"github.com/alcionai/corso/src/pkg/backup"
|
||||||
"github.com/alcionai/corso/src/pkg/backup/details"
|
"github.com/alcionai/corso/src/pkg/backup/details"
|
||||||
"github.com/alcionai/corso/src/pkg/control"
|
"github.com/alcionai/corso/src/pkg/control"
|
||||||
ctrlRepo "github.com/alcionai/corso/src/pkg/control/repository"
|
|
||||||
ctrlTD "github.com/alcionai/corso/src/pkg/control/testdata"
|
ctrlTD "github.com/alcionai/corso/src/pkg/control/testdata"
|
||||||
"github.com/alcionai/corso/src/pkg/fault"
|
"github.com/alcionai/corso/src/pkg/fault"
|
||||||
"github.com/alcionai/corso/src/pkg/path"
|
"github.com/alcionai/corso/src/pkg/path"
|
||||||
@ -111,7 +110,7 @@ func initM365Repo(t *testing.T) (
|
|||||||
repository.NewRepoID)
|
repository.NewRepoID)
|
||||||
require.NoError(t, err, clues.ToCore(err))
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
err = r.Initialize(ctx, ctrlRepo.Retention{})
|
err = r.Initialize(ctx, repository.InitConfig{})
|
||||||
require.NoError(t, err, clues.ToCore(err))
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
return ctx, r, ac, st
|
return ctx, r, ac, st
|
||||||
|
|||||||
@ -6,31 +6,20 @@ import (
|
|||||||
|
|
||||||
"github.com/alcionai/clues"
|
"github.com/alcionai/clues"
|
||||||
"github.com/google/uuid"
|
"github.com/google/uuid"
|
||||||
"github.com/kopia/kopia/repo/manifest"
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
|
|
||||||
"github.com/alcionai/corso/src/internal/common/crash"
|
"github.com/alcionai/corso/src/internal/common/crash"
|
||||||
"github.com/alcionai/corso/src/internal/common/idname"
|
|
||||||
"github.com/alcionai/corso/src/internal/data"
|
"github.com/alcionai/corso/src/internal/data"
|
||||||
"github.com/alcionai/corso/src/internal/events"
|
"github.com/alcionai/corso/src/internal/events"
|
||||||
"github.com/alcionai/corso/src/internal/kopia"
|
"github.com/alcionai/corso/src/internal/kopia"
|
||||||
"github.com/alcionai/corso/src/internal/m365"
|
|
||||||
"github.com/alcionai/corso/src/internal/m365/collection/drive/metadata"
|
|
||||||
"github.com/alcionai/corso/src/internal/model"
|
"github.com/alcionai/corso/src/internal/model"
|
||||||
"github.com/alcionai/corso/src/internal/observe"
|
"github.com/alcionai/corso/src/internal/observe"
|
||||||
"github.com/alcionai/corso/src/internal/operations"
|
"github.com/alcionai/corso/src/internal/operations"
|
||||||
"github.com/alcionai/corso/src/internal/streamstore"
|
|
||||||
"github.com/alcionai/corso/src/internal/version"
|
|
||||||
"github.com/alcionai/corso/src/pkg/account"
|
"github.com/alcionai/corso/src/pkg/account"
|
||||||
"github.com/alcionai/corso/src/pkg/backup"
|
|
||||||
"github.com/alcionai/corso/src/pkg/backup/details"
|
|
||||||
"github.com/alcionai/corso/src/pkg/control"
|
"github.com/alcionai/corso/src/pkg/control"
|
||||||
ctrlRepo "github.com/alcionai/corso/src/pkg/control/repository"
|
ctrlRepo "github.com/alcionai/corso/src/pkg/control/repository"
|
||||||
"github.com/alcionai/corso/src/pkg/count"
|
|
||||||
"github.com/alcionai/corso/src/pkg/fault"
|
|
||||||
"github.com/alcionai/corso/src/pkg/logger"
|
"github.com/alcionai/corso/src/pkg/logger"
|
||||||
"github.com/alcionai/corso/src/pkg/path"
|
"github.com/alcionai/corso/src/pkg/path"
|
||||||
"github.com/alcionai/corso/src/pkg/selectors"
|
|
||||||
"github.com/alcionai/corso/src/pkg/storage"
|
"github.com/alcionai/corso/src/pkg/storage"
|
||||||
"github.com/alcionai/corso/src/pkg/store"
|
"github.com/alcionai/corso/src/pkg/store"
|
||||||
)
|
)
|
||||||
@ -42,48 +31,24 @@ var (
|
|||||||
ErrorBackupNotFound = clues.New("no backup exists with that id")
|
ErrorBackupNotFound = clues.New("no backup exists with that id")
|
||||||
)
|
)
|
||||||
|
|
||||||
// BackupGetter deals with retrieving metadata about backups from the
|
|
||||||
// repository.
|
|
||||||
type BackupGetter interface {
|
|
||||||
Backup(ctx context.Context, id string) (*backup.Backup, error)
|
|
||||||
Backups(ctx context.Context, ids []string) ([]*backup.Backup, *fault.Bus)
|
|
||||||
BackupsByTag(ctx context.Context, fs ...store.FilterOption) ([]*backup.Backup, error)
|
|
||||||
GetBackupDetails(
|
|
||||||
ctx context.Context,
|
|
||||||
backupID string,
|
|
||||||
) (*details.Details, *backup.Backup, *fault.Bus)
|
|
||||||
GetBackupErrors(
|
|
||||||
ctx context.Context,
|
|
||||||
backupID string,
|
|
||||||
) (*fault.Errors, *backup.Backup, *fault.Bus)
|
|
||||||
}
|
|
||||||
|
|
||||||
type Repositoryer interface {
|
type Repositoryer interface {
|
||||||
Initialize(ctx context.Context, retentionOpts ctrlRepo.Retention) error
|
Backuper
|
||||||
Connect(ctx context.Context) error
|
BackupGetter
|
||||||
|
Restorer
|
||||||
|
Exporter
|
||||||
|
DataProviderConnector
|
||||||
|
|
||||||
|
Initialize(
|
||||||
|
ctx context.Context,
|
||||||
|
cfg InitConfig,
|
||||||
|
) error
|
||||||
|
Connect(
|
||||||
|
ctx context.Context,
|
||||||
|
cfg ConnConfig,
|
||||||
|
) error
|
||||||
GetID() string
|
GetID() string
|
||||||
Close(context.Context) error
|
Close(context.Context) error
|
||||||
NewBackup(
|
|
||||||
ctx context.Context,
|
|
||||||
self selectors.Selector,
|
|
||||||
) (operations.BackupOperation, error)
|
|
||||||
NewBackupWithLookup(
|
|
||||||
ctx context.Context,
|
|
||||||
self selectors.Selector,
|
|
||||||
ins idname.Cacher,
|
|
||||||
) (operations.BackupOperation, error)
|
|
||||||
NewRestore(
|
|
||||||
ctx context.Context,
|
|
||||||
backupID string,
|
|
||||||
sel selectors.Selector,
|
|
||||||
restoreCfg control.RestoreConfig,
|
|
||||||
) (operations.RestoreOperation, error)
|
|
||||||
NewExport(
|
|
||||||
ctx context.Context,
|
|
||||||
backupID string,
|
|
||||||
sel selectors.Selector,
|
|
||||||
exportCfg control.ExportConfig,
|
|
||||||
) (operations.ExportOperation, error)
|
|
||||||
NewMaintenance(
|
NewMaintenance(
|
||||||
ctx context.Context,
|
ctx context.Context,
|
||||||
mOpts ctrlRepo.Maintenance,
|
mOpts ctrlRepo.Maintenance,
|
||||||
@ -92,14 +57,6 @@ type Repositoryer interface {
|
|||||||
ctx context.Context,
|
ctx context.Context,
|
||||||
rcOpts ctrlRepo.Retention,
|
rcOpts ctrlRepo.Retention,
|
||||||
) (operations.RetentionConfigOperation, error)
|
) (operations.RetentionConfigOperation, error)
|
||||||
DeleteBackups(ctx context.Context, failOnMissing bool, ids ...string) error
|
|
||||||
BackupGetter
|
|
||||||
// ConnectToM365 establishes graph api connections
|
|
||||||
// and initializes api client configurations.
|
|
||||||
ConnectToM365(
|
|
||||||
ctx context.Context,
|
|
||||||
pst path.ServiceType,
|
|
||||||
) (*m365.Controller, error)
|
|
||||||
}
|
}
|
||||||
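The refactored Repositoryer above is now assembled from narrower role interfaces (Backuper, BackupGetter, Restorer, Exporter, DataProviderConnector), so callers can depend on only the capability they need. A minimal sketch of that benefit, assuming only names visible in this diff; the consumer package and helper are hypothetical:

package consumer // hypothetical caller package, not part of this change

import (
	"context"
	"errors"

	"github.com/alcionai/corso/src/pkg/repository"
)

// backupExists reports whether a backup with the given id is present,
// accepting the narrow BackupGetter role rather than the full Repositoryer.
func backupExists(ctx context.Context, bg repository.BackupGetter, id string) (bool, error) {
	_, err := bg.Backup(ctx, id)
	if errors.Is(err, repository.ErrorBackupNotFound) {
		return false, nil
	}

	if err != nil {
		return false, err
	}

	return true, nil
}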
|
|
||||||
// Repository contains storage provider information.
|
// Repository contains storage provider information.
|
||||||
@ -108,9 +65,10 @@ type repository struct {
|
|||||||
CreatedAt time.Time
|
CreatedAt time.Time
|
||||||
Version string // in case of future breaking changes
|
Version string // in case of future breaking changes
|
||||||
|
|
||||||
Account account.Account // the user's m365 account connection details
|
Account account.Account // the user's m365 account connection details
|
||||||
Storage storage.Storage // the storage provider details and configuration
|
Storage storage.Storage // the storage provider details and configuration
|
||||||
Opts control.Options
|
Opts control.Options
|
||||||
|
Provider DataProvider // the client controller used for external user data CRUD
|
||||||
|
|
||||||
Bus events.Eventer
|
Bus events.Eventer
|
||||||
dataLayer *kopia.Wrapper
|
dataLayer *kopia.Wrapper
|
||||||
@ -125,7 +83,7 @@ func (r repository) GetID() string {
|
|||||||
func New(
|
func New(
|
||||||
ctx context.Context,
|
ctx context.Context,
|
||||||
acct account.Account,
|
acct account.Account,
|
||||||
s storage.Storage,
|
st storage.Storage,
|
||||||
opts control.Options,
|
opts control.Options,
|
||||||
configFileRepoID string,
|
configFileRepoID string,
|
||||||
) (singleRepo *repository, err error) {
|
) (singleRepo *repository, err error) {
|
||||||
@ -133,16 +91,16 @@ func New(
|
|||||||
ctx,
|
ctx,
|
||||||
"acct_provider", acct.Provider.String(),
|
"acct_provider", acct.Provider.String(),
|
||||||
"acct_id", clues.Hide(acct.ID()),
|
"acct_id", clues.Hide(acct.ID()),
|
||||||
"storage_provider", s.Provider.String())
|
"storage_provider", st.Provider.String())
|
||||||
|
|
||||||
bus, err := events.NewBus(ctx, s, acct.ID(), opts)
|
bus, err := events.NewBus(ctx, st, acct.ID(), opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, clues.Wrap(err, "constructing event bus").WithClues(ctx)
|
return nil, clues.Wrap(err, "constructing event bus").WithClues(ctx)
|
||||||
}
|
}
|
||||||
|
|
||||||
repoID := configFileRepoID
|
repoID := configFileRepoID
|
||||||
if len(configFileRepoID) == 0 {
|
if len(configFileRepoID) == 0 {
|
||||||
repoID = newRepoID(s)
|
repoID = newRepoID(st)
|
||||||
}
|
}
|
||||||
|
|
||||||
bus.SetRepoID(repoID)
|
bus.SetRepoID(repoID)
|
||||||
@ -151,7 +109,7 @@ func New(
|
|||||||
ID: repoID,
|
ID: repoID,
|
||||||
Version: "v1",
|
Version: "v1",
|
||||||
Account: acct,
|
Account: acct,
|
||||||
Storage: s,
|
Storage: st,
|
||||||
Bus: bus,
|
Bus: bus,
|
||||||
Opts: opts,
|
Opts: opts,
|
||||||
}
|
}
|
||||||
@ -163,17 +121,22 @@ func New(
|
|||||||
return &r, nil
|
return &r, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type InitConfig struct {
|
||||||
|
// tells the data provider which service to
|
||||||
|
// use for its connection pattern. Optional.
|
||||||
|
Service path.ServiceType
|
||||||
|
RetentionOpts ctrlRepo.Retention
|
||||||
|
}
|
||||||
|
|
||||||
// Initialize will:
|
// Initialize will:
|
||||||
// - validate the m365 account & secrets
|
|
||||||
// - connect to the m365 account to ensure communication capability
|
// - connect to the m365 account to ensure communication capability
|
||||||
// - validate the provider config & secrets
|
|
||||||
// - initialize the kopia repo with the provider and retention parameters
|
// - initialize the kopia repo with the provider and retention parameters
|
||||||
// - update maintenance retention parameters as needed
|
// - update maintenance retention parameters as needed
|
||||||
// - store the configuration details
|
// - store the configuration details
|
||||||
// - connect to the provider
|
// - connect to the provider
|
||||||
func (r *repository) Initialize(
|
func (r *repository) Initialize(
|
||||||
ctx context.Context,
|
ctx context.Context,
|
||||||
retentionOpts ctrlRepo.Retention,
|
cfg InitConfig,
|
||||||
) (err error) {
|
) (err error) {
|
||||||
ctx = clues.Add(
|
ctx = clues.Add(
|
||||||
ctx,
|
ctx,
|
||||||
@ -187,10 +150,14 @@ func (r *repository) Initialize(
|
|||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
|
if err := r.ConnectDataProvider(ctx, cfg.Service); err != nil {
|
||||||
|
return clues.Stack(err)
|
||||||
|
}
|
||||||
|
|
||||||
observe.Message(ctx, "Initializing repository")
|
observe.Message(ctx, "Initializing repository")
|
||||||
|
|
||||||
kopiaRef := kopia.NewConn(r.Storage)
|
kopiaRef := kopia.NewConn(r.Storage)
|
||||||
if err := kopiaRef.Initialize(ctx, r.Opts.Repo, retentionOpts); err != nil {
|
if err := kopiaRef.Initialize(ctx, r.Opts.Repo, cfg.RetentionOpts); err != nil {
|
||||||
// replace common internal errors so that sdk users can check results with errors.Is()
|
// replace common internal errors so that sdk users can check results with errors.Is()
|
||||||
if errors.Is(err, kopia.ErrorRepoAlreadyExists) {
|
if errors.Is(err, kopia.ErrorRepoAlreadyExists) {
|
||||||
return clues.Stack(ErrorRepoAlreadyExists, err).WithClues(ctx)
|
return clues.Stack(ErrorRepoAlreadyExists, err).WithClues(ctx)
|
||||||
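Given the errors.Is() note above and the new InitConfig, an init-or-connect flow reduces to the sketch below. It is illustrative only, uses identifiers already present in this file (path, ctrlRepo, clues, errors), and the service choice is a placeholder:

// initOrConnect is an illustrative helper, not part of this change.
func initOrConnect(ctx context.Context, r Repositoryer) error {
	err := r.Initialize(ctx, InitConfig{
		Service:       path.ExchangeService,
		RetentionOpts: ctrlRepo.Retention{},
	})
	if errors.Is(err, ErrorRepoAlreadyExists) {
		// the repo was created on an earlier run; connect to it instead.
		err = r.Connect(ctx, ConnConfig{Service: path.ExchangeService})
	}

	if err != nil {
		return clues.Stack(err)
	}

	return nil
}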
@ -221,12 +188,21 @@ func (r *repository) Initialize(
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type ConnConfig struct {
|
||||||
|
// tells the data provider which service to
|
||||||
|
// use for its connection pattern. Leave empty
|
||||||
|
// to skip the provider connection.
|
||||||
|
Service path.ServiceType
|
||||||
|
}
|
||||||
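Per the comment above, a zero-valued ConnConfig skips the provider connection entirely, which is how the storage-only unit tests below call Connect. An illustrative fragment:

// storage-only connection: no data provider is wired up because Service is empty.
err := r.Connect(ctx, ConnConfig{})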
|
|
||||||
// Connect will:
|
// Connect will:
|
||||||
// - validate the m365 account details
|
// - connect to the m365 account
|
||||||
// - connect to the m365 account to ensure communication capability
|
|
||||||
// - connect to the provider storage
|
// - connect to the provider storage
|
||||||
// - return the connected repository
|
// - return the connected repository
|
||||||
func (r *repository) Connect(ctx context.Context) (err error) {
|
func (r *repository) Connect(
|
||||||
|
ctx context.Context,
|
||||||
|
cfg ConnConfig,
|
||||||
|
) (err error) {
|
||||||
ctx = clues.Add(
|
ctx = clues.Add(
|
||||||
ctx,
|
ctx,
|
||||||
"acct_provider", r.Account.Provider.String(),
|
"acct_provider", r.Account.Provider.String(),
|
||||||
@ -239,6 +215,10 @@ func (r *repository) Connect(ctx context.Context) (err error) {
|
|||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
|
if err := r.ConnectDataProvider(ctx, cfg.Service); err != nil {
|
||||||
|
return clues.Stack(err)
|
||||||
|
}
|
||||||
|
|
||||||
observe.Message(ctx, "Connecting to repository")
|
observe.Message(ctx, "Connecting to repository")
|
||||||
|
|
||||||
kopiaRef := kopia.NewConn(r.Storage)
|
kopiaRef := kopia.NewConn(r.Storage)
|
||||||
@ -297,14 +277,13 @@ func (r *repository) UpdatePassword(ctx context.Context, password string) (err e
|
|||||||
return clues.Wrap(err, "connecting kopia client")
|
return clues.Wrap(err, "connecting kopia client")
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := kopiaRef.UpdatePassword(ctx, password, r.Opts.Repo); err != nil {
|
err = kopiaRef.UpdatePassword(ctx, password, r.Opts.Repo)
|
||||||
|
if err != nil {
|
||||||
return clues.Wrap(err, "updating on kopia")
|
return clues.Wrap(err, "updating on kopia")
|
||||||
}
|
}
|
||||||
|
|
||||||
defer kopiaRef.Close(ctx)
|
defer kopiaRef.Close(ctx)
|
||||||
|
|
||||||
r.Bus.Event(ctx, events.RepoUpdate, nil)
|
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -332,98 +311,6 @@ func (r *repository) Close(ctx context.Context) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewBackup generates a BackupOperation runner.
|
|
||||||
func (r repository) NewBackup(
|
|
||||||
ctx context.Context,
|
|
||||||
sel selectors.Selector,
|
|
||||||
) (operations.BackupOperation, error) {
|
|
||||||
return r.NewBackupWithLookup(ctx, sel, nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewBackupWithLookup generates a BackupOperation runner.
|
|
||||||
// ownerIDToName and ownerNameToID are optional populations, in case the caller has
|
|
||||||
// already generated those values.
|
|
||||||
func (r repository) NewBackupWithLookup(
|
|
||||||
ctx context.Context,
|
|
||||||
sel selectors.Selector,
|
|
||||||
ins idname.Cacher,
|
|
||||||
) (operations.BackupOperation, error) {
|
|
||||||
ctrl, err := connectToM365(ctx, sel.PathService(), r.Account, r.Opts)
|
|
||||||
if err != nil {
|
|
||||||
return operations.BackupOperation{}, clues.Wrap(err, "connecting to m365")
|
|
||||||
}
|
|
||||||
|
|
||||||
ownerID, ownerName, err := ctrl.PopulateProtectedResourceIDAndName(ctx, sel.DiscreteOwner, ins)
|
|
||||||
if err != nil {
|
|
||||||
return operations.BackupOperation{}, clues.Wrap(err, "resolving resource owner details")
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO: retrieve display name from gc
|
|
||||||
sel = sel.SetDiscreteOwnerIDName(ownerID, ownerName)
|
|
||||||
|
|
||||||
return operations.NewBackupOperation(
|
|
||||||
ctx,
|
|
||||||
r.Opts,
|
|
||||||
r.dataLayer,
|
|
||||||
store.NewWrapper(r.modelStore),
|
|
||||||
ctrl,
|
|
||||||
r.Account,
|
|
||||||
sel,
|
|
||||||
sel, // the selector acts as an IDNamer for its discrete resource owner.
|
|
||||||
r.Bus)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewExport generates an exportOperation runner.
|
|
||||||
func (r repository) NewExport(
|
|
||||||
ctx context.Context,
|
|
||||||
backupID string,
|
|
||||||
sel selectors.Selector,
|
|
||||||
exportCfg control.ExportConfig,
|
|
||||||
) (operations.ExportOperation, error) {
|
|
||||||
ctrl, err := connectToM365(ctx, sel.PathService(), r.Account, r.Opts)
|
|
||||||
if err != nil {
|
|
||||||
return operations.ExportOperation{}, clues.Wrap(err, "connecting to m365")
|
|
||||||
}
|
|
||||||
|
|
||||||
return operations.NewExportOperation(
|
|
||||||
ctx,
|
|
||||||
r.Opts,
|
|
||||||
r.dataLayer,
|
|
||||||
store.NewWrapper(r.modelStore),
|
|
||||||
ctrl,
|
|
||||||
r.Account,
|
|
||||||
model.StableID(backupID),
|
|
||||||
sel,
|
|
||||||
exportCfg,
|
|
||||||
r.Bus)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewRestore generates a restoreOperation runner.
|
|
||||||
func (r repository) NewRestore(
|
|
||||||
ctx context.Context,
|
|
||||||
backupID string,
|
|
||||||
sel selectors.Selector,
|
|
||||||
restoreCfg control.RestoreConfig,
|
|
||||||
) (operations.RestoreOperation, error) {
|
|
||||||
ctrl, err := connectToM365(ctx, sel.PathService(), r.Account, r.Opts)
|
|
||||||
if err != nil {
|
|
||||||
return operations.RestoreOperation{}, clues.Wrap(err, "connecting to m365")
|
|
||||||
}
|
|
||||||
|
|
||||||
return operations.NewRestoreOperation(
|
|
||||||
ctx,
|
|
||||||
r.Opts,
|
|
||||||
r.dataLayer,
|
|
||||||
store.NewWrapper(r.modelStore),
|
|
||||||
ctrl,
|
|
||||||
r.Account,
|
|
||||||
model.StableID(backupID),
|
|
||||||
sel,
|
|
||||||
restoreCfg,
|
|
||||||
r.Bus,
|
|
||||||
count.New())
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r repository) NewMaintenance(
|
func (r repository) NewMaintenance(
|
||||||
ctx context.Context,
|
ctx context.Context,
|
||||||
mOpts ctrlRepo.Maintenance,
|
mOpts ctrlRepo.Maintenance,
|
||||||
@ -449,280 +336,6 @@ func (r repository) NewRetentionConfig(
|
|||||||
r.Bus)
|
r.Bus)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Backup retrieves a backup by id.
|
|
||||||
func (r repository) Backup(ctx context.Context, id string) (*backup.Backup, error) {
|
|
||||||
return getBackup(ctx, id, store.NewWrapper(r.modelStore))
|
|
||||||
}
|
|
||||||
|
|
||||||
// getBackup handles the processing for Backup.
|
|
||||||
func getBackup(
|
|
||||||
ctx context.Context,
|
|
||||||
id string,
|
|
||||||
sw store.BackupGetter,
|
|
||||||
) (*backup.Backup, error) {
|
|
||||||
b, err := sw.GetBackup(ctx, model.StableID(id))
|
|
||||||
if err != nil {
|
|
||||||
return nil, errWrapper(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return b, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Backups lists backups by ID. Returns as many backups as possible with
|
|
||||||
// errors for the backups it was unable to retrieve.
|
|
||||||
func (r repository) Backups(ctx context.Context, ids []string) ([]*backup.Backup, *fault.Bus) {
|
|
||||||
var (
|
|
||||||
bups []*backup.Backup
|
|
||||||
errs = fault.New(false)
|
|
||||||
sw = store.NewWrapper(r.modelStore)
|
|
||||||
)
|
|
||||||
|
|
||||||
for _, id := range ids {
|
|
||||||
ictx := clues.Add(ctx, "backup_id", id)
|
|
||||||
|
|
||||||
b, err := sw.GetBackup(ictx, model.StableID(id))
|
|
||||||
if err != nil {
|
|
||||||
errs.AddRecoverable(ctx, errWrapper(err))
|
|
||||||
}
|
|
||||||
|
|
||||||
bups = append(bups, b)
|
|
||||||
}
|
|
||||||
|
|
||||||
return bups, errs
|
|
||||||
}
|
|
||||||
|
|
||||||
// BackupsByTag lists all backups in a repository that contain all the tags
|
|
||||||
// specified.
|
|
||||||
func (r repository) BackupsByTag(ctx context.Context, fs ...store.FilterOption) ([]*backup.Backup, error) {
|
|
||||||
sw := store.NewWrapper(r.modelStore)
|
|
||||||
return backupsByTag(ctx, sw, fs)
|
|
||||||
}
|
|
||||||
|
|
||||||
// backupsByTag returns all backups matching all provided tags.
|
|
||||||
//
|
|
||||||
// TODO(ashmrtn): This exists mostly for testing, but we could restructure the
|
|
||||||
// code in this file so there's a more elegant mocking solution.
|
|
||||||
func backupsByTag(
|
|
||||||
ctx context.Context,
|
|
||||||
sw store.BackupWrapper,
|
|
||||||
fs []store.FilterOption,
|
|
||||||
) ([]*backup.Backup, error) {
|
|
||||||
bs, err := sw.GetBackups(ctx, fs...)
|
|
||||||
if err != nil {
|
|
||||||
return nil, clues.Stack(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Filter out assist backup bases as they're considered incomplete and we
|
|
||||||
// haven't been displaying them before now.
|
|
||||||
res := make([]*backup.Backup, 0, len(bs))
|
|
||||||
|
|
||||||
for _, b := range bs {
|
|
||||||
if t := b.Tags[model.BackupTypeTag]; t != model.AssistBackup {
|
|
||||||
res = append(res, b)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return res, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetBackupDetails returns the specified backup.Details
|
|
||||||
func (r repository) GetBackupDetails(
|
|
||||||
ctx context.Context,
|
|
||||||
backupID string,
|
|
||||||
) (*details.Details, *backup.Backup, *fault.Bus) {
|
|
||||||
errs := fault.New(false)
|
|
||||||
|
|
||||||
deets, bup, err := getBackupDetails(
|
|
||||||
ctx,
|
|
||||||
backupID,
|
|
||||||
r.Account.ID(),
|
|
||||||
r.dataLayer,
|
|
||||||
store.NewWrapper(r.modelStore),
|
|
||||||
errs)
|
|
||||||
|
|
||||||
return deets, bup, errs.Fail(err)
|
|
||||||
}
|
|
||||||
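For callers, the details read path reduces to a single call. A sketch, assuming fault.Bus exposes Failure() as it does elsewhere in corso; backupID is a placeholder:

deets, bup, errs := r.GetBackupDetails(ctx, backupID)
if errs.Failure() != nil {
	// the failure wraps ErrorBackupNotFound when the id has no backup model.
	return errs.Failure()
}

// deets has IsMeta backfilled for older OneDrive versions and meta files
// filtered out, per getBackupDetails below; bup is the owning backup model.
_ = bup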
|
|
||||||
// getBackupDetails handles the processing for GetBackupDetails.
|
|
||||||
func getBackupDetails(
|
|
||||||
ctx context.Context,
|
|
||||||
backupID, tenantID string,
|
|
||||||
kw *kopia.Wrapper,
|
|
||||||
sw store.BackupGetter,
|
|
||||||
errs *fault.Bus,
|
|
||||||
) (*details.Details, *backup.Backup, error) {
|
|
||||||
b, err := sw.GetBackup(ctx, model.StableID(backupID))
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, errWrapper(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
ssid := b.StreamStoreID
|
|
||||||
if len(ssid) == 0 {
|
|
||||||
ssid = b.DetailsID
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(ssid) == 0 {
|
|
||||||
return nil, b, clues.New("no streamstore id in backup").WithClues(ctx)
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
sstore = streamstore.NewStreamer(kw, tenantID, b.Selector.PathService())
|
|
||||||
deets details.Details
|
|
||||||
)
|
|
||||||
|
|
||||||
err = sstore.Read(
|
|
||||||
ctx,
|
|
||||||
ssid,
|
|
||||||
streamstore.DetailsReader(details.UnmarshalTo(&deets)),
|
|
||||||
errs)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Retroactively fill in isMeta information for items in older
|
|
||||||
// backup versions without that info
|
|
||||||
// version.Restore2 introduces the IsMeta flag, so only v1 needs a check.
|
|
||||||
if b.Version >= version.OneDrive1DataAndMetaFiles && b.Version < version.OneDrive3IsMetaMarker {
|
|
||||||
for _, d := range deets.Entries {
|
|
||||||
if d.OneDrive != nil {
|
|
||||||
d.OneDrive.IsMeta = metadata.HasMetaSuffix(d.RepoRef)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
deets.DetailsModel = deets.FilterMetaFiles()
|
|
||||||
|
|
||||||
return &deets, b, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetBackupErrors returns the specified backup's fault.Errors
|
|
||||||
func (r repository) GetBackupErrors(
|
|
||||||
ctx context.Context,
|
|
||||||
backupID string,
|
|
||||||
) (*fault.Errors, *backup.Backup, *fault.Bus) {
|
|
||||||
errs := fault.New(false)
|
|
||||||
|
|
||||||
fe, bup, err := getBackupErrors(
|
|
||||||
ctx,
|
|
||||||
backupID,
|
|
||||||
r.Account.ID(),
|
|
||||||
r.dataLayer,
|
|
||||||
store.NewWrapper(r.modelStore),
|
|
||||||
errs)
|
|
||||||
|
|
||||||
return fe, bup, errs.Fail(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// getBackupErrors handles the processing for GetBackupErrors.
|
|
||||||
func getBackupErrors(
|
|
||||||
ctx context.Context,
|
|
||||||
backupID, tenantID string,
|
|
||||||
kw *kopia.Wrapper,
|
|
||||||
sw store.BackupGetter,
|
|
||||||
errs *fault.Bus,
|
|
||||||
) (*fault.Errors, *backup.Backup, error) {
|
|
||||||
b, err := sw.GetBackup(ctx, model.StableID(backupID))
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, errWrapper(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
ssid := b.StreamStoreID
|
|
||||||
if len(ssid) == 0 {
|
|
||||||
return nil, b, clues.New("missing streamstore id in backup").WithClues(ctx)
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
sstore = streamstore.NewStreamer(kw, tenantID, b.Selector.PathService())
|
|
||||||
fe fault.Errors
|
|
||||||
)
|
|
||||||
|
|
||||||
err = sstore.Read(
|
|
||||||
ctx,
|
|
||||||
ssid,
|
|
||||||
streamstore.FaultErrorsReader(fault.UnmarshalErrorsTo(&fe)),
|
|
||||||
errs)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return &fe, b, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeleteBackups removes the backups from both the model store and the backup
|
|
||||||
// storage.
|
|
||||||
//
|
|
||||||
// If failOnMissing is true then returns an error if a backup model can't be
|
|
||||||
// found. Otherwise ignores missing backup models.
|
|
||||||
//
|
|
||||||
// Missing models or snapshots during the actual deletion do not cause errors.
|
|
||||||
//
|
|
||||||
// All backups are deleted as an atomic unit so any failures will result in no
|
|
||||||
// deletions.
|
|
||||||
func (r repository) DeleteBackups(
|
|
||||||
ctx context.Context,
|
|
||||||
failOnMissing bool,
|
|
||||||
ids ...string,
|
|
||||||
) error {
|
|
||||||
return deleteBackups(ctx, store.NewWrapper(r.modelStore), failOnMissing, ids...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// deleteBackups handles the processing for backup deletion.
|
|
||||||
func deleteBackups(
|
|
||||||
ctx context.Context,
|
|
||||||
sw store.BackupGetterModelDeleter,
|
|
||||||
failOnMissing bool,
|
|
||||||
ids ...string,
|
|
||||||
) error {
|
|
||||||
// Although we haven't explicitly stated it, snapshots are technically
|
|
||||||
// manifests in kopia. This means we can use the same delete API to remove
|
|
||||||
// them and backup models. Deleting all of them together gives us both
|
|
||||||
// atomicity guarantees (around when data will be flushed) and helps reduce
|
|
||||||
// the number of manifest blobs that kopia will create.
|
|
||||||
var toDelete []manifest.ID
|
|
||||||
|
|
||||||
for _, id := range ids {
|
|
||||||
b, err := sw.GetBackup(ctx, model.StableID(id))
|
|
||||||
if err != nil {
|
|
||||||
if !failOnMissing && errors.Is(err, data.ErrNotFound) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
return clues.Stack(errWrapper(err)).
|
|
||||||
WithClues(ctx).
|
|
||||||
With("delete_backup_id", id)
|
|
||||||
}
|
|
||||||
|
|
||||||
toDelete = append(toDelete, b.ModelStoreID)
|
|
||||||
|
|
||||||
if len(b.SnapshotID) > 0 {
|
|
||||||
toDelete = append(toDelete, manifest.ID(b.SnapshotID))
|
|
||||||
}
|
|
||||||
|
|
||||||
ssid := b.StreamStoreID
|
|
||||||
if len(ssid) == 0 {
|
|
||||||
ssid = b.DetailsID
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(ssid) > 0 {
|
|
||||||
toDelete = append(toDelete, manifest.ID(ssid))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return sw.DeleteWithModelStoreIDs(ctx, toDelete...)
|
|
||||||
}
|
|
||||||
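A multi-delete call then reduces to the sketch below; the ids are placeholders, and failOnMissing=false skips ids that have no backup model, per the doc comment above:

err := r.DeleteBackups(ctx, false, "backup-id-1", "backup-id-2")
if err != nil {
	// all-or-nothing: per the atomicity note above, nothing was deleted.
	return clues.Stack(err)
}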
|
|
||||||
func (r repository) ConnectToM365(
|
|
||||||
ctx context.Context,
|
|
||||||
pst path.ServiceType,
|
|
||||||
) (*m365.Controller, error) {
|
|
||||||
ctrl, err := connectToM365(ctx, pst, r.Account, r.Opts)
|
|
||||||
if err != nil {
|
|
||||||
return nil, clues.Wrap(err, "connecting to m365")
|
|
||||||
}
|
|
||||||
|
|
||||||
return ctrl, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ---------------------------------------------------------------------------
|
// ---------------------------------------------------------------------------
|
||||||
// Repository ID Model
|
// Repository ID Model
|
||||||
// ---------------------------------------------------------------------------
|
// ---------------------------------------------------------------------------
|
||||||
@ -771,29 +384,6 @@ func newRepoID(s storage.Storage) string {
|
|||||||
// helpers
|
// helpers
|
||||||
// ---------------------------------------------------------------------------
|
// ---------------------------------------------------------------------------
|
||||||
|
|
||||||
var m365nonce bool
|
|
||||||
|
|
||||||
func connectToM365(
|
|
||||||
ctx context.Context,
|
|
||||||
pst path.ServiceType,
|
|
||||||
acct account.Account,
|
|
||||||
co control.Options,
|
|
||||||
) (*m365.Controller, error) {
|
|
||||||
if !m365nonce {
|
|
||||||
m365nonce = true
|
|
||||||
|
|
||||||
progressBar := observe.MessageWithCompletion(ctx, "Connecting to M365")
|
|
||||||
defer close(progressBar)
|
|
||||||
}
|
|
||||||
|
|
||||||
ctrl, err := m365.NewController(ctx, acct, pst, co)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return ctrl, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func errWrapper(err error) error {
|
func errWrapper(err error) error {
|
||||||
if errors.Is(err, data.ErrNotFound) {
|
if errors.Is(err, data.ErrNotFound) {
|
||||||
return clues.Stack(ErrorBackupNotFound, err)
|
return clues.Stack(ErrorBackupNotFound, err)
|
||||||
|
|||||||
@ -17,6 +17,7 @@ import (
|
|||||||
ctrlRepo "github.com/alcionai/corso/src/pkg/control/repository"
|
ctrlRepo "github.com/alcionai/corso/src/pkg/control/repository"
|
||||||
"github.com/alcionai/corso/src/pkg/control/testdata"
|
"github.com/alcionai/corso/src/pkg/control/testdata"
|
||||||
"github.com/alcionai/corso/src/pkg/extensions"
|
"github.com/alcionai/corso/src/pkg/extensions"
|
||||||
|
"github.com/alcionai/corso/src/pkg/path"
|
||||||
"github.com/alcionai/corso/src/pkg/selectors"
|
"github.com/alcionai/corso/src/pkg/selectors"
|
||||||
"github.com/alcionai/corso/src/pkg/services/m365/api"
|
"github.com/alcionai/corso/src/pkg/services/m365/api"
|
||||||
"github.com/alcionai/corso/src/pkg/storage"
|
"github.com/alcionai/corso/src/pkg/storage"
|
||||||
@ -69,7 +70,7 @@ func (suite *RepositoryUnitSuite) TestInitialize() {
|
|||||||
NewRepoID)
|
NewRepoID)
|
||||||
require.NoError(t, err, clues.ToCore(err))
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
err = r.Initialize(ctx, ctrlRepo.Retention{})
|
err = r.Initialize(ctx, InitConfig{})
|
||||||
test.errCheck(t, err, clues.ToCore(err))
|
test.errCheck(t, err, clues.ToCore(err))
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
@ -85,12 +86,12 @@ func (suite *RepositoryUnitSuite) TestConnect() {
|
|||||||
errCheck assert.ErrorAssertionFunc
|
errCheck assert.ErrorAssertionFunc
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
storage.ProviderUnknown.String(),
|
name: storage.ProviderUnknown.String(),
|
||||||
func() (storage.Storage, error) {
|
storage: func() (storage.Storage, error) {
|
||||||
return storage.NewStorage(storage.ProviderUnknown)
|
return storage.NewStorage(storage.ProviderUnknown)
|
||||||
},
|
},
|
||||||
account.Account{},
|
account: account.Account{},
|
||||||
assert.Error,
|
errCheck: assert.Error,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
for _, test := range table {
|
for _, test := range table {
|
||||||
@ -111,7 +112,7 @@ func (suite *RepositoryUnitSuite) TestConnect() {
|
|||||||
NewRepoID)
|
NewRepoID)
|
||||||
require.NoError(t, err, clues.ToCore(err))
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
err = r.Connect(ctx)
|
err = r.Connect(ctx, ConnConfig{})
|
||||||
test.errCheck(t, err, clues.ToCore(err))
|
test.errCheck(t, err, clues.ToCore(err))
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
@ -136,12 +137,13 @@ func TestRepositoryIntegrationSuite(t *testing.T) {
|
|||||||
func (suite *RepositoryIntegrationSuite) TestInitialize() {
|
func (suite *RepositoryIntegrationSuite) TestInitialize() {
|
||||||
table := []struct {
|
table := []struct {
|
||||||
name string
|
name string
|
||||||
account account.Account
|
account func(*testing.T) account.Account
|
||||||
storage func(tester.TestT) storage.Storage
|
storage func(tester.TestT) storage.Storage
|
||||||
errCheck assert.ErrorAssertionFunc
|
errCheck assert.ErrorAssertionFunc
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
name: "success",
|
name: "success",
|
||||||
|
account: tconfig.NewM365Account,
|
||||||
storage: storeTD.NewPrefixedS3Storage,
|
storage: storeTD.NewPrefixedS3Storage,
|
||||||
errCheck: assert.NoError,
|
errCheck: assert.NoError,
|
||||||
},
|
},
|
||||||
@ -156,13 +158,13 @@ func (suite *RepositoryIntegrationSuite) TestInitialize() {
|
|||||||
st := test.storage(t)
|
st := test.storage(t)
|
||||||
r, err := New(
|
r, err := New(
|
||||||
ctx,
|
ctx,
|
||||||
test.account,
|
test.account(t),
|
||||||
st,
|
st,
|
||||||
control.DefaultOptions(),
|
control.DefaultOptions(),
|
||||||
NewRepoID)
|
NewRepoID)
|
||||||
require.NoError(t, err, clues.ToCore(err))
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
err = r.Initialize(ctx, ctrlRepo.Retention{})
|
err = r.Initialize(ctx, InitConfig{})
|
||||||
if err == nil {
|
if err == nil {
|
||||||
defer func() {
|
defer func() {
|
||||||
err := r.Close(ctx)
|
err := r.Close(ctx)
|
||||||
@ -204,7 +206,7 @@ func (suite *RepositoryIntegrationSuite) TestInitializeWithRole() {
|
|||||||
NewRepoID)
|
NewRepoID)
|
||||||
require.NoError(t, err, clues.ToCore(err))
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
err = r.Initialize(ctx, ctrlRepo.Retention{})
|
err = r.Initialize(ctx, InitConfig{})
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
defer func() {
|
defer func() {
|
||||||
@ -218,21 +220,23 @@ func (suite *RepositoryIntegrationSuite) TestConnect() {
|
|||||||
ctx, flush := tester.NewContext(t)
|
ctx, flush := tester.NewContext(t)
|
||||||
defer flush()
|
defer flush()
|
||||||
|
|
||||||
|
acct := tconfig.NewM365Account(t)
|
||||||
|
|
||||||
// need to initialize the repository before we can test connecting to it.
|
// need to initialize the repository before we can test connecting to it.
|
||||||
st := storeTD.NewPrefixedS3Storage(t)
|
st := storeTD.NewPrefixedS3Storage(t)
|
||||||
r, err := New(
|
r, err := New(
|
||||||
ctx,
|
ctx,
|
||||||
account.Account{},
|
acct,
|
||||||
st,
|
st,
|
||||||
control.DefaultOptions(),
|
control.DefaultOptions(),
|
||||||
NewRepoID)
|
NewRepoID)
|
||||||
require.NoError(t, err, clues.ToCore(err))
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
err = r.Initialize(ctx, ctrlRepo.Retention{})
|
err = r.Initialize(ctx, InitConfig{})
|
||||||
require.NoError(t, err, clues.ToCore(err))
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
// now re-connect
|
// now re-connect
|
||||||
err = r.Connect(ctx)
|
err = r.Connect(ctx, ConnConfig{})
|
||||||
assert.NoError(t, err, clues.ToCore(err))
|
assert.NoError(t, err, clues.ToCore(err))
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -242,29 +246,36 @@ func (suite *RepositoryIntegrationSuite) TestRepository_UpdatePassword() {
|
|||||||
ctx, flush := tester.NewContext(t)
|
ctx, flush := tester.NewContext(t)
|
||||||
defer flush()
|
defer flush()
|
||||||
|
|
||||||
|
acct := tconfig.NewM365Account(t)
|
||||||
|
|
||||||
// need to initialize the repository before we can test connecting to it.
|
// need to initialize the repository before we can test connecting to it.
|
||||||
st := storeTD.NewPrefixedS3Storage(t)
|
st := storeTD.NewPrefixedS3Storage(t)
|
||||||
r, err := New(
|
r, err := New(
|
||||||
ctx,
|
ctx,
|
||||||
account.Account{},
|
acct,
|
||||||
st,
|
st,
|
||||||
control.DefaultOptions(),
|
control.DefaultOptions(),
|
||||||
NewRepoID)
|
NewRepoID)
|
||||||
require.NoError(t, err, clues.ToCore(err))
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
err = r.Initialize(ctx, ctrlRepo.Retention{})
|
err = r.Initialize(ctx, InitConfig{})
|
||||||
require.NoError(t, err, clues.ToCore(err))
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
// now re-connect
|
// now re-connect
|
||||||
err = r.Connect(ctx)
|
err = r.Connect(ctx, ConnConfig{})
|
||||||
assert.NoError(t, err, clues.ToCore(err))
|
assert.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
err = r.UpdatePassword(ctx, "newpass")
|
err = r.UpdatePassword(ctx, "newpass")
|
||||||
require.NoError(t, err, clues.ToCore(err))
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
|
tmp := st.Config["common_corsoPassphrase"]
|
||||||
|
st.Config["common_corsoPassphrase"] = "newpass"
|
||||||
|
|
||||||
// now reconnect with new pass
|
// now reconnect with new pass
|
||||||
err = r.Connect(ctx)
|
err = r.Connect(ctx, ConnConfig{})
|
||||||
assert.Error(t, err, clues.ToCore(err))
|
assert.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
|
st.Config["common_corsoPassphrase"] = tmp
|
||||||
}
|
}
|
||||||
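The passphrase rotation flow the test above exercises, as a sketch; the config key name comes from the test, the new passphrase is a placeholder, and st is the storage.Storage the repository was created with:

if err := r.UpdatePassword(ctx, "new-passphrase"); err != nil {
	return clues.Stack(err)
}

// later connections must present the new passphrase through the storage config.
st.Config["common_corsoPassphrase"] = "new-passphrase"

return r.Connect(ctx, ConnConfig{})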
|
|
||||||
func (suite *RepositoryIntegrationSuite) TestConnect_sameID() {
|
func (suite *RepositoryIntegrationSuite) TestConnect_sameID() {
|
||||||
@ -273,17 +284,19 @@ func (suite *RepositoryIntegrationSuite) TestConnect_sameID() {
|
|||||||
ctx, flush := tester.NewContext(t)
|
ctx, flush := tester.NewContext(t)
|
||||||
defer flush()
|
defer flush()
|
||||||
|
|
||||||
|
acct := tconfig.NewM365Account(t)
|
||||||
|
|
||||||
// need to initialize the repository before we can test connecting to it.
|
// need to initialize the repository before we can test connecting to it.
|
||||||
st := storeTD.NewPrefixedS3Storage(t)
|
st := storeTD.NewPrefixedS3Storage(t)
|
||||||
r, err := New(
|
r, err := New(
|
||||||
ctx,
|
ctx,
|
||||||
account.Account{},
|
acct,
|
||||||
st,
|
st,
|
||||||
control.DefaultOptions(),
|
control.DefaultOptions(),
|
||||||
NewRepoID)
|
NewRepoID)
|
||||||
require.NoError(t, err, clues.ToCore(err))
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
err = r.Initialize(ctx, ctrlRepo.Retention{})
|
err = r.Initialize(ctx, InitConfig{})
|
||||||
require.NoError(t, err, clues.ToCore(err))
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
oldID := r.GetID()
|
oldID := r.GetID()
|
||||||
@ -292,7 +305,7 @@ func (suite *RepositoryIntegrationSuite) TestConnect_sameID() {
|
|||||||
require.NoError(t, err, clues.ToCore(err))
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
// now re-connect
|
// now re-connect
|
||||||
err = r.Connect(ctx)
|
err = r.Connect(ctx, ConnConfig{})
|
||||||
require.NoError(t, err, clues.ToCore(err))
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
assert.Equal(t, oldID, r.GetID())
|
assert.Equal(t, oldID, r.GetID())
|
||||||
}
|
}
|
||||||
@ -315,7 +328,8 @@ func (suite *RepositoryIntegrationSuite) TestNewBackup() {
|
|||||||
NewRepoID)
|
NewRepoID)
|
||||||
require.NoError(t, err, clues.ToCore(err))
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
err = r.Initialize(ctx, ctrlRepo.Retention{})
|
// service doesn't matter here, we just need a valid value.
|
||||||
|
err = r.Initialize(ctx, InitConfig{Service: path.ExchangeService})
|
||||||
require.NoError(t, err, clues.ToCore(err))
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
userID := tconfig.M365UserID(t)
|
userID := tconfig.M365UserID(t)
|
||||||
@ -344,7 +358,7 @@ func (suite *RepositoryIntegrationSuite) TestNewRestore() {
|
|||||||
"")
|
"")
|
||||||
require.NoError(t, err, clues.ToCore(err))
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
err = r.Initialize(ctx, ctrlRepo.Retention{})
|
err = r.Initialize(ctx, InitConfig{})
|
||||||
require.NoError(t, err, clues.ToCore(err))
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
ro, err := r.NewRestore(
|
ro, err := r.NewRestore(
|
||||||
@ -374,7 +388,8 @@ func (suite *RepositoryIntegrationSuite) TestNewBackupAndDelete() {
|
|||||||
NewRepoID)
|
NewRepoID)
|
||||||
require.NoError(t, err, clues.ToCore(err))
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
err = r.Initialize(ctx, ctrlRepo.Retention{})
|
// service doesn't matter here, we just need a valid value.
|
||||||
|
err = r.Initialize(ctx, InitConfig{Service: path.ExchangeService})
|
||||||
require.NoError(t, err, clues.ToCore(err))
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
userID := tconfig.M365UserID(t)
|
userID := tconfig.M365UserID(t)
|
||||||
@ -427,7 +442,7 @@ func (suite *RepositoryIntegrationSuite) TestNewMaintenance() {
|
|||||||
NewRepoID)
|
NewRepoID)
|
||||||
require.NoError(t, err, clues.ToCore(err))
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
err = r.Initialize(ctx, ctrlRepo.Retention{})
|
err = r.Initialize(ctx, InitConfig{})
|
||||||
require.NoError(t, err, clues.ToCore(err))
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
mo, err := r.NewMaintenance(ctx, ctrlRepo.Maintenance{})
|
mo, err := r.NewMaintenance(ctx, ctrlRepo.Maintenance{})
|
||||||
@ -496,11 +511,11 @@ func (suite *RepositoryIntegrationSuite) Test_Options() {
|
|||||||
NewRepoID)
|
NewRepoID)
|
||||||
require.NoError(t, err, clues.ToCore(err))
|
require.NoError(t, err, clues.ToCore(err))
|
||||||
|
|
||||||
err = r.Initialize(ctx, ctrlRepo.Retention{})
|
err = r.Initialize(ctx, InitConfig{})
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
assert.Equal(t, test.expectedLen, len(r.Opts.ItemExtensionFactory))
|
assert.Equal(t, test.expectedLen, len(r.Opts.ItemExtensionFactory))
|
||||||
|
|
||||||
err = r.Connect(ctx)
|
err = r.Connect(ctx, ConnConfig{})
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
assert.Equal(t, test.expectedLen, len(r.Opts.ItemExtensionFactory))
|
assert.Equal(t, test.expectedLen, len(r.Opts.ItemExtensionFactory))
|
||||||
})
|
})
|
||||||
|
|||||||
42	src/pkg/repository/restores.go	(new normal file)
@ -0,0 +1,42 @@
|
|||||||
|
package repository
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
|
"github.com/alcionai/corso/src/internal/model"
|
||||||
|
"github.com/alcionai/corso/src/internal/operations"
|
||||||
|
"github.com/alcionai/corso/src/pkg/control"
|
||||||
|
"github.com/alcionai/corso/src/pkg/count"
|
||||||
|
"github.com/alcionai/corso/src/pkg/selectors"
|
||||||
|
"github.com/alcionai/corso/src/pkg/store"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Restorer interface {
|
||||||
|
NewRestore(
|
||||||
|
ctx context.Context,
|
||||||
|
backupID string,
|
||||||
|
sel selectors.Selector,
|
||||||
|
restoreCfg control.RestoreConfig,
|
||||||
|
) (operations.RestoreOperation, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewRestore generates a restoreOperation runner.
|
||||||
|
func (r repository) NewRestore(
|
||||||
|
ctx context.Context,
|
||||||
|
backupID string,
|
||||||
|
sel selectors.Selector,
|
||||||
|
restoreCfg control.RestoreConfig,
|
||||||
|
) (operations.RestoreOperation, error) {
|
||||||
|
return operations.NewRestoreOperation(
|
||||||
|
ctx,
|
||||||
|
r.Opts,
|
||||||
|
r.dataLayer,
|
||||||
|
store.NewWrapper(r.modelStore),
|
||||||
|
r.Provider,
|
||||||
|
r.Account,
|
||||||
|
model.StableID(backupID),
|
||||||
|
sel,
|
||||||
|
restoreCfg,
|
||||||
|
r.Bus,
|
||||||
|
count.New())
|
||||||
|
}
|
||||||
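Constructing a restore runner against the new file above, as a sketch; sel and backupID are assumed to come from the caller, and the zero-valued RestoreConfig is only for illustration:

ro, err := r.NewRestore(ctx, backupID, sel, control.RestoreConfig{})
if err != nil {
	return clues.Stack(err)
}

// the operation is built against r.Provider (the repository's attached data
// provider) rather than opening a fresh m365 connection per call; executing
// the operation is outside the scope of this diff.
_ = ro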
@ -697,7 +697,7 @@ func (s ExchangeScope) IncludesCategory(cat exchangeCategory) bool {
|
|||||||
// returns true if the category is included in the scope's data type,
|
// returns true if the category is included in the scope's data type,
|
||||||
// and the value is set to Any().
|
// and the value is set to Any().
|
||||||
func (s ExchangeScope) IsAny(cat exchangeCategory) bool {
|
func (s ExchangeScope) IsAny(cat exchangeCategory) bool {
|
||||||
return IsAnyTarget(s, cat)
|
return isAnyTarget(s, cat)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get returns the data category in the scope. If the scope
|
// Get returns the data category in the scope. If the scope
|
||||||
|
|||||||
@ -699,7 +699,7 @@ func (s GroupsScope) IncludesCategory(cat groupsCategory) bool {
|
|||||||
// returns true if the category is included in the scope's data type,
|
// returns true if the category is included in the scope's data type,
|
||||||
// and the value is set to Any().
|
// and the value is set to Any().
|
||||||
func (s GroupsScope) IsAny(cat groupsCategory) bool {
|
func (s GroupsScope) IsAny(cat groupsCategory) bool {
|
||||||
return IsAnyTarget(s, cat)
|
return isAnyTarget(s, cat)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get returns the data category in the scope. If the scope
|
// Get returns the data category in the scope. If the scope
|
||||||
|
|||||||
@ -484,7 +484,7 @@ func (s OneDriveScope) Matches(cat oneDriveCategory, target string) bool {
|
|||||||
// returns true if the category is included in the scope's data type,
|
// returns true if the category is included in the scope's data type,
|
||||||
// and the value is set to Any().
|
// and the value is set to Any().
|
||||||
func (s OneDriveScope) IsAny(cat oneDriveCategory) bool {
|
func (s OneDriveScope) IsAny(cat oneDriveCategory) bool {
|
||||||
return IsAnyTarget(s, cat)
|
return isAnyTarget(s, cat)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get returns the data category in the scope. If the scope
|
// Get returns the data category in the scope. If the scope
|
||||||
|
|||||||
@ -694,7 +694,7 @@ func matchesPathValues[T scopeT, C categoryT](
|
|||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
if IsAnyTarget(sc, cc) {
|
if isAnyTarget(sc, cc) {
|
||||||
// continue, not return: all path keys must match the entry to succeed
|
// continue, not return: all path keys must match the entry to succeed
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
@ -795,7 +795,7 @@ func isNoneTarget[T scopeT, C categoryT](s T, cat C) bool {
|
|||||||
|
|
||||||
// returns true if the category is included in the scope's category type,
|
// returns true if the category is included in the scope's category type,
|
||||||
// and the value is set to Any().
|
// and the value is set to Any().
|
||||||
func IsAnyTarget[T scopeT, C categoryT](s T, cat C) bool {
|
func isAnyTarget[T scopeT, C categoryT](s T, cat C) bool {
|
||||||
if !typeAndCategoryMatches(cat, s.categorizer()) {
|
if !typeAndCategoryMatches(cat, s.categorizer()) {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
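With the helper unexported, code outside the selectors package checks wildcard scopes through the per-service IsAny methods retained above. A minimal sketch, where s is an ExchangeScope taken from a selector and ExchangeMail is assumed to be the exported mail category constant:

if s.IsAny(selectors.ExchangeMail) {
	// the scope targets every mail item for its resource owner.
}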
|
|||||||
@ -125,14 +125,14 @@ func (suite *SelectorScopesSuite) TestGetCatValue() {
|
|||||||
func (suite *SelectorScopesSuite) TestIsAnyTarget() {
|
func (suite *SelectorScopesSuite) TestIsAnyTarget() {
|
||||||
t := suite.T()
|
t := suite.T()
|
||||||
stub := stubScope("")
|
stub := stubScope("")
|
||||||
assert.True(t, IsAnyTarget(stub, rootCatStub))
|
assert.True(t, isAnyTarget(stub, rootCatStub))
|
||||||
assert.True(t, IsAnyTarget(stub, leafCatStub))
|
assert.True(t, isAnyTarget(stub, leafCatStub))
|
||||||
assert.False(t, IsAnyTarget(stub, mockCategorizer("smarf")))
|
assert.False(t, isAnyTarget(stub, mockCategorizer("smarf")))
|
||||||
|
|
||||||
stub = stubScope("none")
|
stub = stubScope("none")
|
||||||
assert.False(t, IsAnyTarget(stub, rootCatStub))
|
assert.False(t, isAnyTarget(stub, rootCatStub))
|
||||||
assert.False(t, IsAnyTarget(stub, leafCatStub))
|
assert.False(t, isAnyTarget(stub, leafCatStub))
|
||||||
assert.False(t, IsAnyTarget(stub, mockCategorizer("smarf")))
|
assert.False(t, isAnyTarget(stub, mockCategorizer("smarf")))
|
||||||
}
|
}
|
||||||
|
|
||||||
var reduceTestTable = []struct {
|
var reduceTestTable = []struct {
|
||||||
|
|||||||
@ -625,7 +625,7 @@ func (s SharePointScope) IncludesCategory(cat sharePointCategory) bool {
|
|||||||
// returns true if the category is included in the scope's data type,
|
// returns true if the category is included in the scope's data type,
|
||||||
// and the value is set to Any().
|
// and the value is set to Any().
|
||||||
func (s SharePointScope) IsAny(cat sharePointCategory) bool {
|
func (s SharePointScope) IsAny(cat sharePointCategory) bool {
|
||||||
return IsAnyTarget(s, cat)
|
return isAnyTarget(s, cat)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get returns the data category in the scope. If the scope
|
// Get returns the data category in the scope. If the scope
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff.