Dry-run support for Backup

parent fd3a4eb6ff
commit 2cf44ee649
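In brief: the commit adds a DryRun option to control.Options, derives it from the CLI run-mode flag (RunModeDryRun = "dry"), forces the repository connection read-only for dry runs, threads per-service discovery stats out of ProduceBackupCollections via a new inject.BackupProducerResults struct, and skips all persistence in BackupOperation when DryRun is set, printing the discovered-item stats instead. A minimal, self-contained Go sketch of the flag-to-option wiring; the type and constant names below are local stand-ins for the corso internals touched in the hunks that follow:

package main

import "fmt"

// Local stand-ins for corso's control.Options and CLI flag values; the real
// types live in src/pkg/control and src/cli/flags.
type repoOptions struct{ ReadOnly bool }

type options struct {
	DryRun bool
	Repo   repoOptions
}

const (
	runModeDryRun = "dry"
	runModeRun    = "run"
)

func main() {
	runModeFV := runModeDryRun // would come from the CLI's run-mode flag

	// mirrors Control(): DryRun is derived from the run-mode flag value
	opt := options{DryRun: runModeFV == runModeDryRun}

	// mirrors GetAccountAndConnectWithOverrides(): dry runs force a
	// read-only repository connection so nothing can be persisted
	if opt.DryRun {
		opt.Repo.ReadOnly = true
	}

	fmt.Printf("dry-run=%v read-only=%v\n", opt.DryRun, opt.Repo.ReadOnly)
}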
@@ -10,6 +10,7 @@ import (
 	"github.com/spf13/cobra"
 
 	"github.com/alcionai/corso/src/cli/flags"
+	"github.com/alcionai/corso/src/cli/print"
 	. "github.com/alcionai/corso/src/cli/print"
 	"github.com/alcionai/corso/src/cli/utils"
 	"github.com/alcionai/corso/src/internal/common/idname"
@@ -214,6 +215,12 @@ func genericCreateCommand(
 			continue
 		}
 
+		if bo.Options.DryRun {
+			// Print backup stats results here
+			print.All(ctx, bo.Results.Stats)
+			continue
+		}
+
 		bIDs = append(bIDs, string(bo.Results.BackupID))
 
 	if !DisplayJSONFormat() {
@@ -401,6 +408,7 @@ func printBackupStats(ctx context.Context, r repository.Repositoryer, bid string
 	b, err := r.Backup(ctx, bid)
 	if err != nil {
 		logger.CtxErr(ctx, err).Error("finding backup immediately after backup operation completion")
+		return
 	}
 
 	b.ToPrintable().Stats.Print(ctx)

@@ -45,6 +45,7 @@ var (
 // well-known flag values
 const (
 	RunModeFlagTest = "flag-test"
+	RunModeDryRun   = "dry"
 	RunModeRun      = "run"
 )
 
@@ -28,6 +28,7 @@ func Control() control.Options {
 	opt.ToggleFeatures.ExchangeImmutableIDs = flags.EnableImmutableIDFV
 	opt.ToggleFeatures.DisableConcurrencyLimiter = flags.DisableConcurrencyLimiterFV
 	opt.Parallelism.ItemFetch = flags.FetchParallelismFV
+	opt.DryRun = flags.RunModeFV == flags.RunModeDryRun
 
 	return opt
 }

@@ -68,6 +68,11 @@ func GetAccountAndConnectWithOverrides(
 
 	opts := ControlWithConfig(cfg)
 
+	if opts.DryRun {
+		logger.CtxErr(ctx, err).Info("--dry-run is set")
+		opts.Repo.ReadOnly = true
+	}
+
 	r, err := repository.New(
 		ctx,
 		cfg.Account,

@@ -1,5 +1,12 @@
 package data
 
+import (
+	"context"
+	"strconv"
+
+	"github.com/alcionai/corso/src/cli/print"
+)
+
 type CollectionStats struct {
 	Folders,
 	Objects,
@@ -15,3 +22,34 @@ func (cs CollectionStats) IsZero() bool {
 func (cs CollectionStats) String() string {
 	return cs.Details
 }
+
+// interface compliance checks
+var _ print.Printable = &CollectionStats{}
+
+// Print writes the Backup to StdOut, in the format requested by the caller.
+func (cs CollectionStats) Print(ctx context.Context) {
+	print.Item(ctx, cs)
+}
+
+// MinimumPrintable reduces the Backup to its minimally printable details.
+func (cs CollectionStats) MinimumPrintable() any {
+	return cs
+}
+
+// Headers returns the human-readable names of properties in a Backup
+// for printing out to a terminal in a columnar display.
+func (cs CollectionStats) Headers() []string {
+	return []string{
+		"Folders",
+		"Objects",
+	}
+}
+
+// Values returns the values matching the Headers list for printing
+// out to a terminal in a columnar display.
+func (cs CollectionStats) Values() []string {
+	return []string{
+		strconv.Itoa(cs.Folders),
+		strconv.Itoa(cs.Objects),
+	}
+}
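The new methods above make CollectionStats satisfy the CLI's print.Printable interface, so the same value can be emitted as JSON (via MinimumPrintable) or as a two-column table (via Headers/Values). A self-contained sketch of the columnar path, using local stand-ins rather than corso's real print package:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// printable mimics the parts of corso's print.Printable contract that the
// new CollectionStats methods satisfy.
type printable interface {
	Headers() []string
	Values() []string
}

type collectionStats struct{ Folders, Objects int }

func (cs collectionStats) Headers() []string { return []string{"Folders", "Objects"} }

func (cs collectionStats) Values() []string {
	return []string{strconv.Itoa(cs.Folders), strconv.Itoa(cs.Objects)}
}

func main() {
	var p printable = collectionStats{Folders: 3, Objects: 42}

	// columnar display, roughly what the CLI's table output would show
	fmt.Println(strings.Join(p.Headers(), "\t"))
	fmt.Println(strings.Join(p.Values(), "\t"))
}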
@@ -36,7 +36,7 @@ func (ctrl *Controller) ProduceBackupCollections(
 	ctx context.Context,
 	bpc inject.BackupProducerConfig,
 	errs *fault.Bus,
-) ([]data.BackupCollection, prefixmatcher.StringSetReader, bool, error) {
+) (inject.BackupProducerResults, error) {
 	service := bpc.Selector.PathService()
 
 	ctx, end := diagnostics.Span(
@@ -53,18 +53,19 @@ func (ctrl *Controller) ProduceBackupCollections(
 
 	err := verifyBackupInputs(bpc.Selector, ctrl.IDNameLookup.IDs())
 	if err != nil {
-		return nil, nil, false, clues.Stack(err).WithClues(ctx)
+		return inject.BackupProducerResults{}, clues.Stack(err).WithClues(ctx)
 	}
 
 	var (
 		colls                []data.BackupCollection
 		ssmb                 *prefixmatcher.StringSetMatcher
 		canUsePreviousBackup bool
+		results              inject.BackupProducerResults
 	)
 
 	switch service {
 	case path.ExchangeService:
-		colls, ssmb, canUsePreviousBackup, err = exchange.ProduceBackupCollections(
+		results, err = exchange.ProduceBackupCollections(
 			ctx,
 			bpc,
 			ctrl.AC,
@@ -72,11 +73,11 @@ func (ctrl *Controller) ProduceBackupCollections(
 			ctrl.UpdateStatus,
 			errs)
 		if err != nil {
-			return nil, nil, false, err
+			return inject.BackupProducerResults{}, err
 		}
 
 	case path.OneDriveService:
-		colls, ssmb, canUsePreviousBackup, err = onedrive.ProduceBackupCollections(
+		results, err = onedrive.ProduceBackupCollections(
 			ctx,
 			bpc,
 			ctrl.AC,
@@ -84,7 +85,7 @@ func (ctrl *Controller) ProduceBackupCollections(
 			ctrl.UpdateStatus,
 			errs)
 		if err != nil {
-			return nil, nil, false, err
+			return inject.BackupProducerResults{}, err
 		}
 
 	case path.SharePointService:
@@ -96,8 +97,9 @@ func (ctrl *Controller) ProduceBackupCollections(
 			ctrl.UpdateStatus,
 			errs)
 		if err != nil {
-			return nil, nil, false, err
+			return inject.BackupProducerResults{}, err
 		}
+		results = inject.BackupProducerResults{Collections: colls, Excludes: ssmb, CanUsePreviousBackup: canUsePreviousBackup}
 
 	case path.GroupsService:
 		colls, ssmb, err = groups.ProduceBackupCollections(
@@ -108,15 +110,15 @@ func (ctrl *Controller) ProduceBackupCollections(
 			ctrl.UpdateStatus,
 			errs)
 		if err != nil {
-			return nil, nil, false, err
+			return inject.BackupProducerResults{}, err
 		}
 
 		// canUsePreviousBacukp can be always returned true for groups as we
 		// return a tombstone collection in case the metadata read fails
 		canUsePreviousBackup = true
+		results = inject.BackupProducerResults{Collections: colls, Excludes: ssmb, CanUsePreviousBackup: canUsePreviousBackup}
 	default:
-		return nil, nil, false, clues.Wrap(clues.New(service.String()), "service not supported").WithClues(ctx)
+		return inject.BackupProducerResults{}, clues.Wrap(clues.New(service.String()), "service not supported").WithClues(ctx)
 	}
 
 	for _, c := range colls {
@@ -131,7 +133,7 @@ func (ctrl *Controller) ProduceBackupCollections(
 		}
 	}
 
-	return colls, ssmb, canUsePreviousBackup, nil
+	return results, nil
 }
 
 func (ctrl *Controller) IsServiceEnabled(
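The signature change above is the core of the refactor: the four-value return ([]data.BackupCollection, prefixmatcher.StringSetReader, bool, error) collapses into (inject.BackupProducerResults, error), which lets this commit attach discovered-item stats without reshaping every call site again. A minimal sketch of the pattern with simplified field types:

package main

import "fmt"

// backupProducerResults is a local stand-in for inject.BackupProducerResults;
// field names match the diff, field types are simplified.
type backupProducerResults struct {
	Collections          []string // []data.BackupCollection in corso
	Excludes             any      // prefixmatcher.StringSetReader in corso
	CanUsePreviousBackup bool
}

// produce returns one struct instead of three values plus an error, so new
// outputs (like dry-run stats) don't ripple through every caller.
func produce() (backupProducerResults, error) {
	return backupProducerResults{
		Collections:          []string{"exchange/email"},
		CanUsePreviousBackup: true,
	}, nil
}

func main() {
	results, err := produce()
	if err != nil {
		panic(err)
	}

	fmt.Println(len(results.Collections), results.CanUsePreviousBackup)
}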
@@ -248,14 +248,14 @@ func (suite *DataCollectionIntgSuite) TestDataCollections_invalidResourceOwner()
 				ProtectedResource: test.getSelector(t),
 			}
 
-			collections, excludes, canUsePreviousBackup, err := ctrl.ProduceBackupCollections(
+			results, err := ctrl.ProduceBackupCollections(
 				ctx,
 				bpc,
 				fault.New(true))
 			assert.Error(t, err, clues.ToCore(err))
-			assert.False(t, canUsePreviousBackup, "can use previous backup")
-			assert.Empty(t, collections)
-			assert.Nil(t, excludes)
+			assert.False(t, results.CanUsePreviousBackup, "can use previous backup")
+			assert.Empty(t, results.Collections)
+			assert.Nil(t, results.Excludes)
 		})
 	}
 }
@@ -395,27 +395,27 @@ func (suite *SPCollectionIntgSuite) TestCreateSharePointCollection_Libraries() {
 		Selector: sel.Selector,
 	}
 
-	cols, excludes, canUsePreviousBackup, err := ctrl.ProduceBackupCollections(
+	results, err := ctrl.ProduceBackupCollections(
 		ctx,
 		bpc,
 		fault.New(true))
 	require.NoError(t, err, clues.ToCore(err))
-	assert.True(t, canUsePreviousBackup, "can use previous backup")
-	require.Len(t, cols, 2) // 1 collection, 1 path prefix directory to ensure the root path exists.
+	assert.True(t, results.CanUsePreviousBackup, "can use previous backup")
+	require.Len(t, results.Collections, 2) // 1 collection, 1 path prefix directory to ensure the root path exists.
 	// No excludes yet as this isn't an incremental backup.
-	assert.True(t, excludes.Empty())
+	assert.True(t, results.Excludes.Empty())
 
-	t.Logf("cols[0] Path: %s\n", cols[0].FullPath().String())
+	t.Logf("cols[0] Path: %s\n", results.Collections[0].FullPath().String())
 	assert.Equal(
 		t,
 		path.SharePointMetadataService.String(),
-		cols[0].FullPath().Service().String())
+		results.Collections[0].FullPath().Service().String())
 
-	t.Logf("cols[1] Path: %s\n", cols[1].FullPath().String())
+	t.Logf("cols[1] Path: %s\n", results.Collections[1].FullPath().String())
 	assert.Equal(
 		t,
 		path.SharePointService.String(),
-		cols[1].FullPath().Service().String())
+		results.Collections[1].FullPath().Service().String())
 }
 
 func (suite *SPCollectionIntgSuite) TestCreateSharePointCollection_Lists() {
@@ -445,17 +445,17 @@ func (suite *SPCollectionIntgSuite) TestCreateSharePointCollection_Lists() {
 		Selector: sel.Selector,
 	}
 
-	cols, excludes, canUsePreviousBackup, err := ctrl.ProduceBackupCollections(
+	results, err := ctrl.ProduceBackupCollections(
 		ctx,
 		bpc,
 		fault.New(true))
 	require.NoError(t, err, clues.ToCore(err))
-	assert.True(t, canUsePreviousBackup, "can use previous backup")
-	assert.Less(t, 0, len(cols))
+	assert.True(t, results.CanUsePreviousBackup, "can use previous backup")
+	assert.Less(t, 0, len(results.Collections))
 	// No excludes yet as this isn't an incremental backup.
-	assert.True(t, excludes.Empty())
+	assert.True(t, results.Excludes.Empty())
 
-	for _, collection := range cols {
+	for _, collection := range results.Collections {
 		t.Logf("Path: %s\n", collection.FullPath().String())
 
 		for item := range collection.Items(ctx, fault.New(true)) {
@@ -531,18 +531,18 @@ func (suite *GroupsCollectionIntgSuite) TestCreateGroupsCollection_SharePoint()
 		Selector: sel.Selector,
 	}
 
-	collections, excludes, canUsePreviousBackup, err := ctrl.ProduceBackupCollections(
+	results, err := ctrl.ProduceBackupCollections(
 		ctx,
 		bpc,
 		fault.New(true))
 	require.NoError(t, err, clues.ToCore(err))
-	assert.True(t, canUsePreviousBackup, "can use previous backup")
+	assert.True(t, results.CanUsePreviousBackup, "can use previous backup")
 	// No excludes yet as this isn't an incremental backup.
-	assert.True(t, excludes.Empty())
+	assert.True(t, results.Excludes.Empty())
 
 	// we don't know an exact count of drives this will produce,
 	// but it should be more than one.
-	assert.Greater(t, len(collections), 1)
+	assert.Greater(t, len(results.Collections), 1)
 
 	p, err := path.BuildMetadata(
 		suite.tenantID,
@@ -557,7 +557,7 @@ func (suite *GroupsCollectionIntgSuite) TestCreateGroupsCollection_SharePoint()
 
 	foundSitesMetadata := false
 
-	for _, coll := range collections {
+	for _, coll := range results.Collections {
 		sitesMetadataCollection := coll.FullPath().String() == p.String()
 
 		for object := range coll.Items(ctx, fault.New(true)) {
@@ -631,18 +631,18 @@ func (suite *GroupsCollectionIntgSuite) TestCreateGroupsCollection_SharePoint_In
 		MetadataCollections: mmc,
 	}
 
-	collections, excludes, canUsePreviousBackup, err := ctrl.ProduceBackupCollections(
+	results, err := ctrl.ProduceBackupCollections(
 		ctx,
 		bpc,
 		fault.New(true))
 	require.NoError(t, err, clues.ToCore(err))
-	assert.True(t, canUsePreviousBackup, "can use previous backup")
+	assert.True(t, results.CanUsePreviousBackup, "can use previous backup")
 	// No excludes yet as this isn't an incremental backup.
-	assert.True(t, excludes.Empty())
+	assert.True(t, results.Excludes.Empty())
 
 	// we don't know an exact count of drives this will produce,
 	// but it should be more than one.
-	assert.Greater(t, len(collections), 1)
+	assert.Greater(t, len(results.Collections), 1)
 
 	p, err := path.BuildMetadata(
 		suite.tenantID,
@@ -668,7 +668,7 @@ func (suite *GroupsCollectionIntgSuite) TestCreateGroupsCollection_SharePoint_In
 	sp, err = sp.Append(false, odConsts.SitesPathDir, ptr.Val(site.GetId()))
 	require.NoError(t, err, clues.ToCore(err))
 
-	for _, coll := range collections {
+	for _, coll := range results.Collections {
 		if coll.State() == data.DeletedState {
 			if coll.PreviousPath() != nil && coll.PreviousPath().String() == sp.String() {
 				foundRootTombstone = true

@@ -19,6 +19,7 @@ import (
 	odConsts "github.com/alcionai/corso/src/internal/m365/service/onedrive/consts"
 	"github.com/alcionai/corso/src/internal/m365/support"
 	"github.com/alcionai/corso/src/internal/observe"
+	"github.com/alcionai/corso/src/internal/operations/inject"
 	bupMD "github.com/alcionai/corso/src/pkg/backup/metadata"
 	"github.com/alcionai/corso/src/pkg/control"
 	"github.com/alcionai/corso/src/pkg/fault"
@@ -226,10 +227,10 @@ func (c *Collections) Get(
 	prevMetadata []data.RestoreCollection,
 	ssmb *prefixmatcher.StringSetMatchBuilder,
 	errs *fault.Bus,
-) ([]data.BackupCollection, bool, error) {
+) ([]data.BackupCollection, bool, inject.OneDriveStats, error) {
 	prevDeltas, oldPathsByDriveID, canUsePreviousBackup, err := deserializeMetadata(ctx, prevMetadata)
 	if err != nil {
-		return nil, false, err
+		return nil, false, inject.OneDriveStats{}, err
 	}
 
 	ctx = clues.Add(ctx, "can_use_previous_backup", canUsePreviousBackup)
@@ -250,7 +251,7 @@ func (c *Collections) Get(
 
 	drives, err := api.GetAllDrives(ctx, pager)
 	if err != nil {
-		return nil, false, err
+		return nil, false, inject.OneDriveStats{}, err
 	}
 
 	var (
@@ -259,6 +260,7 @@ func (c *Collections) Get(
 		// Drive ID -> folder ID -> folder path
 		folderPaths  = map[string]map[string]string{}
 		numPrevItems = 0
+		stats        = inject.OneDriveStats{}
 	)
 
 	for _, d := range drives {
@@ -296,7 +298,7 @@ func (c *Collections) Get(
 			prevDelta,
 			errs)
 		if err != nil {
-			return nil, false, err
+			return nil, false, inject.OneDriveStats{}, err
 		}
 
 		// Used for logging below.
@@ -338,7 +340,7 @@ func (c *Collections) Get(
 				prevDelta,
 				errs)
 			if err != nil {
-				return nil, false, err
+				return nil, false, inject.OneDriveStats{}, err
 			}
 		}
 
@@ -351,7 +353,7 @@ func (c *Collections) Get(
 
 			p, err := c.handler.CanonicalPath(odConsts.DriveFolderPrefixBuilder(driveID), c.tenantID)
 			if err != nil {
-				return nil, false, clues.Wrap(err, "making exclude prefix").WithClues(ictx)
+				return nil, false, inject.OneDriveStats{}, clues.Wrap(err, "making exclude prefix").WithClues(ictx)
 			}
 
 			ssmb.Add(p.String(), excluded)
@@ -379,7 +381,7 @@ func (c *Collections) Get(
 			prevPath, err := path.FromDataLayerPath(p, false)
 			if err != nil {
 				err = clues.Wrap(err, "invalid previous path").WithClues(ictx).With("deleted_path", p)
-				return nil, false, err
+				return nil, false, inject.OneDriveStats{}, err
 			}
 
 			col, err := NewCollection(
@@ -393,14 +395,19 @@ func (c *Collections) Get(
 				true,
 				nil)
 			if err != nil {
-				return nil, false, clues.Wrap(err, "making collection").WithClues(ictx)
+				return nil, false, inject.OneDriveStats{}, clues.Wrap(err, "making collection").WithClues(ictx)
 			}
 
 			c.CollectionMap[driveID][fldID] = col
 		}
 	}
 
+	stats.Folders += c.NumContainers
+	stats.Items += c.NumFiles
+
 	observe.Message(ctx, fmt.Sprintf("Discovered %d items to backup", c.NumItems))
+	observe.Message(ctx, fmt.Sprintf("Discovered %d stats to backup", stats.Items))
+	observe.Message(ctx, fmt.Sprintf("Discovered %d folder stats to backup", stats.Folders))
 
 	collections := []data.BackupCollection{}
 
@@ -415,7 +422,7 @@ func (c *Collections) Get(
 	for driveID := range driveTombstones {
 		prevDrivePath, err := c.handler.PathPrefix(c.tenantID, driveID)
 		if err != nil {
-			return nil, false, clues.Wrap(err, "making drive tombstone for previous path").WithClues(ctx)
+			return nil, false, inject.OneDriveStats{}, clues.Wrap(err, "making drive tombstone for previous path").WithClues(ctx)
 		}
 
 		coll, err := NewCollection(
@@ -429,7 +436,7 @@ func (c *Collections) Get(
 			true,
 			nil)
 		if err != nil {
-			return nil, false, clues.Wrap(err, "making drive tombstone").WithClues(ctx)
+			return nil, false, inject.OneDriveStats{}, clues.Wrap(err, "making drive tombstone").WithClues(ctx)
 		}
 
 		collections = append(collections, coll)
@@ -443,7 +450,7 @@ func (c *Collections) Get(
 		// empty/missing and default to a full backup.
 		logger.CtxErr(ctx, err).Info("making metadata collection path prefixes")
 
-		return collections, canUsePreviousBackup, nil
+		return collections, canUsePreviousBackup, inject.OneDriveStats{}, nil
 	}
 
 	md, err := graph.MakeMetadataCollection(
@@ -463,7 +470,7 @@ func (c *Collections) Get(
 		collections = append(collections, md)
 	}
 
-	return collections, canUsePreviousBackup, nil
+	return collections, canUsePreviousBackup, stats, nil
 }
 
 // addURLCacheToDriveCollections adds an URL cache to all collections belonging to

@@ -2306,7 +2306,7 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
 
 			delList := prefixmatcher.NewStringSetBuilder()
 
-			cols, canUsePreviousBackup, err := c.Get(ctx, prevMetadata, delList, errs)
+			cols, canUsePreviousBackup, _, err := c.Get(ctx, prevMetadata, delList, errs)
 			test.errCheck(t, err)
 			assert.Equal(t, test.canUsePreviousBackup, canUsePreviousBackup, "can use previous backup")
 			assert.Equal(t, test.expectedSkippedCount, len(errs.Skipped()))

@@ -275,7 +275,7 @@ func (suite *OneDriveIntgSuite) TestOneDriveNewCollections() {
 
 	ssmb := prefixmatcher.NewStringSetBuilder()
 
-	odcs, _, err := colls.Get(ctx, nil, ssmb, fault.New(true))
+	odcs, _, _, err := colls.Get(ctx, nil, ssmb, fault.New(true))
 	assert.NoError(t, err, clues.ToCore(err))
 	// Don't expect excludes as this isn't an incremental backup.
 	assert.True(t, ssmb.Empty())

@@ -33,7 +33,7 @@ func CreateCollections(
 	dps metadata.DeltaPaths,
 	su support.StatusUpdater,
 	errs *fault.Bus,
-) ([]data.BackupCollection, error) {
+) ([]data.BackupCollection, inject.ExchangeStats, error) {
 	ctx = clues.Add(ctx, "category", scope.Category().PathType())
 
 	var (
@@ -48,7 +48,7 @@ func CreateCollections(
 
 	handler, ok := handlers[category]
 	if !ok {
-		return nil, clues.New("unsupported backup category type").WithClues(ctx)
+		return nil, inject.ExchangeStats{}, clues.New("unsupported backup category type").WithClues(ctx)
 	}
 
 	foldersComplete := observe.MessageWithCompletion(
@@ -59,10 +59,10 @@ func CreateCollections(
 	rootFolder, cc := handler.NewContainerCache(bpc.ProtectedResource.ID())
 
 	if err := cc.Populate(ctx, errs, rootFolder); err != nil {
-		return nil, clues.Wrap(err, "populating container cache")
+		return nil, inject.ExchangeStats{}, clues.Wrap(err, "populating container cache")
 	}
 
-	collections, err := populateCollections(
+	collections, stats, err := populateCollections(
 		ctx,
 		qp,
 		handler,
@@ -73,14 +73,14 @@ func CreateCollections(
 		bpc.Options,
 		errs)
 	if err != nil {
-		return nil, clues.Wrap(err, "filling collections")
+		return nil, stats, clues.Wrap(err, "filling collections")
 	}
 
 	for _, coll := range collections {
 		allCollections = append(allCollections, coll)
 	}
 
-	return allCollections, nil
+	return allCollections, stats, nil
 }
 
 // populateCollections is a utility function
@@ -102,7 +102,7 @@ func populateCollections(
 	dps metadata.DeltaPaths,
 	ctrlOpts control.Options,
 	errs *fault.Bus,
-) (map[string]data.BackupCollection, error) {
+) (map[string]data.BackupCollection, inject.ExchangeStats, error) {
 	var (
 		// folder ID -> BackupCollection.
 		collections = map[string]data.BackupCollection{}
@@ -113,6 +113,7 @@ func populateCollections(
 		// deleted from this map, leaving only the deleted folders behind
 		tombstones = makeTombstones(dps)
 		category   = qp.Category
+		stats      = inject.ExchangeStats{}
 	)
 
 	logger.Ctx(ctx).Infow("filling collections", "len_deltapaths", len(dps))
@@ -121,7 +122,7 @@ func populateCollections(
 
 	for _, c := range resolver.Items() {
 		if el.Failure() != nil {
-			return nil, el.Failure()
+			return nil, stats, el.Failure()
 		}
 
 		cID := ptr.Val(c.GetId())
@@ -209,6 +210,21 @@ func populateCollections(
 		// add the current path for the container ID to be used in the next backup
 		// as the "previous path", for reference in case of a rename or relocation.
 		currPaths[cID] = currPath.String()
+
+		switch category {
+		case path.EmailCategory:
+			stats.EmailFolders++
+			stats.EmailsAdded += len(added)
+			stats.EmailsDeleted += len(removed)
+		case path.ContactsCategory:
+			stats.ContactFolders++
+			stats.ContactsAdded += len(added)
+			stats.ContactsDeleted += len(removed)
+		case path.EventsCategory:
+			stats.EventFolders++
+			stats.EventsAdded += len(added)
+			stats.EventsDeleted += len(removed)
+		}
 	}
 
 	// A tombstone is a folder that needs to be marked for deletion.
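populateCollections now tallies per-category counts as it walks the container resolver: one folder per container, plus the lengths of the added and removed item-ID sets. A tiny self-contained example of that tally for the email category (stand-in struct, same field names as inject.ExchangeStats):

package main

import "fmt"

// exchangeStats holds the email-category counters from inject.ExchangeStats.
type exchangeStats struct {
	EmailFolders, EmailsAdded, EmailsDeleted int
}

func main() {
	stats := exchangeStats{}

	// one email folder with 5 added and 2 removed item IDs, tallied the way
	// the new switch in populateCollections does for path.EmailCategory
	added, removed := make([]string, 5), make([]string, 2)

	stats.EmailFolders++
	stats.EmailsAdded += len(added)
	stats.EmailsDeleted += len(removed)

	fmt.Printf("%+v\n", stats) // {EmailFolders:1 EmailsAdded:5 EmailsDeleted:2}
}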
@@ -217,7 +233,7 @@ func populateCollections(
 	// resolver (which contains all the resource owners' current containers).
 	for id, p := range tombstones {
 		if el.Failure() != nil {
-			return nil, el.Failure()
+			return nil, stats, el.Failure()
 		}
 
 		var (
@@ -258,7 +274,7 @@ func populateCollections(
 		qp.Category,
 		false)
 	if err != nil {
-		return nil, clues.Wrap(err, "making metadata path")
+		return nil, stats, clues.Wrap(err, "making metadata path")
 	}
 
 	col, err := graph.MakeMetadataCollection(
@@ -269,12 +285,12 @@ func populateCollections(
 		},
 		statusUpdater)
 	if err != nil {
-		return nil, clues.Wrap(err, "making metadata collection")
+		return nil, stats, clues.Wrap(err, "making metadata collection")
 	}
 
 	collections["metadata"] = col
 
-	return collections, el.Failure()
+	return collections, stats, el.Failure()
 }
 
 // produces a set of id:path pairs from the deltapaths map.

@@ -43,7 +43,7 @@ func CollectLibraries(
 			bpc.Options)
 	)
 
-	odcs, canUsePreviousBackup, err := colls.Get(ctx, bpc.MetadataCollections, ssmb, errs)
+	odcs, canUsePreviousBackup, _, err := colls.Get(ctx, bpc.MetadataCollections, ssmb, errs)
 	if err != nil {
 		return nil, false, graph.Wrap(ctx, err, "getting library")
 	}

@@ -600,14 +600,14 @@ func runBackupAndCompare(
 	}
 
 	start := time.Now()
-	dcs, excludes, canUsePreviousBackup, err := backupCtrl.ProduceBackupCollections(
+	results, err := backupCtrl.ProduceBackupCollections(
 		ctx,
 		bpc,
 		fault.New(true))
 	require.NoError(t, err, clues.ToCore(err))
-	assert.True(t, canUsePreviousBackup, "can use previous backup")
+	assert.True(t, results.CanUsePreviousBackup, "can use previous backup")
 	// No excludes yet because this isn't an incremental backup.
-	assert.True(t, excludes.Empty())
+	assert.True(t, results.Excludes.Empty())
 
 	t.Logf("Backup enumeration complete in %v\n", time.Since(start))
 
@@ -618,7 +618,7 @@ func runBackupAndCompare(
 		ctx,
 		totalKopiaItems,
 		expectedData,
-		dcs,
+		results.Collections,
 		sci)
 
 	status := backupCtrl.Wait()
@@ -1195,14 +1195,14 @@ func (suite *ControllerIntegrationSuite) TestMultiFolderBackupDifferentNames() {
 			Selector: backupSel,
 		}
 
-		dcs, excludes, canUsePreviousBackup, err := backupCtrl.ProduceBackupCollections(
+		results, err := backupCtrl.ProduceBackupCollections(
 			ctx,
 			bpc,
 			fault.New(true))
 		require.NoError(t, err, clues.ToCore(err))
-		assert.True(t, canUsePreviousBackup, "can use previous backup")
+		assert.True(t, results.CanUsePreviousBackup, "can use previous backup")
 		// No excludes yet because this isn't an incremental backup.
-		assert.True(t, excludes.Empty())
+		assert.True(t, results.Excludes.Empty())
 
 		t.Log("Backup enumeration complete")
 
@@ -1217,7 +1217,7 @@ func (suite *ControllerIntegrationSuite) TestMultiFolderBackupDifferentNames() {
 
 		// Pull the data prior to waiting for the status as otherwise it will
 		// deadlock.
-		skipped := checkCollections(t, ctx, allItems, allExpectedData, dcs, ci)
+		skipped := checkCollections(t, ctx, allItems, allExpectedData, results.Collections, ci)
 
 		status := backupCtrl.Wait()
 		assert.Equal(t, allItems+skipped, status.Objects, "status.Objects")
@@ -1374,20 +1374,20 @@ func (suite *ControllerIntegrationSuite) TestBackup_CreatesPrefixCollections() {
 		Selector: backupSel,
 	}
 
-	dcs, excludes, canUsePreviousBackup, err := backupCtrl.ProduceBackupCollections(
+	results, err := backupCtrl.ProduceBackupCollections(
 		ctx,
 		bpc,
 		fault.New(true))
 	require.NoError(t, err, clues.ToCore(err))
-	assert.True(t, canUsePreviousBackup, "can use previous backup")
+	assert.True(t, results.CanUsePreviousBackup, "can use previous backup")
 	// No excludes yet because this isn't an incremental backup.
-	assert.True(t, excludes.Empty())
+	assert.True(t, results.Excludes.Empty())
 
 	t.Logf("Backup enumeration complete in %v\n", time.Since(start))
 
 	// Use a map to find duplicates.
 	foundCategories := []string{}
-	for _, col := range dcs {
+	for _, col := range results.Collections {
 		// TODO(ashmrtn): We should be able to remove the below if we change how
 		// status updates are done. Ideally we shouldn't have to fetch items in
 		// these collections to avoid deadlocking.

@@ -42,12 +42,10 @@ func (ctrl Controller) ProduceBackupCollections(
 	_ inject.BackupProducerConfig,
 	_ *fault.Bus,
 ) (
-	[]data.BackupCollection,
-	prefixmatcher.StringSetReader,
-	bool,
+	inject.BackupProducerResults,
 	error,
 ) {
-	return ctrl.Collections, ctrl.Exclude, ctrl.Err == nil, ctrl.Err
+	return inject.BackupProducerResults{Collections: ctrl.Collections, Excludes: ctrl.Exclude, CanUsePreviousBackup: ctrl.Err == nil}, ctrl.Err
 }
 
 func (ctrl *Controller) GetMetadataPaths(

@@ -5,7 +5,6 @@ import (
 
 	"github.com/alcionai/clues"
 
-	"github.com/alcionai/corso/src/internal/common/prefixmatcher"
 	"github.com/alcionai/corso/src/internal/data"
 	"github.com/alcionai/corso/src/internal/m365/collection/exchange"
 	"github.com/alcionai/corso/src/internal/m365/graph"
@@ -26,22 +25,23 @@ func ProduceBackupCollections(
 	tenantID string,
 	su support.StatusUpdater,
 	errs *fault.Bus,
-) ([]data.BackupCollection, *prefixmatcher.StringSetMatcher, bool, error) {
+) (inject.BackupProducerResults, error) {
 	eb, err := bpc.Selector.ToExchangeBackup()
 	if err != nil {
-		return nil, nil, false, clues.Wrap(err, "exchange dataCollection selector").WithClues(ctx)
+		return inject.BackupProducerResults{}, clues.Wrap(err, "exchange dataCollection selector").WithClues(ctx)
 	}
 
 	var (
 		collections = []data.BackupCollection{}
 		el          = errs.Local()
 		categories  = map[path.CategoryType]struct{}{}
+		mergedStats = inject.ExchangeStats{}
 		handlers    = exchange.BackupHandlers(ac)
 	)
 
 	canMakeDeltaQueries, err := canMakeDeltaQueries(ctx, ac.Users(), bpc.ProtectedResource.ID())
 	if err != nil {
-		return nil, nil, false, clues.Stack(err)
+		return inject.BackupProducerResults{}, clues.Stack(err)
 	}
 
 	if !canMakeDeltaQueries {
@@ -59,7 +59,7 @@ func ProduceBackupCollections(
 
 	cdps, canUsePreviousBackup, err := exchange.ParseMetadataCollections(ctx, bpc.MetadataCollections)
 	if err != nil {
-		return nil, nil, false, err
+		return inject.BackupProducerResults{}, err
 	}
 
 	ctx = clues.Add(ctx, "can_use_previous_backup", canUsePreviousBackup)
@@ -69,7 +69,7 @@ func ProduceBackupCollections(
 			break
 		}
 
-		dcs, err := exchange.CreateCollections(
+		dcs, stats, err := exchange.CreateCollections(
 			ctx,
 			bpc,
 			handlers,
@@ -86,6 +86,16 @@ func ProduceBackupCollections(
 		categories[scope.Category().PathType()] = struct{}{}
 
 		collections = append(collections, dcs...)
+
+		mergedStats.ContactFolders += stats.ContactFolders
+		mergedStats.ContactsAdded += stats.ContactsAdded
+		mergedStats.ContactsDeleted += stats.ContactsDeleted
+		mergedStats.EventFolders += stats.EventFolders
+		mergedStats.EventsAdded += stats.EventsAdded
+		mergedStats.EventsDeleted += stats.EventsDeleted
+		mergedStats.EmailFolders += stats.EmailFolders
+		mergedStats.EmailsAdded += stats.EmailsAdded
+		mergedStats.EmailsDeleted += stats.EmailsDeleted
 	}
 
 	if len(collections) > 0 {
@@ -99,13 +109,18 @@ func ProduceBackupCollections(
 			su,
 			errs)
 		if err != nil {
-			return nil, nil, false, err
+			return inject.BackupProducerResults{}, err
 		}
 
 		collections = append(collections, baseCols...)
 	}
 
-	return collections, nil, canUsePreviousBackup, el.Failure()
+	return inject.BackupProducerResults{
+			Collections:          collections,
+			Excludes:             nil,
+			CanUsePreviousBackup: canUsePreviousBackup,
+			DiscoveredItems:      inject.Stats{Exchange: &mergedStats}},
+		el.Failure()
 }
 
 func canMakeDeltaQueries(

@@ -25,10 +25,10 @@ func ProduceBackupCollections(
 	tenant string,
 	su support.StatusUpdater,
 	errs *fault.Bus,
-) ([]data.BackupCollection, *prefixmatcher.StringSetMatcher, bool, error) {
+) (inject.BackupProducerResults, error) {
 	odb, err := bpc.Selector.ToOneDriveBackup()
 	if err != nil {
-		return nil, nil, false, clues.Wrap(err, "parsing selector").WithClues(ctx)
+		return inject.BackupProducerResults{}, clues.Wrap(err, "parsing selector").WithClues(ctx)
 	}
 
 	var (
@@ -38,6 +38,7 @@ func ProduceBackupCollections(
 		ssmb                 = prefixmatcher.NewStringSetBuilder()
 		odcs                 []data.BackupCollection
 		canUsePreviousBackup bool
+		stats                = inject.OneDriveStats{}
 	)
 
 	// for each scope that includes oneDrive items, get all
@@ -55,7 +56,7 @@ func ProduceBackupCollections(
 			su,
 			bpc.Options)
 
-		odcs, canUsePreviousBackup, err = nc.Get(ctx, bpc.MetadataCollections, ssmb, errs)
+		odcs, canUsePreviousBackup, stats, err = nc.Get(ctx, bpc.MetadataCollections, ssmb, errs)
 		if err != nil {
 			el.AddRecoverable(ctx, clues.Stack(err).Label(fault.LabelForceNoBackupCreation))
 		}
@@ -67,7 +68,7 @@ func ProduceBackupCollections(
 
 	mcs, err := migrationCollections(bpc, tenant, su)
 	if err != nil {
-		return nil, nil, false, err
+		return inject.BackupProducerResults{}, err
 	}
 
 	collections = append(collections, mcs...)
@@ -83,13 +84,13 @@ func ProduceBackupCollections(
 			su,
 			errs)
 		if err != nil {
-			return nil, nil, false, err
+			return inject.BackupProducerResults{}, err
 		}
 
 		collections = append(collections, baseCols...)
 	}
 
-	return collections, ssmb.ToReader(), canUsePreviousBackup, el.Failure()
+	return inject.BackupProducerResults{Collections: collections, Excludes: ssmb.ToReader(), CanUsePreviousBackup: canUsePreviousBackup, DiscoveredItems: inject.Stats{OneDrive: &stats}}, el.Failure()
 }
 
 // adds data migrations to the collection set.

@@ -67,6 +67,7 @@ type BackupOperation struct {
 type BackupResults struct {
 	stats.ReadWrites
 	stats.StartAndEndTime
+	inject.Stats
 	BackupID model.StableID `json:"backupID"`
 }
 
@@ -291,6 +292,11 @@ func (op *BackupOperation) Run(ctx context.Context) (err error) {
 
 	LogFaultErrors(ctx, op.Errors.Errors(), "running backup")
 
+	// Don't persist any results for a dry-run operation
+	if op.Options.DryRun {
+		return nil
+	}
+
 	// -----
 	// Persistence
 	// -----
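With the early returns above (and the matching one in do, next hunk), a dry run stops right after discovery: Run skips the persistence block, and do stashes the producer's DiscoveredItems on op.Results.Stats before returning. A compact sketch of that short-circuit using local types, not corso's:

package main

import "fmt"

type options struct{ DryRun bool }

type producerResults struct{ DiscoveredFolders, DiscoveredItems int }

// do mirrors the control flow added to BackupOperation.do: on a dry run the
// discovered stats are recorded and persistence is skipped entirely.
func do(opts options, produced producerResults) (persisted bool) {
	if opts.DryRun {
		fmt.Printf("would back up %d folders / %d items\n",
			produced.DiscoveredFolders, produced.DiscoveredItems)
		return false // nothing is written to the repo
	}

	// ... consumeBackupCollections and detail merging would run here ...
	return true
}

func main() {
	fmt.Println(do(options{DryRun: true}, producerResults{3, 42}))
}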
@@ -382,7 +388,7 @@ func (op *BackupOperation) do(
 	// the entire subtree instead of returning an additional bool. That way base
 	// selection is controlled completely by flags and merging is controlled
 	// completely by collections.
-	cs, ssmb, canUsePreviousBackup, err := produceBackupDataCollections(
+	producerResults, err := produceBackupDataCollections(
 		ctx,
 		op.bp,
 		op.ResourceOwner,
@@ -397,8 +403,14 @@ func (op *BackupOperation) do(
 
 	ctx = clues.Add(
 		ctx,
-		"can_use_previous_backup", canUsePreviousBackup,
-		"collection_count", len(cs))
+		"can_use_previous_backup", producerResults.CanUsePreviousBackup,
+		"collection_count", len(producerResults.Collections))
 
+	// Do nothing with the data collections
+	if op.Options.DryRun {
+		op.Results.Stats = producerResults.DiscoveredItems
+		return nil, nil
+	}
+
 	writeStats, deets, toMerge, err := consumeBackupCollections(
 		ctx,
@@ -406,10 +418,10 @@ func (op *BackupOperation) do(
 		op.account.ID(),
 		reasons,
 		mans,
-		cs,
-		ssmb,
+		producerResults.Collections,
+		producerResults.Excludes,
 		backupID,
-		op.incremental && canUseMetadata && canUsePreviousBackup,
+		op.incremental && canUseMetadata && producerResults.CanUsePreviousBackup,
 		op.Errors)
 	if err != nil {
 		return nil, clues.Wrap(err, "persisting collection backups")
@@ -439,6 +451,33 @@ func (op *BackupOperation) do(
 	return deets, nil
 }
 
+// func summarizeBackupCollections(ctx context.Context, results inject.BackupProducerResults) (stats.BackupItems, error) {
+// 	collStats := stats.BackupItems{}
+// 	bus := fault.New(false)
+
+// 	for _, c := range cs {
+// 		switch c.State() {
+// 		case data.NewState:
+// 			logger.Ctx(ctx).Infow("New Folder:", c.FullPath())
+// 			collStats.NewFolders++
+// 		case data.NotMovedState, data.MovedState:
+// 			logger.Ctx(ctx).Infow("Modified Folder:", c.FullPath())
+// 			collStats.ModifiedFolders++
+// 		case data.DeletedState:
+// 			logger.Ctx(ctx).Infow("Deleted Folder:", c.FullPath())
+// 			collStats.DeletedFolders++
+// 		}
+
+// 		for i := range c.Items(ctx, bus) {
+// 			logger.Ctx(ctx).Infow("Item", i.ID())
+// 			collStats.Items++
+// 		}
+// 	}
+
+// 	return collStats, nil
+
+// }
+
 func makeFallbackReasons(tenant string, sel selectors.Selector) ([]identity.Reasoner, error) {
 	if sel.PathService() != path.SharePointService &&
 		sel.DiscreteOwner != sel.DiscreteOwnerName {
@@ -469,7 +508,7 @@ func produceBackupDataCollections(
 	lastBackupVersion int,
 	ctrlOpts control.Options,
 	errs *fault.Bus,
-) ([]data.BackupCollection, prefixmatcher.StringSetReader, bool, error) {
+) (inject.BackupProducerResults, error) {
 	progressBar := observe.MessageWithCompletion(ctx, "Discovering items to backup")
 	defer close(progressBar)
 

@@ -1,7 +1,12 @@
 package inject
 
 import (
+	"context"
+	"strconv"
+
+	"github.com/alcionai/corso/src/cli/print"
 	"github.com/alcionai/corso/src/internal/common/idname"
+	"github.com/alcionai/corso/src/internal/common/prefixmatcher"
 	"github.com/alcionai/corso/src/internal/data"
 	"github.com/alcionai/corso/src/pkg/control"
 	"github.com/alcionai/corso/src/pkg/selectors"
@@ -28,3 +33,86 @@ type BackupProducerConfig struct {
 	ProtectedResource idname.Provider
 	Selector          selectors.Selector
 }
+
+type BackupProducerResults struct {
+	Collections          []data.BackupCollection
+	Excludes             prefixmatcher.StringSetReader
+	CanUsePreviousBackup bool
+	DiscoveredItems      Stats
+}
+
+// Stats is a oneOf that contains service specific
+// information
+type Stats struct {
+	Exchange   *ExchangeStats   `json:"exchange,omitempty"`
+	SharePoint *SharePointStats `json:"sharePoint,omitempty"`
+	OneDrive   *OneDriveStats   `json:"oneDrive,omitempty"`
+	Groups     *GroupsStats     `json:"groups,omitempty"`
+}
+
+type ExchangeStats struct {
+	ContactsAdded   int `json:"contactsAdded,omitempty"`
+	ContactsDeleted int `json:"contactsDeleted,omitempty"`
+	ContactFolders  int `json:"contactFolders,omitempty"`
+
+	EventsAdded   int `json:"eventsAdded,omitempty"`
+	EventsDeleted int `json:"eventsDeleted,omitempty"`
+	EventFolders  int `json:"eventFolders,omitempty"`
+
+	EmailsAdded   int `json:"emailsAdded,omitempty"`
+	EmailsDeleted int `json:"emailsDeleted,omitempty"`
+	EmailFolders  int `json:"emailFolders,omitempty"`
+}
+
+type SharePointStats struct {
+}
+type OneDriveStats struct {
+	Folders int `json:"folders,omitempty"`
+	Items   int `json:"items,omitempty"`
+}
+type GroupsStats struct {
+}
+
+// interface compliance checks
+var _ print.Printable = &Stats{}
+
+// Print writes the Backup to StdOut, in the format requested by the caller.
+func (s Stats) Print(ctx context.Context) {
+	print.Item(ctx, s)
+}
+
+// MinimumPrintable reduces the Backup to its minimally printable details.
+func (s Stats) MinimumPrintable() any {
+	return s
+}
+
+// Headers returns the human-readable names of properties in a Backup
+// for printing out to a terminal in a columnar display.
+func (s Stats) Headers() []string {
+	switch {
+	case s.OneDrive != nil:
+		return []string{
+			"Folders",
+			"Items",
+		}
+
+	default:
+		return []string{}
+	}
+}
+
+// Values returns the values matching the Headers list for printing
+// out to a terminal in a columnar display.
+func (s Stats) Values() []string {
+	switch {
+	case s.OneDrive != nil:
+		return []string{
+			strconv.Itoa(s.OneDrive.Folders),
+			strconv.Itoa(s.OneDrive.Items),
+		}
+
+	default:
+		return []string{}
+
+	}
+}
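Stats acts as a oneOf: callers set exactly one service pointer, and Headers/Values dispatch on whichever arm is non-nil, with only OneDrive carrying columns so far. A self-contained sketch of that dispatch using simplified local types:

package main

import (
	"fmt"
	"strconv"
)

// Simplified copy of the oneOf shape from inject.Stats, OneDrive arm only.
type oneDriveStats struct{ Folders, Items int }

type stats struct{ OneDrive *oneDriveStats }

// Headers and Values dispatch on whichever service pointer is set; only the
// OneDrive arm has columns so far, matching the diff.
func (s stats) Headers() []string {
	if s.OneDrive != nil {
		return []string{"Folders", "Items"}
	}

	return []string{}
}

func (s stats) Values() []string {
	if s.OneDrive != nil {
		return []string{
			strconv.Itoa(s.OneDrive.Folders),
			strconv.Itoa(s.OneDrive.Items),
		}
	}

	return []string{}
}

func main() {
	s := stats{OneDrive: &oneDriveStats{Folders: 4, Items: 128}}
	fmt.Println(s.Headers(), s.Values()) // [Folders Items] [4 128]
}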
@@ -4,7 +4,6 @@ import (
 	"context"
 
 	"github.com/alcionai/corso/src/internal/common/idname"
-	"github.com/alcionai/corso/src/internal/common/prefixmatcher"
 	"github.com/alcionai/corso/src/internal/data"
 	"github.com/alcionai/corso/src/internal/kopia"
 	"github.com/alcionai/corso/src/internal/kopia/inject"
@@ -24,7 +23,7 @@ type (
 		ctx context.Context,
 		bpc BackupProducerConfig,
 		errs *fault.Bus,
-	) ([]data.BackupCollection, prefixmatcher.StringSetReader, bool, error)
+	) (BackupProducerResults, error)
 
 	IsServiceEnableder
 

@@ -5,7 +5,6 @@ import (
 
 	"github.com/alcionai/clues"
 
-	"github.com/alcionai/corso/src/internal/common/prefixmatcher"
 	"github.com/alcionai/corso/src/internal/data"
 	"github.com/alcionai/corso/src/internal/kopia"
 	kinject "github.com/alcionai/corso/src/internal/kopia/inject"
@@ -39,12 +38,12 @@ func (mbp *mockBackupProducer) ProduceBackupCollections(
 	context.Context,
 	inject.BackupProducerConfig,
 	*fault.Bus,
-) ([]data.BackupCollection, prefixmatcher.StringSetReader, bool, error) {
+) (inject.BackupProducerResults, error) {
 	if mbp.injectNonRecoverableErr {
-		return nil, nil, false, clues.New("non-recoverable error")
+		return inject.BackupProducerResults{}, clues.New("non-recoverable error")
 	}
 
-	return mbp.colls, nil, true, nil
+	return inject.BackupProducerResults{Collections: mbp.colls, Excludes: nil, CanUsePreviousBackup: true}, nil
 }
 
 func (mbp *mockBackupProducer) IsServiceEnabled(

@@ -17,6 +17,7 @@ type Options struct {
 	Repo           repository.Options `json:"repo"`
 	SkipReduce     bool               `json:"skipReduce"`
 	ToggleFeatures Toggles            `json:"toggleFeatures"`
+	DryRun         bool               `json:"dryRun"`
 }
 
 type Parallelism struct {

@@ -192,7 +192,8 @@ type ConnConfig struct {
 	// tells the data provider which service to
 	// use for its connection pattern. Leave empty
 	// to skip the provider connection.
 	Service path.ServiceType
+	ReadOnly bool
 }
 
 // Connect will: