use counter in exchange backup enum (#4661)
Propagate the count bus more widely to record runtime stats.

---

#### Does this PR need a docs update or release note?

- [x] ⛔ No

#### Type of change

- [x] 🤖 Supportability/Tests

#### Test Plan

- [x] ⚡ Unit test
- [x] 💚 E2E
Parent: 7a21424ca7
Commit: 9ecefd2569
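At a glance, the diff threads a `*count.Bus` from the top-level backup controller down through collections, lazy items, and the drive URL cache. A minimal sketch of the bus API as this PR exercises it — `New`, `Inc`, `Add`, `Local`, and `Values` all appear in the hunks below; that `Local()` rolls child tallies up into the parent is an assumption here:

```go
package main

import (
	"fmt"

	"github.com/alcionai/corso/src/pkg/count"
)

func main() {
	// One root bus per backup operation.
	counter := count.New()

	// Inc records discrete events; Add accumulates sized values.
	counter.Inc(count.CollectionNew)
	counter.Add(count.StreamBytesAdded, 4096)

	// Local() derives a child bus for per-drive or per-collection
	// tallies (assumed here to feed the parent's totals).
	cl := counter.Local()
	cl.Inc(count.CollectionMoved)

	// Values() snapshots everything recorded so far, for logging.
	fmt.Println(counter.Values())
}
```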
```diff
@@ -10,6 +10,7 @@ import (
 
     "github.com/alcionai/corso/src/internal/tester"
     "github.com/alcionai/corso/src/pkg/control"
+    "github.com/alcionai/corso/src/pkg/count"
     "github.com/alcionai/corso/src/pkg/path"
 )
 
@@ -66,7 +67,7 @@ func (suite *CollectionSuite) TestStateOf() {
     }
     for _, test := range table {
         suite.Run(test.name, func() {
-            state := StateOf(test.prev, test.curr)
+            state := StateOf(test.prev, test.curr, count.New())
             assert.Equal(suite.T(), test.expect, state)
         })
     }
@@ -142,7 +143,8 @@ func (suite *CollectionSuite) TestNewBaseCollection() {
                 test.previous,
                 loc,
                 control.Options{},
-                test.doNotMerge)
+                test.doNotMerge,
+                count.New())
 
             assert.Equal(t, test.expectCurrent, b.FullPath(), "full path")
             assert.Equal(t, test.expectPrev, b.PreviousPath(), "previous path")
@@ -160,7 +162,7 @@ func (suite *CollectionSuite) TestNewTombstoneCollection() {
     fooP, err := path.Build("t", "u", path.ExchangeService, path.EmailCategory, false, "foo")
     require.NoError(t, err, clues.ToCore(err))
 
-    c := NewTombstoneCollection(fooP, control.Options{})
+    c := NewTombstoneCollection(fooP, control.Options{}, count.New())
     assert.Nil(t, c.FullPath(), "full path")
     assert.Equal(t, fooP, c.PreviousPath(), "previous path")
     assert.Nil(t, c.LocationPath(), "location path")
```
```diff
@@ -6,6 +6,7 @@ import (
     "github.com/alcionai/clues"
 
     "github.com/alcionai/corso/src/pkg/control"
+    "github.com/alcionai/corso/src/pkg/count"
     "github.com/alcionai/corso/src/pkg/fault"
     "github.com/alcionai/corso/src/pkg/path"
 )
@@ -38,19 +39,27 @@ func (c NoFetchRestoreCollection) FetchItemByName(context.Context, string) (Item
 
 // StateOf lets us figure out the state of the collection from the
 // previous and current path
-func StateOf(prev, curr path.Path) CollectionState {
+func StateOf(
+    prev, curr path.Path,
+    counter *count.Bus,
+) CollectionState {
     if curr == nil || len(curr.String()) == 0 {
+        counter.Inc(count.CollectionTombstoned)
         return DeletedState
     }
 
     if prev == nil || len(prev.String()) == 0 {
+        counter.Inc(count.CollectionNew)
         return NewState
     }
 
     if curr.String() != prev.String() {
+        counter.Inc(count.CollectionMoved)
         return MovedState
     }
 
+    counter.Inc(count.CollectionNotMoved)
+
     return NotMovedState
 }
 
@@ -63,6 +72,7 @@ func NewBaseCollection(
     location *path.Builder,
     ctrlOpts control.Options,
     doNotMergeItems bool,
+    counter *count.Bus,
 ) BaseCollection {
     return BaseCollection{
         opts: ctrlOpts,
@@ -70,7 +80,8 @@ func NewBaseCollection(
         fullPath:     curr,
         locationPath: location,
         prevPath:     prev,
-        state:        StateOf(prev, curr),
+        state:        StateOf(prev, curr, counter),
+        Counter:      counter.Local(),
     }
 }
 
@@ -98,6 +109,8 @@ type BaseCollection struct {
 
     // doNotMergeItems should only be true if the old delta token expired.
     doNotMergeItems bool
+
+    Counter *count.Bus
 }
 
 // FullPath returns the BaseCollection's fullPath []string
@@ -145,9 +158,16 @@ func (col BaseCollection) Opts() control.Options {
 func NewTombstoneCollection(
     prev path.Path,
     opts control.Options,
+    counter *count.Bus,
 ) *tombstoneCollection {
     return &tombstoneCollection{
-        BaseCollection: NewBaseCollection(nil, prev, nil, opts, false),
+        BaseCollection: NewBaseCollection(
+            nil,
+            prev,
+            nil,
+            opts,
+            false,
+            counter),
     }
 }
```
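`StateOf` now tallies which state it resolves as a side effect, so a run's tombstoned/new/moved/not-moved mix lands in the stats with no extra bookkeeping by callers. A sketch of the call shape under the new signature (names taken from the hunks above; the wrapper function itself is hypothetical):

```go
// resolveAndCount resolves the collection state while the bus records
// which branch StateOf took.
func resolveAndCount(ctx context.Context, prev, curr path.Path) data.CollectionState {
	counter := count.New()

	// Exactly one of count.CollectionTombstoned / CollectionNew /
	// CollectionMoved / CollectionNotMoved is incremented per call.
	state := data.StateOf(prev, curr, counter)

	logger.Ctx(ctx).Infow("resolved state", "stats", counter.Values())

	return state
}
```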
```diff
@@ -11,6 +11,7 @@ import (
 
     "github.com/alcionai/corso/src/internal/common/readers"
     "github.com/alcionai/corso/src/pkg/backup/details"
+    "github.com/alcionai/corso/src/pkg/count"
     "github.com/alcionai/corso/src/pkg/fault"
     "github.com/alcionai/corso/src/pkg/logger"
 )
@@ -136,6 +137,7 @@ func NewLazyItem(
     itemGetter ItemDataGetter,
     itemID string,
     modTime time.Time,
+    counter *count.Bus,
     errs *fault.Bus,
 ) *lazyItem {
     return &lazyItem{
@@ -143,6 +145,7 @@ func NewLazyItem(
         id:         itemID,
         itemGetter: itemGetter,
         modTime:    modTime,
+        counter:    counter,
         errs:       errs,
     }
 }
@@ -157,6 +160,7 @@ type lazyItem struct {
     ctx        context.Context
     mu         sync.Mutex
     id         string
+    counter    *count.Bus
     errs       *fault.Bus
     itemGetter ItemDataGetter
 
@@ -203,6 +207,7 @@ func (i *lazyItem) ToReader() io.ReadCloser {
     // etc.) and the item isn't enumerated in that set.
     if delInFlight {
         logger.Ctx(i.ctx).Info("item not found")
+        i.counter.Inc(count.LazyDeletedInFlight)
 
         i.delInFlight = true
         format.DelInFlight = true
@@ -232,6 +237,7 @@ func NewLazyItemWithInfo(
     itemGetter ItemDataGetter,
     itemID string,
     modTime time.Time,
+    counter *count.Bus,
     errs *fault.Bus,
 ) *lazyItemWithInfo {
     return &lazyItemWithInfo{
@@ -240,6 +246,7 @@ func NewLazyItemWithInfo(
             itemGetter,
             itemID,
             modTime,
+            counter,
             errs),
     }
 }
```
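Lazy items defer the content fetch until `ToReader` runs, which is the first moment a mid-backup deletion can be observed; the bus now records those as `count.LazyDeletedInFlight`. A sketch of constructing one under the new signature — `getter` stands in for any `data.ItemDataGetter` implementation, and the wrapper is hypothetical:

```go
// newCountedLazyItem builds a lazy item that reports read-time
// deletions to the provided bus.
func newCountedLazyItem(
	ctx context.Context,
	getter data.ItemDataGetter, // hypothetical implementation
	counter *count.Bus,
	errs *fault.Bus,
) data.Item {
	// If the item turns out to be deleted when ToReader() finally
	// fetches it, count.LazyDeletedInFlight is incremented before the
	// item flags itself as deleted in flight.
	return data.NewLazyItemWithInfo(ctx, getter, "item-id", time.Now(), counter, errs)
}
```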
```diff
@@ -16,6 +16,7 @@ import (
     "github.com/alcionai/corso/src/internal/data"
     "github.com/alcionai/corso/src/internal/tester"
     "github.com/alcionai/corso/src/pkg/backup/details"
+    "github.com/alcionai/corso/src/pkg/count"
     "github.com/alcionai/corso/src/pkg/fault"
 )
 
@@ -74,6 +75,7 @@ func (suite *ItemUnitSuite) TestUnindexedLazyItem() {
         nil,
         "foo",
         time.Time{},
+        count.New(),
         fault.New(true))
 
     var item data.Item = lazy
@@ -296,6 +298,7 @@ func (suite *ItemUnitSuite) TestLazyItem() {
         test.mid,
         id,
         now,
+        count.New(),
         errs)
 
     assert.Equal(t, id, item.ID(), "ID")
@@ -354,7 +357,7 @@ func (suite *ItemUnitSuite) TestLazyItem_DeletedInFlight() {
     mid := &mockItemDataGetter{delInFlight: true}
     defer mid.check(t, true)
 
-    item := data.NewLazyItemWithInfo(ctx, mid, id, now, errs)
+    item := data.NewLazyItemWithInfo(ctx, mid, id, now, count.New(), errs)
 
     assert.Equal(t, id, item.ID(), "ID")
     assert.False(t, item.Deleted(), "deleted")
@@ -400,7 +403,7 @@ func (suite *ItemUnitSuite) TestLazyItem_InfoBeforeReadErrors() {
     mid := &mockItemDataGetter{}
     defer mid.check(t, false)
 
-    item := data.NewLazyItemWithInfo(ctx, mid, id, now, errs)
+    item := data.NewLazyItemWithInfo(ctx, mid, id, now, count.New(), errs)
 
     assert.Equal(t, id, item.ID(), "ID")
     assert.False(t, item.Deleted(), "deleted")
```
```diff
@@ -58,37 +58,39 @@ func (ctrl *Controller) ProduceBackupCollections(
 
     var (
         colls                []data.BackupCollection
-        ssmb                 *prefixmatcher.StringSetMatcher
+        excludeItems         *prefixmatcher.StringSetMatcher
         canUsePreviousBackup bool
     )
 
     switch service {
     case path.ExchangeService:
-        colls, ssmb, canUsePreviousBackup, err = exchange.ProduceBackupCollections(
+        colls, excludeItems, canUsePreviousBackup, err = exchange.ProduceBackupCollections(
             ctx,
             bpc,
             ctrl.AC,
-            ctrl.credentials.AzureTenantID,
+            ctrl.credentials,
             ctrl.UpdateStatus,
+            counter,
             errs)
         if err != nil {
             return nil, nil, false, err
         }
 
     case path.OneDriveService:
-        colls, ssmb, canUsePreviousBackup, err = onedrive.ProduceBackupCollections(
+        colls, excludeItems, canUsePreviousBackup, err = onedrive.ProduceBackupCollections(
             ctx,
             bpc,
             ctrl.AC,
-            ctrl.credentials.AzureTenantID,
+            ctrl.credentials,
             ctrl.UpdateStatus,
+            counter,
             errs)
         if err != nil {
             return nil, nil, false, err
         }
 
     case path.SharePointService:
-        colls, ssmb, canUsePreviousBackup, err = sharepoint.ProduceBackupCollections(
+        colls, excludeItems, canUsePreviousBackup, err = sharepoint.ProduceBackupCollections(
             ctx,
             bpc,
             ctrl.AC,
@@ -101,12 +103,13 @@ func (ctrl *Controller) ProduceBackupCollections(
         }
 
     case path.GroupsService:
-        colls, ssmb, err = groups.ProduceBackupCollections(
+        colls, excludeItems, err = groups.ProduceBackupCollections(
             ctx,
             bpc,
             ctrl.AC,
             ctrl.credentials,
             ctrl.UpdateStatus,
+            counter,
             errs)
         if err != nil {
             return nil, nil, false, err
@@ -132,7 +135,7 @@ func (ctrl *Controller) ProduceBackupCollections(
         }
     }
 
-    return colls, ssmb, canUsePreviousBackup, nil
+    return colls, excludeItems, canUsePreviousBackup, nil
 }
 
 func (ctrl *Controller) IsServiceEnabled(
```
```diff
@@ -143,8 +143,9 @@ func (suite *DataCollectionIntgSuite) TestExchangeDataCollection() {
         ctx,
         bpc,
         suite.ac,
-        suite.tenantID,
+        suite.ac.Credentials,
         ctrl.UpdateStatus,
+        count.New(),
         fault.New(true))
     require.NoError(t, err, clues.ToCore(err))
     assert.True(t, canUsePreviousBackup, "can use previous backup")
```
```diff
@@ -1,4 +1,3 @@
-// Package drive provides support for retrieving M365 Drive objects
 package drive
 
 import (
@@ -22,6 +21,7 @@ import (
     "github.com/alcionai/corso/src/internal/observe"
     "github.com/alcionai/corso/src/pkg/backup/details"
     "github.com/alcionai/corso/src/pkg/control"
+    "github.com/alcionai/corso/src/pkg/count"
     "github.com/alcionai/corso/src/pkg/extensions"
     "github.com/alcionai/corso/src/pkg/fault"
     "github.com/alcionai/corso/src/pkg/logger"
@@ -88,6 +88,8 @@ type Collection struct {
     doNotMergeItems bool
 
     urlCache getItemPropertyer
+
+    counter *count.Bus
 }
 
 func pathToLocation(p path.Path) (*path.Builder, error) {
@@ -115,6 +117,7 @@ func NewCollection(
     isPackageOrChildOfPackage bool,
     doNotMergeItems bool,
     urlCache getItemPropertyer,
+    counter *count.Bus,
 ) (*Collection, error) {
     // TODO(ashmrtn): If OneDrive switches to using folder IDs then this will need
     // to be changed as we won't be able to extract path information from the
@@ -140,7 +143,8 @@ func NewCollection(
         ctrlOpts,
         isPackageOrChildOfPackage,
         doNotMergeItems,
-        urlCache)
+        urlCache,
+        counter)
 
     c.locPath = locPath
     c.prevLocPath = prevLocPath
@@ -159,6 +163,7 @@ func newColl(
     isPackageOrChildOfPackage bool,
     doNotMergeItems bool,
     urlCache getItemPropertyer,
+    counter *count.Bus,
 ) *Collection {
     dataCh := make(chan data.Item, graph.Parallelism(path.OneDriveMetadataService).CollectionBufferSize())
 
@@ -172,10 +177,11 @@ func newColl(
         data:                      dataCh,
         statusUpdater:             statusUpdater,
         ctrl:                      ctrlOpts,
-        state:                     data.StateOf(prevPath, currPath),
+        state:                     data.StateOf(prevPath, currPath, counter),
         isPackageOrChildOfPackage: isPackageOrChildOfPackage,
         doNotMergeItems:           doNotMergeItems,
         urlCache:                  urlCache,
+        counter:                   counter,
     }
 
     return c
@@ -228,7 +234,7 @@ func (oc Collection) PreviousPath() path.Path {
 
 func (oc *Collection) SetFullPath(curPath path.Path) {
     oc.folderPath = curPath
-    oc.state = data.StateOf(oc.prevPath, curPath)
+    oc.state = data.StateOf(oc.prevPath, curPath, oc.counter)
 }
 
 func (oc Collection) LocationPath() *path.Builder {
@@ -263,7 +269,13 @@ func (oc *Collection) getDriveItemContent(
         itemName = ptr.Val(item.GetName())
     )
 
-    itemData, err := downloadContent(ctx, oc.handler, oc.urlCache, item, oc.driveID)
+    itemData, err := downloadContent(
+        ctx,
+        oc.handler,
+        oc.urlCache,
+        item,
+        oc.driveID,
+        oc.counter)
     if err != nil {
         if clues.HasLabel(err, graph.LabelsMalware) || (item != nil && item.GetMalware() != nil) {
             logger.CtxErr(ctx, err).With("skipped_reason", fault.SkipMalware).Info("item flagged as malware")
@@ -334,6 +346,7 @@ func downloadContent(
     uc getItemPropertyer,
     item models.DriveItemable,
     driveID string,
+    counter *count.Bus,
 ) (io.ReadCloser, error) {
     itemID := ptr.Val(item.GetId())
     ctx = clues.Add(ctx, "item_id", itemID)
@@ -359,6 +372,7 @@ func downloadContent(
     // to preserve existing behavior. Fallback to refetching the item using the
     // API.
     logger.CtxErr(ctx, err).Info("url cache miss: refetching from API")
+    counter.Inc(count.ItemDownloadURLRefetch)
 
     di, err := iaag.GetItem(ctx, driveID, ptr.Val(item.GetId()))
     if err != nil {
@@ -428,7 +442,9 @@ func (oc *Collection) streamItems(ctx context.Context, errs *fault.Bus) {
     // `details.OneDriveInfo`
     parentPath, err := path.GetDriveFolderPath(oc.folderPath)
     if err != nil {
+        logger.CtxErr(ctx, err).Info("getting drive folder path")
         oc.reportAsCompleted(ctx, 0, 0, 0)
+
         return
     }
 
@@ -559,10 +575,15 @@ func (oc *Collection) streamDriveItem(
     if isFile {
         atomic.AddInt64(&stats.itemsFound, 1)
 
+        if oc.counter.Inc(count.StreamItemsFound)%1000 == 0 {
+            logger.Ctx(ctx).Infow("item stream progress", "stats", oc.counter.Values())
+        }
+
         metaFileName = itemID
         metaSuffix = metadata.MetaFileSuffix
     } else {
         atomic.AddInt64(&stats.dirsFound, 1)
+        oc.counter.Inc(count.StreamDirsFound)
 
         // metaFileName not set for directories so we get just ".dirmeta"
         metaSuffix = metadata.DirMetaFileSuffix
@@ -588,6 +609,15 @@ func (oc *Collection) streamDriveItem(
 
     ctx = clues.Add(ctx, "item_info", itemInfo)
 
+    // Drive content download requests are also rate limited by graph api.
+    // Ensure that this request goes through the drive limiter & not the default
+    // limiter.
+    ctx = graph.BindRateLimiterConfig(
+        ctx,
+        graph.LimiterCfg{
+            Service: path.OneDriveService,
+        })
+
     if isFile {
         dataSuffix := metadata.DataFileSuffix
 
@@ -607,6 +637,7 @@ func (oc *Collection) streamDriveItem(
         },
         itemID+dataSuffix,
         itemInfo.Modified(),
+        oc.counter,
         errs)
     }
 
@@ -640,11 +671,14 @@ func (oc *Collection) streamDriveItem(
 
     // Item read successfully, add to collection
     if isFile {
+        oc.counter.Inc(count.StreamItemsAdded)
         atomic.AddInt64(&stats.itemsRead, 1)
     } else {
+        oc.counter.Inc(count.StreamDirsAdded)
         atomic.AddInt64(&stats.dirsRead, 1)
     }
 
+    oc.counter.Add(count.StreamBytesAdded, itemSize)
     atomic.AddInt64(&stats.byteCount, itemSize)
 }
```
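One detail worth noting in `streamDriveItem` above: `Inc` returns the post-increment total, so the same call that records the stat doubles as a sampling gate — a stats snapshot is logged once per 1000 discovered items rather than per item. The pattern in isolation (a sketch; the key type name `count.Key` is an assumption, since the diff never shows it):

```go
// incAndMaybeLog bumps key and emits a progress snapshot every nth hit.
func incAndMaybeLog(ctx context.Context, counter *count.Bus, key count.Key, nth int64) {
	if counter.Inc(key)%nth == 0 {
		logger.Ctx(ctx).Infow("item stream progress", "stats", counter.Values())
	}
}
```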
```diff
@@ -29,6 +29,7 @@ import (
     "github.com/alcionai/corso/src/internal/tester"
     "github.com/alcionai/corso/src/pkg/backup/details"
     "github.com/alcionai/corso/src/pkg/control"
+    "github.com/alcionai/corso/src/pkg/count"
     "github.com/alcionai/corso/src/pkg/extensions"
     "github.com/alcionai/corso/src/pkg/fault"
     "github.com/alcionai/corso/src/pkg/path"
@@ -215,7 +216,8 @@ func (suite *CollectionUnitSuite) TestCollection() {
         control.Options{ToggleFeatures: control.Toggles{}},
         false,
         true,
-        nil)
+        nil,
+        count.New())
     require.NoError(t, err, clues.ToCore(err))
     require.NotNil(t, coll)
     assert.Equal(t, folderPath, coll.FullPath())
@@ -337,7 +339,8 @@ func (suite *CollectionUnitSuite) TestCollectionReadError() {
         control.Options{ToggleFeatures: control.Toggles{}},
         false,
         true,
-        nil)
+        nil,
+        count.New())
     require.NoError(t, err, clues.ToCore(err))
 
     stubItem := odTD.NewStubDriveItem(
@@ -415,7 +418,8 @@ func (suite *CollectionUnitSuite) TestCollectionReadUnauthorizedErrorRetry() {
         control.Options{ToggleFeatures: control.Toggles{}},
         false,
         true,
-        nil)
+        nil,
+        count.New())
     require.NoError(t, err, clues.ToCore(err))
 
     coll.Add(stubItem)
@@ -471,7 +475,8 @@ func (suite *CollectionUnitSuite) TestCollectionPermissionBackupLatestModTime()
         control.Options{ToggleFeatures: control.Toggles{}},
         false,
         true,
-        nil)
+        nil,
+        count.New())
     require.NoError(t, err, clues.ToCore(err))
 
     mtime := time.Now().AddDate(0, -1, 0)
@@ -814,7 +819,7 @@ func (suite *GetDriveItemUnitTestSuite) TestDownloadContent() {
         mbh.GetResps = resps
         mbh.GetErrs = test.getErr
 
-        r, err := downloadContent(ctx, mbh, test.muc, item, driveID)
+        r, err := downloadContent(ctx, mbh, test.muc, item, driveID, count.New())
         test.expect(t, r)
         test.expectErr(t, err, clues.ToCore(err))
     })
@@ -1002,7 +1007,8 @@ func (suite *CollectionUnitSuite) TestItemExtensions() {
         opts,
         false,
         true,
-        nil)
+        nil,
+        count.New())
     require.NoError(t, err, clues.ToCore(err))
 
     stubItem := odTD.NewStubDriveItem(
```
```diff
@@ -19,6 +19,7 @@ import (
     "github.com/alcionai/corso/src/internal/m365/support"
     bupMD "github.com/alcionai/corso/src/pkg/backup/metadata"
     "github.com/alcionai/corso/src/pkg/control"
+    "github.com/alcionai/corso/src/pkg/count"
     "github.com/alcionai/corso/src/pkg/fault"
     "github.com/alcionai/corso/src/pkg/filters"
     "github.com/alcionai/corso/src/pkg/logger"
@@ -51,6 +52,8 @@ type Collections struct {
     NumItems      int
     NumFiles      int
     NumContainers int
+
+    counter *count.Bus
 }
 
 func NewCollections(
@@ -59,6 +62,7 @@ func NewCollections(
     protectedResource idname.Provider,
     statusUpdater support.StatusUpdater,
     ctrlOpts control.Options,
+    counter *count.Bus,
 ) *Collections {
     return &Collections{
         handler: bh,
@@ -67,6 +71,7 @@ func NewCollections(
         CollectionMap: map[string]map[string]*Collection{},
         statusUpdater: statusUpdater,
         ctrl:          ctrlOpts,
+        counter:       counter,
     }
 }
 
@@ -79,6 +84,7 @@ func (c *Collections) resetStats() {
 func deserializeAndValidateMetadata(
     ctx context.Context,
     cols []data.RestoreCollection,
+    counter *count.Bus,
     fb *fault.Bus,
 ) (map[string]string, map[string]map[string]string, bool, error) {
     deltas, prevs, canUse, err := DeserializeMetadata(ctx, cols)
@@ -117,7 +123,7 @@ func deserializeAndValidateMetadata(
         }
     }
 
-    alertIfPrevPathsHaveCollisions(ctx, prevs, fb)
+    alertIfPrevPathsHaveCollisions(ctx, prevs, counter, fb)
 
     return deltas, prevs, canUse, nil
 }
@@ -125,6 +131,7 @@ func deserializeAndValidateMetadata(
 func alertIfPrevPathsHaveCollisions(
     ctx context.Context,
     prevs map[string]map[string]string,
+    counter *count.Bus,
     fb *fault.Bus,
 ) {
     for driveID, folders := range prevs {
@@ -150,6 +157,8 @@ func alertIfPrevPathsHaveCollisions(
                 "collision_drive_id":  driveID,
                 "collision_prev_path": prev,
             }))
+
+            counter.Inc(count.PreviousPathMetadataCollision)
         }
 
         prevPathCollisions[prev] = fid
@@ -274,7 +283,11 @@ func (c *Collections) Get(
     ssmb *prefixmatcher.StringSetMatchBuilder,
     errs *fault.Bus,
 ) ([]data.BackupCollection, bool, error) {
-    deltasByDriveID, prevPathsByDriveID, canUsePrevBackup, err := deserializeAndValidateMetadata(ctx, prevMetadata, errs)
+    deltasByDriveID, prevPathsByDriveID, canUsePrevBackup, err := deserializeAndValidateMetadata(
+        ctx,
+        prevMetadata,
+        c.counter,
+        errs)
     if err != nil {
         return nil, false, err
     }
@@ -295,6 +308,9 @@ func (c *Collections) Get(
         return nil, false, err
     }
 
+    c.counter.Add(count.Drives, int64(len(drives)))
+    c.counter.Add(count.PrevDeltas, int64(len(deltasByDriveID)))
+
     var (
         driveIDToDeltaLink = map[string]string{}
         driveIDToPrevPaths = map[string]map[string]string{}
@@ -303,6 +319,7 @@ func (c *Collections) Get(
 
     for _, d := range drives {
         var (
+            cl        = c.counter.Local()
             driveID   = ptr.Val(d.GetId())
             driveName = ptr.Val(d.GetName())
             ictx      = clues.Add(
@@ -322,6 +339,8 @@ func (c *Collections) Get(
             packagePaths = map[string]struct{}{}
         )
 
+        ictx = clues.AddLabelCounter(ictx, cl.PlainAdder())
+
         delete(driveTombstones, driveID)
 
         if _, ok := driveIDToPrevPaths[driveID]; !ok {
@@ -332,6 +351,7 @@ func (c *Collections) Get(
             c.CollectionMap[driveID] = map[string]*Collection{}
         }
 
+        cl.Add(count.PrevPaths, int64(len(oldPrevPaths)))
         logger.Ctx(ictx).Infow(
             "previous metadata for drive",
             "count_old_prev_paths", len(oldPrevPaths))
@@ -344,6 +364,7 @@ func (c *Collections) Get(
             excludedItemIDs,
             packagePaths,
             prevDeltaLink,
+            cl.Local(),
             errs)
         if err != nil {
             return nil, false, clues.Stack(err)
@@ -373,6 +394,8 @@ func (c *Collections) Get(
         numDriveItems := c.NumItems - numPrevItems
         numPrevItems = c.NumItems
 
+        cl.Add(count.NewPrevPaths, int64(len(newPrevPaths)))
+
         // Attach an url cache to the drive if the number of discovered items is
         // below the threshold. Attaching cache to larger drives can cause
         // performance issues since cache delta queries start taking up majority of
@@ -387,6 +410,7 @@ func (c *Collections) Get(
             prevDeltaLink,
             urlCacheRefreshInterval,
             c.handler,
+            cl,
             errs)
         if err != nil {
             return nil, false, clues.Stack(err)
@@ -446,7 +470,8 @@ func (c *Collections) Get(
             c.ctrl,
             false,
             true,
-            nil)
+            nil,
+            cl.Local())
         if err != nil {
             return nil, false, clues.WrapWC(ictx, err, "making collection")
         }
@@ -464,11 +489,13 @@ func (c *Collections) Get(
         }
     }
 
+    c.counter.Add(count.DriveTombstones, int64(len(driveTombstones)))
+
     // generate tombstones for drives that were removed.
     for driveID := range driveTombstones {
         prevDrivePath, err := c.handler.PathPrefix(c.tenantID, driveID)
         if err != nil {
-            return nil, false, clues.WrapWC(ctx, err, "making drive tombstone for previous path")
+            return nil, false, clues.WrapWC(ctx, err, "making drive tombstone for previous path").Label(count.BadPathPrefix)
         }
 
         coll, err := NewCollection(
@@ -481,7 +508,8 @@ func (c *Collections) Get(
             c.ctrl,
             false,
             true,
-            nil)
+            nil,
+            c.counter.Local())
         if err != nil {
             return nil, false, clues.WrapWC(ctx, err, "making drive tombstone")
         }
@@ -489,7 +517,7 @@ func (c *Collections) Get(
         collections = append(collections, coll)
     }
 
-    alertIfPrevPathsHaveCollisions(ctx, driveIDToPrevPaths, errs)
+    alertIfPrevPathsHaveCollisions(ctx, driveIDToPrevPaths, c.counter, errs)
 
     // add metadata collections
     pathPrefix, err := c.handler.MetadataPathPrefix(c.tenantID)
```
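`Get` gives each drive its own child bus (`cl := c.counter.Local()`) and hooks that bus into the context with `clues.AddLabelCounter(ictx, cl.PlainAdder())`, so lower layers that only receive the context can still feed the same per-drive tallies. A sketch of that per-unit setup, assuming `Local` and `PlainAdder` behave as used in the hunk; the helper itself is hypothetical:

```go
// perDriveCounters derives a local bus for one drive's stats and wires
// it into the context for lower layers.
func (c *Collections) perDriveCounters(ctx context.Context, prevPathCount int) (*count.Bus, context.Context) {
	cl := c.counter.Local()

	// Errors labeled through this ctx (e.g. .Label(count.BadPrevPath))
	// are assumed to be tallied by the attached label counter.
	ictx := clues.AddLabelCounter(ctx, cl.PlainAdder())

	cl.Add(count.PrevPaths, int64(prevPathCount))

	return cl, ictx
}
```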
```diff
@@ -508,7 +536,8 @@ func (c *Collections) Get(
             graph.NewMetadataEntry(bupMD.PreviousPathFileName, driveIDToPrevPaths),
             graph.NewMetadataEntry(bupMD.DeltaURLsFileName, driveIDToDeltaLink),
         },
-        c.statusUpdater)
+        c.statusUpdater,
+        count.New())
 
     if err != nil {
         // Technically it's safe to continue here because the logic for starting an
@@ -563,13 +592,17 @@ func updateCollectionPaths(
 }
 
 func (c *Collections) handleDelete(
+    ctx context.Context,
     itemID, driveID string,
     oldPrevPaths, currPrevPaths, newPrevPaths map[string]string,
     isFolder bool,
     excluded map[string]struct{},
     invalidPrevDelta bool,
+    counter *count.Bus,
 ) error {
     if !isFolder {
+        counter.Inc(count.DeleteItemMarker)
+
         // Try to remove the item from the Collection if an entry exists for this
         // item. This handles cases where an item was created and deleted during the
         // same delta query.
@@ -597,6 +630,8 @@ func (c *Collections) handleDelete(
         return nil
     }
 
+    counter.Inc(count.DeleteFolderMarker)
+
     var prevPath path.Path
 
     prevPathStr, ok := oldPrevPaths[itemID]
@@ -605,11 +640,12 @@ func (c *Collections) handleDelete(
 
         prevPath, err = path.FromDataLayerPath(prevPathStr, false)
         if err != nil {
-            return clues.Wrap(err, "invalid previous path").
+            return clues.WrapWC(ctx, err, "invalid previous path").
                 With(
                     "drive_id", driveID,
                     "item_id", itemID,
-                    "path_string", prevPathStr)
+                    "path_string", prevPathStr).
+                Label(count.BadPrevPath)
         }
     }
 
@@ -644,7 +680,8 @@ func (c *Collections) handleDelete(
         false,
         // DoNotMerge is not checked for deleted items.
         false,
-        nil)
+        nil,
+        counter.Local())
     if err != nil {
         return clues.Wrap(err, "making collection").With(
             "drive_id", driveID,
@@ -716,6 +753,7 @@ func (c *Collections) PopulateDriveCollections(
     excludedItemIDs map[string]struct{},
     topLevelPackages map[string]struct{},
     prevDeltaLink string,
+    counter *count.Bus,
     errs *fault.Bus,
 ) (pagers.DeltaUpdate, map[string]string, error) {
     var (
@@ -755,7 +793,11 @@ func (c *Collections) PopulateDriveCollections(
             break
         }
 
+        counter.Inc(count.PagesEnumerated)
+
         if reset {
+            counter.Inc(count.PagerResets)
+
             ctx = clues.Add(ctx, "delta_reset_occurred", true)
             newPrevPaths = map[string]string{}
             currPrevPaths = map[string]string{}
@@ -783,6 +825,7 @@ func (c *Collections) PopulateDriveCollections(
             excludedItemIDs,
             topLevelPackages,
             invalidPrevDelta,
+            counter,
             el)
         if err != nil {
             el.AddRecoverable(ctx, clues.Stack(err))
@@ -795,6 +838,8 @@ func (c *Collections) PopulateDriveCollections(
         return du, nil, clues.Stack(err)
     }
 
+    logger.Ctx(ctx).Infow("populated collection", "stats", counter.Values())
+
     return du, newPrevPaths, el.Failure()
 }
 
@@ -807,6 +852,7 @@ func (c *Collections) processItem(
     excludedItemIDs map[string]struct{},
     topLevelPackages map[string]struct{},
     invalidPrevDelta bool,
+    counter *count.Bus,
     skipper fault.AddSkipper,
 ) error {
     var (
@@ -831,6 +877,7 @@ func (c *Collections) processItem(
 
         skipper.AddSkip(ctx, skip)
         logger.Ctx(ctx).Infow("malware detected", "item_details", addtl)
+        counter.Inc(count.Malware)
 
         return nil
     }
@@ -838,6 +885,7 @@ func (c *Collections) processItem(
     // Deleted file or folder.
     if item.GetDeleted() != nil {
         err := c.handleDelete(
+            ctx,
             itemID,
             driveID,
             oldPrevPaths,
@@ -845,20 +893,22 @@ func (c *Collections) processItem(
             newPrevPaths,
             isFolder,
             excludedItemIDs,
-            invalidPrevDelta)
+            invalidPrevDelta,
+            counter)
 
         return clues.StackWC(ctx, err).OrNil()
     }
 
     collectionPath, err := c.getCollectionPath(driveID, item)
     if err != nil {
-        return clues.StackWC(ctx, err).
-            Label(fault.LabelForceNoBackupCreation)
+        return clues.StackWC(ctx, err).Label(fault.LabelForceNoBackupCreation, count.BadCollPath)
     }
 
     // Skip items that don't match the folder selectors we were given.
     if shouldSkip(ctx, collectionPath, c.handler, driveName) {
+        counter.Inc(count.SkippedContainers)
         logger.Ctx(ctx).Debugw("path not selected", "skipped_path", collectionPath.String())
 
         return nil
     }
 
@@ -872,7 +922,8 @@ func (c *Collections) processItem(
         prevPath, err = path.FromDataLayerPath(prevPathStr, false)
         if err != nil {
             return clues.WrapWC(ctx, err, "invalid previous path").
-                With("prev_path_string", path.LoggableDir(prevPathStr))
+                With("prev_path_string", path.LoggableDir(prevPathStr)).
+                Label(count.BadPrevPath)
         }
     } else if item.GetRoot() != nil {
         // Root doesn't move or get renamed.
@@ -899,9 +950,12 @@ func (c *Collections) processItem(
 
     isPackage := item.GetPackageEscaped() != nil
     if isPackage {
+        counter.Inc(count.Packages)
         // mark this path as a package type for all other collections.
         // any subfolder should get marked as a childOfPackage below.
         topLevelPackages[collectionPath.String()] = struct{}{}
+    } else {
+        counter.Inc(count.Folders)
     }
 
     childOfPackage := filters.
@@ -944,7 +998,8 @@ func (c *Collections) processItem(
         c.ctrl,
         isPackage || childOfPackage,
         invalidPrevDelta || collPathAlreadyExists,
-        nil)
+        nil,
+        counter.Local())
     if err != nil {
         return clues.StackWC(ctx, err)
     }
@@ -966,9 +1021,11 @@ func (c *Collections) processItem(
     }
 
     case item.GetFile() != nil:
+        counter.Inc(count.Files)
+
         // Deletions are handled above so this is just moves/renames.
         if len(ptr.Val(item.GetParentReference().GetId())) == 0 {
-            return clues.NewWC(ctx, "file without parent ID")
+            return clues.NewWC(ctx, "file without parent ID").Label(count.MissingParent)
         }
 
         // Get the collection for this item.
@@ -977,7 +1034,7 @@ func (c *Collections) processItem(
 
         collection, ok := c.CollectionMap[driveID][parentID]
         if !ok {
-            return clues.NewWC(ctx, "item seen before parent folder")
+            return clues.NewWC(ctx, "item seen before parent folder").Label(count.ItemBeforeParent)
         }
 
         // This will only kick in if the file was moved multiple times
@@ -1021,7 +1078,7 @@ func (c *Collections) processItem(
 
     default:
         return clues.NewWC(ctx, "item is neither folder nor file").
-            Label(fault.LabelForceNoBackupCreation)
+            Label(fault.LabelForceNoBackupCreation, count.UnknownItemType)
     }
 
     return nil
```
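The other half of that wiring: error paths now attach count keys as clues labels (`count.BadPrevPath`, `count.BadCollPath`, `count.MissingParent`, `count.ItemBeforeParent`, `count.UnknownItemType`), so a failure is annotated and tallied in one chained call. The shape, lifted from the hunks above into a standalone sketch:

```go
// parsePrevPath shows the labeled-error pattern used in processItem.
func parsePrevPath(ctx context.Context, prevPathStr string) (path.Path, error) {
	prevPath, err := path.FromDataLayerPath(prevPathStr, false)
	if err != nil {
		// One chain: wrap with context, attach structured fields, and
		// label with a count key for the ctx-attached label counter.
		return nil, clues.WrapWC(ctx, err, "invalid previous path").
			With("prev_path_string", path.LoggableDir(prevPathStr)).
			Label(count.BadPrevPath)
	}

	return prevPath, nil
}
```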
```diff
@@ -25,6 +25,7 @@ import (
     "github.com/alcionai/corso/src/internal/tester"
     bupMD "github.com/alcionai/corso/src/pkg/backup/metadata"
     "github.com/alcionai/corso/src/pkg/control"
+    "github.com/alcionai/corso/src/pkg/count"
     "github.com/alcionai/corso/src/pkg/fault"
     "github.com/alcionai/corso/src/pkg/path"
     "github.com/alcionai/corso/src/pkg/selectors"
@@ -1158,7 +1159,8 @@ func (suite *CollectionsUnitSuite) TestPopulateDriveCollections() {
         tenant,
         idname.NewProvider(user, user),
         nil,
-        control.Options{ToggleFeatures: control.Toggles{}})
+        control.Options{ToggleFeatures: control.Toggles{}},
+        count.New())
 
     c.CollectionMap[driveID] = map[string]*Collection{}
 
@@ -1170,6 +1172,7 @@ func (suite *CollectionsUnitSuite) TestPopulateDriveCollections() {
         excludes,
         test.topLevelPackages,
         "prevdelta",
+        count.New(),
         errs)
     test.expect(t, err, clues.ToCore(err))
     assert.ElementsMatch(
@@ -1618,7 +1621,8 @@ func (suite *CollectionsUnitSuite) TestDeserializeMetadata() {
     mc, err := graph.MakeMetadataCollection(
         pathPrefix,
         c(),
-        func(*support.ControllerOperationStatus) {})
+        func(*support.ControllerOperationStatus) {},
+        count.New())
     require.NoError(t, err, clues.ToCore(err))
 
     cols = append(cols, dataMock.NewUnversionedRestoreCollection(
@@ -1628,7 +1632,7 @@ func (suite *CollectionsUnitSuite) TestDeserializeMetadata() {
 
     fb := fault.New(true)
 
-    deltas, paths, canUsePreviousBackup, err := deserializeAndValidateMetadata(ctx, cols, fb)
+    deltas, paths, canUsePreviousBackup, err := deserializeAndValidateMetadata(ctx, cols, count.New(), fb)
     test.errCheck(t, err)
     assert.Equal(t, test.canUsePreviousBackup, canUsePreviousBackup, "can use previous backup")
 
@@ -1656,7 +1660,7 @@ func (suite *CollectionsUnitSuite) TestDeserializeMetadata_ReadFailure() {
 
     fc := failingColl{}
 
-    _, _, canUsePreviousBackup, err := deserializeAndValidateMetadata(ctx, []data.RestoreCollection{fc}, fault.New(true))
+    _, _, canUsePreviousBackup, err := deserializeAndValidateMetadata(ctx, []data.RestoreCollection{fc}, count.New(), fault.New(true))
     require.NoError(t, err)
     require.False(t, canUsePreviousBackup)
 }
@@ -3427,7 +3431,8 @@ func (suite *CollectionsUnitSuite) TestGet() {
         tenant,
         idname.NewProvider(user, user),
         func(*support.ControllerOperationStatus) {},
-        control.Options{ToggleFeatures: control.Toggles{}})
+        control.Options{ToggleFeatures: control.Toggles{}},
+        count.New())
 
     prevDelta := "prev-delta"
 
@@ -3447,7 +3452,8 @@ func (suite *CollectionsUnitSuite) TestGet() {
             bupMD.PreviousPathFileName,
             test.previousPaths),
         },
-        func(*support.ControllerOperationStatus) {})
+        func(*support.ControllerOperationStatus) {},
+        count.New())
     assert.NoError(t, err, "creating metadata collection", clues.ToCore(err))
 
     prevMetadata := []data.RestoreCollection{
@@ -3484,6 +3490,7 @@ func (suite *CollectionsUnitSuite) TestGet() {
             t,
             data.NoFetchRestoreCollection{Collection: baseCol}),
         },
+        count.New(),
         errs)
     if !assert.NoError(t, err, "deserializing metadata", clues.ToCore(err)) {
         continue
@@ -3616,7 +3623,8 @@ func (suite *CollectionsUnitSuite) TestAddURLCacheToDriveCollections() {
         tenant,
         idname.NewProvider(user, user),
         func(*support.ControllerOperationStatus) {},
-        control.Options{ToggleFeatures: control.Toggles{}})
+        control.Options{ToggleFeatures: control.Toggles{}},
+        count.New())
 
     errs := fault.New(true)
     delList := prefixmatcher.NewStringSetBuilder()
```
```diff
@@ -7,6 +7,7 @@ import (
 
     "github.com/alcionai/corso/src/internal/data"
     bupMD "github.com/alcionai/corso/src/pkg/backup/metadata"
+    "github.com/alcionai/corso/src/pkg/count"
     "github.com/alcionai/corso/src/pkg/fault"
     "github.com/alcionai/corso/src/pkg/store"
 )
@@ -14,8 +15,9 @@ import (
 func DeserializeMetadataFiles(
     ctx context.Context,
     colls []data.RestoreCollection,
+    counter *count.Bus,
 ) ([]store.MetadataFile, error) {
-    deltas, prevs, _, err := deserializeAndValidateMetadata(ctx, colls, fault.New(true))
+    deltas, prevs, _, err := deserializeAndValidateMetadata(ctx, colls, counter, fault.New(true))
 
     files := []store.MetadataFile{
         {
```
```diff
@@ -284,7 +284,8 @@ func (suite *OneDriveIntgSuite) TestOneDriveNewCollections() {
         service.updateStatus,
         control.Options{
             ToggleFeatures: control.Toggles{},
-        })
+        },
+        count.New())
 
     ssmb := prefixmatcher.NewStringSetBuilder()
 
```
@ -10,6 +10,7 @@ import (
|
|||||||
|
|
||||||
"github.com/alcionai/corso/src/internal/common/ptr"
|
"github.com/alcionai/corso/src/internal/common/ptr"
|
||||||
"github.com/alcionai/corso/src/internal/common/str"
|
"github.com/alcionai/corso/src/internal/common/str"
|
||||||
|
"github.com/alcionai/corso/src/pkg/count"
|
||||||
"github.com/alcionai/corso/src/pkg/fault"
|
"github.com/alcionai/corso/src/pkg/fault"
|
||||||
"github.com/alcionai/corso/src/pkg/logger"
|
"github.com/alcionai/corso/src/pkg/logger"
|
||||||
"github.com/alcionai/corso/src/pkg/services/m365/api"
|
"github.com/alcionai/corso/src/pkg/services/m365/api"
|
||||||
@ -49,7 +50,8 @@ type urlCache struct {
|
|||||||
|
|
||||||
enumerator EnumerateDriveItemsDeltaer
|
enumerator EnumerateDriveItemsDeltaer
|
||||||
|
|
||||||
errs *fault.Bus
|
counter *count.Bus
|
||||||
|
errs *fault.Bus
|
||||||
}
|
}
|
||||||
|
|
||||||
// newURLache creates a new URL cache for the specified drive ID
|
// newURLache creates a new URL cache for the specified drive ID
|
||||||
@ -57,6 +59,7 @@ func newURLCache(
|
|||||||
driveID, prevDelta string,
|
driveID, prevDelta string,
|
||||||
refreshInterval time.Duration,
|
refreshInterval time.Duration,
|
||||||
enumerator EnumerateDriveItemsDeltaer,
|
enumerator EnumerateDriveItemsDeltaer,
|
||||||
|
counter *count.Bus,
|
||||||
errs *fault.Bus,
|
errs *fault.Bus,
|
||||||
) (*urlCache, error) {
|
) (*urlCache, error) {
|
||||||
err := validateCacheParams(driveID, refreshInterval, enumerator)
|
err := validateCacheParams(driveID, refreshInterval, enumerator)
|
||||||
@ -71,6 +74,7 @@ func newURLCache(
|
|||||||
enumerator: enumerator,
|
enumerator: enumerator,
|
||||||
prevDelta: prevDelta,
|
prevDelta: prevDelta,
|
||||||
refreshInterval: refreshInterval,
|
refreshInterval: refreshInterval,
|
||||||
|
counter: counter,
|
||||||
errs: errs,
|
errs: errs,
|
||||||
},
|
},
|
||||||
nil
|
nil
|
||||||
@ -148,6 +152,8 @@ func (uc *urlCache) refreshCache(
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
uc.counter.Inc(count.URLCacheRefresh)
|
||||||
|
|
||||||
// Hold cache lock in write mode for the entire duration of the refresh.
|
// Hold cache lock in write mode for the entire duration of the refresh.
|
||||||
// This is to prevent other threads from reading the cache while it is
|
// This is to prevent other threads from reading the cache while it is
|
||||||
// being updated page by page
|
// being updated page by page
|
||||||
@ -201,6 +207,7 @@ func (uc *urlCache) readCache(
|
|||||||
|
|
||||||
props, ok := uc.idToProps[itemID]
|
props, ok := uc.idToProps[itemID]
|
||||||
if !ok {
|
if !ok {
|
||||||
|
uc.counter.Inc(count.URLCacheMiss)
|
||||||
return itemProps{}, clues.NewWC(ctx, "item not found in cache")
|
return itemProps{}, clues.NewWC(ctx, "item not found in cache")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
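Note on the cache hunks above: every refresh and every failed lookup now leaves a trace on the bus. A minimal, self-contained sketch of that accounting — the cache type, fields, and `read` helper are hypothetical stand-ins; only the `count.Bus` calls and keys mirror the diff, and it assumes `Values()` returns a loggable snapshot:

```go
package main

import (
	"fmt"

	"github.com/alcionai/corso/src/pkg/count"
)

// toyCache stands in for urlCache; only the counter usage is real.
type toyCache struct {
	counter   *count.Bus
	idToProps map[string]string
}

func (c *toyCache) read(itemID string) (string, bool) {
	props, ok := c.idToProps[itemID]
	if !ok {
		// each failed lookup is recorded as a cache miss
		c.counter.Inc(count.URLCacheMiss)
	}

	return props, ok
}

func main() {
	c := toyCache{
		counter:   count.New(),
		idToProps: map[string]string{"a": "https://example.com/a"},
	}

	c.read("a")
	c.read("missing")

	fmt.Println(c.counter.Values())
}
```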
@@ -148,6 +148,7 @@ func (suite *URLCacheIntegrationSuite) TestURLCacheBasic() {
 du.URL,
 1*time.Hour,
 suite.ac.Drives(),
+count.New(),
 fault.New(true))
 require.NoError(t, err, clues.ToCore(err))

@@ -578,6 +579,7 @@ func (suite *URLCacheUnitSuite) TestGetItemProperties() {
 "",
 1*time.Hour,
 &medi,
+count.New(),
 fault.New(true))
 require.NoError(t, err, clues.ToCore(err))

@@ -622,6 +624,7 @@ func (suite *URLCacheUnitSuite) TestNeedsRefresh() {
 "",
 refreshInterval,
 &mock.EnumerateItemsDeltaByDrive{},
+count.New(),
 fault.New(true))

 require.NoError(t, err, clues.ToCore(err))

@@ -694,6 +697,7 @@ func (suite *URLCacheUnitSuite) TestNewURLCache() {
 "",
 test.refreshInt,
 test.itemPager,
+count.New(),
 test.errors)

 test.expectErr(t, err, clues.ToCore(err))
@@ -14,6 +14,7 @@ import (
 "github.com/alcionai/corso/src/internal/operations/inject"
 "github.com/alcionai/corso/src/pkg/backup/metadata"
 "github.com/alcionai/corso/src/pkg/control"
+"github.com/alcionai/corso/src/pkg/count"
 "github.com/alcionai/corso/src/pkg/fault"
 "github.com/alcionai/corso/src/pkg/logger"
 "github.com/alcionai/corso/src/pkg/path"

@@ -31,6 +32,7 @@ func CreateCollections(
 scope selectors.ExchangeScope,
 dps metadata.DeltaPaths,
 su support.StatusUpdater,
+counter *count.Bus,
 errs *fault.Bus,
 ) ([]data.BackupCollection, error) {
 ctx = clues.Add(ctx, "category", scope.Category().PathType())

@@ -78,11 +80,14 @@ func CreateCollections(
 scope,
 dps,
 bpc.Options,
+counter,
 errs)
 if err != nil {
 return nil, clues.Wrap(err, "filling collections")
 }

+counter.Add(count.Collections, int64(len(collections)))
+
 for _, coll := range collections {
 allCollections = append(allCollections, coll)
 }

@@ -108,6 +113,7 @@ func populateCollections(
 scope selectors.ExchangeScope,
 dps metadata.DeltaPaths,
 ctrlOpts control.Options,
+counter *count.Bus,
 errs *fault.Bus,
 ) (map[string]data.BackupCollection, error) {
 var (

@@ -123,6 +129,7 @@ func populateCollections(
 )

 logger.Ctx(ctx).Infow("filling collections", "len_deltapaths", len(dps))
+counter.Add(count.PrevDeltas, int64(len(dps)))

 el := errs.Local()

@@ -133,6 +140,7 @@ func populateCollections(
 var (
 err error
+cl = counter.Local()
 itemConfig = api.CallConfig{
 CanMakeDeltaQueries: !ctrlOpts.ToggleFeatures.DisableDelta,
 UseImmutableIDs: ctrlOpts.ToggleFeatures.ExchangeImmutableIDs,

@@ -152,9 +160,12 @@ func populateCollections(
 })
 )

+ictx = clues.AddLabelCounter(ictx, cl.PlainAdder())
+
 // Only create a collection if the path matches the scope.
 currPath, locPath, ok := includeContainer(ictx, qp, c, scope, category)
 if !ok {
+cl.Inc(count.SkippedContainers)
 continue
 }

@@ -167,6 +178,7 @@ func populateCollections(
 if len(prevPathStr) > 0 {
 if prevPath, err = pathFromPrevString(prevPathStr); err != nil {
+err = clues.Stack(err).Label(count.BadPrevPath)
 logger.CtxErr(ictx, err).Error("parsing prev path")
 // if the previous path is unusable, then the delta must be, too.
 prevDelta = ""

@@ -200,6 +212,7 @@ func populateCollections(
 deltaURLs[cID] = addAndRem.DU.URL
 } else if !addAndRem.DU.Reset {
 logger.Ctx(ictx).Info("missing delta url")
+cl.Inc(count.MissingDelta)
 }

 edc := NewCollection(

@@ -208,17 +221,19 @@ func populateCollections(
 prevPath,
 locPath,
 ctrlOpts,
-addAndRem.DU.Reset),
+addAndRem.DU.Reset,
+cl),
 qp.ProtectedResource.ID(),
 bh.itemHandler(),
 addAndRem.Added,
 addAndRem.Removed,
 // TODO: produce a feature flag that allows selective
 // enabling of valid modTimes. This currently produces
-// rare-case failures with incorrect details merging.
+// rare failures with incorrect details merging.
 // Root cause is not yet known.
 false,
-statusUpdater)
+statusUpdater,
+cl)

 collections[cID] = edc

@@ -242,7 +257,10 @@ func populateCollections(
 )

 if collections[id] != nil {
-el.AddRecoverable(ctx, clues.WrapWC(ictx, err, "conflict: tombstone exists for a live collection"))
+err := clues.WrapWC(ictx, err, "conflict: tombstone exists for a live collection").
+Label(count.CollectionTombstoneConflict)
+el.AddRecoverable(ctx, err)
+
 continue
 }

@@ -254,18 +272,18 @@ func populateCollections(
 prevPath, err := pathFromPrevString(p)
 if err != nil {
+err = clues.StackWC(ctx, err).Label(count.BadPrevPath)
 // technically shouldn't ever happen. But just in case...
 logger.CtxErr(ictx, err).Error("parsing tombstone prev path")

 continue
 }

-collections[id] = data.NewTombstoneCollection(prevPath, ctrlOpts)
+collections[id] = data.NewTombstoneCollection(prevPath, ctrlOpts, counter)
 }

-logger.Ctx(ctx).Infow(
-"adding metadata collection entries",
-"num_paths_entries", len(currPaths),
-"num_deltas_entries", len(deltaURLs))
+counter.Add(count.NewDeltas, int64(len(deltaURLs)))
+counter.Add(count.NewPrevPaths, int64(len(currPaths)))

 pathPrefix, err := path.BuildMetadata(
 qp.TenantID,

@@ -283,15 +301,14 @@ func populateCollections(
 graph.NewMetadataEntry(metadata.PreviousPathFileName, currPaths),
 graph.NewMetadataEntry(metadata.DeltaURLsFileName, deltaURLs),
 },
-statusUpdater)
+statusUpdater,
+count.New())
 if err != nil {
 return nil, clues.Wrap(err, "making metadata collection")
 }

 collections["metadata"] = col

-logger.Ctx(ctx).Infow("produced collections", "count_collections", len(collections))
-
 return collections, el.Failure()
 }
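One pattern recurs through the populateCollections changes: a root bus carries run-wide totals while a `Local()` child is minted per container, so per-folder stats stay attributable (whether child increments also roll up into the parent snapshot is up to the count package; the diff doesn't show it). A rough sketch — the container loop and `inScope` are invented for illustration; the `count` calls are the ones above:

```go
package main

import (
	"fmt"

	"github.com/alcionai/corso/src/pkg/count"
)

// inScope and the container names are hypothetical.
func inScope(name string) bool { return name != "out-of-scope" }

func main() {
	counter := count.New()

	for _, c := range []string{"inbox", "out-of-scope", "archive"} {
		// one child bus per container
		cl := counter.Local()

		if !inScope(c) {
			cl.Inc(count.SkippedContainers)
			continue
		}

		// the real code hands cl to NewCollection for this container
	}

	fmt.Println(counter.Values())
}
```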
@@ -332,7 +332,8 @@ func (suite *DataCollectionsUnitSuite) TestParseMetadataCollections() {
 coll, err := graph.MakeMetadataCollection(
 pathPrefix,
 entries,
-func(cos *support.ControllerOperationStatus) {})
+func(cos *support.ControllerOperationStatus) {},
+count.New())
 require.NoError(t, err, clues.ToCore(err))

 cdps, canUsePreviousBackup, err := ParseMetadataCollections(ctx, []data.RestoreCollection{

@@ -513,6 +514,7 @@ func (suite *BackupIntgSuite) TestMailFetch() {
 test.scope,
 metadata.DeltaPaths{},
 func(status *support.ControllerOperationStatus) {},
+count.New(),
 fault.New(true))
 require.NoError(t, err, clues.ToCore(err))

@@ -593,6 +595,7 @@ func (suite *BackupIntgSuite) TestDelta() {
 test.scope,
 metadata.DeltaPaths{},
 func(status *support.ControllerOperationStatus) {},
+count.New(),
 fault.New(true))
 require.NoError(t, err, clues.ToCore(err))
 assert.Less(t, 1, len(collections), "retrieved metadata and data collections")

@@ -625,6 +628,7 @@ func (suite *BackupIntgSuite) TestDelta() {
 test.scope,
 dps,
 func(status *support.ControllerOperationStatus) {},
+count.New(),
 fault.New(true))
 require.NoError(t, err, clues.ToCore(err))
 })

@@ -664,6 +668,7 @@ func (suite *BackupIntgSuite) TestMailSerializationRegression() {
 sel.Scopes()[0],
 metadata.DeltaPaths{},
 newStatusUpdater(t, &wg),
+count.New(),
 fault.New(true))
 require.NoError(t, err, clues.ToCore(err))

@@ -750,6 +755,7 @@ func (suite *BackupIntgSuite) TestContactSerializationRegression() {
 test.scope,
 metadata.DeltaPaths{},
 newStatusUpdater(t, &wg),
+count.New(),
 fault.New(true))
 require.NoError(t, err, clues.ToCore(err))

@@ -859,6 +865,7 @@ func (suite *BackupIntgSuite) TestEventsSerializationRegression() {
 test.scope,
 metadata.DeltaPaths{},
 newStatusUpdater(t, &wg),
+count.New(),
 fault.New(true))
 require.NoError(t, err, clues.ToCore(err))
 require.Len(t, collections, 2)

@@ -1135,6 +1142,7 @@ func (suite *CollectionPopulationSuite) TestPopulateCollections() {
 test.scope,
 dps,
 ctrlOpts,
+count.New(),
 fault.New(test.failFast == control.FailFast))
 test.expectErr(t, err, clues.ToCore(err))

@@ -1477,6 +1485,7 @@ func (suite *CollectionPopulationSuite) TestFilterContainersAndFillCollections_D
 sc.scope,
 test.inputMetadata(t, qp.Category),
 control.Options{FailureHandling: control.FailFast},
+count.New(),
 fault.New(true))
 require.NoError(t, err, "getting collections", clues.ToCore(err))

@@ -1643,6 +1652,7 @@ func (suite *CollectionPopulationSuite) TestFilterContainersAndFillCollections_r
 allScope,
 dps,
 control.Options{FailureHandling: control.FailFast},
+count.New(),
 fault.New(true))
 require.NoError(t, err, clues.ToCore(err))

@@ -2064,6 +2074,7 @@ func (suite *CollectionPopulationSuite) TestFilterContainersAndFillCollections_i
 allScope,
 test.dps,
 ctrlOpts,
+count.New(),
 fault.New(true))
 assert.NoError(t, err, clues.ToCore(err))
@@ -18,6 +18,7 @@ import (
 "github.com/alcionai/corso/src/internal/m365/support"
 "github.com/alcionai/corso/src/internal/observe"
 "github.com/alcionai/corso/src/pkg/backup/details"
+"github.com/alcionai/corso/src/pkg/count"
 "github.com/alcionai/corso/src/pkg/fault"
 "github.com/alcionai/corso/src/pkg/logger"
 "github.com/alcionai/corso/src/pkg/path"

@@ -110,6 +111,7 @@ func NewCollection(
 origRemoved []string,
 validModTimes bool,
 statusUpdater support.StatusUpdater,
+counter *count.Bus,
 ) data.BackupCollection {
 added := maps.Clone(origAdded)
 removed := make(map[string]struct{}, len(origRemoved))

@@ -127,6 +129,9 @@ func NewCollection(
 removed[r] = struct{}{}
 }

+counter.Add(count.ItemsAdded, int64(len(added)))
+counter.Add(count.ItemsRemoved, int64(len(removed)))
+
 if !validModTimes {
 return &prefetchCollection{
 BaseCollection: bc,

@@ -145,6 +150,7 @@ func NewCollection(
 removed: removed,
 getter: items,
 statusUpdater: statusUpdater,
+counter: counter,
 }
 }

@@ -188,13 +194,17 @@ func (col *prefetchCollection) streamItems(
 colProgress chan<- struct{}

 user = col.user
-log = logger.Ctx(ctx).With(
-"service", path.ExchangeService.String(),
-"category", col.Category().String())
 )

+ctx = clues.Add(
+ctx,
+"category", col.Category().String())
+
 defer func() {
 close(stream)
+logger.Ctx(ctx).Infow(
+"finished stream backup collection items",
+"stats", col.Counter.Values())
 updateStatus(
 ctx,
 col.statusUpdater,

@@ -228,6 +238,10 @@ func (col *prefetchCollection) streamItems(
 stream <- data.NewDeletedItem(id)

+if col.Counter.Inc(count.StreamItemsRemoved)%1000 == 0 {
+logger.Ctx(ctx).Infow("item removal stream progress", "stats", col.Counter.Values())
+}
+
 atomic.AddInt64(&success, 1)

 if colProgress != nil {

@@ -268,9 +282,11 @@ func (col *prefetchCollection) streamItems(
 // nothing else we can do, and not reporting it will make the status
 // investigation upset.
 if graph.IsErrDeletedInFlight(err) {
+col.Counter.Inc(count.StreamItemsDeletedInFlight)
 atomic.AddInt64(&success, 1)
-log.With("err", err).Infow("item not found", clues.InErr(err).Slice()...)
+logger.CtxErr(ctx, err).Info("item not found")
 } else {
+col.Counter.Inc(count.StreamItemsErrored)
 el.AddRecoverable(ctx, clues.Wrap(err, "fetching item").Label(fault.LabelForceNoBackupCreation))
 }

@@ -282,6 +298,7 @@ func (col *prefetchCollection) streamItems(
 id,
 details.ItemInfo{Exchange: info})
 if err != nil {
+col.Counter.Inc(count.StreamItemsErrored)
 el.AddRecoverable(
 ctx,
 clues.StackWC(ctx, err).

@@ -292,6 +309,12 @@ func (col *prefetchCollection) streamItems(
 stream <- item

+col.Counter.Add(count.StreamBytesAdded, info.Size)
+
+if col.Counter.Inc(count.StreamItemsAdded)%1000 == 0 {
+logger.Ctx(ctx).Infow("item addition stream progress", "stats", col.Counter.Values())
+}
+
 atomic.AddInt64(&success, 1)
 atomic.AddInt64(&totalBytes, info.Size)
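The `%1000` checks above are throttled progress logging: `Inc` appears to return the post-increment total, so a stats line fires once per thousand streamed items rather than per item. Stripped of the collection plumbing (the print stands in for the structured log):

```go
package main

import (
	"fmt"

	"github.com/alcionai/corso/src/pkg/count"
)

func main() {
	counter := count.New()

	for i := 0; i < 2500; i++ {
		// assumes Inc returns the new value, as the modulo test implies
		if counter.Inc(count.StreamItemsAdded)%1000 == 0 {
			fmt.Println("progress:", counter.Values())
		}
	}
	// logs twice: after item 1000 and after item 2000
}
```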
@@ -329,6 +352,8 @@ type lazyFetchCollection struct {
 getter itemGetterSerializer

 statusUpdater support.StatusUpdater
+
+counter *count.Bus
 }

 // Items utility function to asynchronously execute process to fill data channel with

@@ -413,6 +438,7 @@ func (col *lazyFetchCollection) streamItems(
 },
 id,
 modTime,
+col.counter,
 errs)

 atomic.AddInt64(&success, 1)

@@ -24,6 +24,7 @@ import (
 "github.com/alcionai/corso/src/internal/tester"
 "github.com/alcionai/corso/src/pkg/backup/details"
 "github.com/alcionai/corso/src/pkg/control"
+"github.com/alcionai/corso/src/pkg/count"
 "github.com/alcionai/corso/src/pkg/fault"
 "github.com/alcionai/corso/src/pkg/path"
 "github.com/alcionai/corso/src/pkg/services/m365/api/graph"

@@ -146,13 +147,15 @@ func (suite *CollectionUnitSuite) TestNewCollection_state() {
 test.prev,
 test.loc,
 control.DefaultOptions(),
-false),
+false,
+count.New()),
 "u",
 mock.DefaultItemGetSerialize(),
 nil,
 nil,
 colType.validModTimes,
-nil)
+nil,
+count.New())
 assert.Equal(t, test.expect, c.State(), "collection state")
 assert.Equal(t, test.curr, c.FullPath(), "full path")
 assert.Equal(t, test.prev, c.PreviousPath(), "prev path")

@@ -289,13 +292,15 @@ func (suite *CollectionUnitSuite) TestPrefetchCollection_Items() {
 nil,
 locPath.ToBuilder(),
 control.DefaultOptions(),
-false),
+false,
+count.New()),
 "",
 &mock.ItemGetSerialize{},
 test.added,
 maps.Keys(test.removed),
 false,
-statusUpdater)
+statusUpdater,
+count.New())

 for item := range col.Items(ctx, errs) {
 itemCount++

@@ -427,13 +432,15 @@ func (suite *CollectionUnitSuite) TestLazyFetchCollection_Items_LazyFetch() {
 nil,
 locPath.ToBuilder(),
 control.DefaultOptions(),
-false),
+false,
+count.New()),
 "",
 mlg,
 test.added,
 maps.Keys(test.removed),
 true,
-statusUpdater)
+statusUpdater,
+count.New())

 for item := range col.Items(ctx, errs) {
 itemCount++

@@ -499,6 +506,7 @@ func (suite *CollectionUnitSuite) TestLazyItem_NoRead_GetInfo_Errors() {
 nil,
 "itemID",
 time.Now(),
+count.New(),
 fault.New(true))

 _, err := li.Info()

@@ -564,6 +572,7 @@ func (suite *CollectionUnitSuite) TestLazyItem_GetDataErrors() {
 },
 "itemID",
 now,
+count.New(),
 fault.New(true))

 assert.False(t, li.Deleted(), "item shouldn't be marked deleted")

@@ -604,6 +613,7 @@ func (suite *CollectionUnitSuite) TestLazyItem_ReturnsEmptyReaderOnDeletedInFlig
 },
 "itemID",
 now,
+count.New(),
 fault.New(true))

 assert.False(t, li.Deleted(), "item shouldn't be marked deleted")

@@ -657,6 +667,7 @@ func (suite *CollectionUnitSuite) TestLazyItem() {
 },
 "itemID",
 now,
+count.New(),
 fault.New(true))

 assert.False(t, li.Deleted(), "item shouldn't be marked deleted")
@@ -15,6 +15,7 @@ import (
 "github.com/alcionai/corso/src/internal/operations/inject"
 "github.com/alcionai/corso/src/pkg/backup/metadata"
 "github.com/alcionai/corso/src/pkg/control"
+"github.com/alcionai/corso/src/pkg/count"
 "github.com/alcionai/corso/src/pkg/fault"
 "github.com/alcionai/corso/src/pkg/logger"
 "github.com/alcionai/corso/src/pkg/path"

@@ -38,6 +39,7 @@ func CreateCollections(
 tenantID string,
 scope selectors.GroupsScope,
 su support.StatusUpdater,
+counter *count.Bus,
 errs *fault.Bus,
 ) ([]data.BackupCollection, bool, error) {
 ctx = clues.Add(ctx, "category", scope.Category().PathType())

@@ -64,6 +66,8 @@ func CreateCollections(
 return nil, false, clues.Stack(err)
 }

+counter.Add(count.Channels, int64(len(channels)))
+
 collections, err := populateCollections(
 ctx,
 qp,

@@ -73,6 +77,7 @@ func CreateCollections(
 scope,
 cdps[scope.Category().PathType()],
 bpc.Options,
+counter,
 errs)
 if err != nil {
 return nil, false, clues.Wrap(err, "filling collections")

@@ -94,6 +99,7 @@ func populateCollections(
 scope selectors.GroupsScope,
 dps metadata.DeltaPaths,
 ctrlOpts control.Options,
+counter *count.Bus,
 errs *fault.Bus,
 ) (map[string]data.BackupCollection, error) {
 var (

@@ -117,6 +123,7 @@ func populateCollections(
 }

 var (
+cl = counter.Local()
 cID = ptr.Val(c.GetId())
 cName = ptr.Val(c.GetDisplayName())
 err error

@@ -134,15 +141,19 @@ func populateCollections(
 })
 )

+ictx = clues.AddLabelCounter(ictx, cl.PlainAdder())
+
 delete(tombstones, cID)

 // Only create a collection if the path matches the scope.
 if !bh.includeContainer(ictx, qp, c, scope) {
+cl.Inc(count.SkippedContainers)
 continue
 }

 if len(prevPathStr) > 0 {
 if prevPath, err = pathFromPrevString(prevPathStr); err != nil {
+err = clues.StackWC(ctx, err).Label(count.BadPrevPath)
 logger.CtxErr(ictx, err).Error("parsing prev path")
 // if the previous path is unusable, then the delta must be, too.
 prevDelta = ""

@@ -166,6 +177,9 @@ func populateCollections(
 added := str.SliceToMap(maps.Keys(addAndRem.Added))
 removed := str.SliceToMap(addAndRem.Removed)

+cl.Add(count.ItemsAdded, int64(len(added)))
+cl.Add(count.ItemsRemoved, int64(len(removed)))
+
 if len(addAndRem.DU.URL) > 0 {
 deltaURLs[cID] = addAndRem.DU.URL
 } else if !addAndRem.DU.Reset {

@@ -174,7 +188,9 @@ func populateCollections(
 currPath, err := bh.canonicalPath(path.Builder{}.Append(cID), qp.TenantID)
 if err != nil {
-el.AddRecoverable(ctx, clues.Stack(err))
+err = clues.StackWC(ctx, err).Label(count.BadCollPath)
+el.AddRecoverable(ctx, err)
+
 continue
 }

@@ -191,7 +207,8 @@ func populateCollections(
 prevPath,
 path.Builder{}.Append(cName),
 ctrlOpts,
-addAndRem.DU.Reset),
+addAndRem.DU.Reset,
+cl),
 bh,
 qp.ProtectedResource.ID(),
 added,

@@ -219,7 +236,9 @@ func populateCollections(
 )

 if collections[id] != nil {
-el.AddRecoverable(ictx, clues.WrapWC(ictx, err, "conflict: tombstone exists for a live collection"))
+err := clues.NewWC(ictx, "conflict: tombstone exists for a live collection").Label(count.CollectionTombstoneConflict)
+el.AddRecoverable(ctx, err)
+
 continue
 }

@@ -231,12 +250,14 @@ func populateCollections(
 prevPath, err := pathFromPrevString(p)
 if err != nil {
+err := clues.StackWC(ctx, err).Label(count.BadPrevPath)
 // technically shouldn't ever happen. But just in case...
 logger.CtxErr(ictx, err).Error("parsing tombstone prev path")

 continue
 }

-collections[id] = data.NewTombstoneCollection(prevPath, ctrlOpts)
+collections[id] = data.NewTombstoneCollection(prevPath, ctrlOpts, counter.Local())
 }

 logger.Ctx(ctx).Infow(

@@ -251,7 +272,8 @@ func populateCollections(
 qp.Category,
 false)
 if err != nil {
-return nil, clues.Wrap(err, "making metadata path prefix")
+return nil, clues.WrapWC(ctx, err, "making metadata path prefix").
+Label(count.BadPathPrefix)
 }

 col, err := graph.MakeMetadataCollection(

@@ -260,14 +282,13 @@ func populateCollections(
 graph.NewMetadataEntry(metadata.PreviousPathFileName, currPaths),
 graph.NewMetadataEntry(metadata.DeltaURLsFileName, deltaURLs),
 },
-statusUpdater)
+statusUpdater,
+counter.Local())
 if err != nil {
-return nil, clues.Wrap(err, "making metadata collection")
+return nil, clues.WrapWC(ctx, err, "making metadata collection")
 }

 collections["metadata"] = col

-logger.Ctx(ctx).Infow("produced collections", "count_collections", len(collections))
-
 return collections, el.Failure()
 }
@@ -246,6 +246,7 @@ func (suite *BackupUnitSuite) TestPopulateCollections() {
 selectors.NewGroupsBackup(nil).Channels(selectors.Any())[0],
 nil,
 ctrlOpts,
+count.New(),
 fault.New(true))
 test.expectErr(t, err, clues.ToCore(err))
 assert.Len(t, collections, test.expectColls, "number of collections")

@@ -405,6 +406,7 @@ func (suite *BackupUnitSuite) TestPopulateCollections_incremental() {
 allScope,
 test.deltaPaths,
 ctrlOpts,
+count.New(),
 fault.New(true))
 test.expectErr(t, err, clues.ToCore(err))
 assert.Len(t, collections, test.expectColls, "number of collections")

@@ -527,6 +529,7 @@ func (suite *BackupIntgSuite) TestCreateCollections() {
 suite.tenantID,
 test.scope,
 func(status *support.ControllerOperationStatus) {},
+count.New(),
 fault.New(true))
 require.NoError(t, err, clues.ToCore(err))
 require.NotEmpty(t, collections, "must have at least one collection")
@@ -14,6 +14,7 @@ import (
 "github.com/alcionai/corso/src/internal/m365/support"
 "github.com/alcionai/corso/src/internal/observe"
 "github.com/alcionai/corso/src/pkg/backup/details"
+"github.com/alcionai/corso/src/pkg/count"
 "github.com/alcionai/corso/src/pkg/fault"
 "github.com/alcionai/corso/src/pkg/logger"
 )

@@ -90,6 +91,9 @@ func (col *Collection) streamItems(ctx context.Context, errs *fault.Bus) {
 ctx = clues.Add(ctx, "category", col.Category().String())

 defer func() {
+logger.Ctx(ctx).Infow(
+"finished stream backup collection items",
+"stats", col.Counter.Values())
 col.finishPopulation(ctx, streamedItems, totalBytes, errs.Failure())
 }()

@@ -117,7 +121,7 @@ func (col *Collection) streamItems(ctx context.Context, errs *fault.Bus) {
 col.stream <- data.NewDeletedItem(id)

 atomic.AddInt64(&streamedItems, 1)
-atomic.AddInt64(&totalBytes, 0)
+col.Counter.Inc(count.StreamItemsRemoved)

 if colProgress != nil {
 colProgress <- struct{}{}

@@ -150,26 +154,23 @@ func (col *Collection) streamItems(ctx context.Context, errs *fault.Bus) {
 parentFolderID,
 id)
 if err != nil {
-el.AddRecoverable(
-ctx,
-clues.Wrap(err, "writing channel message to serializer").Label(fault.LabelForceNoBackupCreation))
+err = clues.Wrap(err, "getting channel message data").Label(fault.LabelForceNoBackupCreation)
+el.AddRecoverable(ctx, err)

 return
 }

 if err := writer.WriteObjectValue("", item); err != nil {
-el.AddRecoverable(
-ctx,
-clues.Wrap(err, "writing channel message to serializer").Label(fault.LabelForceNoBackupCreation))
+err = clues.Wrap(err, "writing channel message to serializer").Label(fault.LabelForceNoBackupCreation)
+el.AddRecoverable(ctx, err)

 return
 }

 itemData, err := writer.GetSerializedContent()
 if err != nil {
-el.AddRecoverable(
-ctx,
-clues.Wrap(err, "serializing channel message").Label(fault.LabelForceNoBackupCreation))
+err = clues.Wrap(err, "serializing channel message").Label(fault.LabelForceNoBackupCreation)
+el.AddRecoverable(ctx, err)

 return
 }

@@ -181,9 +182,8 @@ func (col *Collection) streamItems(ctx context.Context, errs *fault.Bus) {
 id,
 details.ItemInfo{Groups: info})
 if err != nil {
-el.AddRecoverable(
-ctx,
-clues.StackWC(ctx, err).Label(fault.LabelForceNoBackupCreation))
+err := clues.StackWC(ctx, err).Label(fault.LabelForceNoBackupCreation)
+el.AddRecoverable(ctx, err)

 return
 }

@@ -193,6 +193,12 @@ func (col *Collection) streamItems(ctx context.Context, errs *fault.Bus) {
 atomic.AddInt64(&streamedItems, 1)
 atomic.AddInt64(&totalBytes, info.Size)

+if col.Counter.Inc(count.StreamItemsAdded)%1000 == 0 {
+logger.Ctx(ctx).Infow("item stream progress", "stats", col.Counter.Values())
+}
+
+col.Counter.Add(count.StreamBytesAdded, info.Size)
+
 if colProgress != nil {
 colProgress <- struct{}{}
 }
|||||||
"github.com/alcionai/corso/src/internal/tester"
|
"github.com/alcionai/corso/src/internal/tester"
|
||||||
"github.com/alcionai/corso/src/pkg/backup/details"
|
"github.com/alcionai/corso/src/pkg/backup/details"
|
||||||
"github.com/alcionai/corso/src/pkg/control"
|
"github.com/alcionai/corso/src/pkg/control"
|
||||||
|
"github.com/alcionai/corso/src/pkg/count"
|
||||||
"github.com/alcionai/corso/src/pkg/fault"
|
"github.com/alcionai/corso/src/pkg/fault"
|
||||||
"github.com/alcionai/corso/src/pkg/path"
|
"github.com/alcionai/corso/src/pkg/path"
|
||||||
)
|
)
|
||||||
@ -121,7 +122,8 @@ func (suite *CollectionUnitSuite) TestNewCollection_state() {
|
|||||||
test.prev,
|
test.prev,
|
||||||
test.loc,
|
test.loc,
|
||||||
control.DefaultOptions(),
|
control.DefaultOptions(),
|
||||||
false),
|
false,
|
||||||
|
count.New()),
|
||||||
nil,
|
nil,
|
||||||
"g",
|
"g",
|
||||||
nil, nil,
|
nil, nil,
|
||||||
@ -202,7 +204,8 @@ func (suite *CollectionUnitSuite) TestCollection_streamItems() {
|
|||||||
nil,
|
nil,
|
||||||
locPath.ToBuilder(),
|
locPath.ToBuilder(),
|
||||||
control.DefaultOptions(),
|
control.DefaultOptions(),
|
||||||
false),
|
false,
|
||||||
|
count.New()),
|
||||||
added: test.added,
|
added: test.added,
|
||||||
removed: test.removed,
|
removed: test.removed,
|
||||||
getter: mock.GetChannelMessage{},
|
getter: mock.GetChannelMessage{},
|
||||||
|
|||||||
@@ -33,6 +33,7 @@ func CollectLibraries(
 tenantID string,
 ssmb *prefixmatcher.StringSetMatchBuilder,
 su support.StatusUpdater,
+counter *count.Bus,
 errs *fault.Bus,
 ) ([]data.BackupCollection, bool, error) {
 logger.Ctx(ctx).Debug("creating SharePoint Library collections")

@@ -44,7 +45,8 @@ func CollectLibraries(
 tenantID,
 bpc.ProtectedResource,
 su,
-bpc.Options)
+bpc.Options,
+counter)
 )

 msg := fmt.Sprintf(
@@ -9,6 +9,7 @@ import (
 "github.com/alcionai/corso/src/internal/m365/collection/drive"
 "github.com/alcionai/corso/src/internal/m365/collection/exchange"
 "github.com/alcionai/corso/src/internal/m365/collection/groups"
+"github.com/alcionai/corso/src/pkg/count"
 "github.com/alcionai/corso/src/pkg/path"
 "github.com/alcionai/corso/src/pkg/store"
 )

@@ -28,9 +29,9 @@ func (ctrl *Controller) DeserializeMetadataFiles(
 case path.ExchangeService, path.ExchangeMetadataService:
 return exchange.DeserializeMetadataFiles(ctx, colls)
 case path.OneDriveService, path.OneDriveMetadataService:
-return drive.DeserializeMetadataFiles(ctx, colls)
+return drive.DeserializeMetadataFiles(ctx, colls, count.New())
 case path.SharePointService, path.SharePointMetadataService:
-return drive.DeserializeMetadataFiles(ctx, colls)
+return drive.DeserializeMetadataFiles(ctx, colls, count.New())
 case path.GroupsService, path.GroupsMetadataService:
 return groups.DeserializeMetadataFiles(ctx, colls)
 default:
@@ -10,6 +10,8 @@ import (
 "github.com/alcionai/corso/src/internal/m365/collection/exchange"
 "github.com/alcionai/corso/src/internal/m365/support"
 "github.com/alcionai/corso/src/internal/operations/inject"
+"github.com/alcionai/corso/src/pkg/account"
+"github.com/alcionai/corso/src/pkg/count"
 "github.com/alcionai/corso/src/pkg/fault"
 "github.com/alcionai/corso/src/pkg/logger"
 "github.com/alcionai/corso/src/pkg/path"

@@ -23,8 +25,9 @@ func ProduceBackupCollections(
 ctx context.Context,
 bpc inject.BackupProducerConfig,
 ac api.Client,
-tenantID string,
+creds account.M365Config,
 su support.StatusUpdater,
+counter *count.Bus,
 errs *fault.Bus,
 ) ([]data.BackupCollection, *prefixmatcher.StringSetMatcher, bool, error) {
 eb, err := bpc.Selector.ToExchangeBackup()

@@ -35,6 +38,7 @@ func ProduceBackupCollections(
 var (
 collections = []data.BackupCollection{}
 el = errs.Local()
+tenantID = creds.AzureTenantID
 categories = map[path.CategoryType]struct{}{}
 handlers = exchange.BackupHandlers(ac)
 )

@@ -46,6 +50,7 @@ func ProduceBackupCollections(
 if !canMakeDeltaQueries {
 logger.Ctx(ctx).Info("delta requests not available")
+counter.Inc(count.NoDeltaQueries)

 bpc.Options.ToggleFeatures.DisableDelta = true
 }

@@ -75,6 +80,7 @@ func ProduceBackupCollections(
 scope,
 cdps[scope.Category().PathType()],
 su,
+counter,
 errs)
 if err != nil {
 el.AddRecoverable(ctx, err)

@@ -95,6 +101,7 @@ func ProduceBackupCollections(
 path.ExchangeService,
 categories,
 su,
+counter,
 errs)
 if err != nil {
 return nil, nil, false, err

@@ -103,6 +110,8 @@ func ProduceBackupCollections(
 collections = append(collections, baseCols...)
 }

+logger.Ctx(ctx).Infow("produced collections", "stats", counter.Values())
+
 return collections, nil, canUsePreviousBackup, el.Failure()
 }
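Taken together, the producer now treats the count bus the way it treats the fault bus: the caller creates it, each layer threads it down, and one stats snapshot is logged on the way out. A minimal sketch of that lifecycle — `produce` is a hypothetical stand-in for ProduceBackupCollections:

```go
package main

import (
	"fmt"

	"github.com/alcionai/corso/src/pkg/count"
)

// produce only shows the bus lifecycle, not real backup work.
func produce(counter *count.Bus) {
	counter.Inc(count.NoDeltaQueries)
	counter.Add(count.Collections, 3)
}

func main() {
	// created once per operation by the caller...
	counter := count.New()

	// ...threaded through the producer...
	produce(counter)

	// ...and summarized once at the end, mirroring the
	// "produced collections" log line above.
	fmt.Println("stats:", counter.Values())
}
```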
@@ -23,6 +23,7 @@ import (
 "github.com/alcionai/corso/src/pkg/backup/identity"
 "github.com/alcionai/corso/src/pkg/backup/metadata"
 "github.com/alcionai/corso/src/pkg/control"
+"github.com/alcionai/corso/src/pkg/count"
 "github.com/alcionai/corso/src/pkg/fault"
 "github.com/alcionai/corso/src/pkg/logger"
 "github.com/alcionai/corso/src/pkg/path"

@@ -36,6 +37,7 @@ func ProduceBackupCollections(
 ac api.Client,
 creds account.M365Config,
 su support.StatusUpdater,
+counter *count.Bus,
 errs *fault.Bus,
 ) ([]data.BackupCollection, *prefixmatcher.StringSetMatcher, error) {
 b, err := bpc.Selector.ToGroupsBackup()

@@ -71,15 +73,20 @@ func ProduceBackupCollections(
 break
 }

+cl := counter.Local()
+ictx := clues.AddLabelCounter(ctx, cl.PlainAdder())
+
 var dbcs []data.BackupCollection

 switch scope.Category().PathType() {
 case path.LibrariesCategory:
-sites, err := ac.Groups().GetAllSites(ctx, bpc.ProtectedResource.ID(), errs)
+sites, err := ac.Groups().GetAllSites(ictx, bpc.ProtectedResource.ID(), errs)
 if err != nil {
 return nil, nil, err
 }

+cl.Add(count.Sites, int64(len(sites)))
+
 siteMetadataCollection := map[string][]data.RestoreCollection{}

 // Once we have metadata collections for chat as well, we will have to filter those out
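`clues.AddLabelCounter` is the glue between the two buses in this PR: it hangs the child bus's adder on the context, so error labels applied downstream (`count.BadPrevPath`, `count.BadPathPrefix`, ...) presumably tally on the matching counter as a side effect. A hedged sketch of the wiring — whether labeling actually increments is an assumption here, not something the diff states:

```go
package main

import (
	"context"
	"errors"
	"fmt"

	"github.com/alcionai/clues"

	"github.com/alcionai/corso/src/pkg/count"
)

func main() {
	counter := count.New()
	cl := counter.Local()

	// attach the child bus's plain adder to the context
	ictx := clues.AddLabelCounter(context.Background(), cl.PlainAdder())

	// labeling an error built from ictx is assumed to bump BadPrevPath
	err := clues.WrapWC(ictx, errors.New("boom"), "parsing prev path").
		Label(count.BadPrevPath)

	fmt.Println(err, counter.Values())
}
```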
@@ -89,43 +96,51 @@ func ProduceBackupCollections(
 }

 for _, s := range sites {
-pr := idname.NewProvider(ptr.Val(s.GetId()), ptr.Val(s.GetWebUrl()))
-sbpc := inject.BackupProducerConfig{
-LastBackupVersion: bpc.LastBackupVersion,
-Options: bpc.Options,
-ProtectedResource: pr,
-Selector: bpc.Selector,
-MetadataCollections: siteMetadataCollection[ptr.Val(s.GetId())],
-}
+var (
+scl = cl.Local()
+pr = idname.NewProvider(ptr.Val(s.GetId()), ptr.Val(s.GetWebUrl()))
+sbpc = inject.BackupProducerConfig{
+LastBackupVersion: bpc.LastBackupVersion,
+Options: bpc.Options,
+ProtectedResource: pr,
+Selector: bpc.Selector,
+MetadataCollections: siteMetadataCollection[ptr.Val(s.GetId())],
+}
+bh = drive.NewGroupBackupHandler(
+bpc.ProtectedResource.ID(),
+ptr.Val(s.GetId()),
+ac.Drives(),
+scope)
+)

-bh := drive.NewGroupBackupHandler(
-bpc.ProtectedResource.ID(),
-ptr.Val(s.GetId()),
-ac.Drives(),
-scope)
+ictx = clues.Add(
+ictx,
+"site_id", ptr.Val(s.GetId()),
+"site_weburl", graph.LoggableURL(ptr.Val(s.GetWebUrl())))

 sp, err := bh.SitePathPrefix(creds.AzureTenantID)
 if err != nil {
-return nil, nil, clues.Wrap(err, "getting site path")
+return nil, nil, clues.WrapWC(ictx, err, "getting site path").Label(count.BadPathPrefix)
 }

 sitesPreviousPaths[ptr.Val(s.GetId())] = sp.String()

 cs, canUsePreviousBackup, err := site.CollectLibraries(
-ctx,
+ictx,
 sbpc,
 bh,
 creds.AzureTenantID,
 ssmb,
 su,
+scl,
 errs)
 if err != nil {
-el.AddRecoverable(ctx, err)
+el.AddRecoverable(ictx, err)
 continue
 }

 if !canUsePreviousBackup {
-dbcs = append(dbcs, data.NewTombstoneCollection(sp, control.Options{}))
+dbcs = append(dbcs, data.NewTombstoneCollection(sp, control.Options{}, scl))
 }

 dbcs = append(dbcs, cs...)

@@ -142,7 +157,7 @@ func ProduceBackupCollections(
 // TODO(meain): Use number of messages and not channels
 CompletionMessage: func() string { return fmt.Sprintf("(found %d channels)", len(cs)) },
 }
-progressBar := observe.MessageWithCompletion(ctx, pcfg, scope.Category().PathType().HumanString())
+progressBar := observe.MessageWithCompletion(ictx, pcfg, scope.Category().PathType().HumanString())

 if !isTeam {
 continue

@@ -151,25 +166,26 @@ func ProduceBackupCollections(
 bh := groups.NewChannelBackupHandler(bpc.ProtectedResource.ID(), ac.Channels())

 cs, canUsePreviousBackup, err = groups.CreateCollections(
-ctx,
+ictx,
 bpc,
 bh,
 creds.AzureTenantID,
 scope,
 su,
+cl,
 errs)
 if err != nil {
-el.AddRecoverable(ctx, err)
+el.AddRecoverable(ictx, err)
 continue
 }

 if !canUsePreviousBackup {
 tp, err := bh.PathPrefix(creds.AzureTenantID)
 if err != nil {
-return nil, nil, clues.Wrap(err, "getting message path")
+return nil, nil, clues.WrapWC(ictx, err, "getting message path").Label(count.BadPathPrefix)
 }

-dbcs = append(dbcs, data.NewTombstoneCollection(tp, control.Options{}))
+dbcs = append(dbcs, data.NewTombstoneCollection(tp, control.Options{}, cl))
 }

 dbcs = append(dbcs, cs...)

@@ -191,6 +207,7 @@ func ProduceBackupCollections(
 path.GroupsService,
 categories,
 su,
+counter,
 errs)
 if err != nil {
 return nil, nil, err

@@ -204,13 +221,18 @@ func ProduceBackupCollections(
 creds.AzureTenantID,
 bpc.ProtectedResource.ID(),
 sitesPreviousPaths,
-su)
+su,
+counter)
 if err != nil {
 return nil, nil, err
 }

 collections = append(collections, md)

+counter.Add(count.Collections, int64(len(collections)))
+
+logger.Ctx(ctx).Infow("produced collections", "stats", counter.Values())
+
 return collections, ssmb.ToReader(), el.Failure()
 }

@@ -218,6 +240,7 @@ func getSitesMetadataCollection(
 tenantID, groupID string,
 sites map[string]string,
 su support.StatusUpdater,
+counter *count.Bus,
 ) (data.BackupCollection, error) {
 p, err := path.BuildMetadata(
 tenantID,

@@ -239,7 +262,8 @@ func getSitesMetadataCollection(
 []graph.MetadataCollectionEntry{
 graph.NewMetadataEntry(metadata.PreviousPathFileName, sites),
 },
-su)
+su,
+counter.Local())

 return md, err
 }
@@ -13,6 +13,8 @@ import (
     "github.com/alcionai/corso/src/internal/observe"
     "github.com/alcionai/corso/src/internal/operations/inject"
     "github.com/alcionai/corso/src/internal/version"
+    "github.com/alcionai/corso/src/pkg/account"
+    "github.com/alcionai/corso/src/pkg/count"
     "github.com/alcionai/corso/src/pkg/fault"
     "github.com/alcionai/corso/src/pkg/logger"
     "github.com/alcionai/corso/src/pkg/path"

@@ -24,8 +26,9 @@ func ProduceBackupCollections(
     ctx context.Context,
     bpc inject.BackupProducerConfig,
     ac api.Client,
-    tenant string,
+    creds account.M365Config,
     su support.StatusUpdater,
+    counter *count.Bus,
     errs *fault.Bus,
 ) ([]data.BackupCollection, *prefixmatcher.StringSetMatcher, bool, error) {
     odb, err := bpc.Selector.ToOneDriveBackup()

@@ -35,6 +38,7 @@ func ProduceBackupCollections(

     var (
         el          = errs.Local()
+        tenantID    = creds.AzureTenantID
         categories  = map[path.CategoryType]struct{}{}
         collections = []data.BackupCollection{}
         ssmb        = prefixmatcher.NewStringSetBuilder()

@@ -52,10 +56,11 @@ func ProduceBackupCollections(

     nc := drive.NewCollections(
         drive.NewUserDriveBackupHandler(ac.Drives(), bpc.ProtectedResource.ID(), scope),
-        tenant,
+        tenantID,
         bpc.ProtectedResource,
         su,
-        bpc.Options)
+        bpc.Options,
+        counter)

     pcfg := observe.ProgressCfg{
         Indent: 1,

@@ -75,7 +80,7 @@ func ProduceBackupCollections(
         collections = append(collections, odcs...)
     }

-    mcs, err := migrationCollections(bpc, tenant, su)
+    mcs, err := migrationCollections(bpc, tenantID, su, counter)
     if err != nil {
         return nil, nil, false, err
     }

@@ -86,11 +91,12 @@ func ProduceBackupCollections(
     baseCols, err := graph.BaseCollections(
         ctx,
         collections,
-        tenant,
+        tenantID,
         bpc.ProtectedResource.ID(),
         path.OneDriveService,
         categories,
         su,
+        counter,
         errs)
     if err != nil {
         return nil, nil, false, err

@@ -99,6 +105,8 @@ func ProduceBackupCollections(
         collections = append(collections, baseCols...)
     }

+    logger.Ctx(ctx).Infow("produced collections", "stats", counter.Values())
+
     return collections, ssmb.ToReader(), canUsePreviousBackup, el.Failure()
 }

@@ -107,6 +115,7 @@ func migrationCollections(
     bpc inject.BackupProducerConfig,
     tenant string,
     su support.StatusUpdater,
+    counter *count.Bus,
 ) ([]data.BackupCollection, error) {
     // assume a version < 0 implies no prior backup, thus nothing to migrate.
     if version.IsNoBackup(bpc.LastBackupVersion) {

@@ -117,6 +126,8 @@ func migrationCollections(
         return nil, nil
     }

+    counter.Inc(count.RequiresUserPnToIDMigration)
+
     // unlike exchange, which enumerates all folders on every
     // backup, onedrive needs to force the owner PN -> ID migration
     mc, err := path.BuildPrefix(

@@ -137,7 +148,7 @@ func migrationCollections(
         return nil, clues.Wrap(err, "creating user name migration path")
     }

-    mgn, err := graph.NewPrefixCollection(mpc, mc, su)
+    mgn, err := graph.NewPrefixCollection(mpc, mc, su, counter)
     if err != nil {
         return nil, clues.Wrap(err, "creating migration collection")
     }
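Both the groups and onedrive producers above now finish by logging the bus contents via `logger.Ctx(ctx).Infow("produced collections", "stats", counter.Values())`. A minimal sketch of what that stats payload looks like, assuming only the `count` package semantics shown in this commit (the key names are real; the tallied numbers are made up):

```go
package main

import (
	"fmt"

	"github.com/alcionai/corso/src/pkg/count"
)

func main() {
	counter := count.New()

	// tallies a producer might record while enumerating a drive
	counter.Inc(count.Drives)
	counter.Add(count.Files, 42)
	counter.Add(count.Folders, 7)

	// Values() flattens the bus into a map[string]int64; a map of this
	// shape is what the "produced collections" log line attaches as "stats".
	fmt.Println(counter.Values()) // map[drives:1 files:42 folders:7]
}
```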
@@ -13,6 +13,7 @@ import (
     "github.com/alcionai/corso/src/internal/tester"
     "github.com/alcionai/corso/src/internal/version"
     "github.com/alcionai/corso/src/pkg/control"
+    "github.com/alcionai/corso/src/pkg/count"
     "github.com/alcionai/corso/src/pkg/path"
     "github.com/alcionai/corso/src/pkg/selectors"
 )

@@ -92,7 +93,7 @@ func (suite *BackupUnitSuite) TestMigrationCollections() {
     ProtectedResource: u,
 }

-mc, err := migrationCollections(bpc, "t", nil)
+mc, err := migrationCollections(bpc, "t", nil, count.New())
 require.NoError(t, err, clues.ToCore(err))

 if test.expectLen == 0 {
@@ -84,6 +84,7 @@ func ProduceBackupCollections(
     creds.AzureTenantID,
     ssmb,
     su,
+    counter,
     errs)
 if err != nil {
     el.AddRecoverable(ctx, err)

@@ -124,6 +125,7 @@ func ProduceBackupCollections(
     path.SharePointService,
     categories,
     su,
+    counter,
     errs)
 if err != nil {
     return nil, nil, false, err
@@ -15,6 +15,7 @@ import (
     "github.com/alcionai/corso/src/internal/m365/service/onedrive/mock"
     "github.com/alcionai/corso/src/internal/tester"
     "github.com/alcionai/corso/src/pkg/control"
+    "github.com/alcionai/corso/src/pkg/count"
     "github.com/alcionai/corso/src/pkg/fault"
     "github.com/alcionai/corso/src/pkg/path"
     "github.com/alcionai/corso/src/pkg/selectors"

@@ -119,7 +120,8 @@ func (suite *LibrariesBackupUnitSuite) TestUpdateCollections() {
     tenantID,
     idname.NewProvider(siteID, siteID),
     nil,
-    control.DefaultOptions())
+    control.DefaultOptions(),
+    count.New())

 c.CollectionMap = collMap

@@ -131,6 +133,7 @@ func (suite *LibrariesBackupUnitSuite) TestUpdateCollections() {
     excluded,
     topLevelPackages,
     "notempty",
+    count.New(),
     fault.New(true))

 test.expect(t, err, clues.ToCore(err))
@@ -197,6 +197,8 @@ func (op *BackupOperation) Run(ctx context.Context) (err error) {
         }
     }()

+    ctx = clues.AddLabelCounter(ctx, op.Counter.PlainAdder())
+
     ctx, end := diagnostics.Span(ctx, "operations:backup:run")
     defer end()

@@ -543,7 +545,7 @@ func produceBackupDataCollections(
         Selector: sel,
     }

-    return bp.ProduceBackupCollections(ctx, bpc, counter, errs)
+    return bp.ProduceBackupCollections(ctx, bpc, counter.Local(), errs)
 }

 // ---------------------------------------------------------------------------
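The switch from `counter` to `counter.Local()` in `produceBackupDataCollections` hands each backup producer a child bus. A minimal sketch of the parent/child rollup, assuming the `Local()` semantics exercised by the count tests later in this commit (children tally independently, and every `Add` also propagates to the parent):

```go
package main

import (
	"fmt"

	"github.com/alcionai/corso/src/pkg/count"
)

func main() {
	// op.Counter plays the role of the parent here.
	parent := count.New()

	// each producer gets its own child bus, as in produceBackupDataCollections.
	producerA := parent.Local()
	producerB := parent.Local()

	producerA.Add(count.Collections, 3)
	producerB.Add(count.Collections, 5)

	fmt.Println(producerA.Get(count.Collections))   // 3: the producer's own tally
	fmt.Println(producerA.Total(count.Collections)) // 8: the rolled-up, operation-wide tally
}
```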
@@ -1915,7 +1915,8 @@ func (suite *AssistBackupIntegrationSuite) TestBackupTypesForFailureModes() {
 mc, err := graph.MakeMetadataCollection(
     pathPrefix,
     makeMetadataCollectionEntries("url/1", driveID, folderID, tmp),
-    func(*support.ControllerOperationStatus) {})
+    func(*support.ControllerOperationStatus) {},
+    count.New())
 require.NoError(t, err, clues.ToCore(err))

 cs = append(cs, mc)

@@ -2233,7 +2234,8 @@ func (suite *AssistBackupIntegrationSuite) TestExtensionsIncrementals() {
 mc, err := graph.MakeMetadataCollection(
     pathPrefix,
     makeMetadataCollectionEntries("url/1", driveID, folderID, tmp),
-    func(*support.ControllerOperationStatus) {})
+    func(*support.ControllerOperationStatus) {},
+    count.New())
 require.NoError(t, err, clues.ToCore(err))

 cs = append(cs, mc)
@@ -167,7 +167,8 @@ func (suite *MaintenanceOpNightlySuite) TestRepoMaintenance_GarbageCollection()
 mc, err := graph.MakeMetadataCollection(
     prefixPath,
     makeMetadataCollectionEntries("url/1", driveID, folderID, tmp),
-    func(*support.ControllerOperationStatus) {})
+    func(*support.ControllerOperationStatus) {},
+    count.New())
 require.NoError(t, err, clues.ToCore(err))

 cs = append(cs, mc)
@@ -128,6 +128,8 @@ func (op *RestoreOperation) Run(ctx context.Context) (restoreDetails *details.De
     // Setup
     // -----

+    ctx = clues.AddLabelCounter(ctx, op.Counter.PlainAdder())
+
     ctx, end := diagnostics.Span(ctx, "operations:restore:run")
     defer end()

@@ -27,24 +27,24 @@ func (b *Bus) Local() *Bus {
     return bus
 }

-func (b *Bus) getCounter(k key) *xsync.Counter {
+func (b *Bus) getCounter(k Key) *xsync.Counter {
     xc, _ := b.stats.LoadOrStore(string(k), xsync.NewCounter())
     return xc
 }

 // Inc increases the count by 1.
-func (b *Bus) Inc(k key) {
+func (b *Bus) Inc(k Key) int64 {
     if b == nil {
-        return
+        return -1
     }

-    b.Add(k, 1)
+    return b.Add(k, 1)
 }

-// Inc increases the count by n.
-func (b *Bus) Add(k key, n int64) {
+// Add increases the count by n.
+func (b *Bus) Add(k Key, n int64) int64 {
     if b == nil {
-        return
+        return -1
     }

     b.getCounter(k).Add(n)

@@ -52,18 +52,12 @@ func (b *Bus) Add(k key, n int64) {
     if b.parent != nil {
         b.parent.Add(k, n)
     }
-}

-// AdderFor returns a func that adds any value of i
-// to the bus using the given key.
-func (b *Bus) AdderFor(k key) func(i int64) {
-    return func(i int64) {
-        b.Add(k, i)
-    }
+    return b.Get(k)
 }

 // Get returns the local count.
-func (b *Bus) Get(k key) int64 {
+func (b *Bus) Get(k Key) int64 {
     if b == nil {
         return -1
     }

@@ -72,7 +66,7 @@ func (b *Bus) Get(k key) int64 {
 }

 // Total returns the global count.
-func (b *Bus) Total(k key) int64 {
+func (b *Bus) Total(k Key) int64 {
     if b == nil {
         return -1
     }

@@ -114,3 +108,33 @@ func (b *Bus) TotalValues() map[string]int64 {

     return b.Values()
 }
+
+// ---------------------------------------------------------------------------
+// compliance with callbacks and external packages
+// ---------------------------------------------------------------------------
+
+// AdderFor returns a func that adds any value of i
+// to the bus using the given key.
+func (b *Bus) AdderFor(k Key) func(i int64) {
+    return func(i int64) {
+        b.Add(k, i)
+    }
+}
+
+type plainAdder struct {
+    bus *Bus
+}
+
+func (pa plainAdder) Add(k string, n int64) {
+    if pa.bus == nil {
+        return
+    }
+
+    pa.bus.Add(Key(k), n)
+}
+
+// PlainAdder provides support to external packages that could take in a count.Bus
+// but don't recognize the `Key` type, and would prefer a string type key.
+func (b *Bus) PlainAdder() *plainAdder {
+    return &plainAdder{b}
+}
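With `Inc` and `Add` now returning the updated local tally (`-1` on a nil bus), call sites can read the running count without a follow-up `Get`, and `PlainAdder` bridges the bus to callers that only speak string keys. A short usage sketch of both, grounded in the signatures above:

```go
package main

import (
	"fmt"

	"github.com/alcionai/corso/src/pkg/count"
)

func main() {
	counter := count.New()

	// Inc and Add report the post-update local tally.
	fmt.Println(counter.Inc(count.PagerResets))    // 1
	fmt.Println(counter.Add(count.PagerResets, 2)) // 3

	// nil buses stay safe no-ops, but now signal themselves with -1.
	var nilBus *count.Bus
	fmt.Println(nilBus.Inc(count.PagerResets)) // -1

	// PlainAdder accepts plain string keys and funnels them into the bus,
	// converting each one to a count.Key internally.
	counter.PlainAdder().Add("some-external-label", 1)
	fmt.Println(counter.Get(count.Key("some-external-label"))) // 1
}
```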
@@ -17,7 +17,7 @@ func TestCountUnitSuite(t *testing.T) {
     suite.Run(t, &CountUnitSuite{Suite: tester.NewUnitSuite(t)})
 }

-const testKey = key("just-for-testing")
+const testKey = Key("just-for-testing")

 func (suite *CountUnitSuite) TestBus_Inc() {
     newParent := func() *Bus {

@@ -71,6 +71,46 @@ func (suite *CountUnitSuite) TestBus_Inc() {
     }
 }

+func (suite *CountUnitSuite) TestBus_Inc_result() {
+    newParent := func() *Bus {
+        parent := New()
+        parent.Inc(testKey)
+
+        return parent
+    }
+
+    table := []struct {
+        name        string
+        bus         *Bus
+        expect      int64
+        expectTotal int64
+    }{
+        {
+            name:        "nil",
+            bus:         nil,
+            expect:      -1,
+            expectTotal: -1,
+        },
+        {
+            name:        "one",
+            bus:         newParent().Local(),
+            expect:      1,
+            expectTotal: 2,
+        },
+    }
+    for _, test := range table {
+        suite.Run(test.name, func() {
+            t := suite.T()
+
+            result := test.bus.Inc(testKey)
+            assert.Equal(t, test.expect, result)
+
+            resultTotal := test.bus.Total(testKey)
+            assert.Equal(t, test.expectTotal, resultTotal)
+        })
+    }
+}
+
 func (suite *CountUnitSuite) TestBus_Add() {
     newParent := func() *Bus {
         parent := New()

@@ -123,6 +163,47 @@ func (suite *CountUnitSuite) TestBus_Add() {
     }
 }

+func (suite *CountUnitSuite) TestBus_Add_result() {
+    newParent := func() *Bus {
+        parent := New()
+        parent.Add(testKey, 2)
+
+        return parent
+    }
+
+    table := []struct {
+        name        string
+        skip        bool
+        bus         *Bus
+        expect      int64
+        expectTotal int64
+    }{
+        {
+            name:        "nil",
+            bus:         nil,
+            expect:      -1,
+            expectTotal: -1,
+        },
+        {
+            name:        "some",
+            bus:         newParent().Local(),
+            expect:      4,
+            expectTotal: 6,
+        },
+    }
+    for _, test := range table {
+        suite.Run(test.name, func() {
+            t := suite.T()
+
+            result := test.bus.Add(testKey, 4)
+            assert.Equal(t, test.expect, result)
+
+            resultTotal := test.bus.Total(testKey)
+            assert.Equal(t, test.expectTotal, resultTotal)
+        })
+    }
+}
+
 func (suite *CountUnitSuite) TestBus_Values() {
     table := []struct {
         name string
@@ -1,42 +1,99 @@
 package count

-type key string
+type Key string

 const (
     // count of bucket-tokens consumed by api calls.
-    APICallTokensConsumed key = "api-call-tokens-consumed"
+    APICallTokensConsumed Key = "api-call-tokens-consumed"
     // count of api calls that resulted in failure due to throttling.
-    ThrottledAPICalls key = "throttled-api-calls"
+    ThrottledAPICalls Key = "throttled-api-calls"
 )

 // Tracked during backup
 const (
     // amounts reported by kopia
-    PersistedCachedFiles key = "persisted-cached-files"
-    PersistedDirectories key = "persisted-directories"
-    PersistedFiles key = "persisted-files"
-    PersistedHashedBytes key = "persisted-hashed-bytes"
-    PersistedNonCachedFiles key = "persisted-non-cached-files"
-    PersistedNonMetaFiles key = "persisted-non-meta-files"
-    PersistedNonMetaUploadedBytes key = "persisted-non-meta-uploaded-bytes"
-    PersistedUploadedBytes key = "persisted-uploaded-bytes"
-    PersistenceErrors key = "persistence-errors"
-    PersistenceExpectedErrors key = "persistence-expected-errors"
-    PersistenceIgnoredErrors key = "persistence-ignored-errors"
+    PersistedCachedFiles          Key = "persisted-cached-files"
+    PersistedDirectories          Key = "persisted-directories"
+    PersistedFiles                Key = "persisted-files"
+    PersistedHashedBytes          Key = "persisted-hashed-bytes"
+    PersistedNonCachedFiles       Key = "persisted-non-cached-files"
+    PersistedNonMetaFiles         Key = "persisted-non-meta-files"
+    PersistedNonMetaUploadedBytes Key = "persisted-non-meta-uploaded-bytes"
+    PersistedUploadedBytes        Key = "persisted-uploaded-bytes"
+    PersistenceErrors             Key = "persistence-errors"
+    PersistenceExpectedErrors     Key = "persistence-expected-errors"
+    PersistenceIgnoredErrors      Key = "persistence-ignored-errors"

     // amounts reported by data providers
-    ProviderItemsRead key = "provider-items-read"
+    Channels                      Key = "channels"
+    CollectionMoved               Key = "collection-moved"
+    CollectionNew                 Key = "collection-state-new"
+    CollectionNotMoved            Key = "collection-state-not-moved"
+    CollectionTombstoned          Key = "collection-state-tombstoned"
+    Collections                   Key = "collections"
+    DeleteFolderMarker            Key = "delete-folder-marker"
+    DeleteItemMarker              Key = "delete-item-marker"
+    Drives                        Key = "drives"
+    DriveTombstones               Key = "drive-tombstones"
+    Files                         Key = "files"
+    Folders                       Key = "folders"
+    ItemDownloadURLRefetch        Key = "item-download-url-refetch"
+    ItemsAdded                    Key = "items-added"
+    ItemsRemoved                  Key = "items-removed"
+    LazyDeletedInFlight           Key = "lazy-deleted-in-flight"
+    Malware                       Key = "malware"
+    MetadataItems                 Key = "metadata-items"
+    MetadataBytes                 Key = "metadata-bytes"
+    MissingDelta                  Key = "missing-delta-token"
+    NewDeltas                     Key = "new-delta-tokens"
+    NewPrevPaths                  Key = "new-previous-paths"
+    NoDeltaQueries                Key = "cannot-make-delta-queries"
+    Packages                      Key = "packages"
+    PagerResets                   Key = "pager-resets"
+    PagesEnumerated               Key = "pages-enumerated"
+    PrevDeltas                    Key = "previous-deltas"
+    PrevPaths                     Key = "previous-paths"
+    PreviousPathMetadataCollision Key = "previous-path-metadata-collision"
+    Sites                         Key = "sites"
+    SkippedContainers             Key = "skipped-containers"
+    StreamBytesAdded              Key = "stream-bytes-added"
+    StreamDirsAdded               Key = "stream-dirs-added"
+    StreamDirsFound               Key = "stream-dirs-found"
+    StreamItemsAdded              Key = "stream-items-added"
+    StreamItemsDeletedInFlight    Key = "stream-items-deleted-in-flight"
+    StreamItemsErrored            Key = "stream-items-errored"
+    StreamItemsFound              Key = "stream-items-found"
+    StreamItemsRemoved            Key = "stream-items-removed"
+    URLCacheMiss                  Key = "url-cache-miss"
+    URLCacheRefresh               Key = "url-cache-refresh"
+
+    // miscellaneous
+    RequiresUserPnToIDMigration Key = "requires-user-pn-to-id-migration"
+)
+
+// Counted using clues error labels
+const (
+    BadCollPath                 = "bad_collection_path"
+    BadPathPrefix               = "bad_path_prefix_creation"
+    BadPrevPath                 = "unparsable_prev_path"
+    CollectionTombstoneConflict = "collection_tombstone_conflicts_with_live_collection"
+    ItemBeforeParent            = "item_before_parent"
+    MissingParent               = "missing_parent"
+    UnknownItemType             = "unknown_item_type"
 )

 // Tracked during restore
 const (
     // count of times that items had collisions during restore,
     // and that collision was solved by replacing the item.
-    CollisionReplace key = "collision-replace"
+    CollisionReplace Key = "collision-replace"
     // count of times that items had collisions during restore,
     // and that collision was solved by skipping the item.
-    CollisionSkip key = "collision-skip"
+    CollisionSkip Key = "collision-skip"
     // NewItemCreated should be used for non-skip, non-replace,
     // non-meta item creation counting. IE: use it specifically
     // for counting new items (no collision) or copied items.
-    NewItemCreated key = "new-item-created"
+    NewItemCreated Key = "new-item-created"
 )
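The `// Counted using clues error labels` block is deliberately left as untyped string constants: those values travel as `clues` error labels (the `.Label(count.BadPathPrefix)` calls above) rather than as typed bus keys. A hedged sketch of how the pieces appear to fit together, assuming `clues.AddLabelCounter` feeds labels attached under that context into the registered adder (the wiring added to `BackupOperation.Run` and `RestoreOperation.Run` in this commit):

```go
package main

import (
	"context"
	"errors"
	"fmt"

	"github.com/alcionai/clues"

	"github.com/alcionai/corso/src/pkg/count"
)

func main() {
	counter := count.New()

	// register the bus as the label counter, as the operations do.
	ctx := clues.AddLabelCounter(context.Background(), counter.PlainAdder())

	// labeling an error built from this context should bump the matching
	// tally (assumed behavior of the clues label counter).
	err := clues.WrapWC(ctx, errors.New("boom"), "making path").
		Label(count.BadPathPrefix)
	fmt.Println(err)

	// the label lands in the bus under its plain string key.
	fmt.Println(counter.Get(count.Key(count.BadPathPrefix)))
}
```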
@@ -7,6 +7,7 @@ import (

     "github.com/alcionai/corso/src/internal/data"
     "github.com/alcionai/corso/src/internal/m365/support"
+    "github.com/alcionai/corso/src/pkg/count"
     "github.com/alcionai/corso/src/pkg/fault"
     "github.com/alcionai/corso/src/pkg/path"
 )

@@ -60,6 +61,7 @@ func BaseCollections(
     service path.ServiceType,
     categories map[path.CategoryType]struct{},
     su support.StatusUpdater,
+    counter *count.Bus,
     errs *fault.Bus,
 ) ([]data.BackupCollection, error) {
     var (

@@ -82,7 +84,7 @@ func BaseCollections(
     full, err := path.BuildPrefix(tenant, rOwner, service, cat)
     if err != nil {
         // Shouldn't happen.
-        err = clues.WrapWC(ictx, err, "making path")
+        err = clues.WrapWC(ictx, err, "making path").Label(count.BadPathPrefix)
         el.AddRecoverable(ictx, err)
         lastErr = err

@@ -95,7 +97,7 @@ func BaseCollections(
         prev:  full,
         full:  full,
         su:    su,
-        state: data.StateOf(full, full),
+        state: data.StateOf(full, full, counter),
     })
 }
 }

@@ -111,6 +113,7 @@ func BaseCollections(
 func NewPrefixCollection(
     prev, full path.Path,
     su support.StatusUpdater,
+    counter *count.Bus,
 ) (*prefixCollection, error) {
     if prev != nil {
         if len(prev.Item()) > 0 {

@@ -136,7 +139,7 @@ func NewPrefixCollection(
     prev:  prev,
     full:  full,
     su:    su,
-    state: data.StateOf(prev, full),
+    state: data.StateOf(prev, full, counter),
 }

 if pc.state == data.DeletedState {
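`data.StateOf` now receives the bus at both call sites above, which lines up with the `CollectionNew`, `CollectionMoved`, `CollectionNotMoved`, and `CollectionTombstoned` keys added to `keys.go`. The sketch below is a hypothetical re-derivation, not the repo's actual `StateOf` body; it only illustrates which keys those call sites could plausibly bump for each prev/current pairing (plain strings stand in for `path.Path`):

```go
package main

import (
	"fmt"

	"github.com/alcionai/corso/src/pkg/count"
)

// stateOfSketch is hypothetical: it mimics a prev/current path comparison
// and tallies one collection-state key per call, using this commit's keys.
func stateOfSketch(prev, curr string, counter *count.Bus) string {
	switch {
	case len(prev) == 0 && len(curr) > 0:
		counter.Inc(count.CollectionNew)
		return "new"
	case len(curr) == 0:
		counter.Inc(count.CollectionTombstoned)
		return "tombstoned"
	case prev != curr:
		counter.Inc(count.CollectionMoved)
		return "moved"
	default:
		counter.Inc(count.CollectionNotMoved)
		return "not moved"
	}
}

func main() {
	counter := count.New()

	fmt.Println(stateOfSketch("", "email/inbox", counter))            // new
	fmt.Println(stateOfSketch("email/inbox", "email/inbox", counter)) // not moved
	fmt.Println(counter.Values())
}
```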
@@ -8,6 +8,7 @@ import (
     "github.com/stretchr/testify/suite"

     "github.com/alcionai/corso/src/internal/tester"
+    "github.com/alcionai/corso/src/pkg/count"
     "github.com/alcionai/corso/src/pkg/path"
 )

@@ -93,7 +94,7 @@ func (suite *CollectionsUnitSuite) TestNewPrefixCollection() {
     }
     for _, test := range table {
         suite.Run(test.name, func() {
-            _, err := NewPrefixCollection(test.prev, test.full, nil)
+            _, err := NewPrefixCollection(test.prev, test.full, nil, count.New())
             test.expectErr(suite.T(), err, clues.ToCore(err))
         })
     }
@@ -11,6 +11,7 @@ import (

     "github.com/alcionai/corso/src/internal/data"
     "github.com/alcionai/corso/src/internal/m365/support"
+    "github.com/alcionai/corso/src/pkg/count"
     "github.com/alcionai/corso/src/pkg/fault"
     "github.com/alcionai/corso/src/pkg/path"
 )

@@ -27,6 +28,7 @@ type MetadataCollection struct {
     fullPath      path.Path
     items         []metadataItem
     statusUpdater support.StatusUpdater
+    counter       *count.Bus
 }

 // MetadataCollectionEntry describes a file that should get added to a metadata

@@ -78,6 +80,7 @@ func MakeMetadataCollection(
     pathPrefix path.Path,
     metadata []MetadataCollectionEntry,
     statusUpdater support.StatusUpdater,
+    counter *count.Bus,
 ) (data.BackupCollection, error) {
     if len(metadata) == 0 {
         return nil, nil

@@ -94,7 +97,7 @@ func MakeMetadataCollection(
     items = append(items, item)
 }

-coll := NewMetadataCollection(pathPrefix, items, statusUpdater)
+coll := NewMetadataCollection(pathPrefix, items, statusUpdater, counter)

 return coll, nil
 }

@@ -103,11 +106,13 @@ func NewMetadataCollection(
     p path.Path,
     items []metadataItem,
     statusUpdater support.StatusUpdater,
+    counter *count.Bus,
 ) *MetadataCollection {
     return &MetadataCollection{
         fullPath:      p,
         items:         items,
         statusUpdater: statusUpdater,
+        counter:       counter,
     }
 }

@@ -155,11 +160,13 @@ func (md MetadataCollection) Items(
     },
     md.fullPath.Folder(false))

+    md.counter.Add(count.MetadataItems, int64(len(md.items)))
     md.statusUpdater(status)
 }()
 defer close(res)

 for _, item := range md.items {
+    md.counter.Add(count.MetadataBytes, item.size)
     totalBytes += item.size
     res <- item
 }
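`MetadataCollection.Items` now folds two tallies into the bus as it streams: one `MetadataItems` total for the batch, plus each item's serialized size under `MetadataBytes`. A self-contained sketch of that accounting pattern (the sizes are invented, and `itemsSketch` is a stand-in for illustration, not the collection's real streaming loop):

```go
package main

import (
	"fmt"

	"github.com/alcionai/corso/src/pkg/count"
)

// itemsSketch mirrors the tallying added to MetadataCollection.Items:
// the item count lands in a single Add, and each item's size is folded
// into MetadataBytes as it streams out.
func itemsSketch(counter *count.Bus, sizes []int64) {
	counter.Add(count.MetadataItems, int64(len(sizes)))

	for _, size := range sizes {
		counter.Add(count.MetadataBytes, size)
	}
}

func main() {
	counter := count.New()
	itemsSketch(counter, []int64{128, 512, 64})

	fmt.Println(counter.Get(count.MetadataItems)) // 3
	fmt.Println(counter.Get(count.MetadataBytes)) // 704
}
```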
@@ -17,6 +17,7 @@ import (
     "github.com/alcionai/corso/src/internal/data"
     "github.com/alcionai/corso/src/internal/m365/support"
     "github.com/alcionai/corso/src/internal/tester"
+    "github.com/alcionai/corso/src/pkg/count"
     "github.com/alcionai/corso/src/pkg/fault"
     "github.com/alcionai/corso/src/pkg/path"
 )

@@ -41,7 +42,7 @@ func (suite *MetadataCollectionUnitSuite) TestFullPath() {
     "foo")
 require.NoError(t, err, clues.ToCore(err))

-c := NewMetadataCollection(p, nil, nil)
+c := NewMetadataCollection(p, nil, nil, count.New())

 assert.Equal(t, p.String(), c.FullPath().String())
 }

@@ -99,7 +100,8 @@ func (suite *MetadataCollectionUnitSuite) TestItems() {
     func(c *support.ControllerOperationStatus) {
         assert.Equal(t, len(itemNames), c.Metrics.Objects)
         assert.Equal(t, len(itemNames), c.Metrics.Successes)
-    })
+    },
+    count.New())

 gotData := [][]byte{}
 gotNames := []string{}

@@ -198,7 +200,8 @@ func (suite *MetadataCollectionUnitSuite) TestMakeMetadataCollection() {
 col, err := MakeMetadataCollection(
     pathPrefix,
     []MetadataCollectionEntry{test.metadata},
-    func(*support.ControllerOperationStatus) {})
+    func(*support.ControllerOperationStatus) {},
+    count.New())

 test.errCheck(t, err, clues.ToCore(err))
 if err != nil {