remove errors from connector status (#2586)
## Description

Now that fault is in place, we can remove the error tracking functionality of graph status, and let that focus purely on metrics.

## Does this PR need a docs update or release note?

- [x] ⛔ No

## Type of change

- [x] 🧹 Tech Debt/Cleanup

## Issue(s)

* #1970

## Test Plan

- [x] ⚡ Unit test
- [x] 💚 E2E
This commit is contained in:
parent ad75540b03, commit 29c6ac4874
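The hunks below come from a side-by-side diff view and are dense, so here is a rough sketch of the reshaped `support` status API as implied by this commit. It is reassembled from the hunks rather than copied from the file, so ordering, spacing, and doc comments may differ from the repo.

```go
// Sketch only: reassembled from the hunks in this commit, not copied verbatim.
package support

type Operation int

// CollectionMetrics is now the single carrier of per-collection counts;
// TotalBytes was renamed to Bytes.
type CollectionMetrics struct {
	Objects, Successes int
	Bytes              int64
}

// CombineMetrics replaces the pointer-mutating (*CollectionMetrics).Combine.
func CombineMetrics(a, b CollectionMetrics) CollectionMetrics {
	return CollectionMetrics{
		Objects:   a.Objects + b.Objects,
		Successes: a.Successes + b.Successes,
		Bytes:     a.Bytes + b.Bytes,
	}
}

// ConnectorOperationStatus drops all error tracking (Err, ErrorCount,
// incomplete, incompleteReason); fault owns errors now, and status reports
// only metrics.
type ConnectorOperationStatus struct {
	Folders int
	Metrics CollectionMetrics

	details string
	op      Operation
}
```

CreateStatus correspondingly drops its `err error` parameter (now `ctx, op, folders, metrics, details`), and MergeStatus simply sums `Folders`, combines `Metrics`, and keeps the first non-unknown operation.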
@@ -130,7 +130,7 @@ func (suite *ConnectorDataCollectionIntegrationSuite) TestExchangeDataCollection
 }

 status := connector.AwaitStatus()
-assert.NotZero(t, status.Successful)
+assert.NotZero(t, status.Metrics.Successes)
 t.Log(status.String())
 })
 }
@@ -286,7 +286,7 @@ func (suite *ConnectorDataCollectionIntegrationSuite) TestSharePointDataCollecti
 }

 status := connector.AwaitStatus()
-assert.NotZero(t, status.Successful)
+assert.NotZero(t, status.Metrics.Successes)
 t.Log(status.String())
 })
 }
@@ -202,7 +202,6 @@ func (suite *DataCollectionsUnitSuite) TestParseMetadataCollections() {
 func newStatusUpdater(t *testing.T, wg *sync.WaitGroup) func(status *support.ConnectorOperationStatus) {
 updater := func(status *support.ConnectorOperationStatus) {
 defer wg.Done()
-assert.Zero(t, status.ErrorCount)
 }

 return updater
@@ -316,11 +316,10 @@ func (col *Collection) finishPopulation(
 support.Backup,
 1,
 support.CollectionMetrics{
 Objects: attempted,
 Successes: success,
-TotalBytes: totalBytes,
+Bytes: totalBytes,
 },
-err,
 col.fullPath.Folder(false))

 logger.Ctx(ctx).Debugw("done streaming items", "status", status.String())
@@ -343,7 +343,7 @@ func RestoreExchangeDataCollections(

 temp, canceled := restoreCollection(ctx, gs, dc, containerID, policy, deets, errs)

-metrics.Combine(temp)
+metrics = support.CombineMetrics(metrics, temp)

 if canceled {
 break
@@ -355,7 +355,6 @@ func RestoreExchangeDataCollections(
 support.Restore,
 len(dcs),
 metrics,
-el.Failure(),
 dest.ContainerName)

 return status, el.Failure()
@@ -436,7 +435,7 @@ func restoreCollection(
 continue
 }

-metrics.TotalBytes += int64(len(byteArray))
+metrics.Bytes += int64(len(byteArray))
 metrics.Successes++

 itemPath, err := dc.FullPath().Append(itemData.UUID(), true)
@@ -150,11 +150,10 @@ func (md MetadataCollection) Items(
 support.Backup,
 1,
 support.CollectionMetrics{
 Objects: len(md.items),
 Successes: len(md.items),
-TotalBytes: totalBytes,
+Bytes: totalBytes,
 },
-nil,
 md.fullPath.Folder(false),
 )
@@ -84,8 +84,8 @@ func (suite *MetadataCollectionUnitSuite) TestItems() {
 p,
 items,
 func(c *support.ConnectorOperationStatus) {
-assert.Equal(t, len(itemNames), c.ObjectCount)
-assert.Equal(t, len(itemNames), c.Successful)
+assert.Equal(t, len(itemNames), c.Metrics.Objects)
+assert.Equal(t, len(itemNames), c.Metrics.Successes)
 },
 )
@@ -4,7 +4,6 @@ import (
 "sync"
 "testing"

-"github.com/pkg/errors"
 "github.com/stretchr/testify/assert"
 "github.com/stretchr/testify/require"
 "github.com/stretchr/testify/suite"
@@ -92,17 +91,11 @@ func statusTestTask(gc *GraphConnector, objects, success, folder int) {
 ctx,
 support.Restore, folder,
 support.CollectionMetrics{
 Objects: objects,
 Successes: success,
-TotalBytes: 0,
+Bytes: 0,
 },
-support.WrapAndAppend(
-"tres",
-errors.New("three"),
-support.WrapAndAppend("arc376", errors.New("one"), errors.New("two")),
-),
-"statusTestTask",
-)
+"statusTestTask")
 gc.UpdateStatus(status)
 }

@@ -123,11 +116,11 @@ func (suite *DisconnectedGraphConnectorSuite) TestGraphConnector_Status() {

 assert.NotEmpty(t, gc.PrintableStatus())
 // Expect 8 objects
-assert.Equal(t, 8, gc.Status().ObjectCount)
+assert.Equal(t, 8, gc.Status().Metrics.Objects)
 // Expect 2 success
-assert.Equal(t, 2, gc.Status().Successful)
+assert.Equal(t, 2, gc.Status().Metrics.Successes)
 // Expect 2 folders
-assert.Equal(t, 2, gc.Status().FolderCount)
+assert.Equal(t, 2, gc.Status().Folders)
 }

 func (suite *DisconnectedGraphConnectorSuite) TestVerifyBackupInputs_allServices() {
@@ -252,9 +252,9 @@ func (suite *GraphConnectorIntegrationSuite) TestRestoreFailsBadService() {
 assert.NotNil(t, deets)

 status := suite.connector.AwaitStatus()
-assert.Equal(t, 0, status.ObjectCount)
-assert.Equal(t, 0, status.FolderCount)
-assert.Equal(t, 0, status.Successful)
+assert.Equal(t, 0, status.Metrics.Objects)
+assert.Equal(t, 0, status.Folders)
+assert.Equal(t, 0, status.Metrics.Successes)
 }

 func (suite *GraphConnectorIntegrationSuite) TestEmptyCollections() {
@@ -331,9 +331,9 @@ func (suite *GraphConnectorIntegrationSuite) TestEmptyCollections() {
 assert.NotNil(t, deets)

 stats := suite.connector.AwaitStatus()
-assert.Zero(t, stats.ObjectCount)
-assert.Zero(t, stats.FolderCount)
-assert.Zero(t, stats.Successful)
+assert.Zero(t, stats.Metrics.Objects)
+assert.Zero(t, stats.Folders)
+assert.Zero(t, stats.Metrics.Successes)
 })
 }
 }
@@ -435,10 +435,8 @@ func runRestore(
 status := restoreGC.AwaitStatus()
 runTime := time.Since(start)

-assert.NoError(t, status.Err, "restored status.Err")
-assert.Zero(t, status.ErrorCount, "restored status.ErrorCount")
-assert.Equal(t, numRestoreItems, status.ObjectCount, "restored status.ObjectCount")
-assert.Equal(t, numRestoreItems, status.Successful, "restored status.Successful")
+assert.Equal(t, numRestoreItems, status.Metrics.Objects, "restored status.Metrics.Objects")
+assert.Equal(t, numRestoreItems, status.Metrics.Successes, "restored status.Metrics.Successes")
 assert.Len(
 t,
 deets.Entries,
@@ -504,12 +502,10 @@ func runBackupAndCompare(

 status := backupGC.AwaitStatus()

-assert.NoError(t, status.Err, "backup status.Err")
-assert.Zero(t, status.ErrorCount, "backup status.ErrorCount")
-assert.Equalf(t, totalItems+skipped, status.ObjectCount,
-"backup status.ObjectCount; wanted %d items + %d skipped", totalItems, skipped)
-assert.Equalf(t, totalItems+skipped, status.Successful,
-"backup status.Successful; wanted %d items + %d skipped", totalItems, skipped)
+assert.Equalf(t, totalItems+skipped, status.Metrics.Objects,
+"backup status.Metrics.Objects; wanted %d items + %d skipped", totalItems, skipped)
+assert.Equalf(t, totalItems+skipped, status.Metrics.Successes,
+"backup status.Metrics.Successes; wanted %d items + %d skipped", totalItems, skipped)
 }

 func runRestoreBackupTest(
@@ -975,8 +971,8 @@ func (suite *GraphConnectorIntegrationSuite) TestMultiFolderBackupDifferentNames

 status := restoreGC.AwaitStatus()
 // Always just 1 because it's just 1 collection.
-assert.Equal(t, totalItems, status.ObjectCount, "status.ObjectCount")
-assert.Equal(t, totalItems, status.Successful, "status.Successful")
+assert.Equal(t, totalItems, status.Metrics.Objects, "status.Metrics.Objects")
+assert.Equal(t, totalItems, status.Metrics.Successes, "status.Metrics.Successes")
 assert.Equal(
 t, totalItems, len(deets.Entries),
 "details entries contains same item count as total successful items restored")
@@ -1018,8 +1014,8 @@ func (suite *GraphConnectorIntegrationSuite) TestMultiFolderBackupDifferentNames
 true)

 status := backupGC.AwaitStatus()
-assert.Equal(t, allItems+skipped, status.ObjectCount, "status.ObjectCount")
-assert.Equal(t, allItems+skipped, status.Successful, "status.Successful")
+assert.Equal(t, allItems+skipped, status.Metrics.Objects, "status.Metrics.Objects")
+assert.Equal(t, allItems+skipped, status.Metrics.Successes, "status.Metrics.Successes")
 })
 }
 }
@@ -256,7 +256,7 @@ func (oc *Collection) populateItems(ctx context.Context, errs *fault.Bus) {
 // `details.OneDriveInfo`
 parentPathString, err := path.GetDriveFolderPath(oc.folderPath)
 if err != nil {
-oc.reportAsCompleted(ctx, 0, 0, 0, clues.Wrap(err, "getting drive path").WithClues(ctx))
+oc.reportAsCompleted(ctx, 0, 0, 0)
 return
 }

@@ -453,22 +453,22 @@ func (oc *Collection) populateItems(ctx context.Context, errs *fault.Bus) {

 wg.Wait()

-oc.reportAsCompleted(ctx, int(itemsFound), int(itemsRead), byteCount, el.Failure())
+oc.reportAsCompleted(ctx, int(itemsFound), int(itemsRead), byteCount)
 }

-func (oc *Collection) reportAsCompleted(ctx context.Context, itemsFound, itemsRead int, byteCount int64, err error) {
+func (oc *Collection) reportAsCompleted(ctx context.Context, itemsFound, itemsRead int, byteCount int64) {
 close(oc.data)

 status := support.CreateStatus(ctx, support.Backup,
 1, // num folders (always 1)
 support.CollectionMetrics{
-Objects: itemsFound, // items to read,
-Successes: itemsRead, // items read successfully,
-TotalBytes: byteCount, // Number of bytes read in the operation,
+Objects: itemsFound,
+Successes: itemsRead,
+Bytes: byteCount,
 },
-err,
-oc.folderPath.Folder(false), // Additional details
-)
+oc.folderPath.Folder(false))
 logger.Ctx(ctx).Debugw("done streaming items", "status", status.String())

 oc.statusUpdater(status)
 }
@@ -52,7 +52,7 @@ func (suite *CollectionUnitTestSuite) testStatusUpdater(
 statusToUpdate *support.ConnectorOperationStatus,
 ) support.StatusUpdater {
 return func(s *support.ConnectorOperationStatus) {
-suite.T().Logf("Update status %v, count %d, success %d", s, s.ObjectCount, s.Successful)
+suite.T().Logf("Update status %v, count %d, success %d", s, s.Metrics.Objects, s.Metrics.Successes)
 *statusToUpdate = *s

 wg.Done()
@@ -224,8 +224,8 @@ func (suite *CollectionUnitTestSuite) TestCollection() {
 }

 // Expect only 1 item
-require.Equal(t, 1, collStatus.ObjectCount)
-require.Equal(t, 1, collStatus.Successful)
+require.Equal(t, 1, collStatus.Metrics.Objects)
+require.Equal(t, 1, collStatus.Metrics.Successes)

 // Validate item info and data
 readItem := readItems[0]
@@ -348,8 +348,8 @@ func (suite *CollectionUnitTestSuite) TestCollectionReadError() {
 wg.Wait()

 // Expect no items
-require.Equal(t, 1, collStatus.ObjectCount, "only one object should be counted")
-require.Equal(t, 1, collStatus.Successful, "TODO: should be 0, but allowing 1 to reduce async management")
+require.Equal(t, 1, collStatus.Metrics.Objects, "only one object should be counted")
+require.Equal(t, 1, collStatus.Metrics.Successes, "TODO: should be 0, but allowing 1 to reduce async management")
 })
 }
 }
@@ -432,8 +432,8 @@ func (suite *CollectionUnitTestSuite) TestCollectionPermissionBackupLatestModTim
 wg.Wait()

 // Expect no items
-require.Equal(t, 1, collStatus.ObjectCount)
-require.Equal(t, 1, collStatus.Successful)
+require.Equal(t, 1, collStatus.Metrics.Objects)
+require.Equal(t, 1, collStatus.Metrics.Successes)

 for _, i := range readItems {
 if strings.HasSuffix(i.UUID(), MetaFileSuffix) {
@@ -102,7 +102,7 @@ func RestoreCollections(
 parentPermissions[k] = v
 }

-restoreMetrics.Combine(metrics)
+restoreMetrics = support.CombineMetrics(restoreMetrics, metrics)

 if errors.Is(err, context.Canceled) {
 break
@@ -114,7 +114,6 @@ func RestoreCollections(
 support.Restore,
 len(dcs),
 restoreMetrics,
-el.Failure(),
 dest.ContainerName)

 return status, el.Failure()
@@ -224,7 +223,7 @@ func RestoreCollection(

 if strings.HasSuffix(name, DataFileSuffix) {
 metrics.Objects++
-metrics.TotalBytes += int64(len(copyBuffer))
+metrics.Bytes += int64(len(copyBuffer))

 var (
 itemInfo details.ItemInfo
@@ -302,7 +301,7 @@ func RestoreCollection(
 }
 } else {
 metrics.Objects++
-metrics.TotalBytes += int64(len(copyBuffer))
+metrics.Bytes += int64(len(copyBuffer))

 // No permissions stored at the moment for SharePoint
 _, itemInfo, err = restoreData(
@@ -43,12 +43,6 @@ var (
 _ data.StreamModTime = &Item{}
 )

-type numMetrics struct {
-attempts int
-success int
-totalBytes int64
-}

 // Collection is the SharePoint.List implementation of data.Collection. SharePoint.Libraries collections are supported
 // by the oneDrive.Collection as the calls are identical for populating the Collection
 type Collection struct {
@@ -157,24 +151,17 @@ func (sd *Item) ModTime() time.Time {

 func (sc *Collection) finishPopulation(
 ctx context.Context,
-attempts, success int,
-totalBytes int64,
-err error,
+metrics support.CollectionMetrics,
 ) {
 close(sc.data)

-attempted := attempts
 status := support.CreateStatus(
 ctx,
 support.Backup,
 1, // 1 folder
-support.CollectionMetrics{
-Objects: attempted,
-Successes: success,
-TotalBytes: totalBytes,
-},
-err,
+metrics,
 sc.fullPath.Folder(false))

 logger.Ctx(ctx).Debug(status.String())

 if sc.statusUpdater != nil {
@@ -184,15 +171,16 @@ func (sc *Collection) finishPopulation(

 // populate utility function to retrieve data from back store for a given collection
 func (sc *Collection) populate(ctx context.Context, errs *fault.Bus) {
-var (
-metrics numMetrics
-writer = kw.NewJsonSerializationWriter()
-err error
-)
+metrics, _ := sc.runPopulate(ctx, errs)
+sc.finishPopulation(ctx, metrics)
+}

-defer func() {
-sc.finishPopulation(ctx, metrics.attempts, metrics.success, int64(metrics.totalBytes), err)
-}()
+func (sc *Collection) runPopulate(ctx context.Context, errs *fault.Bus) (support.CollectionMetrics, error) {
+var (
+err error
+metrics support.CollectionMetrics
+writer = kw.NewJsonSerializationWriter()
+)

 // TODO: Insert correct ID for CollectionProgress
 colProgress, closer := observe.CollectionProgress(
@@ -213,6 +201,8 @@ func (sc *Collection) populate(ctx context.Context, errs *fault.Bus) {
 case Pages:
 metrics, err = sc.retrievePages(ctx, writer, colProgress, errs)
 }

+return metrics, err
 }

 // retrieveLists utility function for collection that downloads and serializes
@@ -222,9 +212,9 @@ func (sc *Collection) retrieveLists(
 wtr *kw.JsonSerializationWriter,
 progress chan<- struct{},
 errs *fault.Bus,
-) (numMetrics, error) {
+) (support.CollectionMetrics, error) {
 var (
-metrics numMetrics
+metrics support.CollectionMetrics
 el = errs.Local()
 )

@@ -233,7 +223,7 @@ func (sc *Collection) retrieveLists(
 return metrics, err
 }

-metrics.attempts += len(lists)
+metrics.Objects += len(lists)
 // For each models.Listable, object is serialized and the metrics are collected.
 // The progress is objected via the passed in channel.
 for _, lst := range lists {
@@ -255,9 +245,9 @@ func (sc *Collection) retrieveLists(
 t = *t1
 }

-metrics.totalBytes += size
+metrics.Bytes += size

-metrics.success++
+metrics.Successes++
 sc.data <- &Item{
 id: *lst.GetId(),
 data: io.NopCloser(bytes.NewReader(byteArray)),
@@ -277,9 +267,9 @@ func (sc *Collection) retrievePages(
 wtr *kw.JsonSerializationWriter,
 progress chan<- struct{},
 errs *fault.Bus,
-) (numMetrics, error) {
+) (support.CollectionMetrics, error) {
 var (
-metrics numMetrics
+metrics support.CollectionMetrics
 el = errs.Local()
 )

@@ -300,7 +290,7 @@ func (sc *Collection) retrievePages(
 return metrics, err
 }

-metrics.attempts = len(pages)
+metrics.Objects = len(pages)
 // For each models.Pageable, object is serialize and the metrics are collected and returned.
 // Pageable objects are not supported in v1.0 of msgraph at this time.
 // TODO: Verify Parsable interface supported with modified-Pageable
@@ -318,8 +308,8 @@ func (sc *Collection) retrievePages(
 size := int64(len(byteArray))

 if size > 0 {
-metrics.totalBytes += size
-metrics.success++
+metrics.Bytes += size
+metrics.Successes++
 sc.data <- &Item{
 id: *pg.GetId(),
 data: io.NopCloser(bytes.NewReader(byteArray)),
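The SharePoint hunks above replace the private numMetrics counters with support.CollectionMetrics and split populate into runPopulate plus finishPopulation. The accounting pattern itself is simple; a minimal, self-contained sketch (collect and its byte-slice input are hypothetical stand-ins for the real list/page serialization):

```go
package main

import "fmt"

// Mirrors the CollectionMetrics shape from this commit (sketch only).
type CollectionMetrics struct {
	Objects, Successes int
	Bytes              int64
}

// collect is a hypothetical stand-in for retrieveLists/retrievePages: every
// item counts toward Objects, while Successes and Bytes are only bumped once
// an item serialized to a non-empty payload.
func collect(serialized [][]byte) CollectionMetrics {
	var metrics CollectionMetrics

	metrics.Objects += len(serialized)

	for _, payload := range serialized {
		size := int64(len(payload))
		if size > 0 {
			metrics.Bytes += size
			metrics.Successes++
		}
	}

	return metrics
}

func main() {
	m := collect([][]byte{[]byte(`{"id":"1"}`), nil})
	fmt.Printf("objects=%d successes=%d bytes=%d\n", m.Objects, m.Successes, m.Bytes)
}
```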
@@ -98,7 +98,7 @@ func RestoreCollections(
 return nil, clues.Wrap(clues.New(category.String()), "category not supported")
 }

-restoreMetrics.Combine(metrics)
+restoreMetrics = support.CombineMetrics(restoreMetrics, metrics)

 if err != nil {
 break
@@ -110,7 +110,6 @@ func RestoreCollections(
 support.Restore,
 len(dcs),
 restoreMetrics,
-err,
 dest.ContainerName)

 return status, err
@@ -256,7 +255,7 @@ func RestoreListCollection(
 continue
 }

-metrics.TotalBytes += itemInfo.SharePoint.Size
+metrics.Bytes += itemInfo.SharePoint.Size

 itemPath, err := dc.FullPath().Append(itemData.UUID(), true)
 if err != nil {
@@ -339,7 +338,7 @@ func RestorePageCollection(
 continue
 }

-metrics.TotalBytes += itemInfo.SharePoint.Size
+metrics.Bytes += itemInfo.SharePoint.Size

 itemPath, err := dc.FullPath().Append(itemData.UUID(), true)
 if err != nil {
@@ -5,7 +5,6 @@ import (
 "fmt"

 "github.com/dustin/go-humanize"
-multierror "github.com/hashicorp/go-multierror"
 )

 // ConnectorOperationStatus is a data type used to describe the state of
@@ -15,27 +14,23 @@ import (
 // @param incomplete: Bool representation of whether all intended items were download or uploaded.
 // @param bytes: represents the total number of bytes that have been downloaded or uploaded.
 type ConnectorOperationStatus struct {
-lastOperation Operation
-ObjectCount int
-FolderCount int
-Successful int
-ErrorCount int
-Err error
-incomplete bool
-incompleteReason string
-additionalDetails string
-bytes int64
+Folders int
+Metrics CollectionMetrics
+details string
+op Operation
 }

 type CollectionMetrics struct {
 Objects, Successes int
-TotalBytes int64
+Bytes int64
 }

-func (cm *CollectionMetrics) Combine(additional CollectionMetrics) {
-cm.Objects += additional.Objects
-cm.Successes += additional.Successes
-cm.TotalBytes += additional.TotalBytes
+func CombineMetrics(a, b CollectionMetrics) CollectionMetrics {
+return CollectionMetrics{
+Objects: a.Objects + b.Objects,
+Successes: a.Successes + b.Successes,
+Bytes: a.Bytes + b.Bytes,
+}
 }

 type Operation int
@@ -53,30 +48,13 @@ func CreateStatus(
 op Operation,
 folders int,
 cm CollectionMetrics,
-err error,
 details string,
 ) *ConnectorOperationStatus {
-var reason string
-
-if err != nil {
-reason = err.Error()
-}
-
-hasErrors := err != nil
-// TODO(keeprs): remove
-numErr := GetNumberOfErrors(err)
-
 status := ConnectorOperationStatus{
-lastOperation: op,
-ObjectCount: cm.Objects,
-FolderCount: folders,
-Successful: cm.Successes,
-ErrorCount: numErr,
-Err: err,
-incomplete: hasErrors,
-incompleteReason: reason,
-bytes: cm.TotalBytes,
-additionalDetails: details,
+Folders: folders,
+Metrics: cm,
+details: details,
+op: op,
 }

 return &status
@@ -89,32 +67,19 @@ type StatusUpdater func(*ConnectorOperationStatus)

 // MergeStatus combines ConnectorOperationsStatus value into a single status
 func MergeStatus(one, two ConnectorOperationStatus) ConnectorOperationStatus {
-var hasErrors bool
-
-if one.lastOperation == OpUnknown {
+if one.op == OpUnknown {
 return two
 }

-if two.lastOperation == OpUnknown {
+if two.op == OpUnknown {
 return one
 }

-if one.incomplete || two.incomplete {
-hasErrors = true
-}
-
 status := ConnectorOperationStatus{
-lastOperation: one.lastOperation,
-ObjectCount: one.ObjectCount + two.ObjectCount,
-FolderCount: one.FolderCount + two.FolderCount,
-Successful: one.Successful + two.Successful,
-// TODO: remove in favor of fault.Errors
-ErrorCount: one.ErrorCount + two.ErrorCount,
-Err: multierror.Append(one.Err, two.Err).ErrorOrNil(),
-bytes: one.bytes + two.bytes,
-incomplete: hasErrors,
-incompleteReason: one.incompleteReason + ", " + two.incompleteReason,
-additionalDetails: one.additionalDetails + ", " + two.additionalDetails,
+Folders: one.Folders + two.Folders,
+Metrics: CombineMetrics(one.Metrics, two.Metrics),
+details: one.details + ", " + two.details,
+op: one.op,
 }

 return status
@@ -123,23 +88,19 @@ func MergeStatus(one, two ConnectorOperationStatus) ConnectorOperationStatus {
 func (cos *ConnectorOperationStatus) String() string {
 var operationStatement string

-switch cos.lastOperation {
+switch cos.op {
 case Backup:
 operationStatement = "Downloaded from "
 case Restore:
 operationStatement = "Restored content to "
 }

-message := fmt.Sprintf("Action: %s performed on %d of %d objects (%s) within %d directories.",
-cos.lastOperation.String(),
-cos.Successful,
-cos.ObjectCount,
-humanize.Bytes(uint64(cos.bytes)),
-cos.FolderCount)
-
-if cos.incomplete {
-message += " " + cos.incompleteReason
-}
-
-return message + " " + operationStatement + cos.additionalDetails
+return fmt.Sprintf("Action: %s performed on %d of %d objects (%s) within %d directories. %s %s",
+cos.op.String(),
+cos.Metrics.Successes,
+cos.Metrics.Objects,
+humanize.Bytes(uint64(cos.Metrics.Bytes)),
+cos.Folders,
+operationStatement,
+cos.details)
 }
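Putting the new signatures together, a caller now accumulates metrics with CombineMetrics and builds a status without passing an error. A minimal sketch, assuming the import path shown below (the diff only confirms the package name `support`; CreateStatus, CombineMetrics, and the Metrics fields are taken from the hunks above):

```go
package main

import (
	"context"
	"fmt"

	// Import path assumed; the diff only shows the package name "support".
	"github.com/alcionai/corso/src/internal/connector/support"
)

func main() {
	ctx := context.Background()

	// Accumulate per-collection metrics, mirroring the restore loops in this PR.
	var total support.CollectionMetrics
	for _, m := range []support.CollectionMetrics{
		{Objects: 3, Successes: 3, Bytes: 1024},
		{Objects: 2, Successes: 1, Bytes: 512},
	} {
		total = support.CombineMetrics(total, m)
	}

	// CreateStatus no longer takes an error: ctx, operation, folder count,
	// metrics, and a details string.
	status := support.CreateStatus(ctx, support.Backup, 2, total, "example-container")

	// Consumers read counts from status.Metrics instead of ObjectCount/Successful.
	fmt.Println(status.Metrics.Objects, status.Metrics.Successes)
	fmt.Println(status.String())
}
```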
@@ -1,53 +1,46 @@
 package support

 import (
-"errors"
 "testing"

 "github.com/stretchr/testify/assert"
-"github.com/stretchr/testify/require"
 "github.com/stretchr/testify/suite"

 "github.com/alcionai/corso/src/internal/tester"
 )

-type GCStatusTestSuite struct {
+type StatusUnitSuite struct {
 tester.Suite
 }

 func TestGraphConnectorStatus(t *testing.T) {
-suite.Run(t, &GCStatusTestSuite{Suite: tester.NewUnitSuite(t)})
+suite.Run(t, &StatusUnitSuite{tester.NewUnitSuite(t)})
 }

-// operationType, objects, success, folders, errCount int, errStatus string
-
-type statusParams struct {
-operationType Operation
-objects int
-success int
-folders int
-err error
+func metricsMatch(t *testing.T, expect, result CollectionMetrics) {
+assert.Equal(t, expect.Bytes, result.Bytes, "bytes")
+assert.Equal(t, expect.Objects, result.Objects, "objects")
+assert.Equal(t, expect.Successes, result.Successes, "successes")
 }

-func (suite *GCStatusTestSuite) TestCreateStatus() {
+func (suite *StatusUnitSuite) TestCreateStatus() {
 table := []struct {
 name string
-params statusParams
-expect assert.BoolAssertionFunc
+op Operation
+metrics CollectionMetrics
+folders int
 }{
 {
-name: "Test: Status Success",
-params: statusParams{Backup, 12, 12, 3, nil},
-expect: assert.False,
+name: "Backup",
+op: Backup,
+metrics: CollectionMetrics{12, 12, 3},
+folders: 1,
 },
 {
-name: "Test: Status Failed",
-params: statusParams{
-Restore,
-12, 9, 8,
-WrapAndAppend("tres", errors.New("three"), WrapAndAppend("arc376", errors.New("one"), errors.New("two"))),
-},
-expect: assert.True,
+name: "Backup",
+op: Restore,
+metrics: CollectionMetrics{12, 9, 8},
+folders: 2,
 },
 }
 for _, test := range table {
@@ -59,89 +52,60 @@ func (suite *GCStatusTestSuite) TestCreateStatus() {

 result := CreateStatus(
 ctx,
-test.params.operationType,
-test.params.folders,
-CollectionMetrics{test.params.objects, test.params.success, 0},
-test.params.err,
-"",
-)
-test.expect(t, result.incomplete, "status is incomplete")
+test.op,
+test.folders,
+test.metrics,
+"details")
+assert.Equal(t, test.op, result.op, "operation")
+assert.Equal(t, test.folders, result.Folders, "folders")
+metricsMatch(t, test.metrics, result.Metrics)
 })
 }
 }

-func (suite *GCStatusTestSuite) TestCreateStatus_InvalidStatus() {
-t := suite.T()
-params := statusParams{Backup, 9, 3, 13, errors.New("invalidcl")}
-
-require.NotPanics(t, func() {
-ctx, flush := tester.NewContext()
-defer flush()
-
-CreateStatus(
-ctx,
-params.operationType,
-params.folders,
-CollectionMetrics{
-params.objects,
-params.success,
-0,
-},
-params.err,
-"",
-)
-})
-}
-
-func (suite *GCStatusTestSuite) TestMergeStatus() {
+func (suite *StatusUnitSuite) TestMergeStatus() {
 ctx, flush := tester.NewContext()
 defer flush()

 table := []struct {
 name string
 one ConnectorOperationStatus
 two ConnectorOperationStatus
-expected statusParams
-isIncomplete assert.BoolAssertionFunc
+expectOp Operation
+expectMetrics CollectionMetrics
+expectFolders int
 }{
 {
 name: "Test: Status + unknown",
-one: *CreateStatus(ctx, Backup, 1, CollectionMetrics{1, 1, 0}, nil, ""),
+one: *CreateStatus(ctx, Backup, 1, CollectionMetrics{1, 1, 0}, ""),
 two: ConnectorOperationStatus{},
-expected: statusParams{Backup, 1, 1, 1, nil},
-isIncomplete: assert.False,
+expectOp: Backup,
+expectMetrics: CollectionMetrics{1, 1, 0},
+expectFolders: 1,
 },
 {
 name: "Test: unknown + Status",
 one: ConnectorOperationStatus{},
-two: *CreateStatus(ctx, Backup, 1, CollectionMetrics{1, 1, 0}, nil, ""),
-expected: statusParams{Backup, 1, 1, 1, nil},
-isIncomplete: assert.False,
+two: *CreateStatus(ctx, Backup, 1, CollectionMetrics{1, 1, 0}, ""),
+expectOp: Backup,
+expectMetrics: CollectionMetrics{1, 1, 0},
+expectFolders: 1,
 },
 {
 name: "Test: Successful + Successful",
-one: *CreateStatus(ctx, Backup, 1, CollectionMetrics{1, 1, 0}, nil, ""),
-two: *CreateStatus(ctx, Backup, 3, CollectionMetrics{3, 3, 0}, nil, ""),
-expected: statusParams{Backup, 4, 4, 4, nil},
-isIncomplete: assert.False,
+one: *CreateStatus(ctx, Backup, 1, CollectionMetrics{1, 1, 0}, ""),
+two: *CreateStatus(ctx, Backup, 3, CollectionMetrics{3, 3, 0}, ""),
+expectOp: Backup,
+expectMetrics: CollectionMetrics{4, 4, 0},
+expectFolders: 4,
 },
 {
 name: "Test: Successful + Unsuccessful",
-one: *CreateStatus(ctx, Backup, 13, CollectionMetrics{17, 17, 0}, nil, ""),
-two: *CreateStatus(
-ctx,
-Backup,
-8,
-CollectionMetrics{
-12,
-9,
-0,
-},
-WrapAndAppend("tres", errors.New("three"), WrapAndAppend("arc376", errors.New("one"), errors.New("two"))),
-"",
-),
-expected: statusParams{Backup, 29, 26, 21, nil},
-isIncomplete: assert.True,
+one: *CreateStatus(ctx, Backup, 13, CollectionMetrics{17, 17, 0}, ""),
+two: *CreateStatus(ctx, Backup, 8, CollectionMetrics{12, 9, 0}, ""),
+expectOp: Backup,
+expectMetrics: CollectionMetrics{29, 26, 0},
+expectFolders: 21,
 },
 }
@@ -149,12 +113,10 @@ func (suite *GCStatusTestSuite) TestMergeStatus() {
 suite.Run(test.name, func() {
 t := suite.T()

-returned := MergeStatus(test.one, test.two)
-assert.Equal(t, returned.FolderCount, test.expected.folders)
-assert.Equal(t, returned.ObjectCount, test.expected.objects)
-assert.Equal(t, returned.lastOperation, test.expected.operationType)
-assert.Equal(t, returned.Successful, test.expected.success)
-test.isIncomplete(t, returned.incomplete)
+result := MergeStatus(test.one, test.two)
+assert.Equal(t, test.expectFolders, result.Folders, "folders")
+assert.Equal(t, test.expectOp, result.op, "operation")
+metricsMatch(t, test.expectMetrics, result.Metrics)
 })
 }
 }
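The TestMergeStatus cases above boil down to: folder counts and metrics add, the first non-unknown operation wins, and there is no error or incomplete state left to merge. A small sketch under the same assumed import path as earlier:

```go
package main

import (
	"context"
	"fmt"

	// Import path assumed, as in the earlier sketch.
	"github.com/alcionai/corso/src/internal/connector/support"
)

func main() {
	ctx := context.Background()

	one := support.CreateStatus(ctx, support.Backup, 1,
		support.CollectionMetrics{Objects: 1, Successes: 1}, "one")
	two := support.CreateStatus(ctx, support.Backup, 3,
		support.CollectionMetrics{Objects: 3, Successes: 3}, "two")

	merged := support.MergeStatus(*one, *two)

	// Matches the "Successful + Successful" table case: 4 folders, 4 objects, 4 successes.
	fmt.Println(merged.Folders, merged.Metrics.Objects, merged.Metrics.Successes)
}
```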
@@ -290,10 +290,6 @@ func (op *BackupOperation) do(
 }

 opStats.gc = gc.AwaitStatus()
-// TODO(keepers): remove when fault.Errors handles all iterable error aggregation.
-if opStats.gc.ErrorCount > 0 {
-return nil, opStats.gc.Err
-}

 logger.Ctx(ctx).Debug(gc.PrintableStatus())

@@ -657,11 +653,11 @@ func (op *BackupOperation) persistResults(
 return errors.New("backup population never completed")
 }

-if opStats.gc.Successful == 0 {
+if opStats.gc.Metrics.Successes == 0 {
 op.Status = NoData
 }

-op.Results.ItemsRead = opStats.gc.Successful
+op.Results.ItemsRead = opStats.gc.Metrics.Successes

 return nil
 }
@@ -391,7 +391,7 @@ func (suite *BackupOpSuite) TestBackupOperation_PersistResults() {
 TotalUploadedBytes: 1,
 },
 gc: &support.ConnectorOperationStatus{
-Successful: 1,
+Metrics: support.CollectionMetrics{Successes: 1},
 },
 },
 },
@@ -431,7 +431,7 @@ func (suite *BackupOpSuite) TestBackupOperation_PersistResults() {
 test.expectErr(t, op.persistResults(now, &test.stats))

 assert.Equal(t, test.expectStatus.String(), op.Status.String(), "status")
-assert.Equal(t, test.stats.gc.Successful, op.Results.ItemsRead, "items read")
+assert.Equal(t, test.stats.gc.Metrics.Successes, op.Results.ItemsRead, "items read")
 assert.Equal(t, test.stats.k.TotalFileCount, op.Results.ItemsWritten, "items written")
 assert.Equal(t, test.stats.k.TotalHashedBytes, op.Results.BytesRead, "bytes read")
 assert.Equal(t, test.stats.k.TotalUploadedBytes, op.Results.BytesUploaded, "bytes written")
@@ -263,10 +263,6 @@ func (op *RestoreOperation) do(
 restoreComplete <- struct{}{}

 opStats.gc = gc.AwaitStatus()
-// TODO(keepers): remove when fault.Errors handles all iterable error aggregation.
-if opStats.gc.ErrorCount > 0 {
-return nil, opStats.gc.Err
-}

 logger.Ctx(ctx).Debug(gc.PrintableStatus())

@@ -305,11 +301,11 @@ func (op *RestoreOperation) persistResults(
 return errors.New("restoration never completed")
 }

-if opStats.gc.Successful == 0 {
+if opStats.gc.Metrics.Successes == 0 {
 op.Status = NoData
 }

-op.Results.ItemsWritten = opStats.gc.Successful
+op.Results.ItemsWritten = opStats.gc.Metrics.Successes

 dur := op.Results.CompletedAt.Sub(op.Results.StartedAt)
@@ -68,8 +68,10 @@ func (suite *RestoreOpSuite) TestRestoreOperation_PersistResults() {
 },
 },
 gc: &support.ConnectorOperationStatus{
-ObjectCount: 1,
-Successful: 1,
+Metrics: support.CollectionMetrics{
+Objects: 1,
+Successes: 1,
+},
 },
 },
 },
@@ -111,7 +113,7 @@ func (suite *RestoreOpSuite) TestRestoreOperation_PersistResults() {

 assert.Equal(t, test.expectStatus.String(), op.Status.String(), "status")
 assert.Equal(t, len(test.stats.cs), op.Results.ItemsRead, "items read")
-assert.Equal(t, test.stats.gc.Successful, op.Results.ItemsWritten, "items written")
+assert.Equal(t, test.stats.gc.Metrics.Successes, op.Results.ItemsWritten, "items written")
 assert.Equal(t, test.stats.bytesRead.NumBytes, op.Results.BytesRead, "resource owners")
 assert.Equal(t, test.stats.resourceCount, op.Results.ResourceOwners, "resource owners")
 assert.Equal(t, test.stats.readErr, op.Results.ReadErrors, "read errors")