remove errors from connector status (#2586)
## Description

Now that fault is in place, we can remove the error tracking functionality of graph status, and let that focus purely on metrics.

## Does this PR need a docs update or release note?

- [x] ⛔ No

## Type of change

- [x] 🧹 Tech Debt/Cleanup

## Issue(s)

* #1970

## Test Plan

- [x] ⚡ Unit test
- [x] 💚 E2E
parent ad75540b03
commit 29c6ac4874
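A minimal sketch (not part of this commit) of the metrics-only flow after the change, using the post-change API visible in the hunks below: `CollectionMetrics{Objects, Successes, Bytes}`, `CombineMetrics`, and a `CreateStatus` that no longer takes an error argument. The package path and the literal values are assumptions for illustration only.

```go
package main

import (
	"context"
	"fmt"

	// Assumed import path for the connector support package shown in the diff.
	"github.com/alcionai/corso/src/internal/connector/support"
)

func main() {
	ctx := context.Background()

	// Each collection now reports pure counts; errors flow through fault, not status.
	m1 := support.CollectionMetrics{Objects: 3, Successes: 3, Bytes: 1024}
	m2 := support.CollectionMetrics{Objects: 2, Successes: 1, Bytes: 512}

	// Metrics aggregate by value instead of mutating a shared struct.
	total := support.CombineMetrics(m1, m2)

	// CreateStatus(ctx, operation, folderCount, metrics, details) — no err param.
	status := support.CreateStatus(ctx, support.Backup, 2, total, "example-folder")

	fmt.Println(status.String())
}
```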
@@ -130,7 +130,7 @@ func (suite *ConnectorDataCollectionIntegrationSuite) TestExchangeDataCollection
}

status := connector.AwaitStatus()
-assert.NotZero(t, status.Successful)
+assert.NotZero(t, status.Metrics.Successes)
t.Log(status.String())
})
}

@@ -286,7 +286,7 @@ func (suite *ConnectorDataCollectionIntegrationSuite) TestSharePointDataCollecti
}

status := connector.AwaitStatus()
-assert.NotZero(t, status.Successful)
+assert.NotZero(t, status.Metrics.Successes)
t.Log(status.String())
})
}

@@ -202,7 +202,6 @@ func (suite *DataCollectionsUnitSuite) TestParseMetadataCollections() {
func newStatusUpdater(t *testing.T, wg *sync.WaitGroup) func(status *support.ConnectorOperationStatus) {
updater := func(status *support.ConnectorOperationStatus) {
defer wg.Done()
-assert.Zero(t, status.ErrorCount)
}

return updater
@@ -316,11 +316,10 @@ func (col *Collection) finishPopulation(
support.Backup,
1,
support.CollectionMetrics{
-Objects: attempted,
-Successes: success,
-TotalBytes: totalBytes,
+Objects: attempted,
+Successes: success,
+Bytes: totalBytes,
},
-err,
col.fullPath.Folder(false))

logger.Ctx(ctx).Debugw("done streaming items", "status", status.String())

@@ -343,7 +343,7 @@ func RestoreExchangeDataCollections(

temp, canceled := restoreCollection(ctx, gs, dc, containerID, policy, deets, errs)

-metrics.Combine(temp)
+metrics = support.CombineMetrics(metrics, temp)

if canceled {
break

@@ -355,7 +355,6 @@ func RestoreExchangeDataCollections(
support.Restore,
len(dcs),
metrics,
-el.Failure(),
dest.ContainerName)

return status, el.Failure()

@@ -436,7 +435,7 @@ func restoreCollection(
continue
}

-metrics.TotalBytes += int64(len(byteArray))
+metrics.Bytes += int64(len(byteArray))
metrics.Successes++

itemPath, err := dc.FullPath().Append(itemData.UUID(), true)
@@ -150,11 +150,10 @@ func (md MetadataCollection) Items(
support.Backup,
1,
support.CollectionMetrics{
-Objects: len(md.items),
-Successes: len(md.items),
-TotalBytes: totalBytes,
+Objects: len(md.items),
+Successes: len(md.items),
+Bytes: totalBytes,
},
-nil,
md.fullPath.Folder(false),
)

@@ -84,8 +84,8 @@ func (suite *MetadataCollectionUnitSuite) TestItems() {
p,
items,
func(c *support.ConnectorOperationStatus) {
-assert.Equal(t, len(itemNames), c.ObjectCount)
-assert.Equal(t, len(itemNames), c.Successful)
+assert.Equal(t, len(itemNames), c.Metrics.Objects)
+assert.Equal(t, len(itemNames), c.Metrics.Successes)
},
)
@@ -4,7 +4,6 @@ import (
"sync"
"testing"

-"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"

@@ -92,17 +91,11 @@ func statusTestTask(gc *GraphConnector, objects, success, folder int) {
ctx,
support.Restore, folder,
support.CollectionMetrics{
-Objects: objects,
-Successes: success,
-TotalBytes: 0,
+Objects: objects,
+Successes: success,
+Bytes: 0,
},
-support.WrapAndAppend(
-"tres",
-errors.New("three"),
-support.WrapAndAppend("arc376", errors.New("one"), errors.New("two")),
-),
-"statusTestTask",
-)
+"statusTestTask")
gc.UpdateStatus(status)
}

@@ -123,11 +116,11 @@ func (suite *DisconnectedGraphConnectorSuite) TestGraphConnector_Status() {

assert.NotEmpty(t, gc.PrintableStatus())
// Expect 8 objects
-assert.Equal(t, 8, gc.Status().ObjectCount)
+assert.Equal(t, 8, gc.Status().Metrics.Objects)
// Expect 2 success
-assert.Equal(t, 2, gc.Status().Successful)
+assert.Equal(t, 2, gc.Status().Metrics.Successes)
// Expect 2 folders
-assert.Equal(t, 2, gc.Status().FolderCount)
+assert.Equal(t, 2, gc.Status().Folders)
}

func (suite *DisconnectedGraphConnectorSuite) TestVerifyBackupInputs_allServices() {
@@ -252,9 +252,9 @@ func (suite *GraphConnectorIntegrationSuite) TestRestoreFailsBadService() {
assert.NotNil(t, deets)

status := suite.connector.AwaitStatus()
-assert.Equal(t, 0, status.ObjectCount)
-assert.Equal(t, 0, status.FolderCount)
-assert.Equal(t, 0, status.Successful)
+assert.Equal(t, 0, status.Metrics.Objects)
+assert.Equal(t, 0, status.Folders)
+assert.Equal(t, 0, status.Metrics.Successes)
}

func (suite *GraphConnectorIntegrationSuite) TestEmptyCollections() {

@@ -331,9 +331,9 @@ func (suite *GraphConnectorIntegrationSuite) TestEmptyCollections() {
assert.NotNil(t, deets)

stats := suite.connector.AwaitStatus()
-assert.Zero(t, stats.ObjectCount)
-assert.Zero(t, stats.FolderCount)
-assert.Zero(t, stats.Successful)
+assert.Zero(t, stats.Metrics.Objects)
+assert.Zero(t, stats.Folders)
+assert.Zero(t, stats.Metrics.Successes)
})
}
}

@@ -435,10 +435,8 @@ func runRestore(
status := restoreGC.AwaitStatus()
runTime := time.Since(start)

-assert.NoError(t, status.Err, "restored status.Err")
-assert.Zero(t, status.ErrorCount, "restored status.ErrorCount")
-assert.Equal(t, numRestoreItems, status.ObjectCount, "restored status.ObjectCount")
-assert.Equal(t, numRestoreItems, status.Successful, "restored status.Successful")
+assert.Equal(t, numRestoreItems, status.Metrics.Objects, "restored status.Metrics.Objects")
+assert.Equal(t, numRestoreItems, status.Metrics.Successes, "restored status.Metrics.Successes")
assert.Len(
t,
deets.Entries,

@@ -504,12 +502,10 @@ func runBackupAndCompare(

status := backupGC.AwaitStatus()

-assert.NoError(t, status.Err, "backup status.Err")
-assert.Zero(t, status.ErrorCount, "backup status.ErrorCount")
-assert.Equalf(t, totalItems+skipped, status.ObjectCount,
-"backup status.ObjectCount; wanted %d items + %d skipped", totalItems, skipped)
-assert.Equalf(t, totalItems+skipped, status.Successful,
-"backup status.Successful; wanted %d items + %d skipped", totalItems, skipped)
+assert.Equalf(t, totalItems+skipped, status.Metrics.Objects,
+"backup status.Metrics.Objects; wanted %d items + %d skipped", totalItems, skipped)
+assert.Equalf(t, totalItems+skipped, status.Metrics.Successes,
+"backup status.Metrics.Successes; wanted %d items + %d skipped", totalItems, skipped)
}

func runRestoreBackupTest(

@@ -975,8 +971,8 @@ func (suite *GraphConnectorIntegrationSuite) TestMultiFolderBackupDifferentNames

status := restoreGC.AwaitStatus()
// Always just 1 because it's just 1 collection.
-assert.Equal(t, totalItems, status.ObjectCount, "status.ObjectCount")
-assert.Equal(t, totalItems, status.Successful, "status.Successful")
+assert.Equal(t, totalItems, status.Metrics.Objects, "status.Metrics.Objects")
+assert.Equal(t, totalItems, status.Metrics.Successes, "status.Metrics.Successes")
assert.Equal(
t, totalItems, len(deets.Entries),
"details entries contains same item count as total successful items restored")

@@ -1018,8 +1014,8 @@ func (suite *GraphConnectorIntegrationSuite) TestMultiFolderBackupDifferentNames
true)

status := backupGC.AwaitStatus()
-assert.Equal(t, allItems+skipped, status.ObjectCount, "status.ObjectCount")
-assert.Equal(t, allItems+skipped, status.Successful, "status.Successful")
+assert.Equal(t, allItems+skipped, status.Metrics.Objects, "status.Metrics.Objects")
+assert.Equal(t, allItems+skipped, status.Metrics.Successes, "status.Metrics.Successes")
})
}
}
@@ -256,7 +256,7 @@ func (oc *Collection) populateItems(ctx context.Context, errs *fault.Bus) {
// `details.OneDriveInfo`
parentPathString, err := path.GetDriveFolderPath(oc.folderPath)
if err != nil {
-oc.reportAsCompleted(ctx, 0, 0, 0, clues.Wrap(err, "getting drive path").WithClues(ctx))
+oc.reportAsCompleted(ctx, 0, 0, 0)
return
}

@@ -453,22 +453,22 @@ func (oc *Collection) populateItems(ctx context.Context, errs *fault.Bus) {

wg.Wait()

-oc.reportAsCompleted(ctx, int(itemsFound), int(itemsRead), byteCount, el.Failure())
+oc.reportAsCompleted(ctx, int(itemsFound), int(itemsRead), byteCount)
}

-func (oc *Collection) reportAsCompleted(ctx context.Context, itemsFound, itemsRead int, byteCount int64, err error) {
+func (oc *Collection) reportAsCompleted(ctx context.Context, itemsFound, itemsRead int, byteCount int64) {
close(oc.data)

status := support.CreateStatus(ctx, support.Backup,
1, // num folders (always 1)
support.CollectionMetrics{
-Objects: itemsFound, // items to read,
-Successes: itemsRead, // items read successfully,
-TotalBytes: byteCount, // Number of bytes read in the operation,
+Objects: itemsFound,
+Successes: itemsRead,
+Bytes: byteCount,
},
-err,
-oc.folderPath.Folder(false), // Additional details
-)
+oc.folderPath.Folder(false))

logger.Ctx(ctx).Debugw("done streaming items", "status", status.String())

oc.statusUpdater(status)
}
@@ -52,7 +52,7 @@ func (suite *CollectionUnitTestSuite) testStatusUpdater(
statusToUpdate *support.ConnectorOperationStatus,
) support.StatusUpdater {
return func(s *support.ConnectorOperationStatus) {
-suite.T().Logf("Update status %v, count %d, success %d", s, s.ObjectCount, s.Successful)
+suite.T().Logf("Update status %v, count %d, success %d", s, s.Metrics.Objects, s.Metrics.Successes)
*statusToUpdate = *s

wg.Done()

@@ -224,8 +224,8 @@ func (suite *CollectionUnitTestSuite) TestCollection() {
}

// Expect only 1 item
-require.Equal(t, 1, collStatus.ObjectCount)
-require.Equal(t, 1, collStatus.Successful)
+require.Equal(t, 1, collStatus.Metrics.Objects)
+require.Equal(t, 1, collStatus.Metrics.Successes)

// Validate item info and data
readItem := readItems[0]

@@ -348,8 +348,8 @@ func (suite *CollectionUnitTestSuite) TestCollectionReadError() {
wg.Wait()

// Expect no items
-require.Equal(t, 1, collStatus.ObjectCount, "only one object should be counted")
-require.Equal(t, 1, collStatus.Successful, "TODO: should be 0, but allowing 1 to reduce async management")
+require.Equal(t, 1, collStatus.Metrics.Objects, "only one object should be counted")
+require.Equal(t, 1, collStatus.Metrics.Successes, "TODO: should be 0, but allowing 1 to reduce async management")
})
}
}

@@ -432,8 +432,8 @@ func (suite *CollectionUnitTestSuite) TestCollectionPermissionBackupLatestModTim
wg.Wait()

// Expect no items
-require.Equal(t, 1, collStatus.ObjectCount)
-require.Equal(t, 1, collStatus.Successful)
+require.Equal(t, 1, collStatus.Metrics.Objects)
+require.Equal(t, 1, collStatus.Metrics.Successes)

for _, i := range readItems {
if strings.HasSuffix(i.UUID(), MetaFileSuffix) {
@@ -102,7 +102,7 @@ func RestoreCollections(
parentPermissions[k] = v
}

-restoreMetrics.Combine(metrics)
+restoreMetrics = support.CombineMetrics(restoreMetrics, metrics)

if errors.Is(err, context.Canceled) {
break

@@ -114,7 +114,6 @@ func RestoreCollections(
support.Restore,
len(dcs),
restoreMetrics,
-el.Failure(),
dest.ContainerName)

return status, el.Failure()

@@ -224,7 +223,7 @@ func RestoreCollection(

if strings.HasSuffix(name, DataFileSuffix) {
metrics.Objects++
-metrics.TotalBytes += int64(len(copyBuffer))
+metrics.Bytes += int64(len(copyBuffer))

var (
itemInfo details.ItemInfo

@@ -302,7 +301,7 @@ func RestoreCollection(
}
} else {
metrics.Objects++
-metrics.TotalBytes += int64(len(copyBuffer))
+metrics.Bytes += int64(len(copyBuffer))

// No permissions stored at the moment for SharePoint
_, itemInfo, err = restoreData(
@@ -43,12 +43,6 @@ var (
_ data.StreamModTime = &Item{}
)

-type numMetrics struct {
-attempts int
-success int
-totalBytes int64
-}
-
// Collection is the SharePoint.List implementation of data.Collection. SharePoint.Libraries collections are supported
// by the oneDrive.Collection as the calls are identical for populating the Collection
type Collection struct {

@@ -157,24 +151,17 @@ func (sd *Item) ModTime() time.Time {

func (sc *Collection) finishPopulation(
ctx context.Context,
-attempts, success int,
-totalBytes int64,
-err error,
+metrics support.CollectionMetrics,
) {
close(sc.data)

-attempted := attempts
status := support.CreateStatus(
ctx,
support.Backup,
1, // 1 folder
-support.CollectionMetrics{
-Objects: attempted,
-Successes: success,
-TotalBytes: totalBytes,
-},
-err,
+metrics,
sc.fullPath.Folder(false))

logger.Ctx(ctx).Debug(status.String())

if sc.statusUpdater != nil {

@@ -184,15 +171,16 @@ func (sc *Collection) finishPopulation(

// populate utility function to retrieve data from back store for a given collection
func (sc *Collection) populate(ctx context.Context, errs *fault.Bus) {
-var (
-metrics numMetrics
-writer = kw.NewJsonSerializationWriter()
-err error
-)
+metrics, _ := sc.runPopulate(ctx, errs)
+sc.finishPopulation(ctx, metrics)
+}

-defer func() {
-sc.finishPopulation(ctx, metrics.attempts, metrics.success, int64(metrics.totalBytes), err)
-}()
+func (sc *Collection) runPopulate(ctx context.Context, errs *fault.Bus) (support.CollectionMetrics, error) {
+var (
+err error
+metrics support.CollectionMetrics
+writer = kw.NewJsonSerializationWriter()
+)

// TODO: Insert correct ID for CollectionProgress
colProgress, closer := observe.CollectionProgress(

@@ -213,6 +201,8 @@ func (sc *Collection) populate(ctx context.Context, errs *fault.Bus) {
case Pages:
metrics, err = sc.retrievePages(ctx, writer, colProgress, errs)
}

+return metrics, err
+}

// retrieveLists utility function for collection that downloads and serializes
@@ -222,9 +212,9 @@ func (sc *Collection) retrieveLists(
wtr *kw.JsonSerializationWriter,
progress chan<- struct{},
errs *fault.Bus,
-) (numMetrics, error) {
+) (support.CollectionMetrics, error) {
var (
-metrics numMetrics
+metrics support.CollectionMetrics
el = errs.Local()
)

@@ -233,7 +223,7 @@ func (sc *Collection) retrieveLists(
return metrics, err
}

-metrics.attempts += len(lists)
+metrics.Objects += len(lists)
// For each models.Listable, object is serialized and the metrics are collected.
// The progress is objected via the passed in channel.
for _, lst := range lists {

@@ -255,9 +245,9 @@ func (sc *Collection) retrieveLists(
t = *t1
}

-metrics.totalBytes += size
+metrics.Bytes += size

-metrics.success++
+metrics.Successes++
sc.data <- &Item{
id: *lst.GetId(),
data: io.NopCloser(bytes.NewReader(byteArray)),

@@ -277,9 +267,9 @@ func (sc *Collection) retrievePages(
wtr *kw.JsonSerializationWriter,
progress chan<- struct{},
errs *fault.Bus,
-) (numMetrics, error) {
+) (support.CollectionMetrics, error) {
var (
-metrics numMetrics
+metrics support.CollectionMetrics
el = errs.Local()
)

@@ -300,7 +290,7 @@ func (sc *Collection) retrievePages(
return metrics, err
}

-metrics.attempts = len(pages)
+metrics.Objects = len(pages)
// For each models.Pageable, object is serialize and the metrics are collected and returned.
// Pageable objects are not supported in v1.0 of msgraph at this time.
// TODO: Verify Parsable interface supported with modified-Pageable

@@ -318,8 +308,8 @@ func (sc *Collection) retrievePages(
size := int64(len(byteArray))

if size > 0 {
-metrics.totalBytes += size
-metrics.success++
+metrics.Bytes += size
+metrics.Successes++
sc.data <- &Item{
id: *pg.GetId(),
data: io.NopCloser(bytes.NewReader(byteArray)),
@@ -98,7 +98,7 @@ func RestoreCollections(
return nil, clues.Wrap(clues.New(category.String()), "category not supported")
}

-restoreMetrics.Combine(metrics)
+restoreMetrics = support.CombineMetrics(restoreMetrics, metrics)

if err != nil {
break

@@ -110,7 +110,6 @@ func RestoreCollections(
support.Restore,
len(dcs),
restoreMetrics,
-err,
dest.ContainerName)

return status, err

@@ -256,7 +255,7 @@ func RestoreListCollection(
continue
}

-metrics.TotalBytes += itemInfo.SharePoint.Size
+metrics.Bytes += itemInfo.SharePoint.Size

itemPath, err := dc.FullPath().Append(itemData.UUID(), true)
if err != nil {

@@ -339,7 +338,7 @@ func RestorePageCollection(
continue
}

-metrics.TotalBytes += itemInfo.SharePoint.Size
+metrics.Bytes += itemInfo.SharePoint.Size

itemPath, err := dc.FullPath().Append(itemData.UUID(), true)
if err != nil {
@@ -5,7 +5,6 @@ import (
"fmt"

"github.com/dustin/go-humanize"
-multierror "github.com/hashicorp/go-multierror"
)

// ConnectorOperationStatus is a data type used to describe the state of

@@ -15,27 +14,23 @@ import (
// @param incomplete: Bool representation of whether all intended items were download or uploaded.
// @param bytes: represents the total number of bytes that have been downloaded or uploaded.
type ConnectorOperationStatus struct {
-lastOperation Operation
-ObjectCount int
-FolderCount int
-Successful int
-ErrorCount int
-Err error
-incomplete bool
-incompleteReason string
-additionalDetails string
-bytes int64
+Folders int
+Metrics CollectionMetrics
+details string
+op Operation
}

type CollectionMetrics struct {
Objects, Successes int
-TotalBytes int64
+Bytes int64
}

-func (cm *CollectionMetrics) Combine(additional CollectionMetrics) {
-cm.Objects += additional.Objects
-cm.Successes += additional.Successes
-cm.TotalBytes += additional.TotalBytes
+func CombineMetrics(a, b CollectionMetrics) CollectionMetrics {
+return CollectionMetrics{
+Objects: a.Objects + b.Objects,
+Successes: a.Successes + b.Successes,
+Bytes: a.Bytes + b.Bytes,
+}
}

type Operation int

@@ -53,30 +48,13 @@ func CreateStatus(
op Operation,
folders int,
cm CollectionMetrics,
-err error,
details string,
) *ConnectorOperationStatus {
-var reason string
-
-if err != nil {
-reason = err.Error()
-}
-
-hasErrors := err != nil
-// TODO(keeprs): remove
-numErr := GetNumberOfErrors(err)
-
status := ConnectorOperationStatus{
-lastOperation: op,
-ObjectCount: cm.Objects,
-FolderCount: folders,
-Successful: cm.Successes,
-ErrorCount: numErr,
-Err: err,
-incomplete: hasErrors,
-incompleteReason: reason,
-bytes: cm.TotalBytes,
-additionalDetails: details,
+Folders: folders,
+Metrics: cm,
+details: details,
+op: op,
}

return &status

@@ -89,32 +67,19 @@ type StatusUpdater func(*ConnectorOperationStatus)

// MergeStatus combines ConnectorOperationsStatus value into a single status
func MergeStatus(one, two ConnectorOperationStatus) ConnectorOperationStatus {
-var hasErrors bool
-
-if one.lastOperation == OpUnknown {
+if one.op == OpUnknown {
return two
}

-if two.lastOperation == OpUnknown {
+if two.op == OpUnknown {
return one
}

-if one.incomplete || two.incomplete {
-hasErrors = true
-}
-
status := ConnectorOperationStatus{
-lastOperation: one.lastOperation,
-ObjectCount: one.ObjectCount + two.ObjectCount,
-FolderCount: one.FolderCount + two.FolderCount,
-Successful: one.Successful + two.Successful,
-// TODO: remove in favor of fault.Errors
-ErrorCount: one.ErrorCount + two.ErrorCount,
-Err: multierror.Append(one.Err, two.Err).ErrorOrNil(),
-bytes: one.bytes + two.bytes,
-incomplete: hasErrors,
-incompleteReason: one.incompleteReason + ", " + two.incompleteReason,
-additionalDetails: one.additionalDetails + ", " + two.additionalDetails,
+Folders: one.Folders + two.Folders,
+Metrics: CombineMetrics(one.Metrics, two.Metrics),
+details: one.details + ", " + two.details,
+op: one.op,
}

return status

@@ -123,23 +88,19 @@ func MergeStatus(one, two ConnectorOperationStatus) ConnectorOperationStatus {
func (cos *ConnectorOperationStatus) String() string {
var operationStatement string

-switch cos.lastOperation {
+switch cos.op {
case Backup:
operationStatement = "Downloaded from "
case Restore:
operationStatement = "Restored content to "
}

-message := fmt.Sprintf("Action: %s performed on %d of %d objects (%s) within %d directories.",
-cos.lastOperation.String(),
-cos.Successful,
-cos.ObjectCount,
-humanize.Bytes(uint64(cos.bytes)),
-cos.FolderCount)
-
-if cos.incomplete {
-message += " " + cos.incompleteReason
-}
-
-return message + " " + operationStatement + cos.additionalDetails
+return fmt.Sprintf("Action: %s performed on %d of %d objects (%s) within %d directories. %s %s",
+cos.op.String(),
+cos.Metrics.Successes,
+cos.Metrics.Objects,
+humanize.Bytes(uint64(cos.Metrics.Bytes)),
+cos.Folders,
+operationStatement,
+cos.details)
}
@@ -1,53 +1,46 @@
package support

import (
-"errors"
"testing"

"github.com/stretchr/testify/assert"
-"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"

"github.com/alcionai/corso/src/internal/tester"
)

-type GCStatusTestSuite struct {
+type StatusUnitSuite struct {
tester.Suite
}

func TestGraphConnectorStatus(t *testing.T) {
-suite.Run(t, &GCStatusTestSuite{Suite: tester.NewUnitSuite(t)})
+suite.Run(t, &StatusUnitSuite{tester.NewUnitSuite(t)})
}

-// operationType, objects, success, folders, errCount int, errStatus string
-
-type statusParams struct {
-operationType Operation
-objects int
-success int
-folders int
-err error
+func metricsMatch(t *testing.T, expect, result CollectionMetrics) {
+assert.Equal(t, expect.Bytes, result.Bytes, "bytes")
+assert.Equal(t, expect.Objects, result.Objects, "objects")
+assert.Equal(t, expect.Successes, result.Successes, "successes")
}

-func (suite *GCStatusTestSuite) TestCreateStatus() {
+func (suite *StatusUnitSuite) TestCreateStatus() {
table := []struct {
-name string
-params statusParams
-expect assert.BoolAssertionFunc
+name string
+op Operation
+metrics CollectionMetrics
+folders int
}{
{
-name: "Test: Status Success",
-params: statusParams{Backup, 12, 12, 3, nil},
-expect: assert.False,
+name: "Backup",
+op: Backup,
+metrics: CollectionMetrics{12, 12, 3},
+folders: 1,
},
{
-name: "Test: Status Failed",
-params: statusParams{
-Restore,
-12, 9, 8,
-WrapAndAppend("tres", errors.New("three"), WrapAndAppend("arc376", errors.New("one"), errors.New("two"))),
-},
-expect: assert.True,
+name: "Backup",
+op: Restore,
+metrics: CollectionMetrics{12, 9, 8},
+folders: 2,
},
}
for _, test := range table {

@@ -59,89 +52,60 @@ func (suite *GCStatusTestSuite) TestCreateStatus() {

result := CreateStatus(
ctx,
-test.params.operationType,
-test.params.folders,
-CollectionMetrics{test.params.objects, test.params.success, 0},
-test.params.err,
-"",
-)
-test.expect(t, result.incomplete, "status is incomplete")
+test.op,
+test.folders,
+test.metrics,
+"details")
+assert.Equal(t, test.op, result.op, "operation")
+assert.Equal(t, test.folders, result.Folders, "folders")
+metricsMatch(t, test.metrics, result.Metrics)
})
}
}

-func (suite *GCStatusTestSuite) TestCreateStatus_InvalidStatus() {
-t := suite.T()
-params := statusParams{Backup, 9, 3, 13, errors.New("invalidcl")}
-
-require.NotPanics(t, func() {
-ctx, flush := tester.NewContext()
-defer flush()
-
-CreateStatus(
-ctx,
-params.operationType,
-params.folders,
-CollectionMetrics{
-params.objects,
-params.success,
-0,
-},
-params.err,
-"",
-)
-})
-}
-
-func (suite *GCStatusTestSuite) TestMergeStatus() {
+func (suite *StatusUnitSuite) TestMergeStatus() {
ctx, flush := tester.NewContext()
defer flush()

table := []struct {
-name string
-one ConnectorOperationStatus
-two ConnectorOperationStatus
-expected statusParams
-isIncomplete assert.BoolAssertionFunc
+name string
+one ConnectorOperationStatus
+two ConnectorOperationStatus
+expectOp Operation
+expectMetrics CollectionMetrics
+expectFolders int
}{
{
-name: "Test: Status + unknown",
-one: *CreateStatus(ctx, Backup, 1, CollectionMetrics{1, 1, 0}, nil, ""),
-two: ConnectorOperationStatus{},
-expected: statusParams{Backup, 1, 1, 1, nil},
-isIncomplete: assert.False,
+name: "Test: Status + unknown",
+one: *CreateStatus(ctx, Backup, 1, CollectionMetrics{1, 1, 0}, ""),
+two: ConnectorOperationStatus{},
+expectOp: Backup,
+expectMetrics: CollectionMetrics{1, 1, 0},
+expectFolders: 1,
},
{
-name: "Test: unknown + Status",
-one: ConnectorOperationStatus{},
-two: *CreateStatus(ctx, Backup, 1, CollectionMetrics{1, 1, 0}, nil, ""),
-expected: statusParams{Backup, 1, 1, 1, nil},
-isIncomplete: assert.False,
+name: "Test: unknown + Status",
+one: ConnectorOperationStatus{},
+two: *CreateStatus(ctx, Backup, 1, CollectionMetrics{1, 1, 0}, ""),
+expectOp: Backup,
+expectMetrics: CollectionMetrics{1, 1, 0},
+expectFolders: 1,
},
{
-name: "Test: Successful + Successful",
-one: *CreateStatus(ctx, Backup, 1, CollectionMetrics{1, 1, 0}, nil, ""),
-two: *CreateStatus(ctx, Backup, 3, CollectionMetrics{3, 3, 0}, nil, ""),
-expected: statusParams{Backup, 4, 4, 4, nil},
-isIncomplete: assert.False,
+name: "Test: Successful + Successful",
+one: *CreateStatus(ctx, Backup, 1, CollectionMetrics{1, 1, 0}, ""),
+two: *CreateStatus(ctx, Backup, 3, CollectionMetrics{3, 3, 0}, ""),
+expectOp: Backup,
+expectMetrics: CollectionMetrics{4, 4, 0},
+expectFolders: 4,
},
{
-name: "Test: Successful + Unsuccessful",
-one: *CreateStatus(ctx, Backup, 13, CollectionMetrics{17, 17, 0}, nil, ""),
-two: *CreateStatus(
-ctx,
-Backup,
-8,
-CollectionMetrics{
-12,
-9,
-0,
-},
-WrapAndAppend("tres", errors.New("three"), WrapAndAppend("arc376", errors.New("one"), errors.New("two"))),
-"",
-),
-expected: statusParams{Backup, 29, 26, 21, nil},
-isIncomplete: assert.True,
+name: "Test: Successful + Unsuccessful",
+one: *CreateStatus(ctx, Backup, 13, CollectionMetrics{17, 17, 0}, ""),
+two: *CreateStatus(ctx, Backup, 8, CollectionMetrics{12, 9, 0}, ""),
+expectOp: Backup,
+expectMetrics: CollectionMetrics{29, 26, 0},
+expectFolders: 21,
},
}

@@ -149,12 +113,10 @@ func (suite *GCStatusTestSuite) TestMergeStatus() {
suite.Run(test.name, func() {
t := suite.T()

-returned := MergeStatus(test.one, test.two)
-assert.Equal(t, returned.FolderCount, test.expected.folders)
-assert.Equal(t, returned.ObjectCount, test.expected.objects)
-assert.Equal(t, returned.lastOperation, test.expected.operationType)
-assert.Equal(t, returned.Successful, test.expected.success)
-test.isIncomplete(t, returned.incomplete)
+result := MergeStatus(test.one, test.two)
+assert.Equal(t, test.expectFolders, result.Folders, "folders")
+assert.Equal(t, test.expectOp, result.op, "operation")
+metricsMatch(t, test.expectMetrics, result.Metrics)
})
}
}
@@ -290,10 +290,6 @@ func (op *BackupOperation) do(
}

opStats.gc = gc.AwaitStatus()
-// TODO(keepers): remove when fault.Errors handles all iterable error aggregation.
-if opStats.gc.ErrorCount > 0 {
-return nil, opStats.gc.Err
-}

logger.Ctx(ctx).Debug(gc.PrintableStatus())

@@ -657,11 +653,11 @@ func (op *BackupOperation) persistResults(
return errors.New("backup population never completed")
}

-if opStats.gc.Successful == 0 {
+if opStats.gc.Metrics.Successes == 0 {
op.Status = NoData
}

-op.Results.ItemsRead = opStats.gc.Successful
+op.Results.ItemsRead = opStats.gc.Metrics.Successes

return nil
}

@@ -391,7 +391,7 @@ func (suite *BackupOpSuite) TestBackupOperation_PersistResults() {
TotalUploadedBytes: 1,
},
gc: &support.ConnectorOperationStatus{
-Successful: 1,
+Metrics: support.CollectionMetrics{Successes: 1},
},
},
},

@@ -431,7 +431,7 @@ func (suite *BackupOpSuite) TestBackupOperation_PersistResults() {
test.expectErr(t, op.persistResults(now, &test.stats))

assert.Equal(t, test.expectStatus.String(), op.Status.String(), "status")
-assert.Equal(t, test.stats.gc.Successful, op.Results.ItemsRead, "items read")
+assert.Equal(t, test.stats.gc.Metrics.Successes, op.Results.ItemsRead, "items read")
assert.Equal(t, test.stats.k.TotalFileCount, op.Results.ItemsWritten, "items written")
assert.Equal(t, test.stats.k.TotalHashedBytes, op.Results.BytesRead, "bytes read")
assert.Equal(t, test.stats.k.TotalUploadedBytes, op.Results.BytesUploaded, "bytes written")
@@ -263,10 +263,6 @@ func (op *RestoreOperation) do(
restoreComplete <- struct{}{}

opStats.gc = gc.AwaitStatus()
-// TODO(keepers): remove when fault.Errors handles all iterable error aggregation.
-if opStats.gc.ErrorCount > 0 {
-return nil, opStats.gc.Err
-}

logger.Ctx(ctx).Debug(gc.PrintableStatus())

@@ -305,11 +301,11 @@ func (op *RestoreOperation) persistResults(
return errors.New("restoration never completed")
}

-if opStats.gc.Successful == 0 {
+if opStats.gc.Metrics.Successes == 0 {
op.Status = NoData
}

-op.Results.ItemsWritten = opStats.gc.Successful
+op.Results.ItemsWritten = opStats.gc.Metrics.Successes

dur := op.Results.CompletedAt.Sub(op.Results.StartedAt)

@@ -68,8 +68,10 @@ func (suite *RestoreOpSuite) TestRestoreOperation_PersistResults() {
},
},
gc: &support.ConnectorOperationStatus{
-ObjectCount: 1,
-Successful: 1,
+Metrics: support.CollectionMetrics{
+Objects: 1,
+Successes: 1,
+},
},
},
},

@@ -111,7 +113,7 @@ func (suite *RestoreOpSuite) TestRestoreOperation_PersistResults() {

assert.Equal(t, test.expectStatus.String(), op.Status.String(), "status")
assert.Equal(t, len(test.stats.cs), op.Results.ItemsRead, "items read")
-assert.Equal(t, test.stats.gc.Successful, op.Results.ItemsWritten, "items written")
+assert.Equal(t, test.stats.gc.Metrics.Successes, op.Results.ItemsWritten, "items written")
assert.Equal(t, test.stats.bytesRead.NumBytes, op.Results.BytesRead, "resource owners")
assert.Equal(t, test.stats.resourceCount, op.Results.ResourceOwners, "resource owners")
assert.Equal(t, test.stats.readErr, op.Results.ReadErrors, "read errors")