update files count and total size on CLI for backup (#3480)
Currently the backup output shows:

- total backup size = size of files + size of metafiles
- number of files = number of files + number of metafiles

With this change, the backup command reports the size of files only, and the count includes files only.

NOTE: the results of all three current services are affected.

#### Does this PR need a docs update or release note?

- [ ] ✅ Yes, it's included
- [ ] 🕐 Yes, but in a later PR
- [x] ⛔ No

#### Type of change

- [ ] 🐛 Bugfix

#### Issue(s)

* https://github.com/alcionai/corso/issues/3304

#### Test Plan

- [x] 💪 Manual
- [ ] ⚡ Unit test
- [x] 💚 E2E
Commit 06b04c6eff (parent 8d68bacfb7)
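To illustrate the behavioral change, here is a minimal sketch with hypothetical types (not Corso's actual model): metadata files no longer inflate the reported file count or total size.

```go
package main

import "fmt"

// entry is a hypothetical stand-in for a backup details entry.
type entry struct {
	name   string
	size   int64
	isMeta bool // true for .meta/.dirmeta permission blobs
}

// summarize computes what the CLI now reports: only non-meta files
// contribute to the displayed count and size.
func summarize(entries []entry) (files int, bytes int64) {
	for _, e := range entries {
		if e.isMeta {
			continue // metafiles are excluded from both totals
		}
		files++
		bytes += e.size
	}

	return files, bytes
}

func main() {
	entries := []entry{
		{"report.docx", 2048, false},
		{"report.docx.meta", 64, true},
		{"folder.dirmeta", 32, true},
	}

	files, bytes := summarize(entries)
	fmt.Printf("files: %d, size: %d bytes\n", files, bytes) // files: 1, size: 2048 bytes
}
```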
```diff
@@ -50,14 +50,16 @@ var (
 type BackupStats struct {
 	SnapshotID string
 
 	TotalHashedBytes   int64
 	TotalUploadedBytes int64
+	TotalNonMetaUploadedBytes int64
 
 	TotalFileCount      int
+	TotalNonMetaFileCount int
 	CachedFileCount     int
 	UncachedFileCount   int
 	TotalDirectoryCount int
 	ErrorCount          int
 
 	IgnoredErrorCount         int
 	ExpectedIgnoredErrorCount int
```
```diff
@@ -347,6 +347,8 @@ func (op *BackupOperation) do(
 		mans,
 		toMerge,
 		deets,
+		writeStats,
+		op.Selectors.PathService(),
 		op.Errors)
 	if err != nil {
 		return nil, clues.Wrap(err, "merging details")
```
```diff
@@ -704,8 +706,17 @@ func mergeDetails(
 	mans []*kopia.ManifestEntry,
 	dataFromBackup kopia.DetailsMergeInfoer,
 	deets *details.Builder,
+	writeStats *kopia.BackupStats,
+	serviceType path.ServiceType,
 	errs *fault.Bus,
 ) error {
+	detailsModel := deets.Details().DetailsModel
+
+	// getting the values in writeStats before anything else so that we don't get a return from
+	// conditions like no backup data.
+	writeStats.TotalNonMetaFileCount = len(detailsModel.FilterMetaFiles().Items())
+	writeStats.TotalNonMetaUploadedBytes = detailsModel.SumNonMetaFileSizes()
+
 	// Don't bother loading any of the base details if there's nothing we need to merge.
 	if dataFromBackup == nil || dataFromBackup.ItemsToMerge() == 0 {
 		return nil
```
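The ordering in this hunk matters: the non-meta totals are captured from the details model before the early return, so a backup with nothing to merge still reports them. A generic sketch of that pattern (hypothetical names, not Corso code):

```go
package example

// Item and Stats are hypothetical stand-ins.
type Item struct{ Size int64 }

type Stats struct {
	Count int
	Bytes int64
}

// fillStats mirrors the ordering in mergeDetails above: populate the
// caller's stats before any early return, so even a no-op merge
// still reports accurate totals.
func fillStats(out *Stats, items []Item, nothingToMerge bool) error {
	for _, it := range items {
		out.Count++
		out.Bytes += it.Size
	}

	if nothingToMerge {
		return nil // the totals above are already recorded
	}

	// ...the heavier merge work would go here...
	return nil
}
```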
```diff
@@ -841,6 +852,8 @@ func (op *BackupOperation) persistResults(
 	op.Results.BytesRead = opStats.k.TotalHashedBytes
 	op.Results.BytesUploaded = opStats.k.TotalUploadedBytes
 	op.Results.ItemsWritten = opStats.k.TotalFileCount
+	op.Results.NonMetaBytesUploaded = opStats.k.TotalNonMetaUploadedBytes
+	op.Results.NonMetaItemsWritten = opStats.k.TotalNonMetaFileCount
 	op.Results.ResourceOwners = opStats.resourceCount
 
 	if opStats.gc == nil {
```
```diff
@@ -961,6 +961,7 @@ func testExchangeContinuousBackups(suite *BackupOpIntegrationSuite, toggles cont
 		deltaItemsWritten    int
 		nonDeltaItemsRead    int
 		nonDeltaItemsWritten int
+		nonMetaItemsWritten  int
 	}{
 		{
 			name: "clean, no changes",
```
```diff
@@ -969,6 +970,7 @@ func testExchangeContinuousBackups(suite *BackupOpIntegrationSuite, toggles cont
 			deltaItemsWritten:    0,
 			nonDeltaItemsRead:    8,
 			nonDeltaItemsWritten: 0, // unchanged items are not counted towards write
+			nonMetaItemsWritten:  4,
 		},
 		{
 			name: "move an email folder to a subfolder",
```
```diff
@@ -992,6 +994,7 @@ func testExchangeContinuousBackups(suite *BackupOpIntegrationSuite, toggles cont
 			deltaItemsWritten:    2,
 			nonDeltaItemsRead:    8,
 			nonDeltaItemsWritten: 2,
+			nonMetaItemsWritten:  6,
 		},
 		{
 			name: "delete a folder",
```
```diff
@@ -1018,6 +1021,7 @@ func testExchangeContinuousBackups(suite *BackupOpIntegrationSuite, toggles cont
 			deltaItemsWritten:    0, // deletions are not counted as "writes"
 			nonDeltaItemsRead:    4,
 			nonDeltaItemsWritten: 0,
+			nonMetaItemsWritten:  4,
 		},
 		{
 			name: "add a new folder",
```
```diff
@@ -1070,6 +1074,7 @@ func testExchangeContinuousBackups(suite *BackupOpIntegrationSuite, toggles cont
 			deltaItemsWritten:    4,
 			nonDeltaItemsRead:    8,
 			nonDeltaItemsWritten: 4,
+			nonMetaItemsWritten:  8,
 		},
 		{
 			name: "rename a folder",
```
```diff
@@ -1125,6 +1130,7 @@ func testExchangeContinuousBackups(suite *BackupOpIntegrationSuite, toggles cont
 			deltaItemsWritten:    0, // two items per category
 			nonDeltaItemsRead:    8,
 			nonDeltaItemsWritten: 0,
+			nonMetaItemsWritten:  4,
 		},
 		{
 			name: "add a new item",
```
```diff
@@ -1178,6 +1184,7 @@ func testExchangeContinuousBackups(suite *BackupOpIntegrationSuite, toggles cont
 			deltaItemsWritten:    2,
 			nonDeltaItemsRead:    10,
 			nonDeltaItemsWritten: 2,
+			nonMetaItemsWritten:  6,
 		},
 		{
 			name: "delete an existing item",
```
```diff
@@ -1231,6 +1238,7 @@ func testExchangeContinuousBackups(suite *BackupOpIntegrationSuite, toggles cont
 			deltaItemsWritten:    0, // deletes are not counted as "writes"
 			nonDeltaItemsRead:    8,
 			nonDeltaItemsWritten: 0,
+			nonMetaItemsWritten:  4,
 		},
 	}
 
```
```diff
@@ -1263,7 +1271,7 @@ func testExchangeContinuousBackups(suite *BackupOpIntegrationSuite, toggles cont
 				assert.Equal(t, test.nonDeltaItemsRead+4, incBO.Results.ItemsRead, "non delta items read")
 				assert.Equal(t, test.nonDeltaItemsWritten+4, incBO.Results.ItemsWritten, "non delta items written")
 			}
-
+			assert.Equal(t, test.nonMetaItemsWritten, incBO.Results.ItemsWritten, "non meta incremental items write")
 			assert.NoError(t, incBO.Errors.Failure(), "incremental non-recoverable error", clues.ToCore(incBO.Errors.Failure()))
 			assert.Empty(t, incBO.Errors.Recovered(), "incremental recoverable/iteration errors")
 			assert.Equal(t, 1, incMB.TimesCalled[events.BackupStart], "incremental backup-start events")
```
```diff
@@ -1527,9 +1535,10 @@ func runDriveIncrementalTest(
 	table := []struct {
 		name string
 		// performs the incremental update required for the test.
 		updateFiles func(t *testing.T)
 		itemsRead           int
 		itemsWritten        int
+		nonMetaItemsWritten int
 	}{
 		{
 			name: "clean incremental, no changes",
```
```diff
@@ -1556,8 +1565,9 @@ func runDriveIncrementalTest(
 
 				expectDeets.AddItem(driveID, makeLocRef(container1), newFileID)
 			},
 			itemsRead:           1, // .data file for newitem
 			itemsWritten:        3, // .data and .meta for newitem, .dirmeta for parent
+			nonMetaItemsWritten: 1, // .data file for newitem
 		},
 		{
 			name: "add permission to new file",
```
```diff
@@ -1578,8 +1588,9 @@ func runDriveIncrementalTest(
 				require.NoErrorf(t, err, "adding permission to file %v", clues.ToCore(err))
 				// no expectedDeets: metadata isn't tracked
 			},
 			itemsRead:           1, // .data file for newitem
 			itemsWritten:        2, // .meta for newitem, .dirmeta for parent (.data is not written as it is not updated)
+			nonMetaItemsWritten: 1, // the file for which permission was updated
 		},
 		{
 			name: "remove permission from new file",
```
```diff
@@ -1599,8 +1610,9 @@ func runDriveIncrementalTest(
 				require.NoErrorf(t, err, "removing permission from file %v", clues.ToCore(err))
 				// no expectedDeets: metadata isn't tracked
 			},
 			itemsRead:           1, // .data file for newitem
 			itemsWritten:        2, // .meta for newitem, .dirmeta for parent (.data is not written as it is not updated)
+			nonMetaItemsWritten: 1, // .data file for newitem
 		},
 		{
 			name: "add permission to container",
```
```diff
@@ -1621,8 +1633,9 @@ func runDriveIncrementalTest(
 				require.NoErrorf(t, err, "adding permission to container %v", clues.ToCore(err))
 				// no expectedDeets: metadata isn't tracked
 			},
 			itemsRead:           0,
 			itemsWritten:        1, // .dirmeta for collection
+			nonMetaItemsWritten: 0, // no files updated as update on container
 		},
 		{
 			name: "remove permission from container",
```
```diff
@@ -1643,8 +1656,9 @@ func runDriveIncrementalTest(
 				require.NoErrorf(t, err, "removing permission from container %v", clues.ToCore(err))
 				// no expectedDeets: metadata isn't tracked
 			},
 			itemsRead:           0,
 			itemsWritten:        1, // .dirmeta for collection
+			nonMetaItemsWritten: 0, // no files updated
 		},
 		{
 			name: "update contents of a file",
```
```diff
@@ -1658,8 +1672,9 @@ func runDriveIncrementalTest(
 				require.NoErrorf(t, err, "updating file contents: %v", clues.ToCore(err))
 				// no expectedDeets: neither file id nor location changed
 			},
 			itemsRead:           1, // .data file for newitem
 			itemsWritten:        3, // .data and .meta for newitem, .dirmeta for parent
+			nonMetaItemsWritten: 1, // .data file for newitem
 		},
 		{
 			name: "rename a file",
```
```diff
@@ -1681,8 +1696,9 @@ func runDriveIncrementalTest(
 					driveItem)
 				require.NoError(t, err, "renaming file %v", clues.ToCore(err))
 			},
 			itemsRead:           1, // .data file for newitem
 			itemsWritten:        3, // .data and .meta for newitem, .dirmeta for parent
+			nonMetaItemsWritten: 1, // .data file for newitem
 			// no expectedDeets: neither file id nor location changed
 		},
 		{
```
```diff
@@ -1710,8 +1726,9 @@ func runDriveIncrementalTest(
 					makeLocRef(container2),
 					ptr.Val(newFile.GetId()))
 			},
 			itemsRead:           1, // .data file for newitem
 			itemsWritten:        3, // .data and .meta for newitem, .dirmeta for parent
+			nonMetaItemsWritten: 1, // .data file for new item
 		},
 		{
 			name: "delete file",
```
```diff
@@ -1725,8 +1742,9 @@ func runDriveIncrementalTest(
 
 				expectDeets.RemoveItem(driveID, makeLocRef(container2), ptr.Val(newFile.GetId()))
 			},
 			itemsRead:           0,
 			itemsWritten:        0,
+			nonMetaItemsWritten: 0,
 		},
 		{
 			name: "move a folder to a subfolder",
```
```diff
@@ -1753,8 +1771,9 @@ func runDriveIncrementalTest(
 					makeLocRef(container2),
 					makeLocRef(container1))
 			},
 			itemsRead:           0,
 			itemsWritten:        7, // 2*2(data and meta of 2 files) + 3 (dirmeta of two moved folders and target)
+			nonMetaItemsWritten: 0,
 		},
 		{
 			name: "rename a folder",
```
```diff
@@ -1783,8 +1802,9 @@ func runDriveIncrementalTest(
 					makeLocRef(container1, container2),
 					makeLocRef(container1, containerRename))
 			},
 			itemsRead:           0,
 			itemsWritten:        7, // 2*2(data and meta of 2 files) + 3 (dirmeta of two moved folders and target)
+			nonMetaItemsWritten: 0,
 		},
 		{
 			name: "delete a folder",
```
```diff
@@ -1799,8 +1819,9 @@ func runDriveIncrementalTest(
 
 				expectDeets.RemoveLocation(driveID, makeLocRef(container1, containerRename))
 			},
 			itemsRead:           0,
 			itemsWritten:        0,
+			nonMetaItemsWritten: 0,
 		},
 		{
 			name: "add a new folder",
```
```diff
@@ -1831,8 +1852,9 @@ func runDriveIncrementalTest(
 
 				expectDeets.AddLocation(driveID, container3)
 			},
 			itemsRead:           2, // 2 .data for 2 files
 			itemsWritten:        6, // read items + 2 directory meta
+			nonMetaItemsWritten: 2, // 2 .data for 2 files
 		},
 	}
 	for _, test := range table {
```
```diff
@@ -1862,9 +1884,10 @@ func runDriveIncrementalTest(
 			// do some additional checks to ensure the incremental dealt with fewer items.
 			// +2 on read/writes to account for metadata: 1 delta and 1 path.
 			var (
 				expectWrites        = test.itemsWritten + 2
+				expectNonMetaWrites = test.nonMetaItemsWritten
 				expectReads         = test.itemsRead + 2
 				assertReadWrite     = assert.Equal
 			)
 
 			// Sharepoint can produce a superset of permissions by nature of
```
```diff
@@ -1876,6 +1899,7 @@ func runDriveIncrementalTest(
 			}
 
 			assertReadWrite(t, expectWrites, incBO.Results.ItemsWritten, "incremental items written")
+			assertReadWrite(t, expectNonMetaWrites, incBO.Results.NonMetaItemsWritten, "incremental non-meta items written")
 			assertReadWrite(t, expectReads, incBO.Results.ItemsRead, "incremental items read")
 
 			assert.NoError(t, incBO.Errors.Failure(), "incremental non-recoverable error", clues.ToCore(incBO.Errors.Failure()))
```
```diff
@@ -1976,6 +2000,7 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_oneDriveOwnerMigration() {
 
 	// 2 on read/writes to account for metadata: 1 delta and 1 path.
 	assert.LessOrEqual(t, 2, incBO.Results.ItemsWritten, "items written")
+	assert.LessOrEqual(t, 1, incBO.Results.NonMetaItemsWritten, "non meta items written")
 	assert.LessOrEqual(t, 2, incBO.Results.ItemsRead, "items read")
 	assert.NoError(t, incBO.Errors.Failure(), "non-recoverable error", clues.ToCore(incBO.Errors.Failure()))
 	assert.Empty(t, incBO.Errors.Recovered(), "recoverable/iteration errors")
```
```diff
@@ -1198,6 +1198,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
 			mds := ssmock.Streamer{Deets: test.populatedDetails}
 			w := &store.Wrapper{Storer: mockBackupStorer{entries: test.populatedModels}}
 			deets := details.Builder{}
+			writeStats := kopia.BackupStats{}
 
 			err := mergeDetails(
 				ctx,
```
```diff
@@ -1206,6 +1207,8 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
 				test.inputMans,
 				test.mdm,
 				&deets,
+				&writeStats,
+				path.OneDriveService,
 				fault.New(true))
 			test.errCheck(t, err, clues.ToCore(err))
 
```
```diff
@@ -1307,9 +1310,10 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsFolde
 	defer flush()
 
 	var (
 		mds        = ssmock.Streamer{Deets: populatedDetails}
 		w          = &store.Wrapper{Storer: mockBackupStorer{entries: populatedModels}}
 		deets      = details.Builder{}
+		writeStats = kopia.BackupStats{}
 	)
 
 	err := mergeDetails(
```
```diff
@@ -1319,6 +1323,8 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsFolde
 		inputMans,
 		mdm,
 		&deets,
+		&writeStats,
+		path.ExchangeService,
 		fault.New(true))
 	assert.NoError(t, err, clues.ToCore(err))
 	compareDeetEntries(t, expectedEntries, deets.Details().Entries)
```
```diff
@@ -8,11 +8,13 @@ import (
 // ReadWrites tracks the total count of reads and writes. ItemsRead
 // and ItemsWritten counts are assumed to be successful reads.
 type ReadWrites struct {
 	BytesRead            int64 `json:"bytesRead,omitempty"`
 	BytesUploaded        int64 `json:"bytesUploaded,omitempty"`
 	ItemsRead            int   `json:"itemsRead,omitempty"`
+	NonMetaBytesUploaded int64 `json:"nonMetaBytesUploaded,omitempty"`
+	NonMetaItemsWritten  int   `json:"nonMetaItemsWritten,omitempty"`
 	ItemsWritten         int   `json:"itemsWritten,omitempty"`
 	ResourceOwners       int   `json:"resourceOwners,omitempty"`
 }
 
 // StartAndEndTime tracks a paired starting time and ending time.
```
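As a quick check of how the two new fields serialize (standard encoding/json behavior; the struct below just mirrors the tags in the hunk above):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// readWrites copies the relevant fields and tags from the diff above.
type readWrites struct {
	BytesUploaded        int64 `json:"bytesUploaded,omitempty"`
	NonMetaBytesUploaded int64 `json:"nonMetaBytesUploaded,omitempty"`
	ItemsWritten         int   `json:"itemsWritten,omitempty"`
	NonMetaItemsWritten  int   `json:"nonMetaItemsWritten,omitempty"`
}

func main() {
	rw := readWrites{
		BytesUploaded:        2112, // includes metafile bytes
		NonMetaBytesUploaded: 2048, // user-visible file bytes only
		ItemsWritten:         3,    // e.g. .data + .meta + .dirmeta
		NonMetaItemsWritten:  1,    // just the .data file
	}

	out, err := json.Marshal(rw)
	if err != nil {
		panic(err)
	}

	fmt.Println(string(out))
	// {"bytesUploaded":2112,"nonMetaBytesUploaded":2048,"itemsWritten":3,"nonMetaItemsWritten":1}
}
```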
```diff
@@ -284,12 +284,12 @@ func (b Backup) toStats() backupStats {
 	return backupStats{
 		ID:            string(b.ID),
 		BytesRead:     b.BytesRead,
-		BytesUploaded: b.BytesUploaded,
+		BytesUploaded: b.NonMetaBytesUploaded,
 		EndedAt:       b.CompletedAt,
 		ErrorCount:    b.ErrorCount,
 		ItemsRead:     b.ItemsRead,
 		ItemsSkipped:  b.TotalSkippedItems,
-		ItemsWritten:  b.ItemsWritten,
+		ItemsWritten:  b.NonMetaItemsWritten,
 		StartedAt:     b.StartedAt,
 	}
 }
```
```diff
@@ -49,10 +49,12 @@ func stubBackup(t time.Time, ownerID, ownerName string) backup.Backup {
 		ErrorCount: 2,
 		Failure:    "read, write",
 		ReadWrites: stats.ReadWrites{
 			BytesRead:            301,
 			BytesUploaded:        301,
+			NonMetaBytesUploaded: 301,
 			ItemsRead:            1,
+			NonMetaItemsWritten:  1,
 			ItemsWritten:         1,
 		},
 		StartAndEndTime: stats.StartAndEndTime{
 			StartedAt: t,
```
```diff
@@ -248,7 +250,7 @@ func (suite *BackupUnitSuite) TestBackup_MinimumPrintable() {
 	assert.Equal(t, now, result.Stats.StartedAt, "started at")
 	assert.Equal(t, b.Status, result.Status, "status")
 	assert.Equal(t, b.BytesRead, result.Stats.BytesRead, "size")
-	assert.Equal(t, b.BytesUploaded, result.Stats.BytesUploaded, "stored size")
+	assert.Equal(t, b.NonMetaBytesUploaded, result.Stats.BytesUploaded, "stored size")
 	assert.Equal(t, b.Selector.DiscreteOwner, result.Owner, "owner")
 }
 
```
```diff
@@ -240,12 +240,27 @@ func (dm DetailsModel) FilterMetaFiles() DetailsModel {
 	return d2
 }
 
+// SumNonMetaFileSizes returns the total size of items excluding all the
+// .meta files from the items.
+func (dm DetailsModel) SumNonMetaFileSizes() int64 {
+	var size int64
+
+	// Items will provide only files and filter out folders
+	for _, ent := range dm.FilterMetaFiles().Items() {
+		size += ent.size()
+	}
+
+	return size
+}
+
 // Check if a file is a metadata file. These are used to store
 // additional data like permissions (in case of Drive items) and are
 // not to be treated as regular files.
 func (de Entry) isMetaFile() bool {
 	// sharepoint types not needed, since sharepoint permissions were
 	// added after IsMeta was deprecated.
+	// Earlier onedrive backups used to store both metafiles and files in details.
+	// So filter out just the onedrive items and check for metafiles
 	return de.ItemInfo.OneDrive != nil && de.ItemInfo.OneDrive.IsMeta
 }
 
```
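A toy illustration of the filter-then-sum pattern SumNonMetaFileSizes relies on (simplified stand-in types; the real DetailsModel, FilterMetaFiles, and Items live in Corso's details package):

```go
package main

import "fmt"

// ent is a simplified stand-in for a details entry.
type ent struct {
	size   int64
	isMeta bool // .meta/.dirmeta permission blob
	isDir  bool // folder entry
}

type model struct{ entries []ent }

// filterMetaFiles mirrors FilterMetaFiles: drop metafile entries.
func (m model) filterMetaFiles() model {
	var out model
	for _, e := range m.entries {
		if !e.isMeta {
			out.entries = append(out.entries, e)
		}
	}
	return out
}

// items mirrors Items: keep files, filter out folders.
func (m model) items() []ent {
	var out []ent
	for _, e := range m.entries {
		if !e.isDir {
			out = append(out, e)
		}
	}
	return out
}

// sumNonMetaFileSizes composes the two filters, as in the diff above.
func (m model) sumNonMetaFileSizes() int64 {
	var size int64
	for _, e := range m.filterMetaFiles().items() {
		size += e.size
	}
	return size
}

func main() {
	m := model{entries: []ent{
		{size: 2048},             // regular file
		{size: 64, isMeta: true}, // its .meta blob
		{isDir: true},            // parent folder entry
	}}

	fmt.Println(m.sumNonMetaFileSizes()) // 2048
}
```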