update file count and total size on CLI for backup (#3480)

<!-- PR description-->
Currently the backup output shows:
- total backup size: size of files + size of metafiles
- number of files: count of files + count of metafiles
With this change, the backup command output reports only the size of the files themselves, and the count includes only files, excluding metafiles.

NOTE: the results for all three currently supported services will be affected.
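
For illustration, the heart of the change is "count and sum only non-meta files". Below is a minimal, self-contained Go sketch of that idea; the `Entry` type is a simplified stand-in for the real details entries, and the actual implementation uses `DetailsModel.FilterMetaFiles()` and `SumNonMetaFileSizes()` from the diff below:

```go
package main

import "fmt"

// Entry is a simplified stand-in for a backup details entry.
type Entry struct {
	Name   string
	Size   int64
	IsMeta bool // true for .meta/.dirmeta permission files
}

// sumNonMetaFiles mirrors the new behavior: metafiles no longer count
// toward the file count or total size shown on the CLI.
func sumNonMetaFiles(items []Entry) (count int, size int64) {
	for _, it := range items {
		if it.IsMeta {
			continue
		}
		count++
		size += it.Size
	}
	return count, size
}

func main() {
	items := []Entry{
		{Name: "report.docx", Size: 1024},
		{Name: "report.docx.meta", Size: 64, IsMeta: true},
		{Name: ".dirmeta", Size: 32, IsMeta: true},
	}
	n, sz := sumNonMetaFiles(items)
	fmt.Printf("files: %d, size: %d bytes\n", n, sz) // files: 1, size: 1024 bytes
}
```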

#### Does this PR need a docs update or release note?

- [ ] Yes, it's included
- [ ] 🕐 Yes, but in a later PR
- [x] No

#### Type of change

<!--- Please check the type of change your PR introduces: --->
- [ ] 🐛 Bugfix

#### Issue(s)

<!-- Can reference multiple issues. Use one of the following "magic words" - "closes, fixes" to auto-close the Github issue. -->
* https://github.com/alcionai/corso/issues/3304

#### Test Plan

<!-- How will this be tested prior to merging.-->
- [x] 💪 Manual
- [ ] Unit test
- [x] 💚 E2E
neha_gupta 2023-05-31 23:32:38 +05:30 committed by GitHub
parent 8d68bacfb7
commit 06b04c6eff
8 changed files with 120 additions and 55 deletions

View File

@@ -50,14 +50,16 @@ var (
type BackupStats struct {
SnapshotID string
TotalHashedBytes int64
TotalUploadedBytes int64
TotalHashedBytes int64
TotalUploadedBytes int64
TotalNonMetaUploadedBytes int64
TotalFileCount int
CachedFileCount int
UncachedFileCount int
TotalDirectoryCount int
ErrorCount int
TotalFileCount int
TotalNonMetaFileCount int
CachedFileCount int
UncachedFileCount int
TotalDirectoryCount int
ErrorCount int
IgnoredErrorCount int
ExpectedIgnoredErrorCount int

View File

@@ -347,6 +347,8 @@ func (op *BackupOperation) do(
mans,
toMerge,
deets,
writeStats,
op.Selectors.PathService(),
op.Errors)
if err != nil {
return nil, clues.Wrap(err, "merging details")
@@ -704,8 +706,17 @@ func mergeDetails(
mans []*kopia.ManifestEntry,
dataFromBackup kopia.DetailsMergeInfoer,
deets *details.Builder,
writeStats *kopia.BackupStats,
serviceType path.ServiceType,
errs *fault.Bus,
) error {
detailsModel := deets.Details().DetailsModel
// Capture these values in writeStats before anything else, so that an early
// return (e.g. when there is no backup data to merge) doesn't skip them.
writeStats.TotalNonMetaFileCount = len(detailsModel.FilterMetaFiles().Items())
writeStats.TotalNonMetaUploadedBytes = detailsModel.SumNonMetaFileSizes()
// Don't bother loading any of the base details if there's nothing we need to merge.
if dataFromBackup == nil || dataFromBackup.ItemsToMerge() == 0 {
return nil
@@ -841,6 +852,8 @@ func (op *BackupOperation) persistResults(
op.Results.BytesRead = opStats.k.TotalHashedBytes
op.Results.BytesUploaded = opStats.k.TotalUploadedBytes
op.Results.ItemsWritten = opStats.k.TotalFileCount
op.Results.NonMetaBytesUploaded = opStats.k.TotalNonMetaUploadedBytes
op.Results.NonMetaItemsWritten = opStats.k.TotalNonMetaFileCount
op.Results.ResourceOwners = opStats.resourceCount
if opStats.gc == nil {

View File

@@ -961,6 +961,7 @@ func testExchangeContinuousBackups(suite *BackupOpIntegrationSuite, toggles cont
deltaItemsWritten int
nonDeltaItemsRead int
nonDeltaItemsWritten int
nonMetaItemsWritten int
}{
{
name: "clean, no changes",
@@ -969,6 +970,7 @@ func testExchangeContinuousBackups(suite *BackupOpIntegrationSuite, toggles cont
deltaItemsWritten: 0,
nonDeltaItemsRead: 8,
nonDeltaItemsWritten: 0, // unchanged items are not counted towards write
nonMetaItemsWritten: 4,
},
{
name: "move an email folder to a subfolder",
@@ -992,6 +994,7 @@ func testExchangeContinuousBackups(suite *BackupOpIntegrationSuite, toggles cont
deltaItemsWritten: 2,
nonDeltaItemsRead: 8,
nonDeltaItemsWritten: 2,
nonMetaItemsWritten: 6,
},
{
name: "delete a folder",
@@ -1018,6 +1021,7 @@ func testExchangeContinuousBackups(suite *BackupOpIntegrationSuite, toggles cont
deltaItemsWritten: 0, // deletions are not counted as "writes"
nonDeltaItemsRead: 4,
nonDeltaItemsWritten: 0,
nonMetaItemsWritten: 4,
},
{
name: "add a new folder",
@@ -1070,6 +1074,7 @@ func testExchangeContinuousBackups(suite *BackupOpIntegrationSuite, toggles cont
deltaItemsWritten: 4,
nonDeltaItemsRead: 8,
nonDeltaItemsWritten: 4,
nonMetaItemsWritten: 8,
},
{
name: "rename a folder",
@@ -1125,6 +1130,7 @@ func testExchangeContinuousBackups(suite *BackupOpIntegrationSuite, toggles cont
deltaItemsWritten: 0, // two items per category
nonDeltaItemsRead: 8,
nonDeltaItemsWritten: 0,
nonMetaItemsWritten: 4,
},
{
name: "add a new item",
@@ -1178,6 +1184,7 @@ func testExchangeContinuousBackups(suite *BackupOpIntegrationSuite, toggles cont
deltaItemsWritten: 2,
nonDeltaItemsRead: 10,
nonDeltaItemsWritten: 2,
nonMetaItemsWritten: 6,
},
{
name: "delete an existing item",
@@ -1231,6 +1238,7 @@ func testExchangeContinuousBackups(suite *BackupOpIntegrationSuite, toggles cont
deltaItemsWritten: 0, // deletes are not counted as "writes"
nonDeltaItemsRead: 8,
nonDeltaItemsWritten: 0,
nonMetaItemsWritten: 4,
},
}
@@ -1263,7 +1271,7 @@ func testExchangeContinuousBackups(suite *BackupOpIntegrationSuite, toggles cont
assert.Equal(t, test.nonDeltaItemsRead+4, incBO.Results.ItemsRead, "non delta items read")
assert.Equal(t, test.nonDeltaItemsWritten+4, incBO.Results.ItemsWritten, "non delta items written")
}
assert.Equal(t, test.nonMetaItemsWritten, incBO.Results.ItemsWritten, "non meta incremental items write")
assert.NoError(t, incBO.Errors.Failure(), "incremental non-recoverable error", clues.ToCore(incBO.Errors.Failure()))
assert.Empty(t, incBO.Errors.Recovered(), "incremental recoverable/iteration errors")
assert.Equal(t, 1, incMB.TimesCalled[events.BackupStart], "incremental backup-start events")
@@ -1527,9 +1535,10 @@ func runDriveIncrementalTest(
table := []struct {
name string
// performs the incremental update required for the test.
updateFiles func(t *testing.T)
itemsRead int
itemsWritten int
updateFiles func(t *testing.T)
itemsRead int
itemsWritten int
nonMetaItemsWritten int
}{
{
name: "clean incremental, no changes",
@@ -1556,8 +1565,9 @@ func runDriveIncrementalTest(
expectDeets.AddItem(driveID, makeLocRef(container1), newFileID)
},
itemsRead: 1, // .data file for newitem
itemsWritten: 3, // .data and .meta for newitem, .dirmeta for parent
itemsRead: 1, // .data file for newitem
itemsWritten: 3, // .data and .meta for newitem, .dirmeta for parent
nonMetaItemsWritten: 1, // .data file for newitem
},
{
name: "add permission to new file",
@@ -1578,8 +1588,9 @@ func runDriveIncrementalTest(
require.NoErrorf(t, err, "adding permission to file %v", clues.ToCore(err))
// no expectedDeets: metadata isn't tracked
},
itemsRead: 1, // .data file for newitem
itemsWritten: 2, // .meta for newitem, .dirmeta for parent (.data is not written as it is not updated)
itemsRead: 1, // .data file for newitem
itemsWritten: 2, // .meta for newitem, .dirmeta for parent (.data is not written as it is not updated)
nonMetaItemsWritten: 1, // the file for which permission was updated
},
{
name: "remove permission from new file",
@@ -1599,8 +1610,9 @@ func runDriveIncrementalTest(
require.NoErrorf(t, err, "removing permission from file %v", clues.ToCore(err))
// no expectedDeets: metadata isn't tracked
},
itemsRead: 1, // .data file for newitem
itemsWritten: 2, // .meta for newitem, .dirmeta for parent (.data is not written as it is not updated)
itemsRead: 1, // .data file for newitem
itemsWritten: 2, // .meta for newitem, .dirmeta for parent (.data is not written as it is not updated)
nonMetaItemsWritten: 1, // .data file for newitem
},
{
name: "add permission to container",
@@ -1621,8 +1633,9 @@ func runDriveIncrementalTest(
require.NoErrorf(t, err, "adding permission to container %v", clues.ToCore(err))
// no expectedDeets: metadata isn't tracked
},
itemsRead: 0,
itemsWritten: 1, // .dirmeta for collection
itemsRead: 0,
itemsWritten: 1, // .dirmeta for collection
nonMetaItemsWritten: 0, // no files updated; the change is on the container
},
{
name: "remove permission from container",
@@ -1643,8 +1656,9 @@ func runDriveIncrementalTest(
require.NoErrorf(t, err, "removing permission from container %v", clues.ToCore(err))
// no expectedDeets: metadata isn't tracked
},
itemsRead: 0,
itemsWritten: 1, // .dirmeta for collection
itemsRead: 0,
itemsWritten: 1, // .dirmeta for collection
nonMetaItemsWritten: 0, // no files updated
},
{
name: "update contents of a file",
@@ -1658,8 +1672,9 @@ func runDriveIncrementalTest(
require.NoErrorf(t, err, "updating file contents: %v", clues.ToCore(err))
// no expectedDeets: neither file id nor location changed
},
itemsRead: 1, // .data file for newitem
itemsWritten: 3, // .data and .meta for newitem, .dirmeta for parent
itemsRead: 1, // .data file for newitem
itemsWritten: 3, // .data and .meta for newitem, .dirmeta for parent
nonMetaItemsWritten: 1, // .data file for newitem
},
{
name: "rename a file",
@@ -1681,8 +1696,9 @@ func runDriveIncrementalTest(
driveItem)
require.NoError(t, err, "renaming file %v", clues.ToCore(err))
},
itemsRead: 1, // .data file for newitem
itemsWritten: 3, // .data and .meta for newitem, .dirmeta for parent
itemsRead: 1, // .data file for newitem
itemsWritten: 3, // .data and .meta for newitem, .dirmeta for parent
nonMetaItemsWritten: 1, // .data file for newitem
// no expectedDeets: neither file id nor location changed
},
{
@@ -1710,8 +1726,9 @@ func runDriveIncrementalTest(
makeLocRef(container2),
ptr.Val(newFile.GetId()))
},
itemsRead: 1, // .data file for newitem
itemsWritten: 3, // .data and .meta for newitem, .dirmeta for parent
itemsRead: 1, // .data file for newitem
itemsWritten: 3, // .data and .meta for newitem, .dirmeta for parent
nonMetaItemsWritten: 1, // .data file for new item
},
{
name: "delete file",
@@ -1725,8 +1742,9 @@ func runDriveIncrementalTest(
expectDeets.RemoveItem(driveID, makeLocRef(container2), ptr.Val(newFile.GetId()))
},
itemsRead: 0,
itemsWritten: 0,
itemsRead: 0,
itemsWritten: 0,
nonMetaItemsWritten: 0,
},
{
name: "move a folder to a subfolder",
@@ -1753,8 +1771,9 @@ func runDriveIncrementalTest(
makeLocRef(container2),
makeLocRef(container1))
},
itemsRead: 0,
itemsWritten: 7, // 2*2(data and meta of 2 files) + 3 (dirmeta of two moved folders and target)
itemsRead: 0,
itemsWritten: 7, // 2*2(data and meta of 2 files) + 3 (dirmeta of two moved folders and target)
nonMetaItemsWritten: 0,
},
{
name: "rename a folder",
@@ -1783,8 +1802,9 @@ func runDriveIncrementalTest(
makeLocRef(container1, container2),
makeLocRef(container1, containerRename))
},
itemsRead: 0,
itemsWritten: 7, // 2*2(data and meta of 2 files) + 3 (dirmeta of two moved folders and target)
itemsRead: 0,
itemsWritten: 7, // 2*2(data and meta of 2 files) + 3 (dirmeta of two moved folders and target)
nonMetaItemsWritten: 0,
},
{
name: "delete a folder",
@@ -1799,8 +1819,9 @@ func runDriveIncrementalTest(
expectDeets.RemoveLocation(driveID, makeLocRef(container1, containerRename))
},
itemsRead: 0,
itemsWritten: 0,
itemsRead: 0,
itemsWritten: 0,
nonMetaItemsWritten: 0,
},
{
name: "add a new folder",
@@ -1831,8 +1852,9 @@ func runDriveIncrementalTest(
expectDeets.AddLocation(driveID, container3)
},
itemsRead: 2, // 2 .data for 2 files
itemsWritten: 6, // read items + 2 directory meta
itemsRead: 2, // 2 .data for 2 files
itemsWritten: 6, // read items + 2 directory meta
nonMetaItemsWritten: 2, // 2 .data for 2 files
},
}
for _, test := range table {
@@ -1862,9 +1884,10 @@ func runDriveIncrementalTest(
// do some additional checks to ensure the incremental dealt with fewer items.
// +2 on read/writes to account for metadata: 1 delta and 1 path.
var (
expectWrites = test.itemsWritten + 2
expectReads = test.itemsRead + 2
assertReadWrite = assert.Equal
expectWrites = test.itemsWritten + 2
expectNonMetaWrites = test.nonMetaItemsWritten
expectReads = test.itemsRead + 2
assertReadWrite = assert.Equal
)
// Sharepoint can produce a superset of permissions by nature of
@@ -1876,6 +1899,7 @@ func runDriveIncrementalTest(
}
assertReadWrite(t, expectWrites, incBO.Results.ItemsWritten, "incremental items written")
assertReadWrite(t, expectNonMetaWrites, incBO.Results.NonMetaItemsWritten, "incremental non-meta items written")
assertReadWrite(t, expectReads, incBO.Results.ItemsRead, "incremental items read")
assert.NoError(t, incBO.Errors.Failure(), "incremental non-recoverable error", clues.ToCore(incBO.Errors.Failure()))
@@ -1976,6 +2000,7 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_oneDriveOwnerMigration() {
// 2 on read/writes to account for metadata: 1 delta and 1 path.
assert.LessOrEqual(t, 2, incBO.Results.ItemsWritten, "items written")
assert.LessOrEqual(t, 1, incBO.Results.NonMetaItemsWritten, "non meta items written")
assert.LessOrEqual(t, 2, incBO.Results.ItemsRead, "items read")
assert.NoError(t, incBO.Errors.Failure(), "non-recoverable error", clues.ToCore(incBO.Errors.Failure()))
assert.Empty(t, incBO.Errors.Recovered(), "recoverable/iteration errors")

View File

@@ -1198,6 +1198,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
mds := ssmock.Streamer{Deets: test.populatedDetails}
w := &store.Wrapper{Storer: mockBackupStorer{entries: test.populatedModels}}
deets := details.Builder{}
writeStats := kopia.BackupStats{}
err := mergeDetails(
ctx,
@@ -1206,6 +1207,8 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
test.inputMans,
test.mdm,
&deets,
&writeStats,
path.OneDriveService,
fault.New(true))
test.errCheck(t, err, clues.ToCore(err))
@@ -1307,9 +1310,10 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsFolde
defer flush()
var (
mds = ssmock.Streamer{Deets: populatedDetails}
w = &store.Wrapper{Storer: mockBackupStorer{entries: populatedModels}}
deets = details.Builder{}
mds = ssmock.Streamer{Deets: populatedDetails}
w = &store.Wrapper{Storer: mockBackupStorer{entries: populatedModels}}
deets = details.Builder{}
writeStats = kopia.BackupStats{}
)
err := mergeDetails(
@@ -1319,6 +1323,8 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsFolde
inputMans,
mdm,
&deets,
&writeStats,
path.ExchangeService,
fault.New(true))
assert.NoError(t, err, clues.ToCore(err))
compareDeetEntries(t, expectedEntries, deets.Details().Entries)

View File

@@ -8,11 +8,13 @@ import (
// ReadWrites tracks the total count of reads and writes. ItemsRead
// and ItemsWritten counts are assumed to be successful reads.
type ReadWrites struct {
BytesRead int64 `json:"bytesRead,omitempty"`
BytesUploaded int64 `json:"bytesUploaded,omitempty"`
ItemsRead int `json:"itemsRead,omitempty"`
ItemsWritten int `json:"itemsWritten,omitempty"`
ResourceOwners int `json:"resourceOwners,omitempty"`
BytesRead int64 `json:"bytesRead,omitempty"`
BytesUploaded int64 `json:"bytesUploaded,omitempty"`
ItemsRead int `json:"itemsRead,omitempty"`
NonMetaBytesUploaded int64 `json:"nonMetaBytesUploaded,omitempty"`
NonMetaItemsWritten int `json:"nonMetaItemsWritten,omitempty"`
ItemsWritten int `json:"itemsWritten,omitempty"`
ResourceOwners int `json:"resourceOwners,omitempty"`
}
// StartAndEndTime tracks a paired starting time and ending time.
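
As a quick illustration of how the two new `ReadWrites` fields serialize, here's a standalone sketch using a local copy of the struct (the values are made up):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// ReadWrites is copied from the struct above so the sketch is self-contained.
type ReadWrites struct {
	BytesRead            int64 `json:"bytesRead,omitempty"`
	BytesUploaded        int64 `json:"bytesUploaded,omitempty"`
	ItemsRead            int   `json:"itemsRead,omitempty"`
	NonMetaBytesUploaded int64 `json:"nonMetaBytesUploaded,omitempty"`
	NonMetaItemsWritten  int   `json:"nonMetaItemsWritten,omitempty"`
	ItemsWritten         int   `json:"itemsWritten,omitempty"`
	ResourceOwners       int   `json:"resourceOwners,omitempty"`
}

func main() {
	rw := ReadWrites{
		BytesUploaded:        4160, // files + metafiles
		NonMetaBytesUploaded: 4096, // files only
		ItemsWritten:         6,    // files + metafiles
		NonMetaItemsWritten:  4,    // files only
	}
	out, _ := json.Marshal(rw)
	fmt.Println(string(out))
	// {"bytesUploaded":4160,"nonMetaBytesUploaded":4096,"nonMetaItemsWritten":4,"itemsWritten":6}
}
```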

View File

@@ -284,12 +284,12 @@ func (b Backup) toStats() backupStats {
return backupStats{
ID: string(b.ID),
BytesRead: b.BytesRead,
BytesUploaded: b.BytesUploaded,
BytesUploaded: b.NonMetaBytesUploaded,
EndedAt: b.CompletedAt,
ErrorCount: b.ErrorCount,
ItemsRead: b.ItemsRead,
ItemsSkipped: b.TotalSkippedItems,
ItemsWritten: b.ItemsWritten,
ItemsWritten: b.NonMetaItemsWritten,
StartedAt: b.StartedAt,
}
}
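
Putting it together: the non-meta numbers travel through three layers before the CLI prints them. A condensed, hedged sketch with stand-in structs (the real types are `kopia.BackupStats`, `stats.ReadWrites`, and the CLI-facing `backupStats` shown above):

```go
package main

import "fmt"

// Stand-ins for the three layers this PR touches.
type kopiaStats struct {
	TotalNonMetaFileCount     int
	TotalNonMetaUploadedBytes int64
}

type opResults struct {
	NonMetaItemsWritten  int
	NonMetaBytesUploaded int64
}

type cliStats struct {
	ItemsWritten  int
	BytesUploaded int64
}

func main() {
	// 1. mergeDetails fills the kopia-side stats from the details model.
	k := kopiaStats{TotalNonMetaFileCount: 4, TotalNonMetaUploadedBytes: 4096}

	// 2. persistResults copies them onto the operation results.
	r := opResults{
		NonMetaItemsWritten:  k.TotalNonMetaFileCount,
		NonMetaBytesUploaded: k.TotalNonMetaUploadedBytes,
	}

	// 3. Backup.toStats now surfaces the non-meta values, so the CLI
	// reports file-only counts and sizes.
	c := cliStats{
		ItemsWritten:  r.NonMetaItemsWritten,
		BytesUploaded: r.NonMetaBytesUploaded,
	}

	fmt.Printf("%+v\n", c) // {ItemsWritten:4 BytesUploaded:4096}
}
```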

View File

@@ -49,10 +49,12 @@ func stubBackup(t time.Time, ownerID, ownerName string) backup.Backup {
ErrorCount: 2,
Failure: "read, write",
ReadWrites: stats.ReadWrites{
BytesRead: 301,
BytesUploaded: 301,
ItemsRead: 1,
ItemsWritten: 1,
BytesRead: 301,
BytesUploaded: 301,
NonMetaBytesUploaded: 301,
ItemsRead: 1,
NonMetaItemsWritten: 1,
ItemsWritten: 1,
},
StartAndEndTime: stats.StartAndEndTime{
StartedAt: t,
@@ -248,7 +250,7 @@ func (suite *BackupUnitSuite) TestBackup_MinimumPrintable() {
assert.Equal(t, now, result.Stats.StartedAt, "started at")
assert.Equal(t, b.Status, result.Status, "status")
assert.Equal(t, b.BytesRead, result.Stats.BytesRead, "size")
assert.Equal(t, b.BytesUploaded, result.Stats.BytesUploaded, "stored size")
assert.Equal(t, b.NonMetaBytesUploaded, result.Stats.BytesUploaded, "stored size")
assert.Equal(t, b.Selector.DiscreteOwner, result.Owner, "owner")
}

View File

@@ -240,12 +240,27 @@ func (dm DetailsModel) FilterMetaFiles() DetailsModel {
return d2
}
// SumNonMetaFileSizes returns the total size of all items, excluding
// .meta files.
func (dm DetailsModel) SumNonMetaFileSizes() int64 {
var size int64
// Items() returns only files, filtering out folders.
for _, ent := range dm.FilterMetaFiles().Items() {
size += ent.size()
}
return size
}
// isMetaFile reports whether an entry is a metadata file. Metadata files
// store additional data, like permissions for Drive items, and are not
// treated as regular files.
func (de Entry) isMetaFile() bool {
// SharePoint types aren't checked here, since SharePoint permissions were
// added after IsMeta was deprecated. Earlier OneDrive backups stored both
// meta files and regular files in details, so only OneDrive items need the
// IsMeta check.
return de.ItemInfo.OneDrive != nil && de.ItemInfo.OneDrive.IsMeta
}