update file count and total size on CLI for backup (#3480)
Currently the backup output shows:

- total backup size = size of files + size of metafiles
- number of files = count of files + count of metafiles

With this change, the backup command reports only the size of data files, and the item count includes only data files.

NOTE: the reported results for all three current services are affected.

#### Does this PR need a docs update or release note?

- [ ] ✅ Yes, it's included
- [ ] 🕐 Yes, but in a later PR
- [x] ⛔ No

#### Type of change

- [ ] 🐛 Bugfix

#### Issue(s)

* https://github.com/alcionai/corso/issues/3304

#### Test Plan

- [x] 💪 Manual
- [ ] ⚡ Unit test
- [x] 💚 E2E
Parent: 8d68bacfb7
Commit: 06b04c6eff
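A worked example of the change, with invented numbers for illustration: suppose a OneDrive backup uploads 4 data files of 1 MiB each, plus 4 `.meta` files and 1 `.dirmeta` file. Before this change, the CLI counted all 9 uploaded items and included the metadata bytes in the total size; after it, the CLI reports 4 items and 4 MiB, since only non-meta files are counted.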
@@ -52,8 +52,10 @@ type BackupStats struct {

 	TotalHashedBytes          int64
 	TotalUploadedBytes        int64
+	TotalNonMetaUploadedBytes int64

 	TotalFileCount        int
+	TotalNonMetaFileCount int
 	CachedFileCount       int
 	UncachedFileCount     int
 	TotalDirectoryCount   int
@@ -347,6 +347,8 @@ func (op *BackupOperation) do(
 		mans,
 		toMerge,
 		deets,
+		writeStats,
+		op.Selectors.PathService(),
 		op.Errors)
 	if err != nil {
 		return nil, clues.Wrap(err, "merging details")
@@ -704,8 +706,17 @@ func mergeDetails(
 	mans []*kopia.ManifestEntry,
 	dataFromBackup kopia.DetailsMergeInfoer,
 	deets *details.Builder,
+	writeStats *kopia.BackupStats,
+	serviceType path.ServiceType,
 	errs *fault.Bus,
 ) error {
+	detailsModel := deets.Details().DetailsModel
+
+	// getting the values in writeStats before anything else so that we don't get a return from
+	// conditions like no backup data.
+	writeStats.TotalNonMetaFileCount = len(detailsModel.FilterMetaFiles().Items())
+	writeStats.TotalNonMetaUploadedBytes = detailsModel.SumNonMetaFileSizes()
+
 	// Don't bother loading any of the base details if there's nothing we need to merge.
 	if dataFromBackup == nil || dataFromBackup.ItemsToMerge() == 0 {
 		return nil
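The ordering in `mergeDetails` matters: the non-meta stats are assigned before the no-merge-data early return, so a backup with nothing to merge (e.g. a full, non-incremental run) still reports correct counts. A minimal self-contained sketch of that control flow, using invented stand-in types rather than the real Corso signatures:

```go
package main

import "fmt"

type mergeInfoer interface{ ItemsToMerge() int }

type backupStats struct{ TotalNonMetaFileCount int }

// countNonMeta stands in for len(detailsModel.FilterMetaFiles().Items()).
func countNonMeta(isMeta []bool) int {
	n := 0
	for _, meta := range isMeta {
		if !meta {
			n++
		}
	}
	return n
}

// mergeSketch reduces mergeDetails to the ordering concern: the stats
// assignment happens before the early return, so both code paths
// leave writeStats populated.
func mergeSketch(isMeta []bool, data mergeInfoer, stats *backupStats) error {
	stats.TotalNonMetaFileCount = countNonMeta(isMeta)

	if data == nil || data.ItemsToMerge() == 0 {
		return nil // early exit: nothing from prior backups to merge
	}

	// ... merge details from base backups as before ...
	return nil
}

func main() {
	stats := backupStats{}
	// nil merge data: the early return fires, but the count is already set.
	_ = mergeSketch([]bool{false, true, false}, nil, &stats)
	fmt.Println(stats.TotalNonMetaFileCount) // 2
}
```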
@@ -841,6 +852,8 @@ func (op *BackupOperation) persistResults(
 	op.Results.BytesRead = opStats.k.TotalHashedBytes
 	op.Results.BytesUploaded = opStats.k.TotalUploadedBytes
 	op.Results.ItemsWritten = opStats.k.TotalFileCount
+	op.Results.NonMetaBytesUploaded = opStats.k.TotalNonMetaUploadedBytes
+	op.Results.NonMetaItemsWritten = opStats.k.TotalNonMetaFileCount
 	op.Results.ResourceOwners = opStats.resourceCount

 	if opStats.gc == nil {
@@ -961,6 +961,7 @@ func testExchangeContinuousBackups(suite *BackupOpIntegrationSuite, toggles cont
 		deltaItemsWritten    int
 		nonDeltaItemsRead    int
 		nonDeltaItemsWritten int
+		nonMetaItemsWritten  int
 	}{
 		{
 			name: "clean, no changes",
@@ -969,6 +970,7 @@ func testExchangeContinuousBackups(suite *BackupOpIntegrationSuite, toggles cont
 			deltaItemsWritten:    0,
 			nonDeltaItemsRead:    8,
 			nonDeltaItemsWritten: 0, // unchanged items are not counted towards write
+			nonMetaItemsWritten:  4,
 		},
 		{
 			name: "move an email folder to a subfolder",
@@ -992,6 +994,7 @@ func testExchangeContinuousBackups(suite *BackupOpIntegrationSuite, toggles cont
 			deltaItemsWritten:    2,
 			nonDeltaItemsRead:    8,
 			nonDeltaItemsWritten: 2,
+			nonMetaItemsWritten:  6,
 		},
 		{
 			name: "delete a folder",
@@ -1018,6 +1021,7 @@ func testExchangeContinuousBackups(suite *BackupOpIntegrationSuite, toggles cont
 			deltaItemsWritten:    0, // deletions are not counted as "writes"
 			nonDeltaItemsRead:    4,
 			nonDeltaItemsWritten: 0,
+			nonMetaItemsWritten:  4,
 		},
 		{
 			name: "add a new folder",
@@ -1070,6 +1074,7 @@ func testExchangeContinuousBackups(suite *BackupOpIntegrationSuite, toggles cont
 			deltaItemsWritten:    4,
 			nonDeltaItemsRead:    8,
 			nonDeltaItemsWritten: 4,
+			nonMetaItemsWritten:  8,
 		},
 		{
 			name: "rename a folder",
@@ -1125,6 +1130,7 @@ func testExchangeContinuousBackups(suite *BackupOpIntegrationSuite, toggles cont
 			deltaItemsWritten:    0, // two items per category
 			nonDeltaItemsRead:    8,
 			nonDeltaItemsWritten: 0,
+			nonMetaItemsWritten:  4,
 		},
 		{
 			name: "add a new item",
@@ -1178,6 +1184,7 @@ func testExchangeContinuousBackups(suite *BackupOpIntegrationSuite, toggles cont
 			deltaItemsWritten:    2,
 			nonDeltaItemsRead:    10,
 			nonDeltaItemsWritten: 2,
+			nonMetaItemsWritten:  6,
 		},
 		{
 			name: "delete an existing item",
@@ -1231,6 +1238,7 @@ func testExchangeContinuousBackups(suite *BackupOpIntegrationSuite, toggles cont
 			deltaItemsWritten:    0, // deletes are not counted as "writes"
 			nonDeltaItemsRead:    8,
 			nonDeltaItemsWritten: 0,
+			nonMetaItemsWritten:  4,
 		},
 	}

@@ -1263,7 +1271,7 @@ func testExchangeContinuousBackups(suite *BackupOpIntegrationSuite, toggles cont
 				assert.Equal(t, test.nonDeltaItemsRead+4, incBO.Results.ItemsRead, "non delta items read")
 				assert.Equal(t, test.nonDeltaItemsWritten+4, incBO.Results.ItemsWritten, "non delta items written")
 			}

+			assert.Equal(t, test.nonMetaItemsWritten, incBO.Results.ItemsWritten, "non meta incremental items write")
 			assert.NoError(t, incBO.Errors.Failure(), "incremental non-recoverable error", clues.ToCore(incBO.Errors.Failure()))
 			assert.Empty(t, incBO.Errors.Recovered(), "incremental recoverable/iteration errors")
 			assert.Equal(t, 1, incMB.TimesCalled[events.BackupStart], "incremental backup-start events")
@@ -1530,6 +1538,7 @@ func runDriveIncrementalTest(
 		updateFiles         func(t *testing.T)
 		itemsRead           int
 		itemsWritten        int
+		nonMetaItemsWritten int
 	}{
 		{
 			name: "clean incremental, no changes",
@@ -1558,6 +1567,7 @@ func runDriveIncrementalTest(
 			},
 			itemsRead:           1, // .data file for newitem
 			itemsWritten:        3, // .data and .meta for newitem, .dirmeta for parent
+			nonMetaItemsWritten: 1, // .data file for newitem
 		},
 		{
 			name: "add permission to new file",
@@ -1580,6 +1590,7 @@ func runDriveIncrementalTest(
 			},
 			itemsRead:           1, // .data file for newitem
 			itemsWritten:        2, // .meta for newitem, .dirmeta for parent (.data is not written as it is not updated)
+			nonMetaItemsWritten: 1, // the file for which permission was updated
 		},
 		{
 			name: "remove permission from new file",
@@ -1601,6 +1612,7 @@ func runDriveIncrementalTest(
 			},
 			itemsRead:           1, // .data file for newitem
 			itemsWritten:        2, // .meta for newitem, .dirmeta for parent (.data is not written as it is not updated)
+			nonMetaItemsWritten: 1, // .data file for newitem
 		},
 		{
 			name: "add permission to container",
@@ -1623,6 +1635,7 @@ func runDriveIncrementalTest(
 			},
 			itemsRead:           0,
 			itemsWritten:        1, // .dirmeta for collection
+			nonMetaItemsWritten: 0, // no files updated as update on container
 		},
 		{
 			name: "remove permission from container",
@@ -1645,6 +1658,7 @@ func runDriveIncrementalTest(
 			},
 			itemsRead:           0,
 			itemsWritten:        1, // .dirmeta for collection
+			nonMetaItemsWritten: 0, // no files updated
 		},
 		{
 			name: "update contents of a file",
@@ -1660,6 +1674,7 @@ func runDriveIncrementalTest(
 			},
 			itemsRead:           1, // .data file for newitem
 			itemsWritten:        3, // .data and .meta for newitem, .dirmeta for parent
+			nonMetaItemsWritten: 1, // .data file for newitem
 		},
 		{
 			name: "rename a file",
@@ -1683,6 +1698,7 @@ func runDriveIncrementalTest(
 			},
 			itemsRead:           1, // .data file for newitem
 			itemsWritten:        3, // .data and .meta for newitem, .dirmeta for parent
+			nonMetaItemsWritten: 1, // .data file for newitem
 			// no expectedDeets: neither file id nor location changed
 		},
 		{
@@ -1712,6 +1728,7 @@ func runDriveIncrementalTest(
 			},
 			itemsRead:           1, // .data file for newitem
 			itemsWritten:        3, // .data and .meta for newitem, .dirmeta for parent
+			nonMetaItemsWritten: 1, // .data file for new item
 		},
 		{
 			name: "delete file",
@@ -1727,6 +1744,7 @@ func runDriveIncrementalTest(
 			},
 			itemsRead:           0,
 			itemsWritten:        0,
+			nonMetaItemsWritten: 0,
 		},
 		{
 			name: "move a folder to a subfolder",
@@ -1755,6 +1773,7 @@ func runDriveIncrementalTest(
 			},
 			itemsRead:           0,
 			itemsWritten:        7, // 2*2 (data and meta of 2 files) + 3 (dirmeta of two moved folders and target)
+			nonMetaItemsWritten: 0,
 		},
 		{
 			name: "rename a folder",
@@ -1785,6 +1804,7 @@ func runDriveIncrementalTest(
 			},
 			itemsRead:           0,
 			itemsWritten:        7, // 2*2 (data and meta of 2 files) + 3 (dirmeta of two moved folders and target)
+			nonMetaItemsWritten: 0,
 		},
 		{
 			name: "delete a folder",
@@ -1801,6 +1821,7 @@ func runDriveIncrementalTest(
 			},
 			itemsRead:           0,
 			itemsWritten:        0,
+			nonMetaItemsWritten: 0,
 		},
 		{
 			name: "add a new folder",
@@ -1833,6 +1854,7 @@ func runDriveIncrementalTest(
 			},
 			itemsRead:           2, // 2 .data for 2 files
 			itemsWritten:        6, // read items + 2 directory meta
+			nonMetaItemsWritten: 2, // 2 .data for 2 files
 		},
 	}
 	for _, test := range table {
@@ -1863,6 +1885,7 @@ func runDriveIncrementalTest(
 			// +2 on read/writes to account for metadata: 1 delta and 1 path.
 			var (
 				expectWrites        = test.itemsWritten + 2
+				expectNonMetaWrites = test.nonMetaItemsWritten
 				expectReads         = test.itemsRead + 2
 				assertReadWrite     = assert.Equal
 			)
@@ -1876,6 +1899,7 @@ func runDriveIncrementalTest(
 			}

 			assertReadWrite(t, expectWrites, incBO.Results.ItemsWritten, "incremental items written")
+			assertReadWrite(t, expectNonMetaWrites, incBO.Results.NonMetaItemsWritten, "incremental non-meta items written")
 			assertReadWrite(t, expectReads, incBO.Results.ItemsRead, "incremental items read")

 			assert.NoError(t, incBO.Errors.Failure(), "incremental non-recoverable error", clues.ToCore(incBO.Errors.Failure()))
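As the `// +2 on read/writes` comment above notes, every incremental run also writes two metadata items (one delta, one path). For the "update contents of a file" case, for example, the expected totals work out to expectWrites = 3 + 2 = 5 and expectReads = 1 + 2 = 3, while expectNonMetaWrites stays at 1: those metadata writes are exactly what the new non-meta counter is designed to exclude.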
@@ -1976,6 +2000,7 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_oneDriveOwnerMigration() {

 	// 2 on read/writes to account for metadata: 1 delta and 1 path.
 	assert.LessOrEqual(t, 2, incBO.Results.ItemsWritten, "items written")
+	assert.LessOrEqual(t, 1, incBO.Results.NonMetaItemsWritten, "non meta items written")
 	assert.LessOrEqual(t, 2, incBO.Results.ItemsRead, "items read")
 	assert.NoError(t, incBO.Errors.Failure(), "non-recoverable error", clues.ToCore(incBO.Errors.Failure()))
 	assert.Empty(t, incBO.Errors.Recovered(), "recoverable/iteration errors")
@@ -1198,6 +1198,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
 			mds := ssmock.Streamer{Deets: test.populatedDetails}
 			w := &store.Wrapper{Storer: mockBackupStorer{entries: test.populatedModels}}
 			deets := details.Builder{}
+			writeStats := kopia.BackupStats{}

 			err := mergeDetails(
 				ctx,
@@ -1206,6 +1207,8 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
 				test.inputMans,
 				test.mdm,
 				&deets,
+				&writeStats,
+				path.OneDriveService,
 				fault.New(true))
 			test.errCheck(t, err, clues.ToCore(err))
@@ -1310,6 +1313,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsFolde
 		mds        = ssmock.Streamer{Deets: populatedDetails}
 		w          = &store.Wrapper{Storer: mockBackupStorer{entries: populatedModels}}
 		deets      = details.Builder{}
+		writeStats = kopia.BackupStats{}
 	)

 	err := mergeDetails(
@@ -1319,6 +1323,8 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsFolde
 		inputMans,
 		mdm,
 		&deets,
+		&writeStats,
+		path.ExchangeService,
 		fault.New(true))
 	assert.NoError(t, err, clues.ToCore(err))
 	compareDeetEntries(t, expectedEntries, deets.Details().Entries)
@@ -11,6 +11,8 @@ type ReadWrites struct {
 	BytesRead            int64 `json:"bytesRead,omitempty"`
 	BytesUploaded        int64 `json:"bytesUploaded,omitempty"`
 	ItemsRead            int   `json:"itemsRead,omitempty"`
+	NonMetaBytesUploaded int64 `json:"nonMetaBytesUploaded,omitempty"`
+	NonMetaItemsWritten  int   `json:"nonMetaItemsWritten,omitempty"`
 	ItemsWritten         int   `json:"itemsWritten,omitempty"`
 	ResourceOwners       int   `json:"resourceOwners,omitempty"`
 }
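Since `ReadWrites` is persisted as part of the backup model, the `omitempty` tags mean that records written before this change simply lack the new keys, and zero values stay out of newly written records. A small self-contained sketch of that behavior, using a local copy of the struct rather than the real `stats` package:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// readWrites is a local stand-in mirroring the ReadWrites shape above,
// for illustration only.
type readWrites struct {
	BytesRead            int64 `json:"bytesRead,omitempty"`
	BytesUploaded        int64 `json:"bytesUploaded,omitempty"`
	ItemsRead            int   `json:"itemsRead,omitempty"`
	NonMetaBytesUploaded int64 `json:"nonMetaBytesUploaded,omitempty"`
	NonMetaItemsWritten  int   `json:"nonMetaItemsWritten,omitempty"`
	ItemsWritten         int   `json:"itemsWritten,omitempty"`
	ResourceOwners       int   `json:"resourceOwners,omitempty"`
}

func main() {
	// A record without non-meta stats: omitempty keeps the
	// zero-valued new fields out of the serialized JSON.
	bs, _ := json.Marshal(readWrites{BytesRead: 301, ItemsRead: 1})
	fmt.Println(string(bs)) // {"bytesRead":301,"itemsRead":1}
}
```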
@@ -284,12 +284,12 @@ func (b Backup) toStats() backupStats {
 	return backupStats{
 		ID:            string(b.ID),
 		BytesRead:     b.BytesRead,
-		BytesUploaded: b.BytesUploaded,
+		BytesUploaded: b.NonMetaBytesUploaded,
 		EndedAt:       b.CompletedAt,
 		ErrorCount:    b.ErrorCount,
 		ItemsRead:     b.ItemsRead,
 		ItemsSkipped:  b.TotalSkippedItems,
-		ItemsWritten:  b.ItemsWritten,
+		ItemsWritten:  b.NonMetaItemsWritten,
 		StartedAt:     b.StartedAt,
 	}
 }
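Note the design choice here: `toStats` keeps the existing `backupStats` field names and simply sources `BytesUploaded` and `ItemsWritten` from their non-meta counterparts, so the CLI output shows the corrected numbers without any change to its display schema.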
@@ -51,7 +51,9 @@ func stubBackup(t time.Time, ownerID, ownerName string) backup.Backup {
 		ReadWrites: stats.ReadWrites{
 			BytesRead:            301,
 			BytesUploaded:        301,
+			NonMetaBytesUploaded: 301,
 			ItemsRead:            1,
+			NonMetaItemsWritten:  1,
 			ItemsWritten:         1,
 		},
 		StartAndEndTime: stats.StartAndEndTime{
@@ -248,7 +250,7 @@ func (suite *BackupUnitSuite) TestBackup_MinimumPrintable() {
 	assert.Equal(t, now, result.Stats.StartedAt, "started at")
 	assert.Equal(t, b.Status, result.Status, "status")
 	assert.Equal(t, b.BytesRead, result.Stats.BytesRead, "size")
-	assert.Equal(t, b.BytesUploaded, result.Stats.BytesUploaded, "stored size")
+	assert.Equal(t, b.NonMetaBytesUploaded, result.Stats.BytesUploaded, "stored size")
 	assert.Equal(t, b.Selector.DiscreteOwner, result.Owner, "owner")
 }
@@ -240,12 +240,27 @@ func (dm DetailsModel) FilterMetaFiles() DetailsModel {
 	return d2
 }

+// SumNonMetaFileSizes returns the total size of items excluding all the
+// .meta files from the items.
+func (dm DetailsModel) SumNonMetaFileSizes() int64 {
+	var size int64
+
+	// Items will provide only files and filter out folders
+	for _, ent := range dm.FilterMetaFiles().Items() {
+		size += ent.size()
+	}
+
+	return size
+}
+
 // Check if a file is a metadata file. These are used to store
 // additional data like permissions (in case of Drive items) and are
 // not to be treated as regular files.
 func (de Entry) isMetaFile() bool {
 	// sharepoint types not needed, since sharepoint permissions were
 	// added after IsMeta was deprecated.
+	// Earlier onedrive backups used to store both metafiles and files in details.
+	// So filter out just the onedrive items and check for metafiles
 	return de.ItemInfo.OneDrive != nil && de.ItemInfo.OneDrive.IsMeta
 }
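To see the new helpers end to end, here is a self-contained approximation of the filtering logic above; the types are simplified stand-ins for the real `details` structs (assumed shapes, not the actual API):

```go
package main

import "fmt"

// oneDriveInfo and entry are minimal stand-ins for the details package
// types; names and shapes are invented for illustration.
type oneDriveInfo struct {
	IsMeta bool
	Size   int64
}

type entry struct {
	OneDrive *oneDriveInfo
}

// isMetaFile mirrors the check above: only OneDrive entries carry the
// deprecated IsMeta flag.
func (e entry) isMetaFile() bool {
	return e.OneDrive != nil && e.OneDrive.IsMeta
}

// sumNonMetaFileSizes mirrors SumNonMetaFileSizes: total bytes of data
// files only, with .meta/.dirmeta entries filtered out.
func sumNonMetaFileSizes(entries []entry) int64 {
	var size int64

	for _, e := range entries {
		if e.isMetaFile() {
			continue
		}

		size += e.OneDrive.Size
	}

	return size
}

func main() {
	entries := []entry{
		{OneDrive: &oneDriveInfo{IsMeta: false, Size: 100}}, // item.data
		{OneDrive: &oneDriveInfo{IsMeta: true, Size: 10}},   // item.meta
		{OneDrive: &oneDriveInfo{IsMeta: true, Size: 10}},   // .dirmeta
	}

	// The old accounting reported 120 bytes across 3 items; the new
	// non-meta accounting reports 100 bytes across 1 item.
	fmt.Println(sumNonMetaFileSizes(entries)) // 100
}
```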