Compare commits
main...2675-bench (7 commits)
Commits:

- 35e9a37187
- 1fe7a624c4
- 732eda22d4
- a931f40896
- 3cd6c803af
- 8dc4b0d4cd
- 916f649e97

@@ -12,6 +12,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

### Fixed

- Corso-generated .meta files and permissions no longer appear in the backup details.
- repo.BackupDetails from the SDK now filters out meta and dirmeta files, and empty folders, by default.

### Known Issues

- Folders and Calendars containing zero items or subfolders are not included in the backup.

@@ -136,6 +136,8 @@ func addExchangeCommands(cmd *cobra.Command) *cobra.Command {
     c.Use = c.Use + " " + exchangeServiceCommandDetailsUseSuffix
     c.Example = exchangeServiceCommandDetailsExamples
 
+    options.AddSkipReduceFlag(c)
+
     // Flags addition ordering should follow the order we want them to appear in help and docs:
     // More generic (ex: --user) and more frequently used flags take precedence.
     fs.StringVar(&backupID,

@@ -212,7 +214,6 @@ func addExchangeCommands(cmd *cobra.Command) *cobra.Command {
        &contactFolder,
        utils.ContactFolderFN, nil,
        "Select backup details for contacts within a folder; accepts '"+utils.Wildcard+"' to select all contact folders.")

    fs.StringVar(
        &contactName,
        utils.ContactNameFN, "",

@@ -434,7 +435,8 @@ func exchangeDetailsCmd() *cobra.Command {
     }
 }
 
-// lists the history of backup operations
+// lists all items in the backup, running the results first through
+// selector reduction as a filtering step.
 func detailsExchangeCmd(cmd *cobra.Command, args []string) error {
     if utils.HasNoFlagsAndShownHelp(cmd) {
         return nil

@@ -468,14 +470,16 @@ func detailsExchangeCmd(cmd *cobra.Command, args []string) error {
         return Only(ctx, err)
     }
 
-    r, err := repository.Connect(ctx, acct, s, options.Control())
+    ctrlOpts := options.Control()
+
+    r, err := repository.Connect(ctx, acct, s, ctrlOpts)
     if err != nil {
         return Only(ctx, errors.Wrapf(err, "Failed to connect to the %s repository", s.Provider))
     }
 
     defer utils.CloseRepo(ctx, r)
 
-    ds, err := runDetailsExchangeCmd(ctx, r, backupID, opts)
+    ds, err := runDetailsExchangeCmd(ctx, r, backupID, opts, ctrlOpts.SkipReduce)
     if err != nil {
         return Only(ctx, err)
     }

@@ -498,6 +502,7 @@ func runDetailsExchangeCmd(
     r repository.BackupGetter,
     backupID string,
     opts utils.ExchangeOpts,
+    skipReduce bool,
 ) (*details.Details, error) {
     if err := utils.ValidateExchangeRestoreFlags(backupID, opts); err != nil {
         return nil, err

@@ -513,10 +518,13 @@ func runDetailsExchangeCmd(
         return nil, errors.Wrap(errs.Failure(), "Failed to get backup details in the repository")
     }
 
-    sel := utils.IncludeExchangeRestoreDataSelectors(opts)
-    utils.FilterExchangeRestoreInfoSelectors(sel, opts)
+    if !skipReduce {
+        sel := utils.IncludeExchangeRestoreDataSelectors(opts)
+        utils.FilterExchangeRestoreInfoSelectors(sel, opts)
+        d = sel.Reduce(ctx, d, errs)
+    }
 
-    return sel.Reduce(ctx, d, errs), nil
+    return d, nil
 }
 
 // ------------------------------------------------------------------------------------------------

@@ -231,7 +231,8 @@ func (suite *ExchangeSuite) TestExchangeBackupDetailsSelectors() {
                 ctx,
                 test.BackupGetter,
                 "backup-ID",
-                test.Opts)
+                test.Opts,
+                false)
             assert.NoError(t, err, "failure")
             assert.ElementsMatch(t, test.Expected, output.Entries)
         })

@@ -250,7 +251,8 @@ func (suite *ExchangeSuite) TestExchangeBackupDetailsSelectorsBadFormats() {
                 ctx,
                 test.BackupGetter,
                 "backup-ID",
-                test.Opts)
+                test.Opts,
+                false)
             assert.Error(t, err, "failure")
             assert.Empty(t, output)
         })

@@ -103,6 +103,8 @@ func addOneDriveCommands(cmd *cobra.Command) *cobra.Command {
     c.Use = c.Use + " " + oneDriveServiceCommandDetailsUseSuffix
     c.Example = oneDriveServiceCommandDetailsExamples
 
+    options.AddSkipReduceFlag(c)
+
     fs.StringVar(&backupID,
         utils.BackupFN, "",
         "ID of the backup to explore. (required)")

@@ -348,7 +350,9 @@ func detailsOneDriveCmd(cmd *cobra.Command, args []string) error {
         return Only(ctx, err)
     }
 
-    r, err := repository.Connect(ctx, acct, s, options.Control())
+    ctrlOpts := options.Control()
+
+    r, err := repository.Connect(ctx, acct, s, ctrlOpts)
     if err != nil {
         return Only(ctx, errors.Wrapf(err, "Failed to connect to the %s repository", s.Provider))
     }

@@ -367,7 +371,7 @@ func detailsOneDriveCmd(cmd *cobra.Command, args []string) error {
         Populated: utils.GetPopulatedFlags(cmd),
     }
 
-    ds, err := runDetailsOneDriveCmd(ctx, r, backupID, opts)
+    ds, err := runDetailsOneDriveCmd(ctx, r, backupID, opts, ctrlOpts.SkipReduce)
     if err != nil {
         return Only(ctx, err)
     }

@@ -390,6 +394,7 @@ func runDetailsOneDriveCmd(
     r repository.BackupGetter,
     backupID string,
     opts utils.OneDriveOpts,
+    skipReduce bool,
 ) (*details.Details, error) {
     if err := utils.ValidateOneDriveRestoreFlags(backupID, opts); err != nil {
         return nil, err

@@ -405,10 +410,13 @@ func runDetailsOneDriveCmd(
         return nil, errors.Wrap(errs.Failure(), "Failed to get backup details in the repository")
     }
 
-    sel := utils.IncludeOneDriveRestoreDataSelectors(opts)
-    utils.FilterOneDriveRestoreInfoSelectors(sel, opts)
+    if !skipReduce {
+        sel := utils.IncludeOneDriveRestoreDataSelectors(opts)
+        utils.FilterOneDriveRestoreInfoSelectors(sel, opts)
+        d = sel.Reduce(ctx, d, errs)
+    }
 
-    return sel.Reduce(ctx, d, errs), nil
+    return d, nil
 }
 
 // `corso backup delete onedrive [<flag>...]`

@@ -102,7 +102,8 @@ func (suite *OneDriveSuite) TestOneDriveBackupDetailsSelectors() {
                 ctx,
                 test.BackupGetter,
                 "backup-ID",
-                test.Opts)
+                test.Opts,
+                false)
             assert.NoError(t, err)
             assert.ElementsMatch(t, test.Expected, output.Entries)
         })

@@ -121,7 +122,8 @@ func (suite *OneDriveSuite) TestOneDriveBackupDetailsSelectorsBadFormats() {
                 ctx,
                 test.BackupGetter,
                 "backup-ID",
-                test.Opts)
+                test.Opts,
+                false)
             assert.Error(t, err)
             assert.Empty(t, output)
         })

@@ -114,6 +114,8 @@ func addSharePointCommands(cmd *cobra.Command) *cobra.Command {
     c.Use = c.Use + " " + sharePointServiceCommandDetailsUseSuffix
     c.Example = sharePointServiceCommandDetailsExamples
 
+    options.AddSkipReduceFlag(c)
+
     fs.StringVar(&backupID,
         utils.BackupFN, "",
         "ID of the backup to retrieve.")

@@ -479,7 +481,9 @@ func detailsSharePointCmd(cmd *cobra.Command, args []string) error {
         return Only(ctx, err)
     }
 
-    r, err := repository.Connect(ctx, acct, s, options.Control())
+    ctrlOpts := options.Control()
+
+    r, err := repository.Connect(ctx, acct, s, ctrlOpts)
     if err != nil {
         return Only(ctx, errors.Wrapf(err, "Failed to connect to the %s repository", s.Provider))
     }

@@ -495,7 +499,7 @@ func detailsSharePointCmd(cmd *cobra.Command, args []string) error {
         Populated: utils.GetPopulatedFlags(cmd),
     }
 
-    ds, err := runDetailsSharePointCmd(ctx, r, backupID, opts)
+    ds, err := runDetailsSharePointCmd(ctx, r, backupID, opts, ctrlOpts.SkipReduce)
     if err != nil {
         return Only(ctx, err)
     }

@@ -518,6 +522,7 @@ func runDetailsSharePointCmd(
     r repository.BackupGetter,
     backupID string,
     opts utils.SharePointOpts,
+    skipReduce bool,
 ) (*details.Details, error) {
     if err := utils.ValidateSharePointRestoreFlags(backupID, opts); err != nil {
         return nil, err

@@ -533,8 +538,11 @@ func runDetailsSharePointCmd(
         return nil, errors.Wrap(errs.Failure(), "Failed to get backup details in the repository")
     }
 
-    sel := utils.IncludeSharePointRestoreDataSelectors(opts)
-    utils.FilterSharePointRestoreInfoSelectors(sel, opts)
+    if !skipReduce {
+        sel := utils.IncludeSharePointRestoreDataSelectors(opts)
+        utils.FilterSharePointRestoreInfoSelectors(sel, opts)
+        d = sel.Reduce(ctx, d, errs)
+    }
 
-    return sel.Reduce(ctx, d, errs), nil
+    return d, nil
 }

@@ -219,7 +219,8 @@ func (suite *SharePointSuite) TestSharePointBackupDetailsSelectors() {
                 ctx,
                 test.BackupGetter,
                 "backup-ID",
-                test.Opts)
+                test.Opts,
+                false)
             assert.NoError(t, err)
             assert.ElementsMatch(t, test.Expected, output.Entries)
         })

@@ -238,7 +239,8 @@ func (suite *SharePointSuite) TestSharePointBackupDetailsSelectorsBadFormats() {
                 ctx,
                 test.BackupGetter,
                 "backup-ID",
-                test.Opts)
+                test.Opts,
+                false)
             assert.Error(t, err)
             assert.Empty(t, output)
         })

@@ -14,6 +14,7 @@ func Control() control.Options {
     opt.FailFast = fastFail
     opt.DisableMetrics = noStats
     opt.RestorePermissions = restorePermissions
+    opt.SkipReduce = skipReduce
     opt.ToggleFeatures.DisableIncrementals = disableIncrementals
     opt.ToggleFeatures.EnablePermissionsBackup = enablePermissionsBackup

@@ -28,6 +29,7 @@ var (
     fastFail           bool
     noStats            bool
     restorePermissions bool
+    skipReduce         bool
 )
 
 // AddOperationFlags adds command-local operation flags

@@ -52,6 +54,14 @@ func AddRestorePermissionsFlag(cmd *cobra.Command) {
     cobra.CheckErr(fs.MarkHidden("restore-permissions"))
 }
 
+// AddSkipReduceFlag adds a hidden flag that allows callers to skip the selector
+// reduction step. Currently only intended for details commands, not restore.
+func AddSkipReduceFlag(cmd *cobra.Command) {
+    fs := cmd.Flags()
+    fs.BoolVar(&skipReduce, "skip-reduce", false, "Skip the selector reduce filtering")
+    cobra.CheckErr(fs.MarkHidden("skip-reduce"))
+}
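
For readers unfamiliar with cobra's hidden flags, here is a minimal, self-contained sketch of the same pattern used by AddSkipReduceFlag above: register a boolean flag normally, then hide it from help output while leaving it fully functional. The command name and wiring here are illustrative only, not corso code:

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

var skipReduce bool

// addSkipReduceFlag mirrors options.AddSkipReduceFlag: the flag is registered
// like any other, then MarkHidden removes it from --help output.
func addSkipReduceFlag(cmd *cobra.Command) {
	fs := cmd.Flags()
	fs.BoolVar(&skipReduce, "skip-reduce", false, "Skip the selector reduce filtering")
	cobra.CheckErr(fs.MarkHidden("skip-reduce"))
}

func main() {
	cmd := &cobra.Command{
		Use: "details",
		RunE: func(cmd *cobra.Command, args []string) error {
			fmt.Println("skip-reduce:", skipReduce)
			return nil
		},
	}
	addSkipReduceFlag(cmd)

	// the flag still parses even though help output omits it
	cmd.SetArgs([]string{"--skip-reduce"})
	cobra.CheckErr(cmd.Execute())
}
```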

// ---------------------------------------------------------------------------
// Feature Flags
// ---------------------------------------------------------------------------

@@ -43,3 +43,10 @@ func OrNow(t *time.Time) time.Time {
 
     return *t
 }
+
+// To makes it easy to get a pointer to ad-hoc primitives
+// without needing to declare additional variables.
+// ex: ptr.To("a string")
+func To[T any](t T) *T {
+    return &t
+}
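
The new generic helper is handy with SDKs whose setters take pointers (the regression test later in this diff uses it as `ptr.To[int32](0)`). A small usage sketch, with a local copy of `To` so it runs standalone:

```go
package main

import "fmt"

// To is copied from the ptr package above.
func To[T any](t T) *T {
	return &t
}

func main() {
	// pointers to ad-hoc values, no intermediate variables required
	childCount := To[int32](0)
	name := To("a string")

	fmt.Println(*childCount) // 0
	fmt.Println(*name)       // a string
}
```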

@@ -272,7 +272,7 @@ func (c *Collections) Get(
 
     retry := c.source == OneDriveSource
 
-    drives, err := drives(ctx, pager, retry)
+    drives, err := Drives(ctx, pager, retry)
     if err != nil {
         return nil, nil, err
     }

@@ -72,7 +72,7 @@ func PagerForSource(
     }
 }
 
-func drives(
+func Drives(
     ctx context.Context,
     pager drivePager,
     retry bool,

@@ -362,7 +362,7 @@ func GetAllFolders(
     prefix string,
     errs *fault.Bus,
 ) ([]*Displayable, error) {
-    drives, err := drives(ctx, pager, true)
+    drives, err := Drives(ctx, pager, true)
     if err != nil {
         return nil, errors.Wrap(err, "getting OneDrive folders")
     }

@@ -313,7 +313,7 @@ func (suite *OneDriveUnitSuite) TestDrives() {
                 toReturn: test.pagerResults,
             }
 
-            drives, err := drives(ctx, pager, test.retry)
+            drives, err := Drives(ctx, pager, test.retry)
             test.expectedErr(t, err)
 
             assert.ElementsMatch(t, test.expectedResults, drives)

@@ -355,7 +355,7 @@ func (suite *OneDriveSuite) TestCreateGetDeleteFolder() {
     pager, err := PagerForSource(OneDriveSource, gs, suite.userID, nil)
     require.NoError(t, err)
 
-    drives, err := drives(ctx, pager, true)
+    drives, err := Drives(ctx, pager, true)
     require.NoError(t, err)
     require.NotEmpty(t, drives)

@@ -46,7 +46,7 @@ func (suite *ItemIntegrationSuite) SetupSuite() {
     pager, err := PagerForSource(OneDriveSource, suite.service, suite.user, nil)
     require.NoError(t, err)
 
-    odDrives, err := drives(ctx, pager, true)
+    odDrives, err := Drives(ctx, pager, true)
     require.NoError(t, err)
     // Test Requirement 1: Need a drive
     require.Greaterf(t, len(odDrives), 0, "user %s does not have a drive", suite.user)
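
A note on the rename: Go exports identifiers by capitalization, so changing drives to Drives makes the helper callable from outside the onedrive package. The new TestBackupDetails_regression in the repository package (later in this diff) relies on onedrive.Drives to enumerate the test user's drives.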

@@ -305,6 +305,158 @@ func (d *Details) addFolder(folder folderEntry) {
    })
}

// FilterMetaFiles returns a new Details struct with a copy of the DetailsModel
// that has had all .meta and .dirmeta files stripped out.
func (d *Details) FilterMetaFiles() *Details {
    return &Details{
        DetailsModel: d.DetailsModel.FilterMetaFiles(),
    }
}

// FilterEmptyContainers returns a new Details struct with all empty (ie: containing
// no items) containers stripped out. If meta files have not been filtered out already,
// they will continue to count as a "populated" container.
func (d *Details) FilterEmptyContainers() *Details {
    type entCount struct {
        ent       DetailsEntry
        itemCount int
    }

    var (
        // shortRef: entCount
        srec  = map[string]entCount{}
        items = []DetailsEntry{}
    )

    // split the entries into items and folders.
    // folders are stored in a map by their shortRef for lookup.
    for _, ent := range d.Entries {
        if ent.Folder == nil {
            items = append(items, ent)
        } else {
            srec[ent.ShortRef] = entCount{ent, 0}
        }
    }

    // for every item, add a count to the owning folder.
    // this assumes item parentRef == folder shortRef.
    for _, ent := range items {
        if len(ent.ParentRef) == 0 {
            continue
        }

        ec := srec[ent.ParentRef]
        ec.itemCount++
        srec[ent.ParentRef] = ec

        // to maintain a hierarchical count so that we don't
        // slice parent folders, this loop walks the tree upward
        // by parent ref, adding one count to each parent up
        // to the root.
        parentRef := ec.ent.ParentRef
        parentCount := 0

        for len(parentRef) > 0 && parentCount == 0 {
            ec := srec[parentRef]

            // minor optimization: if the parentCount is already
            // >zero, then all of its parents are guaranteed >zero.
            parentCount = ec.itemCount

            ec.itemCount++
            srec[parentRef] = ec

            parentRef = ec.ent.ParentRef
        }
    }

    // walk the map of folder entries; every folder with one or more
    // items gets added back to the items slice to be returned.
    for _, ec := range srec {
        if ec.itemCount > 0 {
            items = append(items, ec.ent)
        }
    }

    return &Details{
        DetailsModel: DetailsModel{items},
    }
}
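
The upward walk above is easier to see on a toy input. Below is a compact, standalone sketch of the same counting idea; the types are simplified stand-ins, and parentOf replaces the shortRef map lookup the real code uses:

```go
package main

import "fmt"

// entry is a pared-down stand-in for DetailsEntry: items point at their
// parent folder via ParentRef == that folder's ShortRef.
type entry struct {
	ShortRef  string
	ParentRef string
	isFolder  bool
}

func parentOf(ents []entry, shortRef string) string {
	for _, e := range ents {
		if e.isFolder && e.ShortRef == shortRef {
			return e.ParentRef
		}
	}

	return ""
}

// nonEmptyFolders climbs each item's ancestor chain, bumping a counter for
// every folder along the way; folders that never get counted are "empty".
func nonEmptyFolders(ents []entry) map[string]int {
	counts := map[string]int{}

	for _, e := range ents {
		if e.isFolder {
			continue
		}

		for ref := e.ParentRef; ref != ""; ref = parentOf(ents, ref) {
			counts[ref]++

			// same early exit as the real loop: once an ancestor already
			// had a nonzero count, everything above it is nonzero too.
			if counts[ref] > 1 {
				break
			}
		}
	}

	return counts
}

func main() {
	ents := []entry{
		{ShortRef: "root", isFolder: true},
		{ShortRef: "populated", ParentRef: "root", isFolder: true},
		{ShortRef: "empty", ParentRef: "root", isFolder: true},
		{ParentRef: "populated"}, // a single item
	}

	// "empty" never appears in the result, so it would be sliced out.
	fmt.Println(nonEmptyFolders(ents)) // map[populated:1 root:1]
}
```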

// FilterBoth returns a new Details struct with both filters applied in a single
// pass: all .meta and .dirmeta files are stripped out, and all empty (ie:
// containing no items) containers are removed.
func (d *Details) FilterBoth() *Details {
    type entCount struct {
        ent       DetailsEntry
        itemCount int
    }

    var (
        // shortRef: entCount
        srec  = map[string]entCount{}
        items = []DetailsEntry{}
    )

    // split the entries into items and folders.
    // folders are stored in a map by their shortRef for lookup.
    for _, ent := range d.Entries {
        if ent.isMetaFile() {
            continue
        }

        if ent.Folder == nil {
            items = append(items, ent)
        } else {
            srec[ent.ShortRef] = entCount{ent, 0}
        }
    }

    // for every item, add a count to the owning folder.
    // this assumes item parentRef == folder shortRef.
    for _, ent := range items {
        if len(ent.ParentRef) == 0 {
            continue
        }

        ec := srec[ent.ParentRef]
        ec.itemCount++
        srec[ent.ParentRef] = ec

        // to maintain a hierarchical count so that we don't
        // slice parent folders, this loop walks the tree upward
        // by parent ref, adding one count to each parent up
        // to the root.
        parentRef := ec.ent.ParentRef
        parentCount := 0

        for len(parentRef) > 0 && parentCount == 0 {
            ec := srec[parentRef]

            // minor optimization: if the parentCount is already
            // >zero, then all of its parents are guaranteed >zero.
            parentCount = ec.itemCount

            ec.itemCount++
            srec[parentRef] = ec

            parentRef = ec.ent.ParentRef
        }
    }

    // walk the map of folder entries; every folder with one or more
    // items gets added back to the items slice to be returned.
    for _, ec := range srec {
        if ec.itemCount > 0 {
            items = append(items, ec.ent)
        }
    }

    return &Details{
        DetailsModel: DetailsModel{items},
    }
}

// --------------------------------------------------------------------------------
// Entry
// --------------------------------------------------------------------------------

src/pkg/backup/details/details_benchmark_test.go (new file, 157 lines)
@@ -0,0 +1,157 @@
package details

import (
    "math/rand"
    "testing"
    "time"

    "github.com/google/uuid"
)

var (
    details512k = detailsBuilder(512000)
    details256k = detailsBuilder(256000)
    details102k = detailsBuilder(102000)
)

func detailsBuilder(fileCount int) *Details {
    alpha := make([]string, 26)

    for i := 0; i < 26; i++ {
        alpha[i] = string(rune(97 + i))
    }

    alphaPath := basePath.Append(alpha...)
    fes := FolderEntriesForPath(alphaPath, alphaPath)

    // populate items
    for i := 0; i < fileCount; i++ {
        var (
            ri       = rand.Int31n(26)
            itemName = "item_" + uuid.NewString()
            itemP    = basePath.Append(alpha[:ri]...).Append(itemName)
        )

        info := ItemInfo{
            OneDrive: &OneDriveInfo{
                ItemName: itemName,
            },
        }

        ent := folderEntry{
            RepoRef:   itemP.String(),
            ShortRef:  itemP.ShortRef(),
            ParentRef: itemP.Dir().String(),
            Info:      info,
        }

        fes = append(fes, ent)
    }

    // populate 13 empty folders with dirmeta items
    for i := 0; i < 13; i++ {
        var (
            ri       = rand.Int31n(26)
            fldName  = "empty_" + uuid.NewString()
            fldP     = basePath.Append(alpha[:ri]...).Append(fldName)
            itemName = fldName + ".dirmeta"
            itemP    = fldP.Append(itemName)
        )

        // folder
        info := ItemInfo{
            Folder: &FolderInfo{
                DisplayName: fldName,
            },
        }

        ent := folderEntry{
            RepoRef:   fldP.String(),
            ShortRef:  fldP.ShortRef(),
            ParentRef: fldP.Dir().String(),
            Info:      info,
        }

        fes = append(fes, ent)

        // dirmeta
        info = ItemInfo{
            OneDrive: &OneDriveInfo{
                ItemName: itemName,
            },
        }

        ent = folderEntry{
            RepoRef:   itemP.String(),
            ShortRef:  itemP.ShortRef(),
            ParentRef: itemP.Dir().String(),
            Info:      info,
        }

        fes = append(fes, ent)
    }

    // shuffle the array, to avoid miscalculations due to ordering
    rand.Seed(time.Now().UnixNano())
    rand.Shuffle(len(fes), func(i, j int) {
        fes[i], fes[j] = fes[j], fes[i]
    })

    // return it as a details struct
    return toDetails(fes)
}

var result *Details

func BenchmarkDetailsFiltering_512_both(b *testing.B) { benchmarkBoth(details512k, b) }
func BenchmarkDetailsFiltering_256_both(b *testing.B) { benchmarkBoth(details256k, b) }
func BenchmarkDetailsFiltering_102_both(b *testing.B) { benchmarkBoth(details102k, b) }
func BenchmarkDetailsFiltering_512_each(b *testing.B) { benchmarkEach(details512k, b) }
func BenchmarkDetailsFiltering_256_each(b *testing.B) { benchmarkEach(details256k, b) }
func BenchmarkDetailsFiltering_102_each(b *testing.B) { benchmarkEach(details102k, b) }
func BenchmarkDetailsFiltering_512_meta(b *testing.B) { benchmarkMeta(details512k, b) }
func BenchmarkDetailsFiltering_256_meta(b *testing.B) { benchmarkMeta(details256k, b) }
func BenchmarkDetailsFiltering_102_meta(b *testing.B) { benchmarkMeta(details102k, b) }
func BenchmarkDetailsFiltering_512_container(b *testing.B) { benchmarkContainer(details512k, b) }
func BenchmarkDetailsFiltering_256_container(b *testing.B) { benchmarkContainer(details256k, b) }
func BenchmarkDetailsFiltering_102_container(b *testing.B) { benchmarkContainer(details102k, b) }

func benchmarkBoth(d *Details, b *testing.B) {
    var d2 *Details

    for n := 0; n < b.N; n++ {
        d2 = d.FilterBoth()
    }

    result = d2
}

func benchmarkEach(d *Details, b *testing.B) {
    var d2 *Details

    for n := 0; n < b.N; n++ {
        d2 = d.FilterMetaFiles().FilterEmptyContainers()
    }

    result = d2
}

func benchmarkMeta(d *Details, b *testing.B) {
    var d2 *Details

    for n := 0; n < b.N; n++ {
        d2 = d.FilterMetaFiles()
    }

    result = d2
}

func benchmarkContainer(d *Details, b *testing.B) {
    var d2 *Details

    for n := 0; n < b.N; n++ {
        d2 = d.FilterEmptyContainers()
    }

    result = d2
}
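
The package-level `result` sink is the standard Go benchmarking idiom: assigning each iteration's output to a package variable keeps the compiler from optimizing the filter calls away. The suite compares `FilterBoth` (one pass) against chaining `FilterMetaFiles().FilterEmptyContainers()` (two passes) at roughly 102k, 256k, and 512k entries. It can be run with something like `go test -bench=DetailsFiltering ./pkg/backup/details/` from the src directory, with the exact invocation depending on local layout.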

@@ -995,3 +995,103 @@ func (suite *DetailsUnitSuite) TestFolderEntriesForPath() {
        })
    }
}

func (suite *DetailsUnitSuite) TestDetails_FilterMetaFiles() {
    t := suite.T()

    dm := DetailsModel{
        Entries: []DetailsEntry{
            {
                RepoRef: "a.data",
                ItemInfo: ItemInfo{
                    OneDrive: &OneDriveInfo{IsMeta: false},
                },
            },
            {
                RepoRef: "b.meta",
                ItemInfo: ItemInfo{
                    OneDrive: &OneDriveInfo{IsMeta: false},
                },
            },
            {
                RepoRef: "c.meta",
                ItemInfo: ItemInfo{
                    OneDrive: &OneDriveInfo{IsMeta: true},
                },
            },
        },
    }

    d := &Details{dm}

    d2 := d.FilterMetaFiles()

    assert.Len(t, d2.DetailsModel.Entries, 1)
    assert.Len(t, d.DetailsModel.Entries, 3)
}
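
One detail worth noticing in this test: the "b.meta" entry has IsMeta: false, yet only one of the three entries survives, so the filter must match on the RepoRef suffix as well as the flag. The isMetaFile helper itself is not shown in this diff; a plausible shape, inferred purely from the test's behavior (hypothetical, assumes the details package's types and a strings import, not the actual implementation):

```go
// hypothetical reconstruction of isMetaFile, inferred from
// TestDetails_FilterMetaFiles above: an entry is treated as a meta file if it
// is flagged IsMeta, or if its RepoRef carries a meta suffix (covering older
// backups recorded before the flag existed).
func (de DetailsEntry) isMetaFile() bool {
	return de.ItemInfo.OneDrive != nil &&
		(de.ItemInfo.OneDrive.IsMeta ||
			strings.HasSuffix(de.RepoRef, ".meta") ||
			strings.HasSuffix(de.RepoRef, ".dirmeta"))
}
```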

func toDetails(fes []folderEntry) *Details {
    d := &Details{
        DetailsModel: DetailsModel{
            Entries: make([]DetailsEntry, len(fes)),
        },
    }

    for i, fe := range fes {
        d.DetailsModel.Entries[i] = DetailsEntry{
            RepoRef:     fe.RepoRef,
            ShortRef:    fe.ShortRef,
            ParentRef:   fe.ParentRef,
            LocationRef: fe.LocationRef,
            ItemInfo:    fe.Info,
        }
    }

    return d
}

func (suite *DetailsUnitSuite) TestDetails_FilterEmptyContainers() {
    var (
        t     = suite.T()
        empty = basePath.Append("populated", "empty")
        fes   = FolderEntriesForPath(empty, empty)
        d     = toDetails(fes)
        itemP = basePath.Append("populated", "item")
    )

    item := DetailsEntry{
        RepoRef:     itemP.String(),
        ShortRef:    itemP.ShortRef(),
        ParentRef:   itemP.Dir().ShortRef(),
        LocationRef: "todo - not currently needed",
        ItemInfo: ItemInfo{
            OneDrive: &OneDriveInfo{
                ItemName: "item",
            },
        },
    }

    d.DetailsModel.Entries = append(d.DetailsModel.Entries, item)

    var (
        ds     = d.DetailsModel.Entries
        result = d.FilterEmptyContainers()
        rs     = result.DetailsModel.Entries
    )

    assert.Equal(t, len(ds)-1, len(rs), "one empty folder should have been removed")

    for _, r := range rs {
        assert.NotEqual(t, empty.String(), r.RepoRef, "the empty path should have been removed")
    }

    dFilt := make([]DetailsEntry, 0, len(ds)-1)

    for _, d := range ds {
        if d.RepoRef != empty.String() {
            dFilt = append(dFilt, d)
        }
    }

    assert.ElementsMatch(t, dFilt, rs, "all other paths should be present")
}

@@ -10,6 +10,7 @@ type Options struct {
     DisableMetrics     bool    `json:"disableMetrics"`
     FailFast           bool    `json:"failFast"`
     RestorePermissions bool    `json:"restorePermissions"`
+    SkipReduce         bool    `json:"skipReduce"`
     ToggleFeatures     Toggles `json:"ToggleFeatures"`
 }
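
Since Options is serialized via these struct tags, the new field rides along as `skipReduce`. A quick standalone check of the shape, using a local mirror of the fields shown above rather than the real control package:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// options mirrors the tagged fields of control.Options shown above,
// purely to illustrate the serialized form of the new field.
type options struct {
	DisableMetrics     bool `json:"disableMetrics"`
	FailFast           bool `json:"failFast"`
	RestorePermissions bool `json:"restorePermissions"`
	SkipReduce         bool `json:"skipReduce"`
}

func main() {
	b, err := json.Marshal(options{SkipReduce: true})
	if err != nil {
		panic(err)
	}

	fmt.Println(string(b))
	// {"disableMetrics":false,"failFast":false,"restorePermissions":false,"skipReduce":true}
}
```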

@@ -395,7 +395,7 @@ func normalizeCategorySet(t *testing.T, cats map[string]struct{}) []string {
 // multiple users
 
 type RepositoryLoadTestExchangeSuite struct {
-    suite.Suite
+    tester.Suite
     ctx  context.Context
     repo repository.Repository
     acct account.Account //lint:ignore U1000 future test use

@@ -2,7 +2,6 @@ package repository
 
 import (
     "context"
-    "strings"
     "time"
 
     "github.com/alcionai/clues"

@@ -10,14 +9,12 @@ import (
     "github.com/pkg/errors"
 
     "github.com/alcionai/corso/src/internal/common/crash"
-    "github.com/alcionai/corso/src/internal/connector/onedrive"
     "github.com/alcionai/corso/src/internal/events"
     "github.com/alcionai/corso/src/internal/kopia"
     "github.com/alcionai/corso/src/internal/model"
     "github.com/alcionai/corso/src/internal/observe"
     "github.com/alcionai/corso/src/internal/operations"
     "github.com/alcionai/corso/src/internal/streamstore"
-    "github.com/alcionai/corso/src/internal/version"
     "github.com/alcionai/corso/src/pkg/account"
     "github.com/alcionai/corso/src/pkg/backup"
     "github.com/alcionai/corso/src/pkg/backup/details"

@@ -361,21 +358,7 @@ func (r repository) BackupDetails(
         return nil, nil, errs.Fail(err)
     }
 
-    // Retroactively fill in isMeta information for items in older
-    // backup versions without that info
-    // version.Restore2 introduces the IsMeta flag, so only v1 needs a check.
-    if b.Version >= version.OneDrive1DataAndMetaFiles && b.Version < version.OneDrive3IsMetaMarker {
-        for _, d := range deets.Entries {
-            if d.OneDrive != nil {
-                if strings.HasSuffix(d.RepoRef, onedrive.MetaFileSuffix) ||
-                    strings.HasSuffix(d.RepoRef, onedrive.DirMetaFileSuffix) {
-                    d.OneDrive.IsMeta = true
-                }
-            }
-        }
-    }
-
-    return deets, b, errs
+    return deets.FilterMetaFiles().FilterEmptyContainers(), b, errs
 }
 
 // DeleteBackup removes the backup from both the model store and the backup storage.
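
Two things happen in the BackupDetails hunk. First, filter order matters: FilterMetaFiles runs before FilterEmptyContainers, since (per the comment on FilterEmptyContainers) unfiltered meta files would otherwise count as populating a folder. Second, the retroactive IsMeta backfill for older backup versions is deleted outright; judging by TestDetails_FilterMetaFiles, which strips a .meta entry whose IsMeta flag is false, the suffix check appears to have moved into the details package's own meta detection, making the backfill redundant.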

@@ -3,10 +3,15 @@ package repository_test
 
 import (
     "testing"
 
+    "github.com/microsoftgraph/msgraph-sdk-go/models"
+    "github.com/stretchr/testify/assert"
     "github.com/stretchr/testify/require"
     "github.com/stretchr/testify/suite"
 
     "github.com/alcionai/corso/src/internal/common"
+    "github.com/alcionai/corso/src/internal/common/ptr"
+    "github.com/alcionai/corso/src/internal/connector/graph"
+    "github.com/alcionai/corso/src/internal/connector/onedrive"
     "github.com/alcionai/corso/src/internal/tester"
     "github.com/alcionai/corso/src/pkg/account"
     "github.com/alcionai/corso/src/pkg/control"

@@ -97,6 +102,7 @@ func (suite *RepositorySuite) TestConnect() {
 
 type RepositoryIntegrationSuite struct {
     tester.Suite
+    userID string
 }
 
 func TestRepositoryIntegrationSuite(t *testing.T) {

@@ -109,6 +115,10 @@ func TestRepositoryIntegrationSuite(t *testing.T) {
     })
 }
 
+func (suite *RepositoryIntegrationSuite) SetupSuite() {
+    suite.userID = tester.M365UserID(suite.T())
+}
+
 func (suite *RepositoryIntegrationSuite) TestInitialize() {
     ctx, flush := tester.NewContext()
     defer flush()

@@ -219,3 +229,67 @@ func (suite *RepositoryIntegrationSuite) TestNewRestore() {
    require.NoError(t, err)
    require.NotNil(t, ro)
}

func (suite *RepositoryIntegrationSuite) TestBackupDetails_regression() {
    ctx, flush := tester.NewContext()
    defer flush()

    var (
        t    = suite.T()
        acct = tester.NewM365Account(t)
        st   = tester.NewPrefixedS3Storage(t)
        dest = "Corso_Restore_empty_" + common.FormatNow(common.SimpleTimeTesting)
    )

    m365, err := acct.M365Config()
    require.NoError(t, err)

    adpt, err := graph.CreateAdapter(acct.ID(), m365.AzureClientID, m365.AzureClientSecret)
    require.NoError(t, err)

    srv := graph.NewService(adpt)

    pager, err := onedrive.PagerForSource(onedrive.OneDriveSource, srv, suite.userID, nil)
    require.NoError(t, err)

    drives, err := onedrive.Drives(ctx, pager, false)
    require.NoError(t, err)

    d0 := drives[0]
    body := models.DriveItem{}
    body.SetName(&dest)

    fld := models.Folder{}
    fld.SetChildCount(ptr.To[int32](0))
    body.SetFolder(&fld)

    _, err = srv.Client().
        UsersById(suite.userID).
        DrivesById(*d0.GetId()).
        Items().
        Post(ctx, &body, nil)
    require.NoErrorf(t, err, "%+v", graph.ErrData(err))

    r, err := repository.Initialize(ctx, acct, st, control.Options{})
    require.NoError(t, err)

    sel := selectors.NewOneDriveBackup([]string{suite.userID})
    sel.Include(sel.Folders([]string{dest}))

    op, err := r.NewBackup(ctx, sel.Selector)
    require.NoError(t, err)
    require.NoError(t, op.Run(ctx))
    require.NotZero(t, op.Results.ItemsWritten)

    // the actual test. The backup details, having backed up an empty folder,
    // should not return the folder within the backup details. That value
    // should get filtered out, along with .meta and .dirmeta files.
    deets, _, ferr := r.BackupDetails(ctx, string(op.Results.BackupID))
    require.NoError(t, ferr.Failure())

    for _, ent := range deets.Entries {
        assert.NotContains(t, ent.RepoRef, dest)
        assert.NotContains(t, ent.RepoRef, onedrive.MetaFileSuffix)
        assert.NotContains(t, ent.RepoRef, onedrive.DirMetaFileSuffix)
    }
}