Add benchmark for backup/hierarchy merging (#4327)

Basic test to check runtime of merging
many folders with minimal data in each
folder.

Based on some of the existing tests
in the kopia package.

---

#### Does this PR need a docs update or release note?

- [ ]  Yes, it's included
- [ ] 🕐 Yes, but in a later PR
- [x]  No

#### Type of change

- [ ] 🌻 Feature
- [ ] 🐛 Bugfix
- [ ] 🗺️ Documentation
- [x] 🤖 Supportability/Tests
- [ ] 💻 CI/Deployment
- [ ] 🧹 Tech Debt/Cleanup

#### Issue(s)

* #4117

#### Test Plan

- [x] 💪 Manual
- [ ]  Unit test
- [ ] 💚 E2E
This commit is contained in:
ashmrtn 2023-10-09 09:56:16 -07:00 committed by GitHub
parent 757007e027
commit 60b046f5d0
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

View File

@ -0,0 +1,165 @@
package kopia
import (
"context"
"fmt"
"testing"
"github.com/alcionai/clues"
"github.com/kopia/kopia/repo/manifest"
"github.com/kopia/kopia/snapshot"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/alcionai/corso/src/internal/data"
exchMock "github.com/alcionai/corso/src/internal/m365/service/exchange/mock"
"github.com/alcionai/corso/src/internal/tester"
"github.com/alcionai/corso/src/pkg/backup/identity"
"github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/path"
)
// BenchmarkHierarchyMerge measures the runtime of merging a backup hierarchy
// containing many folders with minimal data in each folder. It first creates a
// base backup with collectionLimit folders, then times incremental backups
// that merge all of those folders from the base snapshot.
func BenchmarkHierarchyMerge(b *testing.B) {
	ctx, flush := tester.NewContext(b)
	defer flush()

	c, err := openKopiaRepo(b, ctx)
	require.NoError(b, err, clues.ToCore(err))

	w := &Wrapper{c}

	defer func() {
		err := w.Close(ctx)
		assert.NoError(b, err, clues.ToCore(err))
	}()

	var (
		cols                 []data.BackupCollection
		collectionLimit      = 1000
		collectionItemsLimit = 3
		itemData             = []byte("abcdefghijklmnopqrstuvwxyz")
	)

	// Common path prefix that all generated folders live under.
	baseStorePath, err := path.Build(
		"a-tenant",
		"a-user",
		path.ExchangeService,
		path.EmailCategory,
		false,
		"Inbox")
	require.NoError(b, err, clues.ToCore(err))

	// Generate collectionLimit sibling folders, each holding
	// collectionItemsLimit small items.
	for i := 0; i < collectionLimit; i++ {
		folderName := fmt.Sprintf("folder%d", i)

		storePath, err := baseStorePath.Append(false, folderName)
		require.NoError(b, err, clues.ToCore(err))

		col := exchMock.NewCollection(
			storePath,
			storePath,
			collectionItemsLimit)

		for j := 0; j < collectionItemsLimit; j++ {
			itemName := fmt.Sprintf("item%d", j)

			col.Names[j] = itemName
			col.Data[j] = itemData
		}

		cols = append(cols, col)
	}

	reasons := []identity.Reasoner{
		NewReason(
			testTenant,
			baseStorePath.ProtectedResource(),
			baseStorePath.Service(),
			baseStorePath.Category()),
	}

	type testCase struct {
		name        string
		baseBackups func(base ManifestEntry) BackupBases
		collections []data.BackupCollection
	}

	// Initial backup. All files should be considered new by kopia.
	baseBackupCase := testCase{
		name: "Setup",
		baseBackups: func(ManifestEntry) BackupBases {
			return NewMockBackupBases()
		},
		collections: cols,
	}

	// runAndTestBackup executes a single backup for the given test case,
	// asserts it completed without errors, and returns a ManifestEntry for the
	// produced snapshot so it can serve as the base of a later incremental
	// backup.
	runAndTestBackup := func(
		t tester.TestT,
		ctx context.Context,
		test testCase,
		base ManifestEntry,
	) ManifestEntry {
		bbs := test.baseBackups(base)

		stats, _, _, err := w.ConsumeBackupCollections(
			ctx,
			reasons,
			bbs,
			test.collections,
			nil,
			nil,
			true,
			fault.New(true))
		require.NoError(t, err, clues.ToCore(err))

		assert.Equal(t, 0, stats.IgnoredErrorCount)
		assert.Equal(t, 0, stats.ErrorCount)
		assert.False(t, stats.Incomplete)

		snap, err := snapshot.LoadSnapshot(
			ctx,
			w.c,
			manifest.ID(stats.SnapshotID))
		require.NoError(t, err, clues.ToCore(err))

		return ManifestEntry{
			Manifest: snap,
			Reasons:  reasons,
		}
	}

	b.Logf("setting up base backup\n")

	base := runAndTestBackup(b, ctx, baseBackupCase, ManifestEntry{})

	table := []testCase{
		{
			name: "Merge All",
			baseBackups: func(base ManifestEntry) BackupBases {
				return NewMockBackupBases().WithMergeBases(base)
			},
			// Single unchanged collection at the hierarchy root so every
			// folder from the base backup gets merged forward.
			collections: func() []data.BackupCollection {
				p, err := baseStorePath.Dir()
				require.NoError(b, err, clues.ToCore(err))

				col := exchMock.NewCollection(p, p, 0)
				col.ColState = data.NotMovedState
				col.PrevPath = p

				return []data.BackupCollection{col}
			}(),
		},
	}

	b.ResetTimer()

	for _, test := range table {
		test := test // capture per-iteration for the b.Run closure (pre-Go 1.22 semantics).

		// Include the case name so multiple table entries get distinct
		// sub-benchmark names instead of all reporting as num_dirs_N.
		b.Run(fmt.Sprintf("%s_num_dirs_%d", test.name, collectionLimit), func(b *testing.B) {
			ctx, flush := tester.NewContext(b)
			defer flush()

			// Exclude per-sub-benchmark context setup from the timed region.
			b.ResetTimer()

			for i := 0; i < b.N; i++ {
				runAndTestBackup(b, ctx, test, base)
			}
		})
	}
}