Finish wiring up Exchange preview backups (#4660)

This completes the wiring for Exchange preview backups. A few notable
points:
* Adds a new config sub-struct (`control.PreviewItemLimits`) for limits on item counts and data size
* Adds "reasonable defaults" (intentionally very small) for the Exchange data categories
* Adds "important" (include) and "skip" (exclude) container lists for the Exchange data categories

All of the above can be tweaked as we determine what values work best; a usage sketch follows below.
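
For illustration, a minimal sketch of how the new sub-struct might be configured. The type, field names, and defaults come from this PR's diff (`control.PreviewItemLimits`, `control.DefaultOptions()`); the surrounding `main` is hypothetical scaffolding, not an actual call site:

```go
package main

import "github.com/alcionai/corso/src/pkg/control"

func main() {
	// Start from the standard options and opt in to preview limits.
	opts := control.DefaultOptions()
	opts.PreviewLimits = control.PreviewItemLimits{
		Enabled:              true, // gates all preview-backup behavior
		MaxItems:             50,   // best-effort cap across the whole backup
		MaxItemsPerContainer: 10,
		MaxContainers:        5,
		// MaxBytes exists on the struct but isn't consulted by the
		// Exchange path in this diff.
	}

	// Zero-valued limits fall back to the small defaults added in this PR:
	// 5 containers x 10 items per container (50 items total).
	_ = opts
}
```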

Manually tested:
1. regular backup
2. preview backup
3. regular backup

Verified that no merge base is used for the preview backup in (2) and that the
merge base created in (1) is used for the backup in (3). This works because
preview backups disable merge bases, and the delta tokens they generate are
never reused by regular backups.
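
That behavior follows from the gating added in the backup operation (the `op.Options.PreviewLimits.Enabled` check in the operations hunk below). A toy sketch of the decision, with hypothetical helper names, illustrating the three-backup test sequence:

```go
package main

import "fmt"

// decideMergeBase mirrors the check this PR adds in BackupOperation.do:
// when preview limits are enabled, merge bases are disabled outright, so a
// preview backup neither consumes a prior merge base nor produces one for
// later backups to reuse.
func decideMergeBase(previewEnabled bool) string {
	if previewEnabled {
		return "disable merge bases"
	}
	return "reuse the merge base from the last complete backup"
}

func main() {
	fmt.Println("(1) regular:", decideMergeBase(false))
	fmt.Println("(2) preview:", decideMergeBase(true))
	fmt.Println("(3) regular:", decideMergeBase(false))
}
```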

This feature is not exposed via the CLI.

This PR may be easiest to review commit by commit.

---

#### Does this PR need a docs update or release note?

- [ ] Yes, it's included
- [ ] 🕐 Yes, but in a later PR
- [x] No

#### Type of change

- [x] 🌻 Feature
- [ ] 🐛 Bugfix
- [ ] 🗺️ Documentation
- [ ] 🤖 Supportability/Tests
- [ ] 💻 CI/Deployment
- [ ] 🧹 Tech Debt/Cleanup

#### Issues

Merge after:
* #4657
* #4607

#### Test Plan

- [x] 💪 Manual
- [x] Unit test
- [ ] 💚 E2E

---

@@ -24,6 +24,12 @@ import (
"github.com/alcionai/corso/src/pkg/services/m365/api/pagers"
)
const (
defaultPreviewContainerLimit = 5
defaultPreviewItemsPerContainerLimit = 10
defaultPreviewItemLimit = defaultPreviewContainerLimit * defaultPreviewItemsPerContainerLimit
)
func CreateCollections(
ctx context.Context,
bpc inject.BackupProducerConfig,
@@ -117,6 +123,7 @@ func populateCollections(
errs *fault.Bus,
) (map[string]data.BackupCollection, error) {
var (
err error
// folder ID -> BackupCollection.
collections = map[string]data.BackupCollection{}
// folder ID -> delta url or folder path lookups
@@ -126,6 +133,15 @@
// deleted from this map, leaving only the deleted folders behind
tombstones = makeTombstones(dps)
category = qp.Category
// Limits and counters below are currently only used for preview backups
// since they only act on a subset of items.
maxContainers = ctrlOpts.PreviewLimits.MaxContainers
maxItemsPerContainer = ctrlOpts.PreviewLimits.MaxItemsPerContainer
maxItems = ctrlOpts.PreviewLimits.MaxItems
addedItems int
addedContainers int
)
logger.Ctx(ctx).Infow("filling collections", "len_deltapaths", len(dps))
@@ -133,6 +149,45 @@
el := errs.Local()
// Preview backups select a reduced set of data. This is managed by ordering
// the set of results from the container resolver and reducing the number of
// items selected from each container.
if ctrlOpts.PreviewLimits.Enabled {
resolver, err = newRankedContainerResolver(
ctx,
resolver,
bh.folderGetter(),
qp.ProtectedResource.ID(),
// TODO(ashmrtn): Includes and excludes should really be associated with
// the service not the data category. This is because a single data
// handler may be used for multiple services (e.g. the drive handler is used
// for OneDrive, SharePoint, and Groups/Teams).
bh.previewIncludeContainers(),
bh.previewExcludeContainers())
if err != nil {
return nil, clues.Wrap(err, "creating ranked container resolver")
}
// Configure limits with reasonable defaults if they're not set.
if maxContainers == 0 {
maxContainers = defaultPreviewContainerLimit
}
if maxItemsPerContainer == 0 {
maxItemsPerContainer = defaultPreviewItemsPerContainerLimit
}
if maxItems == 0 {
maxItems = defaultPreviewItemLimit
}
logger.Ctx(ctx).Infow(
"running preview backup",
"item_limit", maxItems,
"container_limit", maxContainers,
"items_per_container_limit", maxItemsPerContainer)
}
for _, c := range resolver.Items() {
if el.Failure() != nil {
return nil, el.Failure()
@@ -187,6 +242,25 @@ func populateCollections(
ictx = clues.Add(ictx, "previous_path", prevPath)
// Since part of this is about figuring out how many items to get for this
// particular container we need to reconfigure for every container we see.
if ctrlOpts.PreviewLimits.Enabled {
toAdd := maxItems - addedItems
if addedContainers >= maxContainers || toAdd <= 0 {
break
}
if toAdd > maxItemsPerContainer {
toAdd = maxItemsPerContainer
}
// Delta tokens generated with this CallConfig shouldn't be used for
// regular backups. They may have different query parameters which will
// cause incorrect output for regular backups.
itemConfig.LimitResults = toAdd
}
addAndRem, err := bh.itemEnumerator().
GetAddedAndRemovedItemIDs(
ictx,
@@ -240,6 +314,8 @@ func populateCollections(
// add the current path for the container ID to be used in the next backup
// as the "previous path", for reference in case of a rename or relocation.
currPaths[cID] = currPath.String()
addedItems += len(addAndRem.Added)
addedContainers++
}
// A tombstone is a folder that needs to be marked for deletion.

---

@@ -3,11 +3,14 @@ package exchange
import (
"bytes"
"context"
"fmt"
"sync"
"testing"
"time"
"github.com/alcionai/clues"
"github.com/microsoft/kiota-abstractions-go/serialization"
"github.com/microsoftgraph/msgraph-sdk-go/models"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
@@ -24,6 +27,7 @@ import (
"github.com/alcionai/corso/src/internal/tester/tconfig"
"github.com/alcionai/corso/src/internal/version"
"github.com/alcionai/corso/src/pkg/account"
"github.com/alcionai/corso/src/pkg/backup/details"
"github.com/alcionai/corso/src/pkg/backup/metadata"
"github.com/alcionai/corso/src/pkg/control"
"github.com/alcionai/corso/src/pkg/count"
@@ -39,17 +43,49 @@ import (
// mocks
// ---------------------------------------------------------------------------
var _ backupHandler = &mockBackupHandler{}
var (
_ backupHandler = &mockBackupHandler{}
_ itemGetterSerializer = mockItemGetter{}
)
// mockItemGetter implements the basics required to allow calls to
// Collection.Items(). However, it returns static data.
type mockItemGetter struct{}
func (ig mockItemGetter) GetItem(
context.Context,
string,
string,
bool,
*fault.Bus,
) (serialization.Parsable, *details.ExchangeInfo, error) {
return models.NewMessage(), &details.ExchangeInfo{}, nil
}
func (ig mockItemGetter) Serialize(
context.Context,
serialization.Parsable,
string,
string,
) ([]byte, error) {
return []byte("foo"), nil
}
type mockBackupHandler struct {
mg mockGetter
category path.CategoryType
ac api.Client
userID string
mg mockGetter
fg containerGetter
category path.CategoryType
ac api.Client
userID string
previewIncludes []string
previewExcludes []string
}
func (bh mockBackupHandler) itemEnumerator() addedAndRemovedItemGetter { return bh.mg }
func (bh mockBackupHandler) itemHandler() itemGetterSerializer { return nil }
func (bh mockBackupHandler) itemHandler() itemGetterSerializer { return mockItemGetter{} }
func (bh mockBackupHandler) folderGetter() containerGetter { return bh.fg }
func (bh mockBackupHandler) previewIncludeContainers() []string { return bh.previewIncludes }
func (bh mockBackupHandler) previewExcludeContainers() []string { return bh.previewExcludes }
func (bh mockBackupHandler) NewContainerCache(
userID string,
@@ -75,7 +111,7 @@ type (
func (mg mockGetter) GetAddedAndRemovedItemIDs(
ctx context.Context,
userID, cID, prevDelta string,
_ api.CallConfig,
config api.CallConfig,
) (pagers.AddedAndRemoved, error) {
results, ok := mg.results[cID]
if !ok {
@@ -87,8 +123,13 @@ func (mg mockGetter) GetAddedAndRemovedItemIDs(
delta.URL = ""
}
resAdded := make(map[string]time.Time, len(results.added))
for _, add := range results.added {
toAdd := config.LimitResults
if toAdd == 0 || toAdd > len(results.added) {
toAdd = len(results.added)
}
resAdded := make(map[string]time.Time, toAdd)
for _, add := range results.added[:toAdd] {
resAdded[add] = time.Time{}
}
@@ -102,15 +143,16 @@ func (mg mockGetter) GetAddedAndRemovedItemIDs(
return aar, results.err
}
var _ graph.ContainerResolver = &mockResolver{}
type (
mockResolver struct {
items []graph.CachedContainer
added map[string]string
}
var (
_ graph.ContainerResolver = &mockResolver{}
_ containerGetter = &mockResolver{}
)
type mockResolver struct {
items []graph.CachedContainer
added map[string]string
}
func newMockResolver(items ...mockContainer) mockResolver {
is := make([]graph.CachedContainer, 0, len(items))
@@ -131,6 +173,21 @@ func (m mockResolver) ItemByID(id string) graph.CachedContainer {
return nil
}
// GetContainerByID returns the given container if it exists in the resolver.
// This is kind of merging functionality that we generally assume is separate,
// but it does allow for easier test setup.
func (m mockResolver) GetContainerByID(
ctx context.Context,
userID, dirID string,
) (graph.Container, error) {
c := m.ItemByID(dirID)
if c == nil {
return nil, data.ErrNotFound
}
return c, nil
}
func (m mockResolver) Items() []graph.CachedContainer {
return m.items
}
@@ -1704,6 +1761,637 @@ func (suite *CollectionPopulationSuite) TestFilterContainersAndFillCollections_r
}
}
func (suite *CollectionPopulationSuite) TestFilterContainersAndFillCollections_PreviewBackup() {
type itemContainer struct {
container mockContainer
added []string
removed []string
}
type expected struct {
mustHave []itemContainer
maybeHave []itemContainer
// numItems is the total number of added items to expect. Needed because
// some tests can return one of a set of items depending on the order
// containers are processed in.
numItems int
}
var (
containers []mockContainer
newDelta = pagers.DeltaUpdate{URL: "delta_url"}
)
for i := 0; i < 10; i++ {
id := fmt.Sprintf("%d", i)
name := fmt.Sprintf("display_name_%d", i)
containers = append(containers, mockContainer{
id: strPtr(id),
displayName: strPtr(name),
p: path.Builder{}.Append(id),
l: path.Builder{}.Append(name),
})
}
table := []struct {
name string
limits control.PreviewItemLimits
data []itemContainer
includes []string
excludes []string
expect expected
}{
{
name: "IncludeContainer NoItemLimit ContainerLimit",
limits: control.PreviewItemLimits{
Enabled: true,
MaxItems: 999,
MaxItemsPerContainer: 999,
MaxContainers: 1,
},
data: []itemContainer{
{
container: containers[0],
added: []string{"a1", "a2", "a3", "a4", "a5"},
},
{
container: containers[1],
added: []string{"a6", "a7", "a8", "a9", "a10"},
},
{
container: containers[2],
added: []string{"a11", "a12", "a13", "a14", "a15"},
},
},
includes: []string{ptr.Val(containers[1].GetId())},
expect: expected{
mustHave: []itemContainer{
{
container: containers[1],
added: []string{"a6", "a7", "a8", "a9", "a10"},
},
},
numItems: 5,
},
},
{
name: "IncludeContainer ItemLimit ContainerLimit",
limits: control.PreviewItemLimits{
Enabled: true,
MaxItems: 3,
MaxItemsPerContainer: 999,
MaxContainers: 1,
},
data: []itemContainer{
{
container: containers[0],
added: []string{"a1", "a2", "a3", "a4", "a5"},
},
{
container: containers[1],
added: []string{"a6", "a7", "a8", "a9", "a10"},
},
{
container: containers[2],
added: []string{"a11", "a12", "a13", "a14", "a15"},
},
},
includes: []string{ptr.Val(containers[1].GetId())},
expect: expected{
maybeHave: []itemContainer{
{
container: containers[1],
added: []string{"a6", "a7", "a8", "a9", "a10"},
},
},
numItems: 3,
},
},
{
name: "IncludeContainer ItemLimit NoContainerLimit",
limits: control.PreviewItemLimits{
Enabled: true,
MaxItems: 8,
MaxItemsPerContainer: 999,
MaxContainers: 999,
},
data: []itemContainer{
{
container: containers[0],
added: []string{"a1", "a2", "a3", "a4", "a5"},
},
{
container: containers[1],
added: []string{"a6", "a7", "a8", "a9", "a10"},
},
{
container: containers[2],
added: []string{"a11", "a12", "a13", "a14", "a15"},
},
},
includes: []string{ptr.Val(containers[1].GetId())},
expect: expected{
mustHave: []itemContainer{
{
container: containers[1],
added: []string{"a6", "a7", "a8", "a9", "a10"},
},
},
maybeHave: []itemContainer{
{
container: containers[0],
added: []string{"a1", "a2", "a3", "a4", "a5"},
},
{
container: containers[2],
added: []string{"a11", "a12", "a13", "a14", "a15"},
},
},
numItems: 8,
},
},
{
name: "PerContainerItemLimit NoContainerLimit",
limits: control.PreviewItemLimits{
Enabled: true,
MaxItems: 999,
MaxItemsPerContainer: 3,
MaxContainers: 999,
},
data: []itemContainer{
{
container: containers[0],
added: []string{"a1", "a2", "a3", "a4", "a5"},
},
{
container: containers[1],
added: []string{"a6", "a7", "a8", "a9", "a10"},
},
{
container: containers[2],
added: []string{"a11", "a12", "a13", "a14", "a15"},
},
},
expect: expected{
// The test isn't set up to handle partial containers, so the best we can
// do is check that all items are expected and the item limit is hit.
maybeHave: []itemContainer{
{
container: containers[1],
added: []string{"a6", "a7", "a8", "a9", "a10"},
},
{
container: containers[0],
added: []string{"a1", "a2", "a3", "a4", "a5"},
},
{
container: containers[2],
added: []string{"a11", "a12", "a13", "a14", "a15"},
},
},
numItems: 9,
},
},
{
name: "ExcludeContainer NoLimits",
limits: control.PreviewItemLimits{
Enabled: true,
MaxItems: 999,
MaxItemsPerContainer: 999,
MaxContainers: 999,
},
excludes: []string{ptr.Val(containers[1].GetId())},
data: []itemContainer{
{
container: containers[0],
added: []string{"a1", "a2", "a3", "a4", "a5"},
},
{
container: containers[1],
added: []string{"a6", "a7", "a8", "a9", "a10"},
},
{
container: containers[2],
added: []string{"a11", "a12", "a13", "a14", "a15"},
},
},
expect: expected{
// The test isn't set up to handle partial containers, so the best we can
// do is check that all items are expected and the item limit is hit.
maybeHave: []itemContainer{
{
container: containers[0],
added: []string{"a1", "a2", "a3", "a4", "a5"},
},
{
container: containers[2],
added: []string{"a11", "a12", "a13", "a14", "a15"},
},
},
numItems: 10,
},
},
{
name: "NotPreview IgnoresLimitsAndExcludeSet",
limits: control.PreviewItemLimits{
MaxItems: 1,
MaxItemsPerContainer: 1,
MaxContainers: 1,
},
excludes: []string{ptr.Val(containers[1].GetId())},
data: []itemContainer{
{
container: containers[0],
added: []string{"a1", "a2", "a3", "a4", "a5"},
},
{
container: containers[1],
added: []string{"a6", "a7", "a8", "a9", "a10"},
},
{
container: containers[2],
added: []string{"a11", "a12", "a13", "a14", "a15"},
},
},
expect: expected{
mustHave: []itemContainer{
{
container: containers[0],
added: []string{"a1", "a2", "a3", "a4", "a5"},
},
{
container: containers[1],
added: []string{"a6", "a7", "a8", "a9", "a10"},
},
{
container: containers[2],
added: []string{"a11", "a12", "a13", "a14", "a15"},
},
},
numItems: 15,
},
},
}
for _, test := range table {
suite.Run(test.name, func() {
t := suite.T()
ctx, flush := tester.NewContext(t)
defer flush()
var (
qp = graph.QueryParams{
Category: path.EmailCategory, // doesn't matter which one we use.
ProtectedResource: inMock.NewProvider("user_id", "user_name"),
TenantID: suite.creds.AzureTenantID,
}
statusUpdater = func(*support.ControllerOperationStatus) {}
allScope = selectors.NewExchangeBackup(nil).MailFolders(selectors.Any())[0]
dps = metadata.DeltaPaths{} // incrementals are tested separately
)
inputContainers := make([]mockContainer, 0, len(test.data))
inputItems := map[string]mockGetterResults{}
for _, item := range test.data {
inputContainers = append(inputContainers, item.container)
inputItems[ptr.Val(item.container.GetId())] = mockGetterResults{
added: item.added,
removed: item.removed,
newDelta: newDelta,
}
}
// Make sure concurrency limit is initialized to a non-zero value or we'll
// deadlock.
opts := control.DefaultOptions()
opts.FailureHandling = control.FailFast
opts.PreviewLimits = test.limits
resolver := newMockResolver(inputContainers...)
getter := mockGetter{results: inputItems}
mbh := mockBackupHandler{
mg: getter,
fg: resolver,
category: qp.Category,
previewIncludes: test.includes,
previewExcludes: test.excludes,
}
require.Equal(t, "user_id", qp.ProtectedResource.ID(), qp.ProtectedResource)
require.Equal(t, "user_name", qp.ProtectedResource.Name(), qp.ProtectedResource)
collections, err := populateCollections(
ctx,
qp,
mbh,
statusUpdater,
resolver,
allScope,
dps,
opts,
count.New(),
fault.New(true))
require.NoError(t, err, clues.ToCore(err))
var totalItems int
// collection assertions
for _, c := range collections {
if c.FullPath().Service() == path.ExchangeMetadataService {
continue
}
// We don't expect any deleted containers in this test.
if !assert.NotEqual(
t,
data.DeletedState,
c.State(),
"container marked deleted") {
continue
}
// TODO(ashmrtn): Remove when we make LocationPath part of the
// Collection interface.
lp := c.(data.LocationPather)
mustHave := map[string]struct{}{}
maybeHave := map[string]struct{}{}
containerKey := lp.LocationPath().String()
for _, item := range test.expect.mustHave {
// Get the right container of items.
if containerKey != item.container.l.String() {
continue
}
for _, id := range item.added {
mustHave[id] = struct{}{}
}
}
for _, item := range test.expect.maybeHave {
// Get the right container of items.
if containerKey != item.container.l.String() {
continue
}
for _, id := range item.added {
maybeHave[id] = struct{}{}
}
}
errs := fault.New(true)
for item := range c.Items(ctx, errs) {
// We don't expect deleted items in the test or in practice because we
// never reuse delta tokens for preview backups.
if item.Deleted() {
continue
}
totalItems++
var found bool
if _, found = mustHave[item.ID()]; found {
delete(mustHave, item.ID())
continue
}
if _, found = maybeHave[item.ID()]; found {
delete(maybeHave, item.ID())
continue
}
assert.True(t, found, "unexpected item %v", item.ID())
}
require.NoError(t, errs.Failure())
assert.Empty(
t,
mustHave,
"container %v missing required items",
lp.LocationPath().String())
}
assert.Equal(
t,
test.expect.numItems,
totalItems,
"total items seen across collections")
})
}
}
// TestFilterContainersAndFillCollections_PreviewBackup_DefaultLimits tests that
// default limits are applied when making a preview backup if the user doesn't
// give limits. It doesn't do detailed comparisons on which items/containers
// were selected for backup. For that, run
// TestFilterContainersAndFillCollections_PreviewBackup.
func (suite *CollectionPopulationSuite) TestFilterContainersAndFillCollections_PreviewBackup_DefaultLimits() {
type expected struct {
// numContainers is the total number of containers expected to be returned.
numContainers int
// numItemsPerContainer is the total number of items in each container
// expected to be returned.
numItemsPerContainer int
// numItems is the total number of items expected to be returned.
numItems int
}
newDelta := pagers.DeltaUpdate{URL: "delta_url"}
table := []struct {
name string
numContainers int
numItemsPerContainer int
limits control.PreviewItemLimits
expect expected
}{
{
name: "DefaultMaxItems",
numContainers: 1,
numItemsPerContainer: defaultPreviewItemLimit + 1,
limits: control.PreviewItemLimits{
Enabled: true,
MaxItemsPerContainer: 999,
MaxContainers: 999,
},
expect: expected{
numContainers: 1,
numItemsPerContainer: defaultPreviewItemLimit,
numItems: defaultPreviewItemLimit,
},
},
{
name: "DefaultMaxContainers",
numContainers: defaultPreviewContainerLimit + 1,
numItemsPerContainer: 1,
limits: control.PreviewItemLimits{
Enabled: true,
MaxItemsPerContainer: 999,
MaxItems: 999,
},
expect: expected{
numContainers: defaultPreviewContainerLimit,
numItemsPerContainer: 1,
numItems: defaultPreviewContainerLimit,
},
},
{
name: "DefaultMaxItemsPerContainer",
numContainers: 5,
numItemsPerContainer: defaultPreviewItemsPerContainerLimit,
limits: control.PreviewItemLimits{
Enabled: true,
MaxItems: 999,
MaxContainers: 999,
},
expect: expected{
numContainers: 5,
numItemsPerContainer: defaultPreviewItemsPerContainerLimit,
numItems: 5 * defaultPreviewItemsPerContainerLimit,
},
},
}
for _, test := range table {
suite.Run(test.name, func() {
t := suite.T()
ctx, flush := tester.NewContext(t)
defer flush()
var (
qp = graph.QueryParams{
Category: path.EmailCategory, // doesn't matter which one we use.
ProtectedResource: inMock.NewProvider("user_id", "user_name"),
TenantID: suite.creds.AzureTenantID,
}
statusUpdater = func(*support.ControllerOperationStatus) {}
allScope = selectors.NewExchangeBackup(nil).MailFolders(selectors.Any())[0]
dps = metadata.DeltaPaths{} // incrementals are tested separately
)
inputContainers := make([]mockContainer, 0, test.numContainers)
inputItems := map[string]mockGetterResults{}
for containerIdx := 0; containerIdx < test.numContainers; containerIdx++ {
id := fmt.Sprintf("container_%d", containerIdx)
name := fmt.Sprintf("display_name_%d", containerIdx)
container := mockContainer{
id: strPtr(id),
displayName: strPtr(name),
p: path.Builder{}.Append(id),
l: path.Builder{}.Append(name),
}
inputContainers = append(inputContainers, container)
added := make([]string, 0, test.numItemsPerContainer)
for itemIdx := 0; itemIdx < test.numItemsPerContainer; itemIdx++ {
added = append(
added,
fmt.Sprintf("item_%d-%d", containerIdx, itemIdx))
}
inputItems[id] = mockGetterResults{
added: added,
newDelta: newDelta,
}
}
// Make sure concurrency limit is initialized to a non-zero value or we'll
// deadlock.
opts := control.DefaultOptions()
opts.FailureHandling = control.FailFast
opts.PreviewLimits = test.limits
resolver := newMockResolver(inputContainers...)
getter := mockGetter{results: inputItems}
mbh := mockBackupHandler{
mg: getter,
fg: resolver,
category: qp.Category,
}
require.Equal(t, "user_id", qp.ProtectedResource.ID(), qp.ProtectedResource)
require.Equal(t, "user_name", qp.ProtectedResource.Name(), qp.ProtectedResource)
collections, err := populateCollections(
ctx,
qp,
mbh,
statusUpdater,
resolver,
allScope,
dps,
opts,
count.New(),
fault.New(true))
require.NoError(t, err, clues.ToCore(err))
var (
numContainers int
numItems int
)
// collection assertions
for _, c := range collections {
if c.FullPath().Service() == path.ExchangeMetadataService {
continue
}
// We don't expect any deleted containers in this test.
if !assert.NotEqual(
t,
data.DeletedState,
c.State(),
"container marked deleted") {
continue
}
numContainers++
var (
containerItems int
errs = fault.New(true)
)
for item := range c.Items(ctx, errs) {
// We don't expect deleted items in the test or in practice because we
// never reuse delta tokens for preview backups.
if !assert.False(t, item.Deleted(), "deleted item") {
continue
}
numItems++
containerItems++
}
require.NoError(t, errs.Failure())
assert.Equal(
t,
test.expect.numItemsPerContainer,
containerItems,
"items in container")
}
assert.Equal(
t,
test.expect.numItems,
numItems,
"total items seen across collections")
assert.Equal(
t,
test.expect.numContainers,
numContainers,
"total number of non-metadata containers")
})
}
}
func (suite *CollectionPopulationSuite) TestFilterContainersAndFillCollections_incrementals_nondelta() {
var (
userID = "user_id"

---

@@ -29,6 +29,20 @@ func (h contactBackupHandler) itemHandler() itemGetterSerializer {
return h.ac
}
func (h contactBackupHandler) folderGetter() containerGetter {
return h.ac
}
func (h contactBackupHandler) previewIncludeContainers() []string {
return []string{
"contacts",
}
}
func (h contactBackupHandler) previewExcludeContainers() []string {
return nil
}
func (h contactBackupHandler) NewContainerCache(
userID string,
) (string, graph.ContainerResolver) {

---

@@ -29,6 +29,20 @@ func (h eventBackupHandler) itemHandler() itemGetterSerializer {
return h.ac
}
func (h eventBackupHandler) folderGetter() containerGetter {
return h.ac
}
func (h eventBackupHandler) previewIncludeContainers() []string {
return []string{
"calendar",
}
}
func (h eventBackupHandler) previewExcludeContainers() []string {
return nil
}
func (h eventBackupHandler) NewContainerCache(
userID string,
) (string, graph.ContainerResolver) {

---

@@ -22,6 +22,9 @@ import (
type backupHandler interface {
itemEnumerator() addedAndRemovedItemGetter
itemHandler() itemGetterSerializer
folderGetter() containerGetter
previewIncludeContainers() []string
previewExcludeContainers() []string
NewContainerCache(userID string) (string, graph.ContainerResolver)
}

---

@@ -29,6 +29,25 @@ func (h mailBackupHandler) itemHandler() itemGetterSerializer {
return h.ac
}
func (h mailBackupHandler) folderGetter() containerGetter {
return h.ac
}
func (h mailBackupHandler) previewIncludeContainers() []string {
return []string{
"inbox",
}
}
func (h mailBackupHandler) previewExcludeContainers() []string {
return []string{
"drafts",
"outbox",
"recoverableitemsdeletions",
"junkemail",
}
}
func (h mailBackupHandler) NewContainerCache(
userID string,
) (string, graph.ContainerResolver) {

---

@@ -420,7 +420,7 @@ func (op *BackupOperation) do(
// TODO(ashmrtn): Until we use token versions to determine this, refactor
// input params to produceManifestsAndMetadata and do this in that function
// instead of here.
if op.Options.ToggleFeatures.PreviewBackup {
if op.Options.PreviewLimits.Enabled {
logger.Ctx(ctx).Info("disabling merge bases for preview backup")
mans.DisableMergeBases()
@@ -971,7 +971,7 @@ func (op *BackupOperation) createBackupModels(
//
// model.BackupTypeTag has more info about how these tags are used.
switch {
case op.Options.ToggleFeatures.PreviewBackup:
case op.Options.PreviewLimits.Enabled:
// Preview backups need to be successful and without errors to be considered
// valid. Just reuse the merge base check for that since it has the same
// requirements.

---

@@ -1924,7 +1924,7 @@ func (suite *AssistBackupIntegrationSuite) TestBackupTypesForFailureModes() {
opts := control.DefaultOptions()
opts.FailureHandling = test.failurePolicy
opts.ToggleFeatures.PreviewBackup = test.previewBackup
opts.PreviewLimits.Enabled = test.previewBackup
bo, err := NewBackupOperation(
ctx,

---

@@ -17,6 +17,16 @@ type Options struct {
Repo repository.Options `json:"repo"`
SkipReduce bool `json:"skipReduce"`
ToggleFeatures Toggles `json:"toggleFeatures"`
// PreviewItemLimits defines the number of items and/or amount of data to
// fetch on a best-effort basis. Right now it's used for preview backups.
//
// Since this is not split out by service or data categories these limits
// apply independently to all data categories that appear in a single backup
// where they are set. For example, if doing a teams backup and there's both a
// SharePoint site and Messages available, both data categories would try to
// backup data until the set limits without paying attention to what the other
// had already backed up.
PreviewLimits PreviewItemLimits `json:"previewItemLimits"`
}
type Parallelism struct {
@@ -26,6 +36,17 @@ type Parallelism struct {
ItemFetch int
}
// PreviewItemLimits describes best-effort maximum values to attempt to reach in
// this backup. Preview backups are used to demonstrate value by being quick to
// create.
type PreviewItemLimits struct {
MaxItems int
MaxItemsPerContainer int
MaxContainers int
MaxBytes int
Enabled bool
}
type FailurePolicy string
const (
@@ -83,11 +104,6 @@ type Toggles struct {
RunMigrations bool `json:"runMigrations"`
// PreviewBackup denotes that this backup contains a subset of information for
// the protected resource. PreviewBackups are used to demonstrate value by
// being quick to create.
PreviewBackup bool `json:"previewBackup"`
// DisableSlidingWindowLimiter disables the experimental sliding window rate
// limiter for graph API requests. This is only relevant for exchange backups.
// Setting this flag switches exchange backups to fallback to the default token