From d8763298ce64ac0340a0148b6d22281085ff5285 Mon Sep 17 00:00:00 2001 From: Keepers Date: Fri, 6 Jan 2023 15:19:45 -0700 Subject: [PATCH 01/22] test service_iterators with mocks (#2020) ## Description Now that we have a mocked api layer in place, we can test filterContainersAndFillCollections without integration dependencies. ## Does this PR need a docs update or release note? - [x] :no_entry: No ## Type of change - [x] :robot: Test ## Issue(s) * #1967 ## Test Plan - [x] :zap: Unit test --- .../exchange/container_resolver_test.go | 98 +-- .../connector/exchange/service_iterators.go | 16 +- .../exchange/service_iterators_test.go | 681 ++++++++++++++++++ src/internal/tester/resource_owners.go | 11 + 4 files changed, 750 insertions(+), 56 deletions(-) create mode 100644 src/internal/connector/exchange/service_iterators_test.go diff --git a/src/internal/connector/exchange/container_resolver_test.go b/src/internal/connector/exchange/container_resolver_test.go index 866d69930..d7c6651f1 100644 --- a/src/internal/connector/exchange/container_resolver_test.go +++ b/src/internal/connector/exchange/container_resolver_test.go @@ -19,24 +19,26 @@ import ( // mocks and helpers // --------------------------------------------------------------------------- +var _ graph.CachedContainer = &mockContainer{} + type mockContainer struct { - id *string - name *string - parentID *string + id *string + displayName *string + parentID *string + p *path.Builder } //nolint:revive -func (m mockContainer) GetId() *string { - return m.id -} - -func (m mockContainer) GetDisplayName() *string { - return m.name -} +func (m mockContainer) GetId() *string { return m.id } //nolint:revive -func (m mockContainer) GetParentFolderId() *string { - return m.parentID +func (m mockContainer) GetParentFolderId() *string { return m.parentID } +func (m mockContainer) GetDisplayName() *string { return m.displayName } +func (m mockContainer) Path() *path.Builder { return m.p } +func (m mockContainer) SetPath(p 
*path.Builder) {} + +func strPtr(s string) *string { + return &s } // --------------------------------------------------------------------------- @@ -67,45 +69,45 @@ var ( { name: "NilID", c: mockContainer{ - id: nil, - name: &testName, - parentID: &testParentID, + id: nil, + displayName: &testName, + parentID: &testParentID, }, check: assert.Error, }, { name: "NilDisplayName", c: mockContainer{ - id: &testID, - name: nil, - parentID: &testParentID, + id: &testID, + displayName: nil, + parentID: &testParentID, }, check: assert.Error, }, { name: "EmptyID", c: mockContainer{ - id: &emptyString, - name: &testName, - parentID: &testParentID, + id: &emptyString, + displayName: &testName, + parentID: &testParentID, }, check: assert.Error, }, { name: "EmptyDisplayName", c: mockContainer{ - id: &testID, - name: &emptyString, - parentID: &testParentID, + id: &testID, + displayName: &emptyString, + parentID: &testParentID, }, check: assert.Error, }, { name: "AllValues", c: mockContainer{ - id: &testID, - name: &testName, - parentID: &testParentID, + id: &testID, + displayName: &testName, + parentID: &testParentID, }, check: assert.NoError, }, @@ -125,18 +127,18 @@ func (suite *FolderCacheUnitSuite) TestCheckRequiredValues() { { name: "NilParentFolderID", c: mockContainer{ - id: &testID, - name: &testName, - parentID: nil, + id: &testID, + displayName: &testName, + parentID: nil, }, check: assert.Error, }, { name: "EmptyParentFolderID", c: mockContainer{ - id: &testID, - name: &testName, - parentID: &emptyString, + id: &testID, + displayName: &testName, + parentID: &emptyString, }, check: assert.Error, }, @@ -161,9 +163,9 @@ func (suite *FolderCacheUnitSuite) TestAddFolder() { name: "NoParentNoPath", cf: graph.NewCacheFolder( &mockContainer{ - id: &testID, - name: &testName, - parentID: nil, + id: &testID, + displayName: &testName, + parentID: nil, }, nil, ), @@ -173,9 +175,9 @@ func (suite *FolderCacheUnitSuite) TestAddFolder() { name: "NoParentPath", cf: 
graph.NewCacheFolder( &mockContainer{ - id: &testID, - name: &testName, - parentID: nil, + id: &testID, + displayName: &testName, + parentID: nil, }, path.Builder{}.Append("foo"), ), @@ -185,9 +187,9 @@ func (suite *FolderCacheUnitSuite) TestAddFolder() { name: "NoName", cf: graph.NewCacheFolder( &mockContainer{ - id: &testID, - name: nil, - parentID: &testParentID, + id: &testID, + displayName: nil, + parentID: &testParentID, }, path.Builder{}.Append("foo"), ), @@ -197,9 +199,9 @@ func (suite *FolderCacheUnitSuite) TestAddFolder() { name: "NoID", cf: graph.NewCacheFolder( &mockContainer{ - id: nil, - name: &testName, - parentID: &testParentID, + id: nil, + displayName: &testName, + parentID: &testParentID, }, path.Builder{}.Append("foo"), ), @@ -209,9 +211,9 @@ func (suite *FolderCacheUnitSuite) TestAddFolder() { name: "NoPath", cf: graph.NewCacheFolder( &mockContainer{ - id: &testID, - name: &testName, - parentID: &testParentID, + id: &testID, + displayName: &testName, + parentID: &testParentID, }, nil, ), diff --git a/src/internal/connector/exchange/service_iterators.go b/src/internal/connector/exchange/service_iterators.go index 86e598605..9a3ef3be7 100644 --- a/src/internal/connector/exchange/service_iterators.go +++ b/src/internal/connector/exchange/service_iterators.go @@ -95,18 +95,18 @@ func filterContainersAndFillCollections( added, removed, newDelta, err := getter.GetAddedAndRemovedItemIDs(ctx, qp.ResourceOwner, cID, prevDelta) if err != nil { + // note == nil check; only catches non-inFlight error cases. if graph.IsErrDeletedInFlight(err) == nil { errs = support.WrapAndAppend(qp.ResourceOwner, err, errs) - } else { - // race conditions happen, containers might get deleted while - // this process is in flight. If that happens, force the collection - // to reset which will prevent any old items from being retained in - // storage. If the container (or its children) are sill missing - // on the next backup, they'll get tombstoned. 
- newDelta = api.DeltaUpdate{Reset: true} + continue } - continue + // race conditions happen, containers might get deleted while + // this process is in flight. If that happens, force the collection + // to reset. This prevents any old items from being retained in + // storage. If the container (or its children) are sill missing + // on the next backup, they'll get tombstoned. + newDelta = api.DeltaUpdate{Reset: true} } if len(newDelta.URL) > 0 { diff --git a/src/internal/connector/exchange/service_iterators_test.go b/src/internal/connector/exchange/service_iterators_test.go new file mode 100644 index 000000000..bce71b5e9 --- /dev/null +++ b/src/internal/connector/exchange/service_iterators_test.go @@ -0,0 +1,681 @@ +package exchange + +import ( + "context" + "testing" + + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + "github.com/alcionai/corso/src/internal/common" + "github.com/alcionai/corso/src/internal/connector/exchange/api" + "github.com/alcionai/corso/src/internal/connector/graph" + "github.com/alcionai/corso/src/internal/connector/support" + "github.com/alcionai/corso/src/internal/data" + "github.com/alcionai/corso/src/internal/tester" + "github.com/alcionai/corso/src/pkg/account" + "github.com/alcionai/corso/src/pkg/control" + "github.com/alcionai/corso/src/pkg/path" + "github.com/alcionai/corso/src/pkg/selectors" +) + +// --------------------------------------------------------------------------- +// mocks +// --------------------------------------------------------------------------- + +var _ addedAndRemovedItemIDsGetter = &mockGetter{} + +type ( + mockGetter map[string]mockGetterResults + mockGetterResults struct { + added []string + removed []string + newDelta api.DeltaUpdate + err error + } +) + +func (mg mockGetter) GetAddedAndRemovedItemIDs( + ctx context.Context, + userID, cID, prevDelta string, +) ( + []string, + []string, + api.DeltaUpdate, + 
error, +) { + results, ok := mg[cID] + if !ok { + return nil, nil, api.DeltaUpdate{}, errors.New("mock not found for " + cID) + } + + return results.added, results.removed, results.newDelta, results.err +} + +var _ graph.ContainerResolver = &mockResolver{} + +type ( + mockResolver struct { + items []graph.CachedContainer + } +) + +func newMockResolver(items ...mockContainer) mockResolver { + is := make([]graph.CachedContainer, 0, len(items)) + + for _, i := range items { + is = append(is, i) + } + + return mockResolver{items: is} +} + +func (m mockResolver) Items() []graph.CachedContainer { + return m.items +} + +func (m mockResolver) AddToCache(context.Context, graph.Container) error { return nil } +func (m mockResolver) IDToPath(context.Context, string) (*path.Builder, error) { return nil, nil } +func (m mockResolver) PathInCache(string) (string, bool) { return "", false } +func (m mockResolver) Populate(context.Context, string, ...string) error { return nil } + +// --------------------------------------------------------------------------- +// tests +// --------------------------------------------------------------------------- + +type ServiceIteratorsSuite struct { + suite.Suite + creds account.M365Config +} + +func TestServiceIteratorsSuite(t *testing.T) { + suite.Run(t, new(ServiceIteratorsSuite)) +} + +func (suite *ServiceIteratorsSuite) SetupSuite() { + a := tester.NewM365Account(suite.T()) + m365, err := a.M365Config() + require.NoError(suite.T(), err) + suite.creds = m365 +} + +func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections() { + var ( + userID = "user_id" + qp = graph.QueryParams{ + Category: path.EmailCategory, // doesn't matter which one we use. 
+ ResourceOwner: userID, + Credentials: suite.creds, + } + statusUpdater = func(*support.ConnectorOperationStatus) {} + allScope = selectors.NewExchangeBackup(nil).MailFolders(selectors.Any())[0] + dps = DeltaPaths{} // incrementals are tested separately + commonResult = mockGetterResults{ + added: []string{"a1", "a2", "a3"}, + removed: []string{"r1", "r2", "r3"}, + newDelta: api.DeltaUpdate{URL: "delta_url"}, + } + errorResult = mockGetterResults{ + added: []string{"a1", "a2", "a3"}, + removed: []string{"r1", "r2", "r3"}, + newDelta: api.DeltaUpdate{URL: "delta_url"}, + err: assert.AnError, + } + deletedInFlightResult = mockGetterResults{ + added: []string{"a1", "a2", "a3"}, + removed: []string{"r1", "r2", "r3"}, + newDelta: api.DeltaUpdate{URL: "delta_url"}, + err: graph.ErrDeletedInFlight{Err: *common.EncapsulateError(assert.AnError)}, + } + container1 = mockContainer{ + id: strPtr("1"), + displayName: strPtr("display_name_1"), + p: path.Builder{}.Append("display_name_1"), + } + container2 = mockContainer{ + id: strPtr("2"), + displayName: strPtr("display_name_2"), + p: path.Builder{}.Append("display_name_2"), + } + ) + + table := []struct { + name string + getter mockGetter + resolver graph.ContainerResolver + scope selectors.ExchangeScope + failFast bool + expectErr assert.ErrorAssertionFunc + expectNewColls int + expectMetadataColls int + expectDoNotMergeColls int + }{ + { + name: "happy path, one container", + getter: map[string]mockGetterResults{ + "1": commonResult, + }, + resolver: newMockResolver(container1), + scope: allScope, + expectErr: assert.NoError, + expectNewColls: 1, + expectMetadataColls: 1, + }, + { + name: "happy path, many containers", + getter: map[string]mockGetterResults{ + "1": commonResult, + "2": commonResult, + }, + resolver: newMockResolver(container1, container2), + scope: allScope, + expectErr: assert.NoError, + expectNewColls: 2, + expectMetadataColls: 1, + }, + { + name: "happy path, many containers, same display name", + 
getter: map[string]mockGetterResults{ + "1": commonResult, + "2": commonResult, + }, + resolver: newMockResolver(container1, container2), + scope: allScope, + expectErr: assert.NoError, + expectNewColls: 2, + expectMetadataColls: 1, + }, + { + name: "no containers pass scope", + getter: map[string]mockGetterResults{ + "1": commonResult, + "2": commonResult, + }, + resolver: newMockResolver(container1, container2), + scope: selectors.NewExchangeBackup(nil).MailFolders(selectors.None())[0], + expectErr: assert.NoError, + expectNewColls: 0, + expectMetadataColls: 1, + }, + { + name: "err: deleted in flight", + getter: map[string]mockGetterResults{ + "1": deletedInFlightResult, + }, + resolver: newMockResolver(container1), + scope: allScope, + expectErr: assert.NoError, + expectNewColls: 1, + expectMetadataColls: 1, + expectDoNotMergeColls: 1, + }, + { + name: "err: other error", + getter: map[string]mockGetterResults{ + "1": errorResult, + }, + resolver: newMockResolver(container1), + scope: allScope, + expectErr: assert.Error, + expectNewColls: 0, + expectMetadataColls: 1, + }, + { + name: "half collections error: deleted in flight", + getter: map[string]mockGetterResults{ + "1": deletedInFlightResult, + "2": commonResult, + }, + resolver: newMockResolver(container1, container2), + scope: allScope, + expectErr: assert.NoError, + expectNewColls: 2, + expectMetadataColls: 1, + expectDoNotMergeColls: 1, + }, + { + name: "half collections error: other error", + getter: map[string]mockGetterResults{ + "1": errorResult, + "2": commonResult, + }, + resolver: newMockResolver(container1, container2), + scope: allScope, + expectErr: assert.Error, + expectNewColls: 1, + expectMetadataColls: 1, + }, + { + name: "half collections error: deleted in flight, fail fast", + getter: map[string]mockGetterResults{ + "1": deletedInFlightResult, + "2": commonResult, + }, + resolver: newMockResolver(container1, container2), + scope: allScope, + failFast: true, + expectErr: assert.NoError, + 
expectNewColls: 2, + expectMetadataColls: 1, + expectDoNotMergeColls: 1, + }, + { + name: "half collections error: other error, fail fast", + getter: map[string]mockGetterResults{ + "1": errorResult, + "2": commonResult, + }, + resolver: newMockResolver(container1, container2), + scope: allScope, + failFast: true, + expectErr: assert.Error, + expectNewColls: 0, + expectMetadataColls: 0, + }, + } + for _, test := range table { + suite.T().Run(test.name, func(t *testing.T) { + ctx, flush := tester.NewContext() + defer flush() + + collections := map[string]data.Collection{} + + err := filterContainersAndFillCollections( + ctx, + qp, + test.getter, + collections, + statusUpdater, + test.resolver, + test.scope, + dps, + control.Options{FailFast: test.failFast}, + ) + test.expectErr(t, err) + + // collection assertions + + deleteds, news, metadatas, doNotMerges := 0, 0, 0, 0 + for _, c := range collections { + if c.FullPath().Service() == path.ExchangeMetadataService { + metadatas++ + continue + } + + if c.State() == data.DeletedState { + deleteds++ + } + + if c.State() == data.NewState { + news++ + } + + if c.DoNotMergeItems() { + doNotMerges++ + } + } + + assert.Zero(t, deleteds, "deleted collections") + assert.Equal(t, test.expectNewColls, news, "new collections") + assert.Equal(t, test.expectMetadataColls, metadatas, "metadata collections") + assert.Equal(t, test.expectDoNotMergeColls, doNotMerges, "doNotMerge collections") + + // items in collections assertions + for k, expect := range test.getter { + coll := collections[k] + + if coll == nil { + continue + } + + exColl, ok := coll.(*Collection) + require.True(t, ok, "collection is an *exchange.Collection") + + assert.ElementsMatch(t, expect.added, exColl.added, "added items") + assert.ElementsMatch(t, expect.removed, exColl.removed, "removed items") + } + }) + } +} + +func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections_incrementals() { + var ( + userID = "user_id" + tenantID = 
tester.M365TenantID(suite.T()) + cat = path.EmailCategory // doesn't matter which one we use, + qp = graph.QueryParams{ + Category: cat, + ResourceOwner: userID, + Credentials: suite.creds, + } + statusUpdater = func(*support.ConnectorOperationStatus) {} + allScope = selectors.NewExchangeBackup(nil).MailFolders(selectors.Any())[0] + commonResults = mockGetterResults{ + added: []string{"added"}, + newDelta: api.DeltaUpdate{URL: "new_delta_url"}, + } + expiredResults = mockGetterResults{ + added: []string{"added"}, + newDelta: api.DeltaUpdate{ + URL: "new_delta_url", + Reset: true, + }, + } + ) + + prevPath := func(t *testing.T, at ...string) path.Path { + p, err := path.Builder{}. + Append(at...). + ToDataLayerExchangePathForCategory(tenantID, userID, cat, false) + require.NoError(t, err) + + return p + } + + type endState struct { + state data.CollectionState + doNotMerge bool + } + + table := []struct { + name string + getter mockGetter + resolver graph.ContainerResolver + dps DeltaPaths + expect map[string]endState + }{ + { + name: "new container", + getter: map[string]mockGetterResults{ + "1": commonResults, + }, + resolver: newMockResolver(mockContainer{ + id: strPtr("1"), + displayName: strPtr("new"), + p: path.Builder{}.Append("1", "new"), + }), + dps: DeltaPaths{}, + expect: map[string]endState{ + "1": {data.NewState, false}, + }, + }, + { + name: "not moved container", + getter: map[string]mockGetterResults{ + "1": commonResults, + }, + resolver: newMockResolver(mockContainer{ + id: strPtr("1"), + displayName: strPtr("not_moved"), + p: path.Builder{}.Append("1", "not_moved"), + }), + dps: DeltaPaths{ + "1": DeltaPath{ + delta: "old_delta_url", + path: prevPath(suite.T(), "1", "not_moved").String(), + }, + }, + expect: map[string]endState{ + "1": {data.NotMovedState, false}, + }, + }, + { + name: "moved container", + getter: map[string]mockGetterResults{ + "1": commonResults, + }, + resolver: newMockResolver(mockContainer{ + id: strPtr("1"), + displayName: 
strPtr("moved"), + p: path.Builder{}.Append("1", "moved"), + }), + dps: DeltaPaths{ + "1": DeltaPath{ + delta: "old_delta_url", + path: prevPath(suite.T(), "1", "prev").String(), + }, + }, + expect: map[string]endState{ + "1": {data.MovedState, false}, + }, + }, + { + name: "deleted container", + getter: map[string]mockGetterResults{}, + resolver: newMockResolver(), + dps: DeltaPaths{ + "1": DeltaPath{ + delta: "old_delta_url", + path: prevPath(suite.T(), "1", "deleted").String(), + }, + }, + expect: map[string]endState{ + "1": {data.DeletedState, false}, + }, + }, + { + name: "one deleted, one new", + getter: map[string]mockGetterResults{ + "2": commonResults, + }, + resolver: newMockResolver(mockContainer{ + id: strPtr("2"), + displayName: strPtr("new"), + p: path.Builder{}.Append("2", "new"), + }), + dps: DeltaPaths{ + "1": DeltaPath{ + delta: "old_delta_url", + path: prevPath(suite.T(), "1", "deleted").String(), + }, + }, + expect: map[string]endState{ + "1": {data.DeletedState, false}, + "2": {data.NewState, false}, + }, + }, + { + name: "one deleted, one new, same path", + getter: map[string]mockGetterResults{ + "2": commonResults, + }, + resolver: newMockResolver(mockContainer{ + id: strPtr("2"), + displayName: strPtr("same"), + p: path.Builder{}.Append("2", "same"), + }), + dps: DeltaPaths{ + "1": DeltaPath{ + delta: "old_delta_url", + path: prevPath(suite.T(), "1", "same").String(), + }, + }, + expect: map[string]endState{ + "1": {data.DeletedState, false}, + "2": {data.NewState, false}, + }, + }, + { + name: "one moved, one new, same path", + getter: map[string]mockGetterResults{ + "1": commonResults, + "2": commonResults, + }, + resolver: newMockResolver( + mockContainer{ + id: strPtr("1"), + displayName: strPtr("moved"), + p: path.Builder{}.Append("1", "moved"), + }, + mockContainer{ + id: strPtr("2"), + displayName: strPtr("prev"), + p: path.Builder{}.Append("2", "prev"), + }, + ), + dps: DeltaPaths{ + "1": DeltaPath{ + delta: "old_delta_url", + path: 
prevPath(suite.T(), "1", "prev").String(), + }, + }, + expect: map[string]endState{ + "1": {data.MovedState, false}, + "2": {data.NewState, false}, + }, + }, + { + name: "bad previous path strings", + getter: map[string]mockGetterResults{ + "1": commonResults, + }, + resolver: newMockResolver(mockContainer{ + id: strPtr("1"), + displayName: strPtr("not_moved"), + p: path.Builder{}.Append("1", "not_moved"), + }), + dps: DeltaPaths{ + "1": DeltaPath{ + delta: "old_delta_url", + path: "1/fnords/mc/smarfs", + }, + "2": DeltaPath{ + delta: "old_delta_url", + path: "2/fnords/mc/smarfs", + }, + }, + expect: map[string]endState{ + "1": {data.NewState, false}, + }, + }, + { + name: "delta expiration", + getter: map[string]mockGetterResults{ + "1": expiredResults, + }, + resolver: newMockResolver(mockContainer{ + id: strPtr("1"), + displayName: strPtr("same"), + p: path.Builder{}.Append("1", "same"), + }), + dps: DeltaPaths{ + "1": DeltaPath{ + delta: "old_delta_url", + path: prevPath(suite.T(), "1", "same").String(), + }, + }, + expect: map[string]endState{ + "1": {data.NotMovedState, true}, + }, + }, + { + name: "a little bit of everything", + getter: map[string]mockGetterResults{ + "1": commonResults, // new + "2": commonResults, // notMoved + "3": commonResults, // moved + "4": expiredResults, // moved + // "5" gets deleted + }, + resolver: newMockResolver( + mockContainer{ + id: strPtr("1"), + displayName: strPtr("new"), + p: path.Builder{}.Append("1", "new"), + }, + mockContainer{ + id: strPtr("2"), + displayName: strPtr("not_moved"), + p: path.Builder{}.Append("2", "not_moved"), + }, + mockContainer{ + id: strPtr("3"), + displayName: strPtr("moved"), + p: path.Builder{}.Append("3", "moved"), + }, + mockContainer{ + id: strPtr("4"), + displayName: strPtr("moved"), + p: path.Builder{}.Append("4", "moved"), + }, + ), + dps: DeltaPaths{ + "2": DeltaPath{ + delta: "old_delta_url", + path: prevPath(suite.T(), "2", "not_moved").String(), + }, + "3": DeltaPath{ + delta: 
"old_delta_url", + path: prevPath(suite.T(), "3", "prev").String(), + }, + "4": DeltaPath{ + delta: "old_delta_url", + path: prevPath(suite.T(), "4", "prev").String(), + }, + "5": DeltaPath{ + delta: "old_delta_url", + path: prevPath(suite.T(), "5", "deleted").String(), + }, + }, + expect: map[string]endState{ + "1": {data.NewState, false}, + "2": {data.NotMovedState, false}, + "3": {data.MovedState, false}, + "4": {data.MovedState, true}, + "5": {data.DeletedState, false}, + }, + }, + } + for _, test := range table { + suite.T().Run(test.name, func(t *testing.T) { + ctx, flush := tester.NewContext() + defer flush() + + collections := map[string]data.Collection{} + + err := filterContainersAndFillCollections( + ctx, + qp, + test.getter, + collections, + statusUpdater, + test.resolver, + allScope, + test.dps, + control.Options{ + EnabledFeatures: control.FeatureFlags{ + ExchangeIncrementals: true, + }, + }, + ) + assert.NoError(t, err) + + metadatas := 0 + for _, c := range collections { + p := c.FullPath() + if p == nil { + p = c.PreviousPath() + } + + require.NotNil(t, p) + + if p.Service() == path.ExchangeMetadataService { + metadatas++ + continue + } + + p0 := p.Folders()[0] + + expect, ok := test.expect[p0] + assert.True(t, ok, "collection is expected in result") + + assert.Equalf(t, expect.state, c.State(), "collection %s state", p0) + assert.Equalf(t, expect.doNotMerge, c.DoNotMergeItems(), "collection %s DoNotMergeItems", p0) + } + + assert.Equal(t, 1, metadatas, "metadata collections") + }) + } +} diff --git a/src/internal/tester/resource_owners.go b/src/internal/tester/resource_owners.go index e7b4cef4f..a322e5a1b 100644 --- a/src/internal/tester/resource_owners.go +++ b/src/internal/tester/resource_owners.go @@ -8,6 +8,17 @@ import ( "github.com/stretchr/testify/require" ) +// M365TenantID returns a tenantID string representing the azureTenantID described +// by either the env var AZURE_TENANT_ID, the corso_test.toml config +// file or the default value 
(in that order of priority). The default is a +// last-attempt fallback that will only work on alcion's testing org. +func M365TenantID(t *testing.T) string { + cfg, err := readTestConfig() + require.NoError(t, err, "retrieving m365 user id from test configuration") + + return cfg[TestCfgAzureTenantID] +} + // M365UserID returns an userID string representing the m365UserID described // by either the env var CORSO_M365_TEST_USER_ID, the corso_test.toml config // file or the default value (in that order of priority). The default is a From 70a8f53d65549de82924f6ba3267968c10ad4418 Mon Sep 17 00:00:00 2001 From: Keepers Date: Fri, 6 Jan 2023 15:47:16 -0700 Subject: [PATCH 02/22] full backup if prev snapshots have no details (#2049) ## Description In the event that a backup is completed but the details, somehow, isn't persisted, we want the next backup to do a full, instead of an incremental, backup. If we don't have this protection the following backups could end up in a bad state. Future changes will add better resilience so that the fallback isn't needed. ## Does this PR need a docs update or release note? - [x] :no_entry: No ## Type of change - [x] :sunflower: Feature ## Issue(s) * #1878 ## Test Plan - [x] :green_heart: E2E --- src/internal/kopia/snapshot_manager.go | 6 ++---- src/internal/operations/backup.go | 25 +++++++++++++++++++++++++ 2 files changed, 27 insertions(+), 4 deletions(-) diff --git a/src/internal/kopia/snapshot_manager.go b/src/internal/kopia/snapshot_manager.go index ef8e82e01..f82d742ed 100644 --- a/src/internal/kopia/snapshot_manager.go +++ b/src/internal/kopia/snapshot_manager.go @@ -322,11 +322,9 @@ func fetchPrevSnapshotManifests( // If the manifest already exists and it's incomplete then we should // merge the reasons for consistency. This will become easier to handle // once we update how checkpoint manifests are tagged. 
- if len(found.IncompleteReason) == 0 { - continue + if len(found.IncompleteReason) > 0 { + found.Reasons = append(found.Reasons, m.Reasons...) } - - found.Reasons = append(found.Reasons, m.Reasons...) } } } diff --git a/src/internal/operations/backup.go b/src/internal/operations/backup.go index e099d1b48..4de5c6aef 100644 --- a/src/internal/operations/backup.go +++ b/src/internal/operations/backup.go @@ -283,6 +283,31 @@ func produceManifestsAndMetadata( continue } + tk, _ := kopia.MakeTagKV(kopia.TagBackupID) + + bID := man.Tags[tk] + if len(bID) == 0 { + return nil, nil, errors.New("missing backup id in prior manifest") + } + + dID, _, err := sw.GetDetailsIDFromBackupID(ctx, model.StableID(bID)) + if err != nil { + return nil, nil, errors.Wrap(err, "retrieving prior backup data") + } + + // if no detailsID exists for any of the complete manifests, we want + // to fall back to a complete backup. This is a temporary prevention + // mechanism to keep backups from falling into a perpetually bad state. + // This makes an assumption that the ID points to a populated set of + // details; we aren't doing the work to look them up. + if len(dID) == 0 { + logger.Ctx(ctx).Infow( + "backup missing details ID, falling back to full backup", + "backup_id", bID) + + return ms, nil, nil + } + colls, err := collectMetadata(ctx, kw, man, metadataFiles, tenantID) if err != nil && !errors.Is(err, kopia.ErrNotFound) { // prior metadata isn't guaranteed to exist. From 186569087c68aeef81f62b414387b96d2852cec1 Mon Sep 17 00:00:00 2001 From: ashmrtn Date: Fri, 6 Jan 2023 15:10:55 -0800 Subject: [PATCH 03/22] Only make incremental backup if Reason:base is 1:1 (#2050) ## Description Ensure that each (resource owner, service, category) set of data is only sourced from a single base snapshot when doing an incremental backup. If not, fallback to doing a full backup. 
Failure to error out or fallback to a full backup may result in repeated or zombie items in the resulting backup as multiple Point-In-Time backups will be used to source the same data Incomplete manifests are ignored as they are currently only used for kopia-assisted incrementals, not sourcing items/backup details info when making a delta token-based incremental backup ## Does this PR need a docs update or release note? - [ ] :white_check_mark: Yes, it's included - [ ] :clock1: Yes, but in a later PR - [x] :no_entry: No ## Type of change - [x] :sunflower: Feature - [x] :bug: Bugfix - [ ] :world_map: Documentation - [ ] :robot: Test - [ ] :computer: CI/Deployment - [ ] :broom: Tech Debt/Cleanup ## Issue(s) * closes #1945 ## Test Plan - [ ] :muscle: Manual - [x] :zap: Unit test - [ ] :green_heart: E2E --- src/internal/operations/backup.go | 57 +++++++++++ src/internal/operations/backup_test.go | 131 +++++++++++++++++++++++++ 2 files changed, 188 insertions(+) diff --git a/src/internal/operations/backup.go b/src/internal/operations/backup.go index 4de5c6aef..8ba5869f3 100644 --- a/src/internal/operations/backup.go +++ b/src/internal/operations/backup.go @@ -6,6 +6,7 @@ import ( "github.com/google/uuid" multierror "github.com/hashicorp/go-multierror" + "github.com/kopia/kopia/repo/manifest" "github.com/pkg/errors" "github.com/alcionai/corso/src/internal/common" @@ -252,6 +253,46 @@ type backuper interface { ) (*kopia.BackupStats, *details.Builder, map[string]path.Path, error) } +func verifyDistinctBases(mans []*kopia.ManifestEntry) error { + var ( + errs *multierror.Error + reasons = map[string]manifest.ID{} + ) + + for _, man := range mans { + // Incomplete snapshots are used only for kopia-assisted incrementals. The + // fact that we need this check here makes it seem like this should live in + // the kopia code. 
However, keeping it here allows for better debugging as + // the kopia code only has access to a path builder which means it cannot + // remove the resource owner from the error/log output. That is also below + // the point where we decide if we should do a full backup or an + // incremental. + if len(man.IncompleteReason) > 0 { + continue + } + + for _, reason := range man.Reasons { + reasonKey := reason.ResourceOwner + reason.Service.String() + reason.Category.String() + + if b, ok := reasons[reasonKey]; ok { + errs = multierror.Append(errs, errors.Errorf( + "multiple base snapshots source data for %s %s. IDs: %s, %s", + reason.Service.String(), + reason.Category.String(), + b, + man.ID, + )) + + continue + } + + reasons[reasonKey] = man.ID + } + } + + return errs.ErrorOrNil() +} + // calls kopia to retrieve prior backup manifests, metadata collections to supply backup heuristics. func produceManifestsAndMetadata( ctx context.Context, @@ -278,6 +319,22 @@ func produceManifestsAndMetadata( return ms, nil, nil } + // We only need to check that we have 1:1 reason:base if we're doing an + // incremental with associated metadata. This ensures that we're only sourcing + // data from a single Point-In-Time (base) for each incremental backup. + // + // TODO(ashmrtn): This may need updating if we start sourcing item backup + // details from previous snapshots when using kopia-assisted incrementals. 
+ if err := verifyDistinctBases(ms); err != nil { + logger.Ctx(ctx).Warnw( + "base snapshot collision, falling back to full backup", + "error", + err, + ) + + return ms, nil, nil + } + for _, man := range ms { if len(man.IncompleteReason) > 0 { continue diff --git a/src/internal/operations/backup_test.go b/src/internal/operations/backup_test.go index ab0f5cd97..382c55515 100644 --- a/src/internal/operations/backup_test.go +++ b/src/internal/operations/backup_test.go @@ -386,6 +386,137 @@ func (suite *BackupOpSuite) TestBackupOperation_PersistResults() { } } +func (suite *BackupOpSuite) TestBackupOperation_VerifyDistinctBases() { + const user = "a-user" + + table := []struct { + name string + input []*kopia.ManifestEntry + errCheck assert.ErrorAssertionFunc + }{ + { + name: "SingleManifestMultipleReasons", + input: []*kopia.ManifestEntry{ + { + Manifest: &snapshot.Manifest{ + ID: "id1", + }, + Reasons: []kopia.Reason{ + { + ResourceOwner: user, + Service: path.ExchangeService, + Category: path.EmailCategory, + }, + { + ResourceOwner: user, + Service: path.ExchangeService, + Category: path.EventsCategory, + }, + }, + }, + }, + errCheck: assert.NoError, + }, + { + name: "MultipleManifestsDistinctReason", + input: []*kopia.ManifestEntry{ + { + Manifest: &snapshot.Manifest{ + ID: "id1", + }, + Reasons: []kopia.Reason{ + { + ResourceOwner: user, + Service: path.ExchangeService, + Category: path.EmailCategory, + }, + }, + }, + { + Manifest: &snapshot.Manifest{ + ID: "id2", + }, + Reasons: []kopia.Reason{ + { + ResourceOwner: user, + Service: path.ExchangeService, + Category: path.EventsCategory, + }, + }, + }, + }, + errCheck: assert.NoError, + }, + { + name: "MultipleManifestsSameReason", + input: []*kopia.ManifestEntry{ + { + Manifest: &snapshot.Manifest{ + ID: "id1", + }, + Reasons: []kopia.Reason{ + { + ResourceOwner: user, + Service: path.ExchangeService, + Category: path.EmailCategory, + }, + }, + }, + { + Manifest: &snapshot.Manifest{ + ID: "id2", + }, + Reasons: 
[]kopia.Reason{ + { + ResourceOwner: user, + Service: path.ExchangeService, + Category: path.EmailCategory, + }, + }, + }, + }, + errCheck: assert.Error, + }, + { + name: "MultipleManifestsSameReasonOneIncomplete", + input: []*kopia.ManifestEntry{ + { + Manifest: &snapshot.Manifest{ + ID: "id1", + }, + Reasons: []kopia.Reason{ + { + ResourceOwner: user, + Service: path.ExchangeService, + Category: path.EmailCategory, + }, + }, + }, + { + Manifest: &snapshot.Manifest{ + ID: "id2", + IncompleteReason: "checkpoint", + }, + Reasons: []kopia.Reason{ + { + ResourceOwner: user, + Service: path.ExchangeService, + Category: path.EmailCategory, + }, + }, + }, + }, + errCheck: assert.NoError, + }, + } + + for _, test := range table { + suite.T().Run(test.name, func(t *testing.T) { + test.errCheck(t, verifyDistinctBases(test.input)) + }) + } +} + func (suite *BackupOpSuite) TestBackupOperation_CollectMetadata() { var ( tenant = "a-tenant" From 059b860fde6ed21db2248602e69885336c39b867 Mon Sep 17 00:00:00 2001 From: Keepers Date: Sun, 8 Jan 2023 13:22:56 -0700 Subject: [PATCH 04/22] Invert incremental flags, default to incremental (#2051) ## Description Sets the default run behavior for exchange to use incremental backups. The cli feature flag for enabling exchange incrementals has been swapped for a toggle that disables incrementals, forcing a full backup ## Does this PR need a docs update or release note? 
- [x] :white_check_mark: Yes, it's included ## Type of change - [x] :sunflower: Feature ## Issue(s) * #1901 ## Test Plan - [x] :zap: Unit test - [x] :green_heart: E2E --- CHANGELOG.md | 4 +++ src/cli/backup/exchange.go | 2 +- src/cli/options/options.go | 24 ++++++++--------- .../exchange/service_iterators_test.go | 6 +---- src/internal/operations/backup.go | 2 +- .../operations/backup_integration_test.go | 16 ++++++------ src/pkg/control/options.go | 26 +++++++++++-------- 7 files changed, 42 insertions(+), 38 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b93597507..26296e27d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] (alpha) +### Added + +- Incremental backup support for exchange is now enabled by default. + ### Changed - The selectors Reduce() process will only include details that match the DiscreteOwner, if one is specified. diff --git a/src/cli/backup/exchange.go b/src/cli/backup/exchange.go index 6f068a8c3..1501ddcb8 100644 --- a/src/cli/backup/exchange.go +++ b/src/cli/backup/exchange.go @@ -105,7 +105,7 @@ func addExchangeCommands(cmd *cobra.Command) *cobra.Command { switch cmd.Use { case createCommand: c, fs = utils.AddCommand(cmd, exchangeCreateCmd()) - options.AddFeatureFlags(cmd, options.ExchangeIncrementals()) + options.AddFeatureToggle(cmd, options.DisableIncrementals()) c.Use = c.Use + " " + exchangeServiceCommandCreateUseSuffix c.Example = exchangeServiceCommandCreateExamples diff --git a/src/cli/options/options.go b/src/cli/options/options.go index 2d72f8d68..4988c29ca 100644 --- a/src/cli/options/options.go +++ b/src/cli/options/options.go @@ -19,8 +19,8 @@ func Control() control.Options { opt.DisableMetrics = true } - if exchangeIncrementals { - opt.EnabledFeatures.ExchangeIncrementals = true + if disableIncrementals { + opt.ToggleFeatures.DisableIncrementals = true } return opt @@ -53,28 +53,28 @@ func 
AddGlobalOperationFlags(cmd *cobra.Command) { // Feature Flags // --------------------------------------------------------------------------- -var exchangeIncrementals bool +var disableIncrementals bool type exposeFeatureFlag func(*pflag.FlagSet) -// AddFeatureFlags adds CLI flags for each exposed feature flags to the +// AddFeatureToggle adds CLI flags for each exposed feature toggle to the // persistent flag set within the command. -func AddFeatureFlags(cmd *cobra.Command, effs ...exposeFeatureFlag) { +func AddFeatureToggle(cmd *cobra.Command, effs ...exposeFeatureFlag) { fs := cmd.PersistentFlags() for _, fflag := range effs { fflag(fs) } } -// Adds the '--exchange-incrementals' cli flag which, when set, enables -// incrementals data retrieval for exchange backups. -func ExchangeIncrementals() func(*pflag.FlagSet) { +// Adds the hidden '--no-incrementals' cli flag which, when set, disables +// incremental backups. +func DisableIncrementals() func(*pflag.FlagSet) { return func(fs *pflag.FlagSet) { fs.BoolVar( - &exchangeIncrementals, - "exchange-incrementals", + &disableIncrementals, + "disable-incrementals", false, - "Enable incremental data retrieval in Exchange backups.") - cobra.CheckErr(fs.MarkHidden("exchange-incrementals")) + "Disable incremental data retrieval in backups.") + cobra.CheckErr(fs.MarkHidden("disable-incrementals")) } } diff --git a/src/internal/connector/exchange/service_iterators_test.go b/src/internal/connector/exchange/service_iterators_test.go index bce71b5e9..75a03c3d7 100644 --- a/src/internal/connector/exchange/service_iterators_test.go +++ b/src/internal/connector/exchange/service_iterators_test.go @@ -644,11 +644,7 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections_incre test.resolver, allScope, test.dps, - control.Options{ - EnabledFeatures: control.FeatureFlags{ - ExchangeIncrementals: true, - }, - }, + control.Options{}, ) assert.NoError(t, err) diff --git a/src/internal/operations/backup.go 
b/src/internal/operations/backup.go index 8ba5869f3..65e8bc037 100644 --- a/src/internal/operations/backup.go +++ b/src/internal/operations/backup.go @@ -212,7 +212,7 @@ func (op *BackupOperation) Run(ctx context.Context) (err error) { // checker to see if conditions are correct for incremental backup behavior such as // retrieving metadata like delta tokens and previous paths. func useIncrementalBackup(sel selectors.Selector, opts control.Options) bool { - return opts.EnabledFeatures.ExchangeIncrementals && sel.Service == selectors.ServiceExchange + return !opts.ToggleFeatures.DisableIncrementals } // --------------------------------------------------------------------------- diff --git a/src/internal/operations/backup_integration_test.go b/src/internal/operations/backup_integration_test.go index 0f7495b13..1b9e50c9d 100644 --- a/src/internal/operations/backup_integration_test.go +++ b/src/internal/operations/backup_integration_test.go @@ -52,7 +52,7 @@ func prepNewTestBackupOp( ctx context.Context, bus events.Eventer, sel selectors.Selector, - featureFlags control.FeatureFlags, + featureToggles control.Toggles, ) (BackupOperation, account.Account, *kopia.Wrapper, *kopia.ModelStore, func()) { //revive:enable:context-as-argument acct := tester.NewM365Account(t) @@ -90,7 +90,7 @@ func prepNewTestBackupOp( ms.Close(ctx) } - bo := newTestBackupOp(t, ctx, kw, ms, acct, sel, bus, featureFlags, closer) + bo := newTestBackupOp(t, ctx, kw, ms, acct, sel, bus, featureToggles, closer) return bo, acct, kw, ms, closer } @@ -109,7 +109,7 @@ func newTestBackupOp( acct account.Account, sel selectors.Selector, bus events.Eventer, - featureFlags control.FeatureFlags, + featureToggles control.Toggles, closer func(), ) BackupOperation { //revive:enable:context-as-argument @@ -118,7 +118,7 @@ func newTestBackupOp( opts = control.Options{} ) - opts.EnabledFeatures = featureFlags + opts.ToggleFeatures = featureToggles bo, err := NewBackupOperation(ctx, opts, kw, sw, acct, sel, bus) if 
!assert.NoError(t, err) { @@ -554,7 +554,7 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_exchange() { var ( mb = evmock.NewBus() sel = test.selector().Selector - ffs = control.FeatureFlags{ExchangeIncrementals: test.runIncremental} + ffs = control.Toggles{} ) bo, acct, kw, ms, closer := prepNewTestBackupOp(t, ctx, mb, sel, ffs) @@ -630,7 +630,7 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_exchangeIncrementals() { var ( t = suite.T() acct = tester.NewM365Account(t) - ffs = control.FeatureFlags{ExchangeIncrementals: true} + ffs = control.Toggles{} mb = evmock.NewBus() now = common.Now() users = []string{suite.user} @@ -1010,7 +1010,7 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_oneDrive() { sel.Include(sel.AllData()) - bo, _, _, _, closer := prepNewTestBackupOp(t, ctx, mb, sel.Selector, control.FeatureFlags{}) + bo, _, _, _, closer := prepNewTestBackupOp(t, ctx, mb, sel.Selector, control.Toggles{}) defer closer() runAndCheckBackup(t, ctx, &bo, mb) @@ -1032,7 +1032,7 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_sharePoint() { sel.Include(sel.AllData()) - bo, _, _, _, closer := prepNewTestBackupOp(t, ctx, mb, sel.Selector, control.FeatureFlags{}) + bo, _, _, _, closer := prepNewTestBackupOp(t, ctx, mb, sel.Selector, control.Toggles{}) defer closer() runAndCheckBackup(t, ctx, &bo, mb) diff --git a/src/pkg/control/options.go b/src/pkg/control/options.go index 47abe7111..9cc5a334a 100644 --- a/src/pkg/control/options.go +++ b/src/pkg/control/options.go @@ -6,17 +6,17 @@ import ( // Options holds the optional configurations for a process type Options struct { - Collision CollisionPolicy `json:"-"` - DisableMetrics bool `json:"disableMetrics"` - FailFast bool `json:"failFast"` - EnabledFeatures FeatureFlags `json:"enabledFeatures"` + Collision CollisionPolicy `json:"-"` + DisableMetrics bool `json:"disableMetrics"` + FailFast bool `json:"failFast"` + ToggleFeatures Toggles `json:"ToggleFeatures"` } // Defaults provides an 
Options with the default values set. func Defaults() Options { return Options{ - FailFast: true, - EnabledFeatures: FeatureFlags{}, + FailFast: true, + ToggleFeatures: Toggles{}, } } @@ -63,11 +63,15 @@ func DefaultRestoreDestination(timeFormat common.TimeFormat) RestoreDestination } // --------------------------------------------------------------------------- -// Feature Flags +// Feature Flags and Toggles // --------------------------------------------------------------------------- -type FeatureFlags struct { - // ExchangeIncrementals allow for re-use of delta links when backing up - // exchange data, reducing the amount of data pulled from graph. - ExchangeIncrementals bool `json:"incrementals,omitempty"` +// Toggles allows callers to force corso to behave in ways that deviate from +// the default expectations by turning on or shutting off certain features. +// The default state for every toggle is false; toggles are only turned on +// if specified by the caller. +type Toggles struct { + // DisableIncrementals prevents backups from using incremental lookups, + // forcing a new, complete backup of all data regardless of prior state. + DisableIncrementals bool `json:"exchangeIncrementals,omitempty"` } From ef050e67be4a550bdd2cc56be4e0e2a91643956e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 Jan 2023 05:58:59 +0000 Subject: [PATCH 05/22] =?UTF-8?q?=E2=AC=86=EF=B8=8F=20Bump=20dominikh/stat?= =?UTF-8?q?iccheck-action=20from=201.2.0=20to=201.3.0=20(#2057)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [dominikh/staticcheck-action](https://github.com/dominikh/staticcheck-action) from 1.2.0 to 1.3.0.
Changelog

Sourced from dominikh/staticcheck-action's changelog.

1.3.0

  • Add the output-format option
  • Add the output-file option
  • Add the merge-files option
  • Update action/cache dependency from v2 to v3
  • Update setup-go-faster dependency from v1.7.0 to v1.8.0
  • Update installed Go version from 1.17.x to 1.19.x
Commits
  • ba60535 Fix formatting of changelog
  • 97b4433 Update versions used in examples
  • 51e00fb Add changelog
  • 0c5d998 Update installed Go version to 1.19.x
  • ca65062 deps: upgrade setup-go-faster
  • b599c1d deps: upgrade action versions for CI
  • 9f77055 Support setting output format, writing to file and merging results
  • See full diff in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=dominikh/staticcheck-action&package-manager=github_actions&previous-version=1.2.0&new-version=1.3.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) You can trigger a rebase of this PR by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 962424f78..008bb6e35 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -369,7 +369,7 @@ jobs: run: go-licenses check github.com/alcionai/corso/src --ignore github.com/alcionai/corso/src - name: Run staticcheck - uses: dominikh/staticcheck-action@v1.2.0 + uses: dominikh/staticcheck-action@v1.3.0 with: install-go: false working-directory: src From aee508dfa7d135c202054e6182608f784fe135a0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 Jan 2023 06:26:21 +0000 Subject: [PATCH 06/22] =?UTF-8?q?=E2=AC=86=EF=B8=8F=20Bump=20github.com/aw?= =?UTF-8?q?s/aws-sdk-go=20from=201.44.174=20to=201.44.175=20in=20/src=20(#?= =?UTF-8?q?2058)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.44.174 to 1.44.175.
Release notes

Sourced from github.com/aws/aws-sdk-go's releases.

Release v1.44.175 (2023-01-06)

Service Client Updates

  • service/acm-pca: Updates service API and documentation
  • service/auditmanager: Updates service API and documentation
Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/aws/aws-sdk-go&package-manager=go_modules&previous-version=1.44.174&new-version=1.44.175)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) You can trigger a rebase of this PR by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- src/go.mod | 2 +- src/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/go.mod b/src/go.mod index bb04dd03a..dbcf2456c 100644 --- a/src/go.mod +++ b/src/go.mod @@ -4,7 +4,7 @@ go 1.19 require ( github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0 - github.com/aws/aws-sdk-go v1.44.174 + github.com/aws/aws-sdk-go v1.44.175 github.com/aws/aws-xray-sdk-go v1.8.0 github.com/google/uuid v1.3.0 github.com/hashicorp/go-multierror v1.1.1 diff --git a/src/go.sum b/src/go.sum index 035884040..88697a8c4 100644 --- a/src/go.sum +++ b/src/go.sum @@ -58,8 +58,8 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY3JY= github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= -github.com/aws/aws-sdk-go v1.44.174 h1:9lR4a6MKQW/t6YCG0ZKAt1GAkjdEPP8sWch/pfcuR0c= -github.com/aws/aws-sdk-go v1.44.174/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go v1.44.175 h1:c0NzHHnPXV5kJoTUFQxFN5cUPpX1SxO635XnwL5/oIY= +github.com/aws/aws-sdk-go v1.44.175/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/aws/aws-xray-sdk-go v1.8.0 h1:0xncHZ588wB/geLjbM/esoW3FOEThWy2TJyb4VXfLFY= github.com/aws/aws-xray-sdk-go v1.8.0/go.mod h1:7LKe47H+j3evfvS1+q0wzpoaGXGrF3mUsfM+thqVO+A= github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= From a70547d63354de01efc524fd853c7a15d98d3c5a Mon Sep 17 00:00:00 2001 From: Abin Simon Date: Tue, 10 Jan 2023 01:01:14 +0530 Subject: [PATCH 07/22] Run staticcheck on make lint (#2060) ## Description Staticcheck is disabled in golangci-lint as it was causing issues in the CI and so have to be run separately. ## Does this PR need a docs update or release note? 
- [ ] :white_check_mark: Yes, it's included - [ ] :clock1: Yes, but in a later PR - [x] :no_entry: No ## Type of change - [ ] :sunflower: Feature - [ ] :bug: Bugfix - [ ] :world_map: Documentation - [ ] :robot: Test - [ ] :computer: CI/Deployment - [x] :broom: Tech Debt/Cleanup ## Issue(s) * # ## Test Plan - [x] :muscle: Manual - [ ] :zap: Unit test - [ ] :green_heart: E2E --- src/Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Makefile b/src/Makefile index dc6a448f6..0d31d26d1 100644 --- a/src/Makefile +++ b/src/Makefile @@ -13,6 +13,7 @@ build: lint: check-lint-version golangci-lint run + staticcheck ./... check-lint-version: check-lint @if [ "$(LINT_VERSION)" != "$(WANTED_LINT_VERSION)" ]; then \ From 04d25e171ee773bf1b8c1ec942d386c5f8bc764d Mon Sep 17 00:00:00 2001 From: ashmrtn Date: Mon, 9 Jan 2023 12:04:34 -0800 Subject: [PATCH 08/22] Add a common prefix to test storage prefixes (#2054) ## Description This results in all test kopia repositories being created under the same "folder" in S3, thus making manual cleanup much easier. ## Does this PR need a docs update or release note? 
- [ ] :white_check_mark: Yes, it's included - [ ] :clock1: Yes, but in a later PR - [x] :no_entry: No ## Type of change - [ ] :sunflower: Feature - [ ] :bug: Bugfix - [ ] :world_map: Documentation - [ ] :robot: Test - [ ] :computer: CI/Deployment - [x] :broom: Tech Debt/Cleanup ## Issue(s) * closes #2053 ## Test Plan - [x] :muscle: Manual - [ ] :zap: Unit test - [ ] :green_heart: E2E --- src/internal/tester/storage.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/internal/tester/storage.go b/src/internal/tester/storage.go index d4b50bb09..0cfabba20 100644 --- a/src/internal/tester/storage.go +++ b/src/internal/tester/storage.go @@ -9,6 +9,8 @@ import ( "github.com/alcionai/corso/src/pkg/storage" ) +const testRepoRootPrefix = "corso_integration_test/" + var AWSStorageCredEnvs = []string{ credentials.AWSAccessKeyID, credentials.AWSSecretAccessKey, @@ -31,7 +33,7 @@ func NewPrefixedS3Storage(t *testing.T) storage.Storage { storage.ProviderS3, storage.S3Config{ Bucket: cfg[TestCfgBucket], - Prefix: t.Name() + "-" + now, + Prefix: testRepoRootPrefix + t.Name() + "-" + now, }, storage.CommonConfig{ Corso: credentials.GetCorso(), From f3bf09ba8a081fb643edb30cd40a2f0f9950671c Mon Sep 17 00:00:00 2001 From: Keepers Date: Mon, 9 Jan 2023 15:01:21 -0700 Subject: [PATCH 09/22] protect against missing backups in manifests (#2063) ## Description If a prior manifest is missing a backup, it should be handled the same way as when a manifest's backup is missing a details ID: the metadata is skipped, and a full backup is performed. ## Does this PR need a docs update or release note? 
- [x] :no_entry: No ## Type of change - [x] :bug: Bugfix ## Issue(s) * #2062 ## Test Plan - [x] :zap: Unit test - [x] :green_heart: E2E --- src/internal/operations/backup.go | 32 ++++++++++++++++++++----------- 1 file changed, 21 insertions(+), 11 deletions(-) diff --git a/src/internal/operations/backup.go b/src/internal/operations/backup.go index 65e8bc037..718b964bc 100644 --- a/src/internal/operations/backup.go +++ b/src/internal/operations/backup.go @@ -151,7 +151,7 @@ func (op *BackupOperation) Run(ctx context.Context) (err error) { } }() - mans, mdColls, err := produceManifestsAndMetadata(ctx, op.kopia, op.store, oc, tenantID, uib) + mans, mdColls, canUseMetaData, err := produceManifestsAndMetadata(ctx, op.kopia, op.store, oc, tenantID, uib) if err != nil { opStats.readErr = errors.Wrap(err, "connecting to M365") return opStats.readErr @@ -178,7 +178,7 @@ func (op *BackupOperation) Run(ctx context.Context) (err error) { mans, cs, op.Results.BackupID, - uib) + uib && canUseMetaData) if err != nil { opStats.writeErr = errors.Wrap(err, "backing up service data") return opStats.writeErr @@ -301,7 +301,7 @@ func produceManifestsAndMetadata( oc *kopia.OwnersCats, tenantID string, getMetadata bool, -) ([]*kopia.ManifestEntry, []data.Collection, error) { +) ([]*kopia.ManifestEntry, []data.Collection, bool, error) { var ( metadataFiles = graph.AllMetadataFileNames() collections []data.Collection @@ -312,11 +312,11 @@ func produceManifestsAndMetadata( oc, map[string]string{kopia.TagBackupCategory: ""}) if err != nil { - return nil, nil, err + return nil, nil, false, err } if !getMetadata { - return ms, nil, nil + return ms, nil, false, nil } // We only need to check that we have 1:1 reason:base if we're doing an @@ -332,7 +332,7 @@ func produceManifestsAndMetadata( err, ) - return ms, nil, nil + return ms, nil, false, nil } for _, man := range ms { @@ -344,12 +344,22 @@ func produceManifestsAndMetadata( bID := man.Tags[tk] if len(bID) == 0 { - return nil, nil, 
errors.New("missing backup id in prior manifest") + return nil, nil, false, errors.New("snapshot manifest missing backup ID") } dID, _, err := sw.GetDetailsIDFromBackupID(ctx, model.StableID(bID)) if err != nil { - return nil, nil, errors.Wrap(err, "retrieving prior backup data") + // if no backup exists for any of the complete manifests, we want + // to fall back to a complete backup. + if errors.Is(err, kopia.ErrNotFound) { + logger.Ctx(ctx).Infow( + "backup missing, falling back to full backup", + "backup_id", bID) + + return ms, nil, false, nil + } + + return nil, nil, false, errors.Wrap(err, "retrieving prior backup data") } // if no detailsID exists for any of the complete manifests, we want @@ -362,7 +372,7 @@ func produceManifestsAndMetadata( "backup missing details ID, falling back to full backup", "backup_id", bID) - return ms, nil, nil + return ms, nil, false, nil } colls, err := collectMetadata(ctx, kw, man, metadataFiles, tenantID) @@ -370,13 +380,13 @@ func produceManifestsAndMetadata( // prior metadata isn't guaranteed to exist. // if it doesn't, we'll just have to do a // full backup for that data. - return nil, nil, err + return nil, nil, false, err } collections = append(collections, colls...) } - return ms, collections, err + return ms, collections, true, err } func collectMetadata( From bf0a01708a3562fda5c9babd06ceba231da26de9 Mon Sep 17 00:00:00 2001 From: Keepers Date: Mon, 9 Jan 2023 15:42:17 -0700 Subject: [PATCH 10/22] remove selector printable, list discreteOwner (#2034) ## Description The selectors printable code wasn't being used in any valid way, so that's out. Backup List printing now refers to the DiscreteOwner in the embedded selector as the ResourceOwner reference. Renamed the "Selectors" column in the List print table to "Resource Owner" to better match what should be contained in that field. ## Does this PR need a docs update or release note? 
- [x] :white_check_mark: Yes, it's included ## Type of change - [x] :bug: Bugfix - [x] :broom: Tech Debt/Cleanup ## Issue(s) * #1617 ## Test Plan - [x] :muscle: Manual - [x] :zap: Unit test --- src/cli/utils/exchange_test.go | 6 - .../exchange/data_collections_test.go | 6 +- src/pkg/backup/backup.go | 26 ++-- src/pkg/backup/backup_test.go | 13 +- src/pkg/repository/repository.go | 2 +- src/pkg/selectors/exchange.go | 6 - src/pkg/selectors/helpers_test.go | 4 - src/pkg/selectors/onedrive.go | 6 - src/pkg/selectors/scopes_test.go | 27 ---- src/pkg/selectors/selectors.go | 123 ------------------ src/pkg/selectors/selectors_test.go | 122 ----------------- src/pkg/selectors/sharepoint.go | 6 - website/docs/quickstart.md | 4 +- 13 files changed, 23 insertions(+), 328 deletions(-) diff --git a/src/cli/utils/exchange_test.go b/src/cli/utils/exchange_test.go index 5559e72bd..38311c267 100644 --- a/src/cli/utils/exchange_test.go +++ b/src/cli/utils/exchange_test.go @@ -325,42 +325,36 @@ func (suite *ExchangeUtilsSuite) TestAddExchangeInclude() { }{ { name: "no inputs", - resources: empty, folders: empty, items: empty, expectIncludeLen: 0, }, { name: "single inputs", - resources: single, folders: single, items: single, expectIncludeLen: 1, }, { name: "multi inputs", - resources: multi, folders: multi, items: multi, expectIncludeLen: 1, }, { name: "folder contains", - resources: empty, folders: containsOnly, items: empty, expectIncludeLen: 1, }, { name: "folder prefixes", - resources: empty, folders: prefixOnly, items: empty, expectIncludeLen: 1, }, { name: "folder prefixes and contains", - resources: empty, folders: containsAndPrefix, items: empty, expectIncludeLen: 2, diff --git a/src/internal/connector/exchange/data_collections_test.go b/src/internal/connector/exchange/data_collections_test.go index 8b2422bc5..3d69ba79c 100644 --- a/src/internal/connector/exchange/data_collections_test.go +++ b/src/internal/connector/exchange/data_collections_test.go @@ -525,14 +525,16 @@ 
func (suite *DataCollectionsIntegrationSuite) TestEventsSerializationRegression( expected: DefaultCalendar, scope: selectors.NewExchangeBackup(users).EventCalendars( []string{DefaultCalendar}, - selectors.PrefixMatch())[0], + selectors.PrefixMatch(), + )[0], }, { name: "Birthday Calendar", expected: "Birthdays", scope: selectors.NewExchangeBackup(users).EventCalendars( []string{"Birthdays"}, - selectors.PrefixMatch())[0], + selectors.PrefixMatch(), + )[0], }, } diff --git a/src/pkg/backup/backup.go b/src/pkg/backup/backup.go index cef46aa3e..6d73498eb 100644 --- a/src/pkg/backup/backup.go +++ b/src/pkg/backup/backup.go @@ -28,8 +28,8 @@ type Backup struct { // Status of the operation Status string `json:"status"` - // Selectors used in this operation - Selectors selectors.Selector `json:"selectors"` + // Selector used in this operation + Selector selectors.Selector `json:"selectors"` // stats are embedded so that the values appear as top-level properties stats.Errs @@ -58,7 +58,7 @@ func New( SnapshotID: snapshotID, DetailsID: detailsID, Status: status, - Selectors: selector, + Selector: selector, ReadWrites: rw, StartAndEndTime: se, } @@ -89,14 +89,13 @@ func PrintAll(ctx context.Context, bs []*Backup) { } type Printable struct { - ID model.StableID `json:"id"` - ErrorCount int `json:"errorCount"` - StartedAt time.Time `json:"started at"` - Status string `json:"status"` - Version string `json:"version"` - Selectors selectors.Printable `json:"selectors"` - BytesRead int64 `json:"bytesRead"` - BytesUploaded int64 `json:"bytesUploaded"` + ID model.StableID `json:"id"` + ErrorCount int `json:"errorCount"` + StartedAt time.Time `json:"started at"` + Status string `json:"status"` + Version string `json:"version"` + BytesRead int64 `json:"bytesRead"` + BytesUploaded int64 `json:"bytesUploaded"` } // MinimumPrintable reduces the Backup to its minimally printable details. 
@@ -107,7 +106,6 @@ func (b Backup) MinimumPrintable() any { StartedAt: b.StartedAt, Status: b.Status, Version: "0", - Selectors: b.Selectors.ToPrintable(), BytesRead: b.BytesRead, BytesUploaded: b.BytesUploaded, } @@ -120,7 +118,7 @@ func (b Backup) Headers() []string { "Started At", "ID", "Status", - "Selectors", + "Resource Owner", } } @@ -134,6 +132,6 @@ func (b Backup) Values() []string { common.FormatTabularDisplayTime(b.StartedAt), string(b.ID), status, - b.Selectors.ToPrintable().Resources(), + b.Selector.DiscreteOwner, } } diff --git a/src/pkg/backup/backup_test.go b/src/pkg/backup/backup_test.go index 2e2429b4d..1ddf4f4a2 100644 --- a/src/pkg/backup/backup_test.go +++ b/src/pkg/backup/backup_test.go @@ -25,7 +25,7 @@ func TestBackupSuite(t *testing.T) { } func stubBackup(t time.Time) backup.Backup { - sel := selectors.NewExchangeBackup(selectors.Any()) + sel := selectors.NewExchangeBackup([]string{"test"}) sel.Include(sel.AllData()) return backup.Backup{ @@ -39,7 +39,7 @@ func stubBackup(t time.Time) backup.Backup { SnapshotID: "snapshot", DetailsID: "details", Status: "status", - Selectors: sel.Selector, + Selector: sel.Selector, Errs: stats.Errs{ ReadErrors: errors.New("1"), WriteErrors: errors.New("1"), @@ -66,7 +66,7 @@ func (suite *BackupSuite) TestBackup_HeadersValues() { "Started At", "ID", "Status", - "Selectors", + "Resource Owner", } hs := b.Headers() assert.Equal(t, expectHs, hs) @@ -76,7 +76,7 @@ func (suite *BackupSuite) TestBackup_HeadersValues() { nowFmt, "id", "status (2 errors)", - selectors.All, + "test", } vs := b.Values() @@ -96,11 +96,6 @@ func (suite *BackupSuite) TestBackup_MinimumPrintable() { assert.Equal(t, 2, result.ErrorCount, "error count") assert.Equal(t, now, result.StartedAt, "started at") assert.Equal(t, b.Status, result.Status, "status") - - bselp := b.Selectors.ToPrintable() - assert.Equal(t, bselp, result.Selectors, "selectors") - assert.Equal(t, bselp.Resources(), result.Selectors.Resources(), "selector resources") - 
assert.Equal(t, b.BytesRead, result.BytesRead, "size") assert.Equal(t, b.BytesUploaded, result.BytesUploaded, "stored size") } diff --git a/src/pkg/repository/repository.go b/src/pkg/repository/repository.go index c309723e5..e7d2a3c56 100644 --- a/src/pkg/repository/repository.go +++ b/src/pkg/repository/repository.go @@ -306,7 +306,7 @@ func (r repository) BackupDetails(ctx context.Context, backupID string) (*detail deets, err := streamstore.New( r.dataLayer, r.Account.ID(), - b.Selectors.PathService()).ReadBackupDetails(ctx, dID) + b.Selector.PathService()).ReadBackupDetails(ctx, dID) if err != nil { return nil, nil, err } diff --git a/src/pkg/selectors/exchange.go b/src/pkg/selectors/exchange.go index 79f5de9f6..109ee2ea4 100644 --- a/src/pkg/selectors/exchange.go +++ b/src/pkg/selectors/exchange.go @@ -38,7 +38,6 @@ type ( var ( _ Reducer = &ExchangeRestore{} - _ printabler = &ExchangeRestore{} _ pathCategorier = &ExchangeRestore{} ) @@ -110,11 +109,6 @@ func (sr ExchangeRestore) SplitByResourceOwner(users []string) []ExchangeRestore return ss } -// Printable creates the minimized display of a selector, formatted for human readability. -func (s exchange) Printable() Printable { - return toPrintable[ExchangeScope](s.Selector) -} - // PathCategories produces the aggregation of discrete users described by each type of scope. 
func (s exchange) PathCategories() selectorPathCategories { return selectorPathCategories{ diff --git a/src/pkg/selectors/helpers_test.go b/src/pkg/selectors/helpers_test.go index 32587a85e..6b28ea443 100644 --- a/src/pkg/selectors/helpers_test.go +++ b/src/pkg/selectors/helpers_test.go @@ -160,10 +160,6 @@ func stubSelector(resourceOwners []string) mockSel { } } -func (s mockSel) Printable() Printable { - return toPrintable[mockScope](s.Selector) -} - // --------------------------------------------------------------------------- // helper funcs // --------------------------------------------------------------------------- diff --git a/src/pkg/selectors/onedrive.go b/src/pkg/selectors/onedrive.go index 22660db30..69cc0687a 100644 --- a/src/pkg/selectors/onedrive.go +++ b/src/pkg/selectors/onedrive.go @@ -37,7 +37,6 @@ type ( var ( _ Reducer = &OneDriveRestore{} - _ printabler = &OneDriveRestore{} _ pathCategorier = &OneDriveRestore{} ) @@ -109,11 +108,6 @@ func (s OneDriveRestore) SplitByResourceOwner(users []string) []OneDriveRestore return ss } -// Printable creates the minimized display of a selector, formatted for human readability. -func (s oneDrive) Printable() Printable { - return toPrintable[OneDriveScope](s.Selector) -} - // PathCategories produces the aggregation of discrete users described by each type of scope. 
func (s oneDrive) PathCategories() selectorPathCategories { return selectorPathCategories{ diff --git a/src/pkg/selectors/scopes_test.go b/src/pkg/selectors/scopes_test.go index 86139dcee..9758b1109 100644 --- a/src/pkg/selectors/scopes_test.go +++ b/src/pkg/selectors/scopes_test.go @@ -393,33 +393,6 @@ func (suite *SelectorScopesSuite) TestMatchesPathValues() { } } -func (suite *SelectorScopesSuite) TestAddToSet() { - t := suite.T() - set := []string{} - - set = addToSet(set, []string{}) - assert.Len(t, set, 0) - - set = addToSet(set, []string{"a"}) - assert.Len(t, set, 1) - assert.Equal(t, set[0], "a") - - set = addToSet(set, []string{"a"}) - assert.Len(t, set, 1) - - set = addToSet(set, []string{"a", "b"}) - assert.Len(t, set, 2) - assert.Equal(t, set[0], "a") - assert.Equal(t, set[1], "b") - - set = addToSet(set, []string{"c", "d"}) - assert.Len(t, set, 4) - assert.Equal(t, set[0], "a") - assert.Equal(t, set[1], "b") - assert.Equal(t, set[2], "c") - assert.Equal(t, set[3], "d") -} - func (suite *SelectorScopesSuite) TestClean() { table := []struct { name string diff --git a/src/pkg/selectors/selectors.go b/src/pkg/selectors/selectors.go index b423c0405..a51ed24bb 100644 --- a/src/pkg/selectors/selectors.go +++ b/src/pkg/selectors/selectors.go @@ -3,7 +3,6 @@ package selectors import ( "context" "encoding/json" - "fmt" "strings" "github.com/pkg/errors" @@ -318,128 +317,6 @@ func selectorAsIface[T any](s Selector) (T, error) { return t, err } -// --------------------------------------------------------------------------- -// Printing Selectors for Human Reading -// --------------------------------------------------------------------------- - -type Printable struct { - ResourceOwners []string `json:"resourceOwners"` - Service string `json:"service"` - Excludes map[string][]string `json:"excludes,omitempty"` - Filters map[string][]string `json:"filters,omitempty"` - Includes map[string][]string `json:"includes,omitempty"` -} - -type printabler interface { - 
Printable() Printable -} - -// ToPrintable creates the minimized display of a selector, formatted for human readability. -func (s Selector) ToPrintable() Printable { - p, err := selectorAsIface[printabler](s) - if err != nil { - return Printable{} - } - - return p.Printable() -} - -// toPrintable creates the minimized display of a selector, formatted for human readability. -func toPrintable[T scopeT](s Selector) Printable { - return Printable{ - ResourceOwners: s.DiscreteResourceOwners(), - Service: s.Service.String(), - Excludes: toResourceTypeMap[T](s.Excludes), - Filters: toResourceTypeMap[T](s.Filters), - Includes: toResourceTypeMap[T](s.Includes), - } -} - -// Resources generates a tabular-readable output of the resources in Printable. -// Only the first (arbitrarily picked) resource is displayed. All others are -// simply counted. If no inclusions exist, uses Filters. If no filters exist, -// defaults to "None". -// Resource refers to the top-level entity in the service. User for Exchange, -// Site for sharepoint, etc. -func (p Printable) Resources() string { - s := resourcesShortFormat(p.ResourceOwners) - - if len(s) == 0 { - s = "None" - } - - if s == AnyTgt { - s = "All" - } - - return s -} - -// returns a string with the resources in the map. Shortened to the first resource key, -// plus, if more exist, " (len-1 more)" -func resourcesShortFormat(ros []string) string { - switch len(ros) { - case 0: - return "" - case 1: - return ros[0] - default: - return fmt.Sprintf("%s (%d more)", ros[0], len(ros)-1) - } -} - -// Transforms the slice to a single map. -// Keys are each service's rootCat value. -// Values are the set of all scopeKeyDataTypes for the resource. 
-func toResourceTypeMap[T scopeT](s []scope) map[string][]string { - if len(s) == 0 { - return nil - } - - r := make(map[string][]string) - - for _, sc := range s { - t := T(sc) - res := sc[t.categorizer().rootCat().String()] - k := res.Target - - if res.Target == AnyTgt { - k = All - } - - for _, sk := range split(k) { - r[sk] = addToSet(r[sk], split(sc[scopeKeyDataType].Target)) - } - } - - return r -} - -// returns v if set is empty, -// unions v with set, otherwise. -func addToSet(set []string, v []string) []string { - if len(set) == 0 { - return v - } - - for _, vv := range v { - var matched bool - - for _, s := range set { - if vv == s { - matched = true - break - } - } - - if !matched { - set = append(set, vv) - } - } - - return set -} - // --------------------------------------------------------------------------- // helpers // --------------------------------------------------------------------------- diff --git a/src/pkg/selectors/selectors_test.go b/src/pkg/selectors/selectors_test.go index 55e6a12ae..08da905d5 100644 --- a/src/pkg/selectors/selectors_test.go +++ b/src/pkg/selectors/selectors_test.go @@ -1,7 +1,6 @@ package selectors import ( - "strings" "testing" "github.com/stretchr/testify/assert" @@ -32,127 +31,6 @@ func (suite *SelectorSuite) TestBadCastErr() { assert.Error(suite.T(), err) } -func (suite *SelectorSuite) TestPrintable() { - t := suite.T() - - sel := stubSelector(Any()) - p := sel.Printable() - - assert.Equal(t, sel.Service.String(), p.Service) - assert.Equal(t, 1, len(p.Excludes)) - assert.Equal(t, 1, len(p.Filters)) - assert.Equal(t, 1, len(p.Includes)) -} - -func (suite *SelectorSuite) TestPrintable_IncludedResources() { - table := []struct { - name string - resourceOwners []string - expect func(string) bool - reason string - }{ - { - name: "distinct", - resourceOwners: []string{"foo", "smarf", "fnords"}, - expect: func(s string) bool { - return strings.HasSuffix(s, "(2 more)") - }, - reason: "should end with (2 more)", - }, - { - 
name: "distinct", - resourceOwners: nil, - expect: func(s string) bool { - return strings.HasSuffix(s, "None") - }, - reason: "no resource owners should produce None", - }, - } - for _, test := range table { - suite.T().Run(test.name, func(t *testing.T) { - sel := stubSelector(test.resourceOwners) - - stubWithResource := func(resource string) scope { - ss := stubScope("") - ss[rootCatStub.String()] = filterize(scopeConfig{}, resource) - return scope(ss) - } - - sel.Includes = []scope{} - sel.Filters = []scope{} - - for _, ro := range test.resourceOwners { - sel.Includes = append(sel.Includes, stubWithResource(ro)) - sel.Filters = append(sel.Filters, stubWithResource(ro)) - } - }) - } -} - -func (suite *SelectorSuite) TestToResourceTypeMap() { - table := []struct { - name string - input []scope - expect map[string][]string - }{ - { - name: "single scope", - input: []scope{scope(stubScope(""))}, - expect: map[string][]string{ - "All": {rootCatStub.String()}, - }, - }, - { - name: "disjoint resources", - input: []scope{ - scope(stubScope("")), - { - rootCatStub.String(): filterize(scopeConfig{}, "smarf"), - scopeKeyDataType: filterize(scopeConfig{}, unknownCatStub.String()), - }, - }, - expect: map[string][]string{ - "All": {rootCatStub.String()}, - "smarf": {unknownCatStub.String()}, - }, - }, - { - name: "multiple resources", - input: []scope{ - scope(stubScope("")), - { - rootCatStub.String(): filterize(scopeConfig{}, join("smarf", "fnords")), - scopeKeyDataType: filterize(scopeConfig{}, unknownCatStub.String()), - }, - }, - expect: map[string][]string{ - "All": {rootCatStub.String()}, - "smarf": {unknownCatStub.String()}, - "fnords": {unknownCatStub.String()}, - }, - }, - { - name: "disjoint types", - input: []scope{ - scope(stubScope("")), - { - rootCatStub.String(): filterize(scopeConfig{}, AnyTgt), - scopeKeyDataType: filterize(scopeConfig{}, "other"), - }, - }, - expect: map[string][]string{ - "All": {rootCatStub.String(), "other"}, - }, - }, - } - for _, test 
:= range table { - suite.T().Run(test.name, func(t *testing.T) { - rtm := toResourceTypeMap[mockScope](test.input) - assert.Equal(t, test.expect, rtm) - }) - } -} - func (suite *SelectorSuite) TestResourceOwnersIn() { rootCat := rootCatStub.String() diff --git a/src/pkg/selectors/sharepoint.go b/src/pkg/selectors/sharepoint.go index 5701b005e..af4901ac6 100644 --- a/src/pkg/selectors/sharepoint.go +++ b/src/pkg/selectors/sharepoint.go @@ -35,7 +35,6 @@ type ( var ( _ Reducer = &SharePointRestore{} - _ printabler = &SharePointRestore{} _ pathCategorier = &SharePointRestore{} ) @@ -107,11 +106,6 @@ func (s SharePointRestore) SplitByResourceOwner(users []string) []SharePointRest return ss } -// Printable creates the minimized display of a selector, formatted for human readability. -func (s sharePoint) Printable() Printable { - return toPrintable[SharePointScope](s.Selector) -} - // PathCategories produces the aggregation of discrete users described by each type of scope. func (s sharePoint) PathCategories() selectorPathCategories { return selectorPathCategories{ diff --git a/website/docs/quickstart.md b/website/docs/quickstart.md index 89f3e69a1..d51af87b0 100644 --- a/website/docs/quickstart.md +++ b/website/docs/quickstart.md @@ -156,7 +156,7 @@ Your first backup may take some time if your mailbox is large. There will be progress indicators as the backup and, on completion, you should see output similar to: ```text - Started At ID Status Selectors + Started At ID Status Resource Owner 2022-10-20T18:28:53Z d8cd833a-fc63-4872-8981-de5c08e0661b Completed (0 errors) alice@contoso.com ``` @@ -195,7 +195,7 @@ docker run --env-file $HOME/.corso/corso.env \\ ```text - Started At ID Status Selectors + Started At ID Status Resource Owner 2022-10-20T18:28:53Z d8cd833a-fc63-4872-8981-de5c08e0661b Completed (0 errors) alice@contoso.com 2022-10-20T18:40:45Z 391ceeb3-b44d-4365-9a8e-8a8e1315b565 Completed (0 errors) alice@contoso.com ... 
From 43c2017492936b05af4f5bba50bce01897925510 Mon Sep 17 00:00:00 2001 From: ashmrtn Date: Mon, 9 Jan 2023 17:10:19 -0800 Subject: [PATCH 11/22] Don't use OwnersCats struct (#2055) ## Description Remove OwnersCats so only the Reason struct or tags pass information between BackupOp and kopia. Instead of having a separate struct (OwnersCats) to fetch previous snapshots, generate and use reasons. While this results in some repeated data, it cuts down on the number of distinct structs and simplifies some of the code for getting previous manifests. A future PR should create a shared function to create a service/cat tag given a reason. Only pass in a set of tags to BackupCollections. This pushes the onus of generating the tags for later snapshot lookups to BackupOp and creates a somewhat asymmetric interface as Reason is used for the lookup but tags are used for the backup. This will be updated later so that both paths use a common function to convert from Reason->tags. Despite that, it may result in a cleaner interface with kopia (depending on how far we want to push it) where tags become the main means of communication. ## Does this PR need a docs update or release note? 
- [ ] :white_check_mark: Yes, it's included - [ ] :clock1: Yes, but in a later PR - [x] :no_entry: No ## Type of change - [ ] :sunflower: Feature - [ ] :bug: Bugfix - [ ] :world_map: Documentation - [ ] :robot: Test - [ ] :computer: CI/Deployment - [x] :broom: Tech Debt/Cleanup ## Issue(s) * #1916 ## Test Plan - [x] :muscle: Manual - [x] :zap: Unit test - [ ] :green_heart: E2E --- src/internal/kopia/snapshot_manager.go | 90 +++++++++---------- src/internal/kopia/snapshot_manager_test.go | 62 +++++++++---- src/internal/kopia/wrapper.go | 11 +-- src/internal/kopia/wrapper_test.go | 87 +++++------------- src/internal/operations/backup.go | 56 +++++++----- .../operations/backup_integration_test.go | 12 +-- src/internal/operations/backup_test.go | 11 +-- src/internal/streamstore/streamstore.go | 2 - 8 files changed, 155 insertions(+), 176 deletions(-) diff --git a/src/internal/kopia/snapshot_manager.go b/src/internal/kopia/snapshot_manager.go index f82d742ed..fa2cb78b1 100644 --- a/src/internal/kopia/snapshot_manager.go +++ b/src/internal/kopia/snapshot_manager.go @@ -89,6 +89,10 @@ func MakeTagKV(k string) (string, string) { // tagsFromStrings returns a map[string]string with tags for all ownersCats // passed in. Currently uses placeholder values for each tag because there can // be multiple instances of resource owners and categories in a single snapshot. +// TODO(ashmrtn): Remove in future PR. +// +//nolint:unused +//lint:ignore U1000 will be removed in future PR. 
func tagsFromStrings(oc *OwnersCats) map[string]string { if oc == nil { return map[string]string{} @@ -188,25 +192,18 @@ func fetchPrevManifests( ctx context.Context, sm snapshotManager, foundMans map[manifest.ID]*ManifestEntry, - serviceCat ServiceCat, - resourceOwner string, + reason Reason, tags map[string]string, ) ([]*ManifestEntry, error) { tags = normalizeTagKVs(tags) - serviceCatKey, _ := MakeServiceCat(serviceCat.Service, serviceCat.Category) + serviceCatKey, _ := MakeServiceCat(reason.Service, reason.Category) allTags := normalizeTagKVs(map[string]string{ - serviceCatKey: "", - resourceOwner: "", + serviceCatKey: "", + reason.ResourceOwner: "", }) maps.Copy(allTags, tags) - reason := Reason{ - ResourceOwner: resourceOwner, - Service: serviceCat.Service, - Category: serviceCat.Category, - } - metas, err := sm.FindManifests(ctx, allTags) if err != nil { return nil, errors.Wrap(err, "fetching manifest metas by tag") @@ -275,57 +272,54 @@ func fetchPrevManifests( func fetchPrevSnapshotManifests( ctx context.Context, sm snapshotManager, - oc *OwnersCats, + reasons []Reason, tags map[string]string, ) []*ManifestEntry { - if oc == nil { - return nil - } - mans := map[manifest.ID]*ManifestEntry{} // For each serviceCat/resource owner pair that we will be backing up, see if // there's a previous incomplete snapshot and/or a previous complete snapshot // we can pass in. Can be expanded to return more than the most recent // snapshots, but may require more memory at runtime. 
- for _, serviceCat := range oc.ServiceCats { - for resourceOwner := range oc.ResourceOwners { - found, err := fetchPrevManifests( - ctx, - sm, - mans, - serviceCat, - resourceOwner, - tags, + for _, reason := range reasons { + found, err := fetchPrevManifests( + ctx, + sm, + mans, + reason, + tags, + ) + if err != nil { + logger.Ctx(ctx).Warnw( + "fetching previous snapshot manifests for service/category/resource owner", + "error", + err, + "service", + reason.Service.String(), + "category", + reason.Category.String(), ) - if err != nil { - logger.Ctx(ctx).Warnw( - "fetching previous snapshot manifests for service/category/resource owner", - "error", - err, - "service/category", - serviceCat, - ) - // Snapshot can still complete fine, just not as efficient. + // Snapshot can still complete fine, just not as efficient. + continue + } + + // If we found more recent snapshots then add them. + for _, m := range found { + man := mans[m.ID] + if man == nil { + mans[m.ID] = m continue } - // If we found more recent snapshots then add them. - for _, m := range found { - found := mans[m.ID] - if found == nil { - mans[m.ID] = m - continue - } - - // If the manifest already exists and it's incomplete then we should - // merge the reasons for consistency. This will become easier to handle - // once we update how checkpoint manifests are tagged. - if len(found.IncompleteReason) > 0 { - found.Reasons = append(found.Reasons, m.Reasons...) - } + // If the manifest already exists and it's incomplete then we should + // merge the reasons for consistency. This will become easier to handle + // once we update how checkpoint manifests are tagged. + if len(man.IncompleteReason) == 0 { + continue } + + man.Reasons = append(man.Reasons, m.Reasons...) 
} } diff --git a/src/internal/kopia/snapshot_manager_test.go b/src/internal/kopia/snapshot_manager_test.go index 9cf2246b3..324f471ad 100644 --- a/src/internal/kopia/snapshot_manager_test.go +++ b/src/internal/kopia/snapshot_manager_test.go @@ -43,25 +43,53 @@ var ( testUser2 = "user2" testUser3 = "user3" - testAllUsersAllCats = &OwnersCats{ - ResourceOwners: map[string]struct{}{ - testUser1: {}, - testUser2: {}, - testUser3: {}, + testAllUsersAllCats = []Reason{ + { + ResourceOwner: testUser1, + Service: path.ExchangeService, + Category: path.EmailCategory, }, - ServiceCats: map[string]ServiceCat{ - testMail: testMailServiceCat, - testEvents: testEventsServiceCat, + { + ResourceOwner: testUser1, + Service: path.ExchangeService, + Category: path.EventsCategory, + }, + { + ResourceOwner: testUser2, + Service: path.ExchangeService, + Category: path.EmailCategory, + }, + { + ResourceOwner: testUser2, + Service: path.ExchangeService, + Category: path.EventsCategory, + }, + { + ResourceOwner: testUser3, + Service: path.ExchangeService, + Category: path.EmailCategory, + }, + { + ResourceOwner: testUser3, + Service: path.ExchangeService, + Category: path.EventsCategory, }, } - testAllUsersMail = &OwnersCats{ - ResourceOwners: map[string]struct{}{ - testUser1: {}, - testUser2: {}, - testUser3: {}, + testAllUsersMail = []Reason{ + { + ResourceOwner: testUser1, + Service: path.ExchangeService, + Category: path.EmailCategory, }, - ServiceCats: map[string]ServiceCat{ - testMail: testMailServiceCat, + { + ResourceOwner: testUser2, + Service: path.ExchangeService, + Category: path.EmailCategory, + }, + { + ResourceOwner: testUser3, + Service: path.ExchangeService, + Category: path.EmailCategory, }, } ) @@ -176,7 +204,7 @@ func TestSnapshotFetchUnitSuite(t *testing.T) { func (suite *SnapshotFetchUnitSuite) TestFetchPrevSnapshots() { table := []struct { name string - input *OwnersCats + input []Reason data []manifestInfo // Use this to denote which manifests in data should be 
expected. Allows // defining data in a table while not repeating things between data and @@ -813,7 +841,7 @@ func (suite *SnapshotFetchUnitSuite) TestFetchPrevSnapshots_customTags() { table := []struct { name string - input *OwnersCats + input []Reason tags map[string]string // Use this to denote which manifests in data should be expected. Allows // defining data in a table while not repeating things between data and diff --git a/src/internal/kopia/wrapper.go b/src/internal/kopia/wrapper.go index 859892a1c..ab0331ff8 100644 --- a/src/internal/kopia/wrapper.go +++ b/src/internal/kopia/wrapper.go @@ -119,8 +119,6 @@ func (w Wrapper) BackupCollections( ctx context.Context, previousSnapshots []IncrementalBase, collections []data.Collection, - service path.ServiceType, - oc *OwnersCats, tags map[string]string, buildTreeWithBase bool, ) (*BackupStats, *details.Builder, map[string]path.Path, error) { @@ -158,7 +156,6 @@ func (w Wrapper) BackupCollections( ctx, previousSnapshots, dirTree, - oc, tags, progress, ) @@ -173,7 +170,6 @@ func (w Wrapper) makeSnapshotWithRoot( ctx context.Context, prevSnapEntries []IncrementalBase, root fs.Directory, - oc *OwnersCats, addlTags map[string]string, progress *corsoProgress, ) (*BackupStats, error) { @@ -231,7 +227,8 @@ func (w Wrapper) makeSnapshotWithRoot( return err } - man.Tags = tagsFromStrings(oc) + man.Tags = map[string]string{} + for k, v := range addlTags { mk, mv := MakeTagKV(k) @@ -442,12 +439,12 @@ func (w Wrapper) DeleteSnapshot( // normalized inside the func using MakeTagKV. 
func (w Wrapper) FetchPrevSnapshotManifests( ctx context.Context, - oc *OwnersCats, + reasons []Reason, tags map[string]string, ) ([]*ManifestEntry, error) { if w.c == nil { return nil, errors.WithStack(errNotConnected) } - return fetchPrevSnapshotManifests(ctx, w.c, oc, tags), nil + return fetchPrevSnapshotManifests(ctx, w.c, reasons, tags), nil } diff --git a/src/internal/kopia/wrapper_test.go b/src/internal/kopia/wrapper_test.go index e650723b6..947b4439e 100644 --- a/src/internal/kopia/wrapper_test.go +++ b/src/internal/kopia/wrapper_test.go @@ -207,39 +207,21 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections() { ), } - k, v := MakeServiceCat(path.ExchangeService, path.EmailCategory) - oc := &OwnersCats{ - ResourceOwners: map[string]struct{}{ - testUser: {}, - }, - ServiceCats: map[string]ServiceCat{ - k: v, - }, - } - - // tags that are expected to populate as a side effect - // of the backup process. - baseTagKeys := []string{ - serviceCatTag(suite.testPath1), - suite.testPath1.ResourceOwner(), - serviceCatTag(suite.testPath2), - suite.testPath2.ResourceOwner(), - } - - // tags that are supplied by the caller. - customTags := map[string]string{ + // tags that are supplied by the caller. This includes basic tags to support + // lookups and extra tags the caller may want to apply. 
+ tags := map[string]string{ "fnords": "smarf", "brunhilda": "", + + suite.testPath1.ResourceOwner(): "", + suite.testPath2.ResourceOwner(): "", + serviceCatTag(suite.testPath1): "", + serviceCatTag(suite.testPath2): "", } expectedTags := map[string]string{} - for _, k := range baseTagKeys { - tk, tv := MakeTagKV(k) - expectedTags[tk] = tv - } - - maps.Copy(expectedTags, normalizeTagKVs(customTags)) + maps.Copy(expectedTags, normalizeTagKVs(tags)) table := []struct { name string @@ -266,9 +248,7 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections() { suite.ctx, prevSnaps, collections, - path.ExchangeService, - oc, - customTags, + tags, true, ) assert.NoError(t, err) @@ -325,14 +305,9 @@ func (suite *KopiaIntegrationSuite) TestRestoreAfterCompressionChange() { w := &Wrapper{k} - mapK, mapV := MakeServiceCat(path.ExchangeService, path.EmailCategory) - oc := &OwnersCats{ - ResourceOwners: map[string]struct{}{ - testUser: {}, - }, - ServiceCats: map[string]ServiceCat{ - mapK: mapV, - }, + tags := map[string]string{ + testUser: "", + serviceCatString(path.ExchangeService, path.EmailCategory): "", } dc1 := mockconnector.NewMockExchangeCollection(suite.testPath1, 1) @@ -348,9 +323,7 @@ func (suite *KopiaIntegrationSuite) TestRestoreAfterCompressionChange() { ctx, nil, []data.Collection{dc1, dc2}, - path.ExchangeService, - oc, - nil, + tags, true, ) require.NoError(t, err) @@ -380,14 +353,9 @@ func (suite *KopiaIntegrationSuite) TestRestoreAfterCompressionChange() { func (suite *KopiaIntegrationSuite) TestBackupCollections_ReaderError() { t := suite.T() - k, v := MakeServiceCat(path.ExchangeService, path.EmailCategory) - oc := &OwnersCats{ - ResourceOwners: map[string]struct{}{ - testUser: {}, - }, - ServiceCats: map[string]ServiceCat{ - k: v, - }, + tags := map[string]string{ + testUser: "", + serviceCatString(path.ExchangeService, path.EmailCategory): "", } collections := []data.Collection{ @@ -431,9 +399,7 @@ func (suite *KopiaIntegrationSuite) 
TestBackupCollections_ReaderError() { suite.ctx, nil, collections, - path.ExchangeService, - oc, - nil, + tags, true, ) require.NoError(t, err) @@ -477,8 +443,6 @@ func (suite *KopiaIntegrationSuite) TestBackupCollectionsHandlesNoCollections() ctx, nil, test.collections, - path.UnknownService, - &OwnersCats{}, nil, true, ) @@ -622,23 +586,16 @@ func (suite *KopiaSimpleRepoIntegrationSuite) SetupTest() { collections = append(collections, collection) } - k, v := MakeServiceCat(path.ExchangeService, path.EmailCategory) - oc := &OwnersCats{ - ResourceOwners: map[string]struct{}{ - testUser: {}, - }, - ServiceCats: map[string]ServiceCat{ - k: v, - }, + tags := map[string]string{ + testUser: "", + serviceCatString(path.ExchangeService, path.EmailCategory): "", } stats, deets, _, err := suite.w.BackupCollections( suite.ctx, nil, collections, - path.ExchangeService, - oc, - nil, + tags, false, ) require.NoError(t, err) diff --git a/src/internal/operations/backup.go b/src/internal/operations/backup.go index 718b964bc..1c6f790e3 100644 --- a/src/internal/operations/backup.go +++ b/src/internal/operations/backup.go @@ -115,7 +115,7 @@ func (op *BackupOperation) Run(ctx context.Context) (err error) { tenantID = op.account.ID() startTime = time.Now() detailsStore = streamstore.New(op.kopia, tenantID, op.Selectors.PathService()) - oc = selectorToOwnersCats(op.Selectors) + reasons = selectorToReasons(op.Selectors) uib = useIncrementalBackup(op.Selectors, op.Options) ) @@ -151,7 +151,14 @@ func (op *BackupOperation) Run(ctx context.Context) (err error) { } }() - mans, mdColls, canUseMetaData, err := produceManifestsAndMetadata(ctx, op.kopia, op.store, oc, tenantID, uib) + mans, mdColls, canUseMetaData, err := produceManifestsAndMetadata( + ctx, + op.kopia, + op.store, + reasons, + tenantID, + uib, + ) if err != nil { opStats.readErr = errors.Wrap(err, "connecting to M365") return opStats.readErr @@ -173,8 +180,7 @@ func (op *BackupOperation) Run(ctx context.Context) (err error) { 
ctx, op.kopia, tenantID, - op.Selectors, - oc, + reasons, mans, cs, op.Results.BackupID, @@ -246,8 +252,6 @@ type backuper interface { ctx context.Context, bases []kopia.IncrementalBase, cs []data.Collection, - service path.ServiceType, - oc *kopia.OwnersCats, tags map[string]string, buildTreeWithBase bool, ) (*kopia.BackupStats, *details.Builder, map[string]path.Path, error) @@ -298,7 +302,7 @@ func produceManifestsAndMetadata( ctx context.Context, kw *kopia.Wrapper, sw *store.Wrapper, - oc *kopia.OwnersCats, + reasons []kopia.Reason, tenantID string, getMetadata bool, ) ([]*kopia.ManifestEntry, []data.Collection, bool, error) { @@ -309,7 +313,7 @@ func produceManifestsAndMetadata( ms, err := kw.FetchPrevSnapshotManifests( ctx, - oc, + reasons, map[string]string{kopia.TagBackupCategory: ""}) if err != nil { return nil, nil, false, err @@ -427,28 +431,28 @@ func collectMetadata( return dcs, nil } -func selectorToOwnersCats(sel selectors.Selector) *kopia.OwnersCats { +func selectorToReasons(sel selectors.Selector) []kopia.Reason { service := sel.PathService() - oc := &kopia.OwnersCats{ - ResourceOwners: map[string]struct{}{}, - ServiceCats: map[string]kopia.ServiceCat{}, - } - - oc.ResourceOwners[sel.DiscreteOwner] = struct{}{} + reasons := []kopia.Reason{} pcs, err := sel.PathCategories() if err != nil { - return &kopia.OwnersCats{} + // This is technically safe, it's just that the resulting backup won't be + // usable as a base for future incremental backups. 
+ return nil } for _, sl := range [][]path.CategoryType{pcs.Includes, pcs.Filters} { for _, cat := range sl { - k, v := kopia.MakeServiceCat(service, cat) - oc.ServiceCats[k] = v + reasons = append(reasons, kopia.Reason{ + ResourceOwner: sel.DiscreteOwner, + Service: service, + Category: cat, + }) } } - return oc + return reasons } func builderFromReason(tenant string, r kopia.Reason) (*path.Builder, error) { @@ -479,8 +483,7 @@ func consumeBackupDataCollections( ctx context.Context, bu backuper, tenantID string, - sel selectors.Selector, - oc *kopia.OwnersCats, + reasons []kopia.Reason, mans []*kopia.ManifestEntry, cs []data.Collection, backupID model.StableID, @@ -498,6 +501,15 @@ func consumeBackupDataCollections( kopia.TagBackupCategory: "", } + for _, reason := range reasons { + tags[reason.ResourceOwner] = "" + + // TODO(ashmrtn): Create a separate helper function to go from service/cat + // to a tag. + serviceCat, _ := kopia.MakeServiceCat(reason.Service, reason.Category) + tags[serviceCat] = "" + } + bases := make([]kopia.IncrementalBase, 0, len(mans)) for _, m := range mans { @@ -518,7 +530,7 @@ func consumeBackupDataCollections( }) } - return bu.BackupCollections(ctx, bases, cs, sel.PathService(), oc, tags, isIncremental) + return bu.BackupCollections(ctx, bases, cs, tags, isIncremental) } func matchesReason(reasons []kopia.Reason, p path.Path) bool { diff --git a/src/internal/operations/backup_integration_test.go b/src/internal/operations/backup_integration_test.go index 1b9e50c9d..6df6da829 100644 --- a/src/internal/operations/backup_integration_test.go +++ b/src/internal/operations/backup_integration_test.go @@ -176,16 +176,18 @@ func checkBackupIsInManifests( for _, category := range categories { t.Run(category.String(), func(t *testing.T) { var ( - sck, scv = kopia.MakeServiceCat(sel.PathService(), category) - oc = &kopia.OwnersCats{ - ResourceOwners: map[string]struct{}{resourceOwner: {}}, - ServiceCats: map[string]kopia.ServiceCat{sck: scv}, + 
reasons = []kopia.Reason{ + { + ResourceOwner: resourceOwner, + Service: sel.PathService(), + Category: category, + }, } tags = map[string]string{kopia.TagBackupCategory: ""} found bool ) - mans, err := kw.FetchPrevSnapshotManifests(ctx, oc, tags) + mans, err := kw.FetchPrevSnapshotManifests(ctx, reasons, tags) require.NoError(t, err) for _, man := range mans { diff --git a/src/internal/operations/backup_test.go b/src/internal/operations/backup_test.go index 382c55515..e858a319e 100644 --- a/src/internal/operations/backup_test.go +++ b/src/internal/operations/backup_test.go @@ -61,8 +61,6 @@ type mockBackuper struct { checkFunc func( bases []kopia.IncrementalBase, cs []data.Collection, - service path.ServiceType, - oc *kopia.OwnersCats, tags map[string]string, buildTreeWithBase bool, ) @@ -72,13 +70,11 @@ func (mbu mockBackuper) BackupCollections( ctx context.Context, bases []kopia.IncrementalBase, cs []data.Collection, - service path.ServiceType, - oc *kopia.OwnersCats, tags map[string]string, buildTreeWithBase bool, ) (*kopia.BackupStats, *details.Builder, map[string]path.Path, error) { if mbu.checkFunc != nil { - mbu.checkFunc(bases, cs, service, oc, tags, buildTreeWithBase) + mbu.checkFunc(bases, cs, tags, buildTreeWithBase) } return &kopia.BackupStats{}, &details.Builder{}, nil, nil @@ -673,8 +669,6 @@ func (suite *BackupOpSuite) TestBackupOperation_ConsumeBackupDataCollections_Pat manifest2 = &snapshot.Manifest{ ID: "id2", } - - sel = selectors.NewExchangeBackup([]string{resourceOwner}).Selector ) table := []struct { @@ -768,8 +762,6 @@ func (suite *BackupOpSuite) TestBackupOperation_ConsumeBackupDataCollections_Pat checkFunc: func( bases []kopia.IncrementalBase, cs []data.Collection, - service path.ServiceType, - oc *kopia.OwnersCats, tags map[string]string, buildTreeWithBase bool, ) { @@ -782,7 +774,6 @@ func (suite *BackupOpSuite) TestBackupOperation_ConsumeBackupDataCollections_Pat ctx, mbu, tenant, - sel, nil, test.inputMan, nil, diff --git 
a/src/internal/streamstore/streamstore.go b/src/internal/streamstore/streamstore.go index 285bff78f..92123194e 100644 --- a/src/internal/streamstore/streamstore.go +++ b/src/internal/streamstore/streamstore.go @@ -77,8 +77,6 @@ func (ss *streamStore) WriteBackupDetails( ctx, nil, []data.Collection{dc}, - ss.service, - nil, nil, false, ) From 8a613077add53acfc1c20f4427909ea971b21db6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Jan 2023 05:13:37 +0000 Subject: [PATCH 12/22] =?UTF-8?q?=E2=AC=86=EF=B8=8F=20Bump=20docusaurus-pl?= =?UTF-8?q?ugin-sass=20from=200.2.2=20to=200.2.3=20in=20/website=20(#2078)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- website/package-lock.json | 15 +++++++-------- website/package.json | 2 +- 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/website/package-lock.json b/website/package-lock.json index dbf55711f..f83bff32c 100644 --- a/website/package-lock.json +++ b/website/package-lock.json @@ -16,7 +16,7 @@ "animate.css": "^4.1.1", "clsx": "^1.2.1", "docusaurus-plugin-image-zoom": "^0.1.1", - "docusaurus-plugin-sass": "^0.2.2", + "docusaurus-plugin-sass": "^0.2.3", "feather-icons": "^4.29.0", "jarallax": "^2.1.3", "mdx-mermaid": "^1.3.2", @@ -6451,10 +6451,9 @@ } }, "node_modules/docusaurus-plugin-sass": { - "version": "0.2.2", - "resolved": "https://registry.npmjs.org/docusaurus-plugin-sass/-/docusaurus-plugin-sass-0.2.2.tgz", - "integrity": "sha512-ZZBpj3PrhGpYE2kAnkZB9NRwy/CDi4rGun1oec6PYR8YvGzqxYGtXvLgHi6FFbu8/N483klk8udqyYMh6Ted+A==", - "license": "MIT", + "version": "0.2.3", + "resolved": "https://registry.npmjs.org/docusaurus-plugin-sass/-/docusaurus-plugin-sass-0.2.3.tgz", + "integrity": "sha512-FbaE06K8NF8SPUYTwiG+83/jkXrwHJ/Afjqz3SUIGon6QvFwSSoKOcoxGQmUBnjTOk+deUONDx8jNWsegFJcBQ==", "dependencies": { "sass-loader": "^10.1.1" }, @@ -18997,9 +18996,9 @@ } }, "docusaurus-plugin-sass": { - "version": 
"0.2.2", - "resolved": "https://registry.npmjs.org/docusaurus-plugin-sass/-/docusaurus-plugin-sass-0.2.2.tgz", - "integrity": "sha512-ZZBpj3PrhGpYE2kAnkZB9NRwy/CDi4rGun1oec6PYR8YvGzqxYGtXvLgHi6FFbu8/N483klk8udqyYMh6Ted+A==", + "version": "0.2.3", + "resolved": "https://registry.npmjs.org/docusaurus-plugin-sass/-/docusaurus-plugin-sass-0.2.3.tgz", + "integrity": "sha512-FbaE06K8NF8SPUYTwiG+83/jkXrwHJ/Afjqz3SUIGon6QvFwSSoKOcoxGQmUBnjTOk+deUONDx8jNWsegFJcBQ==", "requires": { "sass-loader": "^10.1.1" } diff --git a/website/package.json b/website/package.json index 2c2262ae1..431c60abc 100644 --- a/website/package.json +++ b/website/package.json @@ -22,7 +22,7 @@ "animate.css": "^4.1.1", "clsx": "^1.2.1", "docusaurus-plugin-image-zoom": "^0.1.1", - "docusaurus-plugin-sass": "^0.2.2", + "docusaurus-plugin-sass": "^0.2.3", "feather-icons": "^4.29.0", "jarallax": "^2.1.3", "mdx-mermaid": "^1.3.2", From 527f1247f86cc3c487b537a9cf62ba54e8d5d655 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Jan 2023 05:18:21 +0000 Subject: [PATCH 13/22] =?UTF-8?q?=E2=AC=86=EF=B8=8F=20Bump=20postcss=20fro?= =?UTF-8?q?m=208.4.20=20to=208.4.21=20in=20/website=20(#2077)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- website/package-lock.json | 14 +++++++------- website/package.json | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/website/package-lock.json b/website/package-lock.json index f83bff32c..ef0577fb2 100644 --- a/website/package-lock.json +++ b/website/package-lock.json @@ -32,7 +32,7 @@ "@docusaurus/module-type-aliases": "2.2.0", "@iconify/react": "^4.0.1", "autoprefixer": "^10.4.13", - "postcss": "^8.4.20", + "postcss": "^8.4.21", "tailwindcss": "^3.2.4" } }, @@ -10076,9 +10076,9 @@ } }, "node_modules/postcss": { - "version": "8.4.20", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.20.tgz", - "integrity": 
"sha512-6Q04AXR1212bXr5fh03u8aAwbLxAQNGQ/Q1LNa0VfOI06ZAlhPHtQvE4OIdpj4kLThXilalPnmDSOD65DcHt+g==", + "version": "8.4.21", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.21.tgz", + "integrity": "sha512-tP7u/Sn/dVxK2NnruI4H9BG+x+Wxz6oeZ1cJ8P6G/PZY0IKk4k/63TDsQf2kQq3+qoJeLm2kIBUNlZe3zgb4Zg==", "funding": [ { "type": "opencollective", @@ -21383,9 +21383,9 @@ "integrity": "sha512-Wb4p1J4zyFTbM+u6WuO4XstYx4Ky9Cewe4DWrel7B0w6VVICvPwdOpotjzcf6eD8TsckVnIMNONQyPIUFOUbCQ==" }, "postcss": { - "version": "8.4.20", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.20.tgz", - "integrity": "sha512-6Q04AXR1212bXr5fh03u8aAwbLxAQNGQ/Q1LNa0VfOI06ZAlhPHtQvE4OIdpj4kLThXilalPnmDSOD65DcHt+g==", + "version": "8.4.21", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.21.tgz", + "integrity": "sha512-tP7u/Sn/dVxK2NnruI4H9BG+x+Wxz6oeZ1cJ8P6G/PZY0IKk4k/63TDsQf2kQq3+qoJeLm2kIBUNlZe3zgb4Zg==", "requires": { "nanoid": "^3.3.4", "picocolors": "^1.0.0", diff --git a/website/package.json b/website/package.json index 431c60abc..6c27fe784 100644 --- a/website/package.json +++ b/website/package.json @@ -38,7 +38,7 @@ "@docusaurus/module-type-aliases": "2.2.0", "@iconify/react": "^4.0.1", "autoprefixer": "^10.4.13", - "postcss": "^8.4.20", + "postcss": "^8.4.21", "tailwindcss": "^3.2.4" }, "browserslist": { From b03bcff4770e7bf035dc3da768fb135bce80406c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Jan 2023 06:08:24 +0000 Subject: [PATCH 14/22] =?UTF-8?q?=E2=AC=86=EF=B8=8F=20Bump=20github.com/aw?= =?UTF-8?q?s/aws-sdk-go=20from=201.44.175=20to=201.44.176=20in=20/src=20(#?= =?UTF-8?q?2079)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.44.175 to 1.44.176.
Release notes

Sourced from github.com/aws/aws-sdk-go's releases.

Release v1.44.176 (2023-01-09)

Service Client Updates

  • service/ecr-public: Updates service API and documentation
  • service/kendra-ranking: Adds new service
  • service/network-firewall: Updates service API and documentation
  • service/ram: Adds new service
  • service/workspaces-web: Updates service API and documentation
Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/aws/aws-sdk-go&package-manager=go_modules&previous-version=1.44.175&new-version=1.44.176)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) You can trigger a rebase of this PR by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- src/go.mod | 2 +- src/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/go.mod b/src/go.mod index dbcf2456c..e553e34c9 100644 --- a/src/go.mod +++ b/src/go.mod @@ -4,7 +4,7 @@ go 1.19 require ( github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0 - github.com/aws/aws-sdk-go v1.44.175 + github.com/aws/aws-sdk-go v1.44.176 github.com/aws/aws-xray-sdk-go v1.8.0 github.com/google/uuid v1.3.0 github.com/hashicorp/go-multierror v1.1.1 diff --git a/src/go.sum b/src/go.sum index 88697a8c4..35341bc8d 100644 --- a/src/go.sum +++ b/src/go.sum @@ -58,8 +58,8 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY3JY= github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= -github.com/aws/aws-sdk-go v1.44.175 h1:c0NzHHnPXV5kJoTUFQxFN5cUPpX1SxO635XnwL5/oIY= -github.com/aws/aws-sdk-go v1.44.175/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go v1.44.176 h1:mxcfI3IWHemX+5fEKt5uqIS/hdbaR7qzGfJYo5UyjJE= +github.com/aws/aws-sdk-go v1.44.176/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/aws/aws-xray-sdk-go v1.8.0 h1:0xncHZ588wB/geLjbM/esoW3FOEThWy2TJyb4VXfLFY= github.com/aws/aws-xray-sdk-go v1.8.0/go.mod h1:7LKe47H+j3evfvS1+q0wzpoaGXGrF3mUsfM+thqVO+A= github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= From 7c402edca4cf47a61004a8b51bdede47fa238b42 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Jan 2023 06:41:27 +0000 Subject: [PATCH 15/22] =?UTF-8?q?=E2=AC=86=EF=B8=8F=20Bump=20github.com/mi?= =?UTF-8?q?crosoft/kiota-abstractions-go=20from=200.15.1=20to=200.15.2=20i?= =?UTF-8?q?n=20/src=20(#2080)?= MIME-Version: 1.0 Content-Type: 
text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [github.com/microsoft/kiota-abstractions-go](https://github.com/microsoft/kiota-abstractions-go) from 0.15.1 to 0.15.2.
Release notes

Sourced from github.com/microsoft/kiota-abstractions-go's releases.

v0.15.2

Changed

  • Fix bug where empty string query parameters are added to the request.
Changelog

Sourced from github.com/microsoft/kiota-abstractions-go's changelog.

[0.15.2] - 2023-01-09

Changed

  • Fix bug where empty string query parameters are added to the request.
Commits
  • 6e5567e Merge pull request #52 from microsoft/fixEmptyQueryParams
  • bf44843 - Fix bug where empty string query parameters are added to the request.
  • 625813e Merge pull request #51 from microsoft/dependabot/go_modules/github.com/cjlapa...
  • 01b135c Bump github.com/cjlapao/common-go from 0.0.36 to 0.0.37
  • See full diff in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/microsoft/kiota-abstractions-go&package-manager=go_modules&previous-version=0.15.1&new-version=0.15.2)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) You can trigger a rebase of this PR by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--- src/go.mod | 2 +- src/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/go.mod b/src/go.mod index e553e34c9..ef900e954 100644 --- a/src/go.mod +++ b/src/go.mod @@ -9,7 +9,7 @@ require ( github.com/google/uuid v1.3.0 github.com/hashicorp/go-multierror v1.1.1 github.com/kopia/kopia v0.12.0 - github.com/microsoft/kiota-abstractions-go v0.15.1 + github.com/microsoft/kiota-abstractions-go v0.15.2 github.com/microsoft/kiota-authentication-azure-go v0.5.0 github.com/microsoft/kiota-http-go v0.11.0 github.com/microsoft/kiota-serialization-json-go v0.7.2 diff --git a/src/go.sum b/src/go.sum index 35341bc8d..5156b1c7e 100644 --- a/src/go.sum +++ b/src/go.sum @@ -257,8 +257,8 @@ github.com/matttproud/golang_protobuf_extensions v1.0.2 h1:hAHbPm5IJGijwng3PWk09 github.com/matttproud/golang_protobuf_extensions v1.0.2/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d h1:5PJl274Y63IEHC+7izoQE9x6ikvDFZS2mDVS3drnohI= github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= -github.com/microsoft/kiota-abstractions-go v0.15.1 h1:RgN8h9Z3AoFav1K4ODVSkmA8Es933hTlAWNesll1G5U= -github.com/microsoft/kiota-abstractions-go v0.15.1/go.mod h1:YqOu8G6bZTG0eCIWrmEny8PaF750uaw7tLFac4psf+4= +github.com/microsoft/kiota-abstractions-go v0.15.2 h1:Pp78BbqPvkF2mAMH0Ph37ymwfSH7uF9iYfY1fZ8g630= +github.com/microsoft/kiota-abstractions-go v0.15.2/go.mod h1:RT/s9sCzg49i4iO7e2qhyWmX+DlJDgC0P+Wp8fKQQfo= github.com/microsoft/kiota-authentication-azure-go v0.5.0 h1:RVA/tTgMnDIN3u4qPZtvYvVRsQDOFkd3yvi6KXjZJko= github.com/microsoft/kiota-authentication-azure-go v0.5.0/go.mod h1:1Io6h+88FlDRmrajdjSnXPz8oyObUVjNuQZLhrF9kQk= github.com/microsoft/kiota-http-go v0.11.0 h1:0K0y/wZcTvEEX2Xdj5tngJqknqYQpArLdtjB/fo88Dc= From 634780a18999d1e974710df195b27672d59a0d2c Mon Sep 17 00:00:00 2001 From: Abin Simon Date: Tue, 10 Jan 2023 21:54:29 +0530 Subject: [PATCH 16/22] 
Prevent config-file default repeating in help (#2083) ## Description Previously the help output looked like: ``` --config-file string config file location (default is $HOME/.corso.toml) (default "$HOME/.corso.toml") ``` ## Does this PR need a docs update or release note? - [ ] :white_check_mark: Yes, it's included - [ ] :clock1: Yes, but in a later PR - [x] :no_entry: No ## Type of change - [ ] :sunflower: Feature - [ ] :bug: Bugfix - [ ] :world_map: Documentation - [ ] :robot: Test - [ ] :computer: CI/Deployment - [x] :broom: Tech Debt/Cleanup ## Issue(s) * # ## Test Plan - [x] :muscle: Manual - [ ] :zap: Unit test - [ ] :green_heart: E2E --- src/cli/config/config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cli/config/config.go b/src/cli/config/config.go index 297b7afc3..dbcd21422 100644 --- a/src/cli/config/config.go +++ b/src/cli/config/config.go @@ -68,7 +68,7 @@ func AddConfigFlags(cmd *cobra.Command) { &configFilePathFlag, "config-file", displayDefaultFP, - "config file location (default is $HOME/.corso.toml)") + "config file location") } // --------------------------------------------------------------------------------------------------------- From daaa2588670f5219306f7d3a3bc788fb5622fbc8 Mon Sep 17 00:00:00 2001 From: Keepers Date: Tue, 10 Jan 2023 11:31:49 -0700 Subject: [PATCH 17/22] details and restore exit on missing backup (#2066) ## Description Ensures that the details and restore commands exit without attempting further processing if a backupID doesn't point to either a valid backup or backup details, and that the user is appropriately notified. ## Does this PR need a docs update or release note? 
- [x] :no_entry: No ## Type of change - [x] :broom: Tech Debt/Cleanup ## Issue(s) * #1877 ## Test Plan - [x] :green_heart: E2E --- src/cli/backup/exchange.go | 5 ----- src/cli/backup/onedrive.go | 5 ----- src/cli/backup/sharepoint.go | 5 ----- src/cli/restore/exchange.go | 16 ++++++++-------- src/cli/restore/onedrive.go | 16 ++++++++-------- src/cli/restore/sharepoint.go | 16 ++++++++-------- src/cli/utils/exchange.go | 1 - src/cli/utils/onedrive.go | 1 - 8 files changed, 24 insertions(+), 41 deletions(-) diff --git a/src/cli/backup/exchange.go b/src/cli/backup/exchange.go index 1501ddcb8..63a1f6393 100644 --- a/src/cli/backup/exchange.go +++ b/src/cli/backup/exchange.go @@ -508,11 +508,6 @@ func runDetailsExchangeCmd( sel := utils.IncludeExchangeRestoreDataSelectors(opts) utils.FilterExchangeRestoreInfoSelectors(sel, opts) - // if no selector flags were specified, get all data in the service. - if len(sel.Scopes()) == 0 { - sel.Include(sel.AllData()) - } - return sel.Reduce(ctx, d), nil } diff --git a/src/cli/backup/onedrive.go b/src/cli/backup/onedrive.go index 29c18d7e7..3454a9b3b 100644 --- a/src/cli/backup/onedrive.go +++ b/src/cli/backup/onedrive.go @@ -399,11 +399,6 @@ func runDetailsOneDriveCmd( sel := utils.IncludeOneDriveRestoreDataSelectors(opts) utils.FilterOneDriveRestoreInfoSelectors(sel, opts) - // if no selector flags were specified, get all data in the service. - if len(sel.Scopes()) == 0 { - sel.Include(sel.AllData()) - } - return sel.Reduce(ctx, d), nil } diff --git a/src/cli/backup/sharepoint.go b/src/cli/backup/sharepoint.go index cd0c96029..d5624380f 100644 --- a/src/cli/backup/sharepoint.go +++ b/src/cli/backup/sharepoint.go @@ -483,10 +483,5 @@ func runDetailsSharePointCmd( sel := utils.IncludeSharePointRestoreDataSelectors(opts) utils.FilterSharePointRestoreInfoSelectors(sel, opts) - // if no selector flags were specified, get all data in the service. 
- if len(sel.Scopes()) == 0 { - sel.Include(sel.AllData()) - } - return sel.Reduce(ctx, d), nil } diff --git a/src/cli/restore/exchange.go b/src/cli/restore/exchange.go index 1f781a970..6d67fa03e 100644 --- a/src/cli/restore/exchange.go +++ b/src/cli/restore/exchange.go @@ -10,6 +10,7 @@ import ( . "github.com/alcionai/corso/src/cli/print" "github.com/alcionai/corso/src/cli/utils" "github.com/alcionai/corso/src/internal/common" + "github.com/alcionai/corso/src/internal/kopia" "github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/repository" ) @@ -215,23 +216,22 @@ func restoreExchangeCmd(cmd *cobra.Command, args []string) error { defer utils.CloseRepo(ctx, r) + dest := control.DefaultRestoreDestination(common.SimpleDateTime) + sel := utils.IncludeExchangeRestoreDataSelectors(opts) utils.FilterExchangeRestoreInfoSelectors(sel, opts) - // if no selector flags were specified, get all data in the service. - if len(sel.Scopes()) == 0 { - sel.Include(sel.AllData()) - } - - restoreDest := control.DefaultRestoreDestination(common.SimpleDateTime) - - ro, err := r.NewRestore(ctx, backupID, sel.Selector, restoreDest) + ro, err := r.NewRestore(ctx, backupID, sel.Selector, dest) if err != nil { return Only(ctx, errors.Wrap(err, "Failed to initialize Exchange restore")) } ds, err := ro.Run(ctx) if err != nil { + if errors.Is(err, kopia.ErrNotFound) { + return Only(ctx, errors.Errorf("Backup or backup details missing for id %s", backupID)) + } + return Only(ctx, errors.Wrap(err, "Failed to run Exchange restore")) } diff --git a/src/cli/restore/onedrive.go b/src/cli/restore/onedrive.go index a4ed087c8..0932461a3 100644 --- a/src/cli/restore/onedrive.go +++ b/src/cli/restore/onedrive.go @@ -10,6 +10,7 @@ import ( . 
"github.com/alcionai/corso/src/cli/print" "github.com/alcionai/corso/src/cli/utils" "github.com/alcionai/corso/src/internal/common" + "github.com/alcionai/corso/src/internal/kopia" "github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/repository" ) @@ -152,23 +153,22 @@ func restoreOneDriveCmd(cmd *cobra.Command, args []string) error { defer utils.CloseRepo(ctx, r) + dest := control.DefaultRestoreDestination(common.SimpleDateTimeOneDrive) + sel := utils.IncludeOneDriveRestoreDataSelectors(opts) utils.FilterOneDriveRestoreInfoSelectors(sel, opts) - // if no selector flags were specified, get all data in the service. - if len(sel.Scopes()) == 0 { - sel.Include(sel.AllData()) - } - - restoreDest := control.DefaultRestoreDestination(common.SimpleDateTimeOneDrive) - - ro, err := r.NewRestore(ctx, backupID, sel.Selector, restoreDest) + ro, err := r.NewRestore(ctx, backupID, sel.Selector, dest) if err != nil { return Only(ctx, errors.Wrap(err, "Failed to initialize OneDrive restore")) } ds, err := ro.Run(ctx) if err != nil { + if errors.Is(err, kopia.ErrNotFound) { + return Only(ctx, errors.Errorf("Backup or backup details missing for id %s", backupID)) + } + return Only(ctx, errors.Wrap(err, "Failed to run OneDrive restore")) } diff --git a/src/cli/restore/sharepoint.go b/src/cli/restore/sharepoint.go index f93c712b3..1c3a81e89 100644 --- a/src/cli/restore/sharepoint.go +++ b/src/cli/restore/sharepoint.go @@ -10,6 +10,7 @@ import ( . 
"github.com/alcionai/corso/src/cli/print" "github.com/alcionai/corso/src/cli/utils" "github.com/alcionai/corso/src/internal/common" + "github.com/alcionai/corso/src/internal/kopia" "github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/repository" ) @@ -153,23 +154,22 @@ func restoreSharePointCmd(cmd *cobra.Command, args []string) error { defer utils.CloseRepo(ctx, r) + dest := control.DefaultRestoreDestination(common.SimpleDateTime) + sel := utils.IncludeSharePointRestoreDataSelectors(opts) utils.FilterSharePointRestoreInfoSelectors(sel, opts) - // if no selector flags were specified, get all data in the service. - if len(sel.Scopes()) == 0 { - sel.Include(sel.AllData()) - } - - restoreDest := control.DefaultRestoreDestination(common.SimpleDateTimeOneDrive) - - ro, err := r.NewRestore(ctx, backupID, sel.Selector, restoreDest) + ro, err := r.NewRestore(ctx, backupID, sel.Selector, dest) if err != nil { return Only(ctx, errors.Wrap(err, "Failed to initialize SharePoint restore")) } ds, err := ro.Run(ctx) if err != nil { + if errors.Is(err, kopia.ErrNotFound) { + return Only(ctx, errors.Errorf("Backup or backup details missing for id %s", backupID)) + } + return Only(ctx, errors.Wrap(err, "Failed to run SharePoint restore")) } diff --git a/src/cli/utils/exchange.go b/src/cli/utils/exchange.go index 42894d6e5..bbc360dfb 100644 --- a/src/cli/utils/exchange.go +++ b/src/cli/utils/exchange.go @@ -138,7 +138,6 @@ func IncludeExchangeRestoreDataSelectors(opts ExchangeOpts) *selectors.ExchangeR // either scope the request to a set of users if lc+lcf+le+lef+lev+lec == 0 { sel.Include(sel.AllData()) - return sel } diff --git a/src/cli/utils/onedrive.go b/src/cli/utils/onedrive.go index 7d4ed47df..4951a7b63 100644 --- a/src/cli/utils/onedrive.go +++ b/src/cli/utils/onedrive.go @@ -84,7 +84,6 @@ func IncludeOneDriveRestoreDataSelectors(opts OneDriveOpts) *selectors.OneDriveR // is specified if lp+ln == 0 { sel.Include(sel.AllData()) - return sel } From 
85b5877987221c09df196226a7536965716f2a3a Mon Sep 17 00:00:00 2001 From: Danny Date: Tue, 10 Jan 2023 13:56:38 -0500 Subject: [PATCH 18/22] Backup Details: Onedrive details expanded (#2085) ## Description Updates the amount of metadata available for OneDrive backups. This does not affect the way `OneDrive` information is displayed to the user. ## Does this PR need a docs update or release note? - [x] :no_entry: No ## Type of change - [x] :sunflower: Feature ## Issue(s) *closes #2074 ## Test Plan - [x] :zap: Unit test --- src/internal/connector/onedrive/item.go | 22 +++++++++++++++------- src/pkg/backup/details/details.go | 1 + 2 files changed, 16 insertions(+), 7 deletions(-) diff --git a/src/internal/connector/onedrive/item.go b/src/internal/connector/onedrive/item.go index 186e7a6a3..7f377d2cd 100644 --- a/src/internal/connector/onedrive/item.go +++ b/src/internal/connector/onedrive/item.go @@ -94,7 +94,7 @@ func driveItemReader( // doesn't have its size value updated as a side effect of creation, // and kiota drops any SetSize update. 
func oneDriveItemInfo(di models.DriveItemable, itemSize int64) *details.OneDriveInfo { - email := "" + var email, parent string if di.GetCreatedBy().GetUser() != nil { // User is sometimes not available when created via some @@ -105,13 +105,21 @@ func oneDriveItemInfo(di models.DriveItemable, itemSize int64) *details.OneDrive } } + if di.GetParentReference() != nil { + if di.GetParentReference().GetName() != nil { + // EndPoint is not always populated from external apps + parent = *di.GetParentReference().GetName() + } + } + return &details.OneDriveInfo{ - ItemType: details.OneDriveItem, - ItemName: *di.GetName(), - Created: *di.GetCreatedDateTime(), - Modified: *di.GetLastModifiedDateTime(), - Size: itemSize, - Owner: email, + ItemType: details.OneDriveItem, + ItemName: *di.GetName(), + Created: *di.GetCreatedDateTime(), + Modified: *di.GetLastModifiedDateTime(), + DriveName: parent, + Size: itemSize, + Owner: email, } } diff --git a/src/pkg/backup/details/details.go b/src/pkg/backup/details/details.go index 27075ec72..798c12ce2 100644 --- a/src/pkg/backup/details/details.go +++ b/src/pkg/backup/details/details.go @@ -520,6 +520,7 @@ func (i *SharePointInfo) UpdateParentPath(newPath path.Path) error { type OneDriveInfo struct { Created time.Time `json:"created,omitempty"` ItemName string `json:"itemName"` + DriveName string `json:"driveName"` ItemType ItemType `json:"itemType,omitempty"` Modified time.Time `json:"modified,omitempty"` Owner string `json:"owner,omitempty"` From 778a60774a854a91d40d7f24b8fa2a272a183490 Mon Sep 17 00:00:00 2001 From: ashmrtn Date: Tue, 10 Jan 2023 11:45:02 -0800 Subject: [PATCH 19/22] Fix unit tests that fail when not running with env vars (#2089) ## Description Refactoring and new code additions have led to some unit tests that fail unless testing is run with the proper env vars. 
Fixup code to either skip those tests or provide them with the proper information so they pass * some tests moved from unit tests to integration tests * function to get credentials with bogus info for unit tests * skip some tests if env vars not passed ## Does this PR need a docs update or release note? - [ ] :white_check_mark: Yes, it's included - [ ] :clock1: Yes, but in a later PR - [x] :no_entry: No ## Type of change - [ ] :sunflower: Feature - [ ] :bug: Bugfix - [ ] :world_map: Documentation - [ ] :robot: Test - [ ] :computer: CI/Deployment - [x] :broom: Tech Debt/Cleanup ## Issue(s) * closes #2052 ## Test Plan - [ ] :muscle: Manual - [x] :zap: Unit test - [ ] :green_heart: E2E --- .../exchange/service_iterators_test.go | 4 ++-- src/internal/connector/graph/service_test.go | 2 +- .../graph_connector_disconnected_test.go | 24 ------------------- .../connector/graph_connector_test.go | 23 ++++++++++++++++++ .../connector/sharepoint/collection_test.go | 6 +++++ src/internal/tester/account.go | 16 +++++++++++++ 6 files changed, 48 insertions(+), 27 deletions(-) diff --git a/src/internal/connector/exchange/service_iterators_test.go b/src/internal/connector/exchange/service_iterators_test.go index 75a03c3d7..d7cbee9e0 100644 --- a/src/internal/connector/exchange/service_iterators_test.go +++ b/src/internal/connector/exchange/service_iterators_test.go @@ -95,7 +95,7 @@ func TestServiceIteratorsSuite(t *testing.T) { } func (suite *ServiceIteratorsSuite) SetupSuite() { - a := tester.NewM365Account(suite.T()) + a := tester.NewMockM365Account(suite.T()) m365, err := a.M365Config() require.NoError(suite.T(), err) suite.creds = m365 @@ -343,7 +343,7 @@ func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections() { func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections_incrementals() { var ( userID = "user_id" - tenantID = tester.M365TenantID(suite.T()) + tenantID = suite.creds.AzureTenantID cat = path.EmailCategory // doesn't matter 
which one we use, qp = graph.QueryParams{ Category: cat, diff --git a/src/internal/connector/graph/service_test.go b/src/internal/connector/graph/service_test.go index 737419c88..ee8c6bc29 100644 --- a/src/internal/connector/graph/service_test.go +++ b/src/internal/connector/graph/service_test.go @@ -24,7 +24,7 @@ func TestGraphUnitSuite(t *testing.T) { func (suite *GraphUnitSuite) SetupSuite() { t := suite.T() - a := tester.NewM365Account(t) + a := tester.NewMockM365Account(t) m365, err := a.M365Config() require.NoError(t, err) diff --git a/src/internal/connector/graph_connector_disconnected_test.go b/src/internal/connector/graph_connector_disconnected_test.go index f6f47d8cf..f7f583ebd 100644 --- a/src/internal/connector/graph_connector_disconnected_test.go +++ b/src/internal/connector/graph_connector_disconnected_test.go @@ -167,30 +167,6 @@ func (suite *DisconnectedGraphConnectorSuite) TestGraphConnector_ErrorChecking() } } -func (suite *DisconnectedGraphConnectorSuite) TestRestoreFailsBadService() { - ctx, flush := tester.NewContext() - defer flush() - - var ( - t = suite.T() - acct = tester.NewM365Account(t) - dest = tester.DefaultTestRestoreDestination() - gc = GraphConnector{wg: &sync.WaitGroup{}} - sel = selectors.Selector{ - Service: selectors.ServiceUnknown, - } - ) - - deets, err := gc.RestoreDataCollections(ctx, acct, sel, dest, nil) - assert.Error(t, err) - assert.NotNil(t, deets) - - status := gc.AwaitStatus() - assert.Equal(t, 0, status.ObjectCount) - assert.Equal(t, 0, status.FolderCount) - assert.Equal(t, 0, status.Successful) -} - func (suite *DisconnectedGraphConnectorSuite) TestVerifyBackupInputs() { users := []string{ "elliotReid@someHospital.org", diff --git a/src/internal/connector/graph_connector_test.go b/src/internal/connector/graph_connector_test.go index 3b6de3586..d635ee6f9 100644 --- a/src/internal/connector/graph_connector_test.go +++ b/src/internal/connector/graph_connector_test.go @@ -212,6 +212,29 @@ func (suite 
*GraphConnectorIntegrationSuite) TestSetTenantSites() { } } +func (suite *GraphConnectorIntegrationSuite) TestRestoreFailsBadService() { + ctx, flush := tester.NewContext() + defer flush() + + var ( + t = suite.T() + acct = tester.NewM365Account(t) + dest = tester.DefaultTestRestoreDestination() + sel = selectors.Selector{ + Service: selectors.ServiceUnknown, + } + ) + + deets, err := suite.connector.RestoreDataCollections(ctx, acct, sel, dest, nil) + assert.Error(t, err) + assert.NotNil(t, deets) + + status := suite.connector.AwaitStatus() + assert.Equal(t, 0, status.ObjectCount) + assert.Equal(t, 0, status.FolderCount) + assert.Equal(t, 0, status.Successful) +} + func (suite *GraphConnectorIntegrationSuite) TestEmptyCollections() { dest := tester.DefaultTestRestoreDestination() table := []struct { diff --git a/src/internal/connector/sharepoint/collection_test.go b/src/internal/connector/sharepoint/collection_test.go index f69366f67..f049ab26f 100644 --- a/src/internal/connector/sharepoint/collection_test.go +++ b/src/internal/connector/sharepoint/collection_test.go @@ -25,6 +25,12 @@ type SharePointCollectionSuite struct { } func TestSharePointCollectionSuite(t *testing.T) { + tester.RunOnAny( + t, + tester.CorsoCITests, + tester.CorsoGraphConnectorTests, + tester.CorsoGraphConnectorSharePointTests) + suite.Run(t, new(SharePointCollectionSuite)) } diff --git a/src/internal/tester/account.go b/src/internal/tester/account.go index 67776bba6..5f82be938 100644 --- a/src/internal/tester/account.go +++ b/src/internal/tester/account.go @@ -31,3 +31,19 @@ func NewM365Account(t *testing.T) account.Account { return acc } + +func NewMockM365Account(t *testing.T) account.Account { + acc, err := account.NewAccount( + account.ProviderM365, + account.M365Config{ + M365: credentials.M365{ + AzureClientID: "12345", + AzureClientSecret: "abcde", + }, + AzureTenantID: "09876", + }, + ) + require.NoError(t, err, "initializing mock account") + + return acc +} From 
e88553d073d9073ad211078c1c9901f9982f068a Mon Sep 17 00:00:00 2001 From: ashmrtn Date: Tue, 10 Jan 2023 12:32:17 -0800 Subject: [PATCH 20/22] Refactor the Reason struct and how tags for lookups are generated (#2087) ## Description Solidify the interface between BackupOp and KopiaWrapper by making Reason the de facto way to pass/generate tags for things. The Reason struct now includes a function to generate tags for that instance. KopiaWrapper also now hides the fact that it prefixes tags from other components ## Does this PR need a docs update or release note? - [ ] :white_check_mark: Yes, it's included - [ ] :clock1: Yes, but in a later PR - [x] :no_entry: No ## Type of change - [ ] :sunflower: Feature - [ ] :bug: Bugfix - [ ] :world_map: Documentation - [ ] :robot: Test - [ ] :computer: CI/Deployment - [x] :broom: Tech Debt/Cleanup ## Issue(s) * #1916 ## Test Plan - [ ] :muscle: Manual - [x] :zap: Unit test - [x] :green_heart: E2E --- src/internal/kopia/snapshot_manager.go | 38 ++++++++---- src/internal/kopia/snapshot_manager_test.go | 15 ++--- src/internal/kopia/wrapper.go | 2 +- src/internal/kopia/wrapper_test.go | 58 ++++++++++++++----- src/internal/operations/backup.go | 21 +++---- .../operations/backup_integration_test.go | 8 ++- src/internal/operations/backup_test.go | 10 +++- 7 files changed, 101 insertions(+), 51 deletions(-) diff --git a/src/internal/kopia/snapshot_manager.go b/src/internal/kopia/snapshot_manager.go index fa2cb78b1..55cf7b8a6 100644 --- a/src/internal/kopia/snapshot_manager.go +++ b/src/internal/kopia/snapshot_manager.go @@ -32,6 +32,13 @@ type Reason struct { Category path.CategoryType } +func (r Reason) TagKeys() []string { + return []string{ + r.ResourceOwner, + serviceCatString(r.Service, r.Category), + } +} + type ManifestEntry struct { *snapshot.Manifest // Reason contains the ResourceOwners and Service/Categories that caused this @@ -44,6 +51,13 @@ type ManifestEntry struct { Reasons []Reason } +func (me ManifestEntry) GetTag(key 
string) (string, bool) { + k, _ := makeTagKV(key) + v, ok := me.Tags[k] + + return v, ok +} + type snapshotManager interface { FindManifests( ctx context.Context, @@ -68,6 +82,10 @@ func MakeServiceCat(s path.ServiceType, c path.CategoryType) (string, ServiceCat return serviceCatString(s, c), ServiceCat{s, c} } +// TODO(ashmrtn): Remove in a future PR. +// +//nolint:unused +//lint:ignore U1000 will be removed in future PR. func serviceCatTag(p path.Path) string { return serviceCatString(p.Service(), p.Category()) } @@ -82,7 +100,7 @@ func serviceCatString(s path.ServiceType, c path.CategoryType) string { // Returns the normalized Key plus a default value. If you're embedding a // key-only tag, the returned default value msut be used instead of an // empty string. -func MakeTagKV(k string) (string, string) { +func makeTagKV(k string) (string, string) { return userTagPrefix + k, defaultTagValue } @@ -101,12 +119,12 @@ func tagsFromStrings(oc *OwnersCats) map[string]string { res := make(map[string]string, len(oc.ServiceCats)+len(oc.ResourceOwners)) for k := range oc.ServiceCats { - tk, tv := MakeTagKV(k) + tk, tv := makeTagKV(k) res[tk] = tv } for k := range oc.ResourceOwners { - tk, tv := MakeTagKV(k) + tk, tv := makeTagKV(k) res[tk] = tv } @@ -195,14 +213,14 @@ func fetchPrevManifests( reason Reason, tags map[string]string, ) ([]*ManifestEntry, error) { - tags = normalizeTagKVs(tags) - serviceCatKey, _ := MakeServiceCat(reason.Service, reason.Category) - allTags := normalizeTagKVs(map[string]string{ - serviceCatKey: "", - reason.ResourceOwner: "", - }) + allTags := map[string]string{} + + for _, k := range reason.TagKeys() { + allTags[k] = "" + } maps.Copy(allTags, tags) + allTags = normalizeTagKVs(allTags) metas, err := sm.FindManifests(ctx, allTags) if err != nil { @@ -335,7 +353,7 @@ func normalizeTagKVs(tags map[string]string) map[string]string { t2 := make(map[string]string, len(tags)) for k, v := range tags { - mk, mv := MakeTagKV(k) + mk, mv := makeTagKV(k) 
if len(v) == 0 { v = mv diff --git a/src/internal/kopia/snapshot_manager_test.go b/src/internal/kopia/snapshot_manager_test.go index 324f471ad..31c06ba33 100644 --- a/src/internal/kopia/snapshot_manager_test.go +++ b/src/internal/kopia/snapshot_manager_test.go @@ -29,16 +29,9 @@ var ( testID2 = manifest.ID("snap2") testID3 = manifest.ID("snap3") - testMail = path.ExchangeService.String() + path.EmailCategory.String() - testMailServiceCat = ServiceCat{ - Service: path.ExchangeService, - Category: path.EmailCategory, - } - testEvents = path.ExchangeService.String() + path.EventsCategory.String() - testEventsServiceCat = ServiceCat{ - Service: path.ExchangeService, - Category: path.EventsCategory, - } + testMail = path.ExchangeService.String() + path.EmailCategory.String() + testEvents = path.ExchangeService.String() + path.EventsCategory.String() + testUser1 = "user1" testUser2 = "user2" testUser3 = "user3" @@ -115,7 +108,7 @@ func newManifestInfo( structTags := make(map[string]struct{}, len(tags)) for _, t := range tags { - tk, _ := MakeTagKV(t) + tk, _ := makeTagKV(t) structTags[tk] = struct{}{} } diff --git a/src/internal/kopia/wrapper.go b/src/internal/kopia/wrapper.go index ab0331ff8..3e8f5b338 100644 --- a/src/internal/kopia/wrapper.go +++ b/src/internal/kopia/wrapper.go @@ -230,7 +230,7 @@ func (w Wrapper) makeSnapshotWithRoot( man.Tags = map[string]string{} for k, v := range addlTags { - mk, mv := MakeTagKV(k) + mk, mv := makeTagKV(k) if len(v) == 0 { v = mv diff --git a/src/internal/kopia/wrapper_test.go b/src/internal/kopia/wrapper_test.go index 947b4439e..07fa78567 100644 --- a/src/internal/kopia/wrapper_test.go +++ b/src/internal/kopia/wrapper_test.go @@ -212,11 +212,25 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections() { tags := map[string]string{ "fnords": "smarf", "brunhilda": "", + } - suite.testPath1.ResourceOwner(): "", - suite.testPath2.ResourceOwner(): "", - serviceCatTag(suite.testPath1): "", - serviceCatTag(suite.testPath2): "", + 
reasons := []Reason{ + { + ResourceOwner: suite.testPath1.ResourceOwner(), + Service: suite.testPath1.Service(), + Category: suite.testPath1.Category(), + }, + { + ResourceOwner: suite.testPath2.ResourceOwner(), + Service: suite.testPath2.Service(), + Category: suite.testPath2.Category(), + }, + } + + for _, r := range reasons { + for _, k := range r.TagKeys() { + tags[k] = "" + } } expectedTags := map[string]string{} @@ -305,9 +319,15 @@ func (suite *KopiaIntegrationSuite) TestRestoreAfterCompressionChange() { w := &Wrapper{k} - tags := map[string]string{ - testUser: "", - serviceCatString(path.ExchangeService, path.EmailCategory): "", + tags := map[string]string{} + reason := Reason{ + ResourceOwner: testUser, + Service: path.ExchangeService, + Category: path.EmailCategory, + } + + for _, k := range reason.TagKeys() { + tags[k] = "" } dc1 := mockconnector.NewMockExchangeCollection(suite.testPath1, 1) @@ -353,9 +373,15 @@ func (suite *KopiaIntegrationSuite) TestRestoreAfterCompressionChange() { func (suite *KopiaIntegrationSuite) TestBackupCollections_ReaderError() { t := suite.T() - tags := map[string]string{ - testUser: "", - serviceCatString(path.ExchangeService, path.EmailCategory): "", + tags := map[string]string{} + reason := Reason{ + ResourceOwner: testUser, + Service: path.ExchangeService, + Category: path.EmailCategory, + } + + for _, k := range reason.TagKeys() { + tags[k] = "" } collections := []data.Collection{ @@ -586,9 +612,15 @@ func (suite *KopiaSimpleRepoIntegrationSuite) SetupTest() { collections = append(collections, collection) } - tags := map[string]string{ - testUser: "", - serviceCatString(path.ExchangeService, path.EmailCategory): "", + tags := map[string]string{} + reason := Reason{ + ResourceOwner: testUser, + Service: path.ExchangeService, + Category: path.EmailCategory, + } + + for _, k := range reason.TagKeys() { + tags[k] = "" } stats, deets, _, err := suite.w.BackupCollections( diff --git a/src/internal/operations/backup.go 
b/src/internal/operations/backup.go index 1c6f790e3..856335a85 100644 --- a/src/internal/operations/backup.go +++ b/src/internal/operations/backup.go @@ -344,10 +344,8 @@ func produceManifestsAndMetadata( continue } - tk, _ := kopia.MakeTagKV(kopia.TagBackupID) - - bID := man.Tags[tk] - if len(bID) == 0 { + bID, ok := man.GetTag(kopia.TagBackupID) + if !ok { return nil, nil, false, errors.New("snapshot manifest missing backup ID") } @@ -502,12 +500,9 @@ func consumeBackupDataCollections( } for _, reason := range reasons { - tags[reason.ResourceOwner] = "" - - // TODO(ashmrtn): Create a separate helper function to go from service/cat - // to a tag. - serviceCat, _ := kopia.MakeServiceCat(reason.Service, reason.Category) - tags[serviceCat] = "" + for _, k := range reason.TagKeys() { + tags[k] = "" + } } bases := make([]kopia.IncrementalBase, 0, len(mans)) @@ -568,8 +563,10 @@ func mergeDetails( continue } - k, _ := kopia.MakeTagKV(kopia.TagBackupID) - bID := man.Tags[k] + bID, ok := man.GetTag(kopia.TagBackupID) + if !ok { + return errors.Errorf("no backup ID in snapshot manifest with ID %s", man.ID) + } _, baseDeets, err := getBackupAndDetailsFromID( ctx, diff --git a/src/internal/operations/backup_integration_test.go b/src/internal/operations/backup_integration_test.go index 6df6da829..5876cd331 100644 --- a/src/internal/operations/backup_integration_test.go +++ b/src/internal/operations/backup_integration_test.go @@ -191,8 +191,12 @@ func checkBackupIsInManifests( require.NoError(t, err) for _, man := range mans { - tk, _ := kopia.MakeTagKV(kopia.TagBackupID) - if man.Tags[tk] == string(bo.Results.BackupID) { + bID, ok := man.GetTag(kopia.TagBackupID) + if !assert.Truef(t, ok, "snapshot manifest %s missing backup ID tag", man.ID) { + continue + } + + if bID == string(bo.Results.BackupID) { found = true break } diff --git a/src/internal/operations/backup_test.go b/src/internal/operations/backup_test.go index e858a319e..90c5aa50e 100644 --- 
a/src/internal/operations/backup_test.go +++ b/src/internal/operations/backup_test.go @@ -276,14 +276,20 @@ func makeDetailsEntry( return res } +// TODO(ashmrtn): This should belong to some code that lives in the kopia +// package that is only compiled when running tests. +func makeKopiaTagKey(k string) string { + return "tag:" + k +} + func makeManifest(t *testing.T, backupID model.StableID, incompleteReason string) *snapshot.Manifest { t.Helper() - backupIDTagKey, _ := kopia.MakeTagKV(kopia.TagBackupID) + tagKey := makeKopiaTagKey(kopia.TagBackupID) return &snapshot.Manifest{ Tags: map[string]string{ - backupIDTagKey: string(backupID), + tagKey: string(backupID), }, IncompleteReason: incompleteReason, } From 1174a99e84494be7f2af71c7ca93362da205e22f Mon Sep 17 00:00:00 2001 From: Keepers Date: Tue, 10 Jan 2023 14:31:20 -0700 Subject: [PATCH 21/22] refactors the exchange fetchIDs iters (#1906) ## Description Transitions the fetchIDs service iterators to a set of interfaces to consolidate code across multiple nearly identical variations of "fetch id for directory". This was originally written in another PR (1780), then separated out to isolate changes. 
## Type of change - [x] :hamster: Trivial/Minor ## Test Plan - [x] :green_heart: E2E --- src/internal/connector/exchange/api/api.go | 12 -- .../connector/exchange/api/contacts.go | 102 ++++++-------- src/internal/connector/exchange/api/events.go | 79 ++++++----- src/internal/connector/exchange/api/mail.go | 95 ++++++------- src/internal/connector/exchange/api/shared.go | 126 ++++++++++++++++++ 5 files changed, 249 insertions(+), 165 deletions(-) create mode 100644 src/internal/connector/exchange/api/shared.go diff --git a/src/internal/connector/exchange/api/api.go b/src/internal/connector/exchange/api/api.go index acaeb9c5e..3fd15409f 100644 --- a/src/internal/connector/exchange/api/api.go +++ b/src/internal/connector/exchange/api/api.go @@ -87,18 +87,6 @@ func newService(creds account.M365Config) (*graph.Service, error) { return graph.NewService(adapter), nil } -func (c Client) Contacts() Contacts { - return Contacts{c} -} - -func (c Client) Events() Events { - return Events{c} -} - -func (c Client) Mail() Mail { - return Mail{c} -} - // --------------------------------------------------------------------------- // helper funcs // --------------------------------------------------------------------------- diff --git a/src/internal/connector/exchange/api/contacts.go b/src/internal/connector/exchange/api/contacts.go index 0356d88ef..ab41ff4b3 100644 --- a/src/internal/connector/exchange/api/contacts.go +++ b/src/internal/connector/exchange/api/contacts.go @@ -17,6 +17,11 @@ import ( // controller // --------------------------------------------------------------------------- +func (c Client) Contacts() Contacts { + return Contacts{c} +} + +// Contacts is an interface-compliant provider of the client. 
type Contacts struct { Client } @@ -147,6 +152,30 @@ func (c Contacts) EnumerateContainers( return errs.ErrorOrNil() } +// --------------------------------------------------------------------------- +// item pager +// --------------------------------------------------------------------------- + +var _ itemPager = &contactPager{} + +type contactPager struct { + gs graph.Servicer + builder *users.ItemContactFoldersItemContactsDeltaRequestBuilder + options *users.ItemContactFoldersItemContactsDeltaRequestBuilderGetRequestConfiguration +} + +func (p *contactPager) getPage(ctx context.Context) (pageLinker, error) { + return p.builder.Get(ctx, p.options) +} + +func (p *contactPager) setNext(nextLink string) { + p.builder = users.NewItemContactFoldersItemContactsDeltaRequestBuilder(nextLink, p.gs.Adapter()) +} + +func (p *contactPager) valuesIn(pl pageLinker) ([]getIDAndAddtler, error) { + return toValues[models.Contactable](pl) +} + func (c Contacts) GetAddedAndRemovedItemIDs( ctx context.Context, user, directoryID, oldDelta string, @@ -158,9 +187,6 @@ func (c Contacts) GetAddedAndRemovedItemIDs( var ( errs *multierror.Error - ids []string - removedIDs []string - deltaURL string resetDelta bool ) @@ -169,63 +195,17 @@ func (c Contacts) GetAddedAndRemovedItemIDs( return nil, nil, DeltaUpdate{}, errors.Wrap(err, "getting query options") } - getIDs := func(builder *users.ItemContactFoldersItemContactsDeltaRequestBuilder) error { - for { - resp, err := builder.Get(ctx, options) - if err != nil { - if err := graph.IsErrDeletedInFlight(err); err != nil { - return err - } - - if err := graph.IsErrInvalidDelta(err); err != nil { - return err - } - - return errors.Wrap(err, support.ConnectorStackErrorTrace(err)) - } - - for _, item := range resp.GetValue() { - if item.GetId() == nil { - errs = multierror.Append( - errs, - errors.Errorf("item with nil ID in folder %s", directoryID), - ) - - // TODO(ashmrtn): Handle fail-fast. 
- continue - } - - if item.GetAdditionalData()[graph.AddtlDataRemoved] == nil { - ids = append(ids, *item.GetId()) - } else { - removedIDs = append(removedIDs, *item.GetId()) - } - } - - delta := resp.GetOdataDeltaLink() - if delta != nil && len(*delta) > 0 { - deltaURL = *delta - } - - nextLink := resp.GetOdataNextLink() - if nextLink == nil || len(*nextLink) == 0 { - break - } - - builder = users.NewItemContactFoldersItemContactsDeltaRequestBuilder(*nextLink, service.Adapter()) - } - - return nil - } - if len(oldDelta) > 0 { - err := getIDs(users.NewItemContactFoldersItemContactsDeltaRequestBuilder(oldDelta, service.Adapter())) + builder := users.NewItemContactFoldersItemContactsDeltaRequestBuilder(oldDelta, service.Adapter()) + pgr := &contactPager{service, builder, options} + + added, removed, deltaURL, err := getItemsAddedAndRemovedFromContainer(ctx, pgr) // note: happy path, not the error condition if err == nil { - return ids, removedIDs, DeltaUpdate{deltaURL, false}, errs.ErrorOrNil() + return added, removed, DeltaUpdate{deltaURL, false}, errs.ErrorOrNil() } // only return on error if it is NOT a delta issue. - // otherwise we'll retry the call with the regular builder + // on bad deltas we retry the call with the regular builder if graph.IsErrInvalidDelta(err) == nil { return nil, nil, DeltaUpdate{}, err } @@ -234,15 +214,13 @@ func (c Contacts) GetAddedAndRemovedItemIDs( errs = nil } - builder := service.Client(). - UsersById(user). - ContactFoldersById(directoryID). - Contacts(). 
- Delta() + builder := service.Client().UsersById(user).ContactFoldersById(directoryID).Contacts().Delta() + pgr := &contactPager{service, builder, options} - if err := getIDs(builder); err != nil { + added, removed, deltaURL, err := getItemsAddedAndRemovedFromContainer(ctx, pgr) + if err != nil { return nil, nil, DeltaUpdate{}, err } - return ids, removedIDs, DeltaUpdate{deltaURL, resetDelta}, errs.ErrorOrNil() + return added, removed, DeltaUpdate{deltaURL, resetDelta}, errs.ErrorOrNil() } diff --git a/src/internal/connector/exchange/api/events.go b/src/internal/connector/exchange/api/events.go index bd6a68893..bd37a361a 100644 --- a/src/internal/connector/exchange/api/events.go +++ b/src/internal/connector/exchange/api/events.go @@ -18,6 +18,11 @@ import ( // controller // --------------------------------------------------------------------------- +func (c Client) Events() Events { + return Events{c} +} + +// Events is an interface-compliant provider of the client. type Events struct { Client } @@ -124,6 +129,39 @@ func (c Events) EnumerateContainers( return errs.ErrorOrNil() } +// --------------------------------------------------------------------------- +// item pager +// --------------------------------------------------------------------------- + +type eventWrapper struct { + models.EventCollectionResponseable +} + +func (ew eventWrapper) GetOdataDeltaLink() *string { + return nil +} + +var _ itemPager = &eventPager{} + +type eventPager struct { + gs graph.Servicer + builder *users.ItemCalendarsItemEventsRequestBuilder + options *users.ItemCalendarsItemEventsRequestBuilderGetRequestConfiguration +} + +func (p *eventPager) getPage(ctx context.Context) (pageLinker, error) { + resp, err := p.builder.Get(ctx, p.options) + return eventWrapper{resp}, err +} + +func (p *eventPager) setNext(nextLink string) { + p.builder = users.NewItemCalendarsItemEventsRequestBuilder(nextLink, p.gs.Adapter()) +} + +func (p *eventPager) valuesIn(pl pageLinker) ([]getIDAndAddtler, 
error) { + return toValues[models.Eventable](pl) +} + func (c Events) GetAddedAndRemovedItemIDs( ctx context.Context, user, calendarID, oldDelta string, @@ -133,10 +171,7 @@ func (c Events) GetAddedAndRemovedItemIDs( return nil, nil, DeltaUpdate{}, err } - var ( - errs *multierror.Error - ids []string - ) + var errs *multierror.Error options, err := optionsForEventsByCalendar([]string{"id"}) if err != nil { @@ -144,41 +179,15 @@ func (c Events) GetAddedAndRemovedItemIDs( } builder := service.Client().UsersById(user).CalendarsById(calendarID).Events() + pgr := &eventPager{service, builder, options} - for { - resp, err := builder.Get(ctx, options) - if err != nil { - if err := graph.IsErrDeletedInFlight(err); err != nil { - return nil, nil, DeltaUpdate{}, err - } - - return nil, nil, DeltaUpdate{}, errors.Wrap(err, support.ConnectorStackErrorTrace(err)) - } - - for _, item := range resp.GetValue() { - if item.GetId() == nil { - errs = multierror.Append( - errs, - errors.Errorf("event with nil ID in calendar %s", calendarID), - ) - - // TODO(ashmrtn): Handle fail-fast. - continue - } - - ids = append(ids, *item.GetId()) - } - - nextLink := resp.GetOdataNextLink() - if nextLink == nil || len(*nextLink) == 0 { - break - } - - builder = users.NewItemCalendarsItemEventsRequestBuilder(*nextLink, service.Adapter()) + added, _, _, err := getItemsAddedAndRemovedFromContainer(ctx, pgr) + if err != nil { + return nil, nil, DeltaUpdate{}, err } // Events don't have a delta endpoint so just return an empty string. 
- return ids, nil, DeltaUpdate{}, errs.ErrorOrNil() + return added, nil, DeltaUpdate{}, errs.ErrorOrNil() } // --------------------------------------------------------------------------- diff --git a/src/internal/connector/exchange/api/mail.go b/src/internal/connector/exchange/api/mail.go index bc10bf53b..bf6739384 100644 --- a/src/internal/connector/exchange/api/mail.go +++ b/src/internal/connector/exchange/api/mail.go @@ -17,6 +17,11 @@ import ( // controller // --------------------------------------------------------------------------- +func (c Client) Mail() Mail { + return Mail{c} +} + +// Mail is an interface-compliant provider of the client. type Mail struct { Client } @@ -145,6 +150,30 @@ func (c Mail) EnumerateContainers( return errs.ErrorOrNil() } +// --------------------------------------------------------------------------- +// item pager +// --------------------------------------------------------------------------- + +var _ itemPager = &mailPager{} + +type mailPager struct { + gs graph.Servicer + builder *users.ItemMailFoldersItemMessagesDeltaRequestBuilder + options *users.ItemMailFoldersItemMessagesDeltaRequestBuilderGetRequestConfiguration +} + +func (p *mailPager) getPage(ctx context.Context) (pageLinker, error) { + return p.builder.Get(ctx, p.options) +} + +func (p *mailPager) setNext(nextLink string) { + p.builder = users.NewItemMailFoldersItemMessagesDeltaRequestBuilder(nextLink, p.gs.Adapter()) +} + +func (p *mailPager) valuesIn(pl pageLinker) ([]getIDAndAddtler, error) { + return toValues[models.Messageable](pl) +} + func (c Mail) GetAddedAndRemovedItemIDs( ctx context.Context, user, directoryID, oldDelta string, @@ -156,8 +185,6 @@ func (c Mail) GetAddedAndRemovedItemIDs( var ( errs *multierror.Error - ids []string - removedIDs []string deltaURL string resetDelta bool ) @@ -167,63 +194,17 @@ func (c Mail) GetAddedAndRemovedItemIDs( return nil, nil, DeltaUpdate{}, errors.Wrap(err, "getting query options") } - getIDs := func(builder 
*users.ItemMailFoldersItemMessagesDeltaRequestBuilder) error { - for { - resp, err := builder.Get(ctx, options) - if err != nil { - if err := graph.IsErrDeletedInFlight(err); err != nil { - return err - } - - if err := graph.IsErrInvalidDelta(err); err != nil { - return err - } - - return errors.Wrap(err, support.ConnectorStackErrorTrace(err)) - } - - for _, item := range resp.GetValue() { - if item.GetId() == nil { - errs = multierror.Append( - errs, - errors.Errorf("item with nil ID in folder %s", directoryID), - ) - - // TODO(ashmrtn): Handle fail-fast. - continue - } - - if item.GetAdditionalData()[graph.AddtlDataRemoved] == nil { - ids = append(ids, *item.GetId()) - } else { - removedIDs = append(removedIDs, *item.GetId()) - } - } - - delta := resp.GetOdataDeltaLink() - if delta != nil && len(*delta) > 0 { - deltaURL = *delta - } - - nextLink := resp.GetOdataNextLink() - if nextLink == nil || len(*nextLink) == 0 { - break - } - - builder = users.NewItemMailFoldersItemMessagesDeltaRequestBuilder(*nextLink, service.Adapter()) - } - - return nil - } - if len(oldDelta) > 0 { - err := getIDs(users.NewItemMailFoldersItemMessagesDeltaRequestBuilder(oldDelta, service.Adapter())) + builder := users.NewItemMailFoldersItemMessagesDeltaRequestBuilder(oldDelta, service.Adapter()) + pgr := &mailPager{service, builder, options} + + added, removed, deltaURL, err := getItemsAddedAndRemovedFromContainer(ctx, pgr) // note: happy path, not the error condition if err == nil { - return ids, removedIDs, DeltaUpdate{deltaURL, false}, errs.ErrorOrNil() + return added, removed, DeltaUpdate{deltaURL, false}, errs.ErrorOrNil() } // only return on error if it is NOT a delta issue. 
- // otherwise we'll retry the call with the regular builder + // on bad deltas we retry the call with the regular builder if graph.IsErrInvalidDelta(err) == nil { return nil, nil, DeltaUpdate{}, err } @@ -233,10 +214,12 @@ func (c Mail) GetAddedAndRemovedItemIDs( } builder := service.Client().UsersById(user).MailFoldersById(directoryID).Messages().Delta() + pgr := &mailPager{service, builder, options} - if err := getIDs(builder); err != nil { + added, removed, deltaURL, err := getItemsAddedAndRemovedFromContainer(ctx, pgr) + if err != nil { return nil, nil, DeltaUpdate{}, err } - return ids, removedIDs, DeltaUpdate{deltaURL, resetDelta}, errs.ErrorOrNil() + return added, removed, DeltaUpdate{deltaURL, resetDelta}, errs.ErrorOrNil() } diff --git a/src/internal/connector/exchange/api/shared.go b/src/internal/connector/exchange/api/shared.go new file mode 100644 index 000000000..c77e21fa8 --- /dev/null +++ b/src/internal/connector/exchange/api/shared.go @@ -0,0 +1,126 @@ +package api + +import ( + "context" + + "github.com/pkg/errors" + + "github.com/alcionai/corso/src/internal/connector/graph" + "github.com/alcionai/corso/src/internal/connector/support" +) + +// --------------------------------------------------------------------------- +// generic handler for paging item ids in a container +// --------------------------------------------------------------------------- + +type itemPager interface { + getPage(context.Context) (pageLinker, error) + setNext(string) + valuesIn(pageLinker) ([]getIDAndAddtler, error) +} + +type pageLinker interface { + GetOdataDeltaLink() *string + GetOdataNextLink() *string +} + +type getIDAndAddtler interface { + GetId() *string + GetAdditionalData() map[string]any +} + +// uses a models interface compliant with { GetValue() []T } +// to transform its results into a slice of getIDer interfaces.
+// Generics used here to handle the variation of msoft interfaces
+// that all _almost_ comply with GetValue, but all return a different
+// interface.
+func toValues[T any](a any) ([]getIDAndAddtler, error) {
+	gv, ok := a.(interface{ GetValue() []T })
+	if !ok {
+		return nil, errors.Errorf("response of type [%T] does not comply with the GetValue() interface", a)
+	}
+
+	items := gv.GetValue()
+	r := make([]getIDAndAddtler, 0, len(items))
+
+	for _, item := range items {
+		var a any = item
+
+		ri, ok := a.(getIDAndAddtler)
+		if !ok {
+			return nil, errors.Errorf("item of type [%T] does not comply with the getIDAndAddtler interface", item)
+		}
+
+		r = append(r, ri)
+	}
+
+	return r, nil
+}
+
+// generic controller for retrieving all item ids in a container.
+func getItemsAddedAndRemovedFromContainer(
+	ctx context.Context,
+	pager itemPager,
+) ([]string, []string, string, error) {
+	var (
+		addedIDs   = []string{}
+		removedIDs = []string{}
+		deltaURL   string
+	)
+
+	for {
+		// get the next page of data, check for standard errors
+		resp, err := pager.getPage(ctx)
+		if err != nil {
+			if err := graph.IsErrDeletedInFlight(err); err != nil {
+				return nil, nil, deltaURL, err
+			}
+
+			if err := graph.IsErrInvalidDelta(err); err != nil {
+				return nil, nil, deltaURL, err
+			}
+
+			return nil, nil, deltaURL, errors.Wrap(err, support.ConnectorStackErrorTrace(err))
+		}
+
+		// each category type responds with a different interface, but all
+		// of them comply with GetValue, which is where we'll get our item data.
+		items, err := pager.valuesIn(resp)
+		if err != nil {
+			return nil, nil, "", err
+		}
+
+		// iterate through the items in the page
+		for _, item := range items {
+			// if the additional data contains a `@removed` key, the value will either
+			// be 'changed' or 'deleted'. We don't really care about the cause: both
+			// cases are handled the same way in storage.
+ if item.GetAdditionalData()[graph.AddtlDataRemoved] == nil { + addedIDs = append(addedIDs, *item.GetId()) + } else { + removedIDs = append(removedIDs, *item.GetId()) + } + } + + // the deltaLink is kind of like a cursor for overall data state. + // once we run through pages of nextLinks, the last query will + // produce a deltaLink instead (if supported), which we'll use on + // the next backup to only get the changes since this run. + delta := resp.GetOdataDeltaLink() + if delta != nil && len(*delta) > 0 { + deltaURL = *delta + } + + // the nextLink is our page cursor within this query. + // if we have more data to retrieve, we'll have a + // nextLink instead of a deltaLink. + nextLink := resp.GetOdataNextLink() + if nextLink == nil || len(*nextLink) == 0 { + break + } + + pager.setNext(*nextLink) + } + + return addedIDs, removedIDs, deltaURL, nil +} From 580934b0693c0856b19443c74e0e1cceb2805779 Mon Sep 17 00:00:00 2001 From: Keepers Date: Tue, 10 Jan 2023 16:24:01 -0700 Subject: [PATCH 22/22] remove DiscreteScopes() from selectors (#2036) ## Description DiscreteScopes is a vestigial func from when scopes contained the list of resource owners to track. That behavior is no longer in use. ## Does this PR need a docs update or release note? 
- [x] :no_entry: No ## Type of change - [x] :broom: Tech Debt/Cleanup ## Issue(s) * #1617 ## Test Plan - [x] :zap: Unit test --- .../connector/exchange/data_collections.go | 3 +- src/internal/operations/backup.go | 4 +- src/internal/operations/restore.go | 1 - src/pkg/backup/details/details.go | 3 +- src/pkg/selectors/exchange.go | 15 ------ src/pkg/selectors/exchange_test.go | 49 ++----------------- src/pkg/selectors/onedrive.go | 15 ------ src/pkg/selectors/onedrive_test.go | 43 ---------------- src/pkg/selectors/selectors.go | 39 --------------- src/pkg/selectors/sharepoint.go | 15 ------ src/pkg/selectors/sharepoint_test.go | 45 +---------------- 11 files changed, 8 insertions(+), 224 deletions(-) diff --git a/src/internal/connector/exchange/data_collections.go b/src/internal/connector/exchange/data_collections.go index 62bc12f3f..33467411a 100644 --- a/src/internal/connector/exchange/data_collections.go +++ b/src/internal/connector/exchange/data_collections.go @@ -175,7 +175,6 @@ func DataCollections( var ( user = selector.DiscreteOwner - scopes = eb.DiscreteScopes([]string{user}) collections = []data.Collection{} errs error ) @@ -185,7 +184,7 @@ func DataCollections( return nil, err } - for _, scope := range scopes { + for _, scope := range eb.Scopes() { dps := cdps[scope.Category().PathType()] dcs, err := createCollections( diff --git a/src/internal/operations/backup.go b/src/internal/operations/backup.go index 856335a85..d4d3056a3 100644 --- a/src/internal/operations/backup.go +++ b/src/internal/operations/backup.go @@ -207,8 +207,8 @@ func (op *BackupOperation) Run(ctx context.Context) (err error) { return opStats.writeErr } - // TODO: should always be 1, since backups are 1:1 with resourceOwners now. - opStats.resourceCount = len(data.ResourceOwnerSet(cs)) + // should always be 1, since backups are 1:1 with resourceOwners. 
+ opStats.resourceCount = 1 opStats.started = true opStats.gc = gc.AwaitStatus() diff --git a/src/internal/operations/restore.go b/src/internal/operations/restore.go index 80c7b986a..b4713f57c 100644 --- a/src/internal/operations/restore.go +++ b/src/internal/operations/restore.go @@ -150,7 +150,6 @@ func (op *RestoreOperation) Run(ctx context.Context) (restoreDetails *details.De events.BackupID: op.BackupID, events.BackupCreateTime: bup.CreationTime, events.RestoreID: opStats.restoreID, - // TODO: restore options, }, ) diff --git a/src/pkg/backup/details/details.go b/src/pkg/backup/details/details.go index 798c12ce2..923410dde 100644 --- a/src/pkg/backup/details/details.go +++ b/src/pkg/backup/details/details.go @@ -229,8 +229,7 @@ func (d *Details) addFolder(folder folderEntry) { // DetailsEntry describes a single item stored in a Backup type DetailsEntry struct { - // TODO: `RepoRef` is currently the full path to the item in Kopia - // This can be optimized. + // RepoRef is the full storage path of the item in Kopia RepoRef string `json:"repoRef"` ShortRef string `json:"shortRef"` ParentRef string `json:"parentRef,omitempty"` diff --git a/src/pkg/selectors/exchange.go b/src/pkg/selectors/exchange.go index 109ee2ea4..fd3a29e65 100644 --- a/src/pkg/selectors/exchange.go +++ b/src/pkg/selectors/exchange.go @@ -188,21 +188,6 @@ func (s *exchange) Scopes() []ExchangeScope { return scopes[ExchangeScope](s.Selector) } -// DiscreteScopes retrieves the list of exchangeScopes in the selector. -// If any Include scope's User category is set to Any, replaces that -// scope's value with the list of userPNs instead. 
-func (s *exchange) DiscreteScopes(userPNs []string) []ExchangeScope { - scopes := discreteScopes[ExchangeScope](s.Includes, ExchangeUser, userPNs) - - ss := make([]ExchangeScope, 0, len(scopes)) - - for _, scope := range scopes { - ss = append(ss, ExchangeScope(scope)) - } - - return ss -} - type ExchangeItemScopeConstructor func([]string, []string, ...option) []ExchangeScope // ------------------- diff --git a/src/pkg/selectors/exchange_test.go b/src/pkg/selectors/exchange_test.go index bbb2bf974..df556895f 100644 --- a/src/pkg/selectors/exchange_test.go +++ b/src/pkg/selectors/exchange_test.go @@ -479,49 +479,6 @@ func (suite *ExchangeSelectorSuite) TestExchangeBackup_Scopes() { } } -func (suite *ExchangeSelectorSuite) TestExchangeBackup_DiscreteScopes() { - usrs := []string{"u1", "u2"} - table := []struct { - name string - include []string - discrete []string - expect []string - }{ - { - name: "any user", - include: Any(), - discrete: usrs, - expect: usrs, - }, - { - name: "discrete user", - include: []string{"u3"}, - discrete: usrs, - expect: []string{"u3"}, - }, - { - name: "nil discrete slice", - include: Any(), - discrete: nil, - expect: Any(), - }, - } - - for _, test := range table { - suite.T().Run(test.name, func(t *testing.T) { - // todo: remove discreteScopes - // eb := NewExchangeBackup(test.include) - // eb.Include(eb.AllData()) - - // scopes := eb.DiscreteScopes(test.discrete) - // for _, sc := range scopes { - // users := sc.Get(ExchangeUser) - // assert.Equal(t, test.expect, users) - // } - }) - } -} - func (suite *ExchangeSelectorSuite) TestExchangeScope_Category() { table := []struct { is exchangeCategory @@ -1144,7 +1101,7 @@ func (suite *ExchangeSelectorSuite) TestPasses() { ) var ( - es = NewExchangeRestore(Any()) // TODO: move into test and compose with each test value + es = NewExchangeRestore(Any()) otherMail = setScopesToDefault(es.Mails(Any(), []string{"smarf"})) mail = setScopesToDefault(es.Mails(Any(), []string{mid})) noMail = 
setScopesToDefault(es.Mails(Any(), None())) @@ -1186,7 +1143,7 @@ func (suite *ExchangeSelectorSuite) TestContains() { target := "fnords" var ( - es = NewExchangeRestore(Any()) // TODO: move into test and compose with each test value + es = NewExchangeRestore(Any()) noMail = setScopesToDefault(es.Mails(None(), None())) does = setScopesToDefault(es.Mails(Any(), []string{target})) doesNot = setScopesToDefault(es.Mails(Any(), []string{"smarf"})) @@ -1221,7 +1178,7 @@ func (suite *ExchangeSelectorSuite) TestContains() { func (suite *ExchangeSelectorSuite) TestIsAny() { var ( - es = NewExchangeRestore(Any()) // TODO: move into test and compose with each test value + es = NewExchangeRestore(Any()) specificMail = setScopesToDefault(es.Mails(Any(), []string{"email"})) anyMail = setScopesToDefault(es.Mails(Any(), Any())) ) diff --git a/src/pkg/selectors/onedrive.go b/src/pkg/selectors/onedrive.go index 69cc0687a..14ece70fb 100644 --- a/src/pkg/selectors/onedrive.go +++ b/src/pkg/selectors/onedrive.go @@ -181,21 +181,6 @@ func (s *oneDrive) Scopes() []OneDriveScope { return scopes[OneDriveScope](s.Selector) } -// DiscreteScopes retrieves the list of oneDriveScopes in the selector. -// If any Include scope's User category is set to Any, replaces that -// scope's value with the list of userPNs instead. 
-func (s *oneDrive) DiscreteScopes(userPNs []string) []OneDriveScope { - scopes := discreteScopes[OneDriveScope](s.Includes, OneDriveUser, userPNs) - - ss := make([]OneDriveScope, 0, len(scopes)) - - for _, scope := range scopes { - ss = append(ss, OneDriveScope(scope)) - } - - return ss -} - // ------------------- // Scope Factories diff --git a/src/pkg/selectors/onedrive_test.go b/src/pkg/selectors/onedrive_test.go index aa9e72016..1efcb1f3b 100644 --- a/src/pkg/selectors/onedrive_test.go +++ b/src/pkg/selectors/onedrive_test.go @@ -39,49 +39,6 @@ func (suite *OneDriveSelectorSuite) TestToOneDriveBackup() { assert.NotZero(t, ob.Scopes()) } -func (suite *OneDriveSelectorSuite) TestOneDriveBackup_DiscreteScopes() { - usrs := []string{"u1", "u2"} - table := []struct { - name string - include []string - discrete []string - expect []string - }{ - { - name: "any user", - include: Any(), - discrete: usrs, - expect: usrs, - }, - { - name: "discrete user", - include: []string{"u3"}, - discrete: usrs, - expect: []string{"u3"}, - }, - { - name: "nil discrete slice", - include: Any(), - discrete: nil, - expect: Any(), - }, - } - - for _, test := range table { - suite.T().Run(test.name, func(t *testing.T) { - // todo: remove discreteScopes - // eb := NewOneDriveBackup(test.include) - // eb.Include(eb.AllData()) - - // scopes := eb.DiscreteScopes(test.discrete) - // for _, sc := range scopes { - // users := sc.Get(OneDriveUser) - // assert.Equal(t, test.expect, users) - // } - }) - } -} - func (suite *OneDriveSelectorSuite) TestOneDriveSelector_AllData() { t := suite.T() diff --git a/src/pkg/selectors/selectors.go b/src/pkg/selectors/selectors.go index a51ed24bb..505ff1a7b 100644 --- a/src/pkg/selectors/selectors.go +++ b/src/pkg/selectors/selectors.go @@ -6,7 +6,6 @@ import ( "strings" "github.com/pkg/errors" - "golang.org/x/exp/maps" "github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/filters" @@ -182,11 +181,6 @@ func splitByResourceOwner[T 
scopeT, C categoryT](s Selector, allOwners []string, for _, ro := range targets { c := s c.DiscreteOwner = ro - - // TODO: when the rootCat gets removed from the scopes, we can remove this - c.Includes = discreteScopes[T](s.Includes, rootCat, []string{ro}) - c.Filters = discreteScopes[T](s.Filters, rootCat, []string{ro}) - ss = append(ss, c) } @@ -221,7 +215,6 @@ func appendScopes[T scopeT](to []scope, scopes ...[]T) []scope { } // scopes retrieves the list of scopes in the selector. -// future TODO: if Inclues is nil, return filters. func scopes[T scopeT](s Selector) []T { scopes := []T{} @@ -232,38 +225,6 @@ func scopes[T scopeT](s Selector) []T { return scopes } -// discreteScopes retrieves the list of scopes in the selector. -// for any scope in the `Includes` set, if scope.IsAny(rootCat), -// then that category's value is replaced with the provided set of -// discrete identifiers. -// If discreteIDs is an empty slice, returns the normal scopes(s). -// future TODO: if Includes is nil, return filters. -func discreteScopes[T scopeT, C categoryT]( - scopes []scope, - rootCat C, - discreteIDs []string, -) []scope { - sl := []scope{} - - if len(discreteIDs) == 0 { - return scopes - } - - for _, v := range scopes { - t := T(v) - - if isAnyTarget(t, rootCat) { - w := maps.Clone(t) - set(w, rootCat, discreteIDs) - t = w - } - - sl = append(sl, scope(t)) - } - - return sl -} - // Returns the path.ServiceType matching the selector service. func (s Selector) PathService() path.ServiceType { return serviceToPathType[s.Service] diff --git a/src/pkg/selectors/sharepoint.go b/src/pkg/selectors/sharepoint.go index af4901ac6..0e6cb6260 100644 --- a/src/pkg/selectors/sharepoint.go +++ b/src/pkg/selectors/sharepoint.go @@ -179,21 +179,6 @@ func (s *sharePoint) Scopes() []SharePointScope { return scopes[SharePointScope](s.Selector) } -// DiscreteScopes retrieves the list of sharePointScopes in the selector. 
-// If any Include scope's Site category is set to Any, replaces that -// scope's value with the list of siteIDs instead. -func (s *sharePoint) DiscreteScopes(siteIDs []string) []SharePointScope { - scopes := discreteScopes[SharePointScope](s.Includes, SharePointSite, siteIDs) - - ss := make([]SharePointScope, 0, len(scopes)) - - for _, scope := range scopes { - ss = append(ss, SharePointScope(scope)) - } - - return ss -} - // ------------------- // Scope Factories diff --git a/src/pkg/selectors/sharepoint_test.go b/src/pkg/selectors/sharepoint_test.go index 3470db170..fc0b3d5c1 100644 --- a/src/pkg/selectors/sharepoint_test.go +++ b/src/pkg/selectors/sharepoint_test.go @@ -37,49 +37,6 @@ func (suite *SharePointSelectorSuite) TestToSharePointBackup() { assert.NotZero(t, ob.Scopes()) } -func (suite *SharePointSelectorSuite) TestSharePointBackup_DiscreteScopes() { - sites := []string{"s1", "s2"} - table := []struct { - name string - include []string - discrete []string - expect []string - }{ - { - name: "any site", - include: Any(), - discrete: sites, - expect: sites, - }, - { - name: "discrete sitet", - include: []string{"s3"}, - discrete: sites, - expect: []string{"s3"}, - }, - { - name: "nil discrete slice", - include: Any(), - discrete: nil, - expect: Any(), - }, - } - - for _, test := range table { - suite.T().Run(test.name, func(t *testing.T) { - // todo: remove discreteScopes - // eb := NewSharePointBackup(test.include) - // eb.Include(eb.AllData()) - - // scopes := eb.DiscreteScopes(test.discrete) - // for _, sc := range scopes { - // sites := sc.Get(SharePointSite) - // assert.Equal(t, test.expect, sites) - // } - }) - } -} - func (suite *SharePointSelectorSuite) TestSharePointSelector_AllData() { t := suite.T() @@ -368,7 +325,7 @@ func (suite *SharePointSelectorSuite) TestSharePointCategory_PathValues() { func (suite *SharePointSelectorSuite) TestSharePointScope_MatchesInfo() { var ( - ods = NewSharePointRestore(nil) // TODO: move into test + ods = 
NewSharePointRestore(nil) host = "www.website.com" pth = "/foo" url = host + pth