Merge branch 'main' into serialize-common

Danny 2023-01-10 18:53:26 -05:00 committed by GitHub
commit c0e1b3db7e
61 changed files with 1640 additions and 1114 deletions

View File

@ -369,7 +369,7 @@ jobs:
run: go-licenses check github.com/alcionai/corso/src --ignore github.com/alcionai/corso/src
- name: Run staticcheck
uses: dominikh/staticcheck-action@v1.2.0
uses: dominikh/staticcheck-action@v1.3.0
with:
install-go: false
working-directory: src

View File

@ -7,6 +7,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [Unreleased] (alpha)
### Added
- Incremental backup support for Exchange is now enabled by default.
### Changed
- The selectors Reduce() process will only include details that match the DiscreteOwner, if one is specified.

View File

@ -13,6 +13,7 @@ build:
lint: check-lint-version
golangci-lint run
staticcheck ./...
check-lint-version: check-lint
@if [ "$(LINT_VERSION)" != "$(WANTED_LINT_VERSION)" ]; then \

View File

@ -105,7 +105,7 @@ func addExchangeCommands(cmd *cobra.Command) *cobra.Command {
switch cmd.Use {
case createCommand:
c, fs = utils.AddCommand(cmd, exchangeCreateCmd())
options.AddFeatureFlags(cmd, options.ExchangeIncrementals())
options.AddFeatureToggle(cmd, options.DisableIncrementals())
c.Use = c.Use + " " + exchangeServiceCommandCreateUseSuffix
c.Example = exchangeServiceCommandCreateExamples
@ -508,11 +508,6 @@ func runDetailsExchangeCmd(
sel := utils.IncludeExchangeRestoreDataSelectors(opts)
utils.FilterExchangeRestoreInfoSelectors(sel, opts)
// if no selector flags were specified, get all data in the service.
if len(sel.Scopes()) == 0 {
sel.Include(sel.AllData())
}
return sel.Reduce(ctx, d), nil
}

View File

@ -399,11 +399,6 @@ func runDetailsOneDriveCmd(
sel := utils.IncludeOneDriveRestoreDataSelectors(opts)
utils.FilterOneDriveRestoreInfoSelectors(sel, opts)
// if no selector flags were specified, get all data in the service.
if len(sel.Scopes()) == 0 {
sel.Include(sel.AllData())
}
return sel.Reduce(ctx, d), nil
}

View File

@ -483,10 +483,5 @@ func runDetailsSharePointCmd(
sel := utils.IncludeSharePointRestoreDataSelectors(opts)
utils.FilterSharePointRestoreInfoSelectors(sel, opts)
// if no selector flags were specified, get all data in the service.
if len(sel.Scopes()) == 0 {
sel.Include(sel.AllData())
}
return sel.Reduce(ctx, d), nil
}

View File

@ -68,7 +68,7 @@ func AddConfigFlags(cmd *cobra.Command) {
&configFilePathFlag,
"config-file",
displayDefaultFP,
"config file location (default is $HOME/.corso.toml)")
"config file location")
}
// ---------------------------------------------------------------------------------------------------------

View File

@ -19,8 +19,8 @@ func Control() control.Options {
opt.DisableMetrics = true
}
if exchangeIncrementals {
opt.EnabledFeatures.ExchangeIncrementals = true
if disableIncrementals {
opt.ToggleFeatures.DisableIncrementals = true
}
return opt
@ -53,28 +53,28 @@ func AddGlobalOperationFlags(cmd *cobra.Command) {
// Feature Flags
// ---------------------------------------------------------------------------
var exchangeIncrementals bool
var disableIncrementals bool
type exposeFeatureFlag func(*pflag.FlagSet)
// AddFeatureFlags adds CLI flags for each exposed feature flags to the
// AddFeatureToggle adds CLI flags for each exposed feature toggle to the
// persistent flag set within the command.
func AddFeatureFlags(cmd *cobra.Command, effs ...exposeFeatureFlag) {
func AddFeatureToggle(cmd *cobra.Command, effs ...exposeFeatureFlag) {
fs := cmd.PersistentFlags()
for _, fflag := range effs {
fflag(fs)
}
}
// Adds the '--exchange-incrementals' cli flag which, when set, enables
// incrementals data retrieval for exchange backups.
func ExchangeIncrementals() func(*pflag.FlagSet) {
// Adds the hidden '--disable-incrementals' cli flag which, when set, disables
// incremental backups.
func DisableIncrementals() func(*pflag.FlagSet) {
return func(fs *pflag.FlagSet) {
fs.BoolVar(
&exchangeIncrementals,
"exchange-incrementals",
&disableIncrementals,
"disable-incrementals",
false,
"Enable incremental data retrieval in Exchange backups.")
cobra.CheckErr(fs.MarkHidden("exchange-incrementals"))
"Disable incremental data retrieval in backups.")
cobra.CheckErr(fs.MarkHidden("disable-incrementals"))
}
}
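
Taken together, a minimal sketch (not part of this commit) of the toggle's lifecycle, using only the exported pieces shown above: a command registers the hidden flag, and Control() later surfaces it as a ToggleFeatures option.

cmd := &cobra.Command{Use: "create"}
options.AddFeatureToggle(cmd, options.DisableIncrementals())

// after cobra parses a command line containing --disable-incrementals:
opt := options.Control()
if opt.ToggleFeatures.DisableIncrementals {
	// the backup operation skips incremental delta retrieval
}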

View File

@ -10,6 +10,7 @@ import (
. "github.com/alcionai/corso/src/cli/print"
"github.com/alcionai/corso/src/cli/utils"
"github.com/alcionai/corso/src/internal/common"
"github.com/alcionai/corso/src/internal/kopia"
"github.com/alcionai/corso/src/pkg/control"
"github.com/alcionai/corso/src/pkg/repository"
)
@ -215,23 +216,22 @@ func restoreExchangeCmd(cmd *cobra.Command, args []string) error {
defer utils.CloseRepo(ctx, r)
dest := control.DefaultRestoreDestination(common.SimpleDateTime)
sel := utils.IncludeExchangeRestoreDataSelectors(opts)
utils.FilterExchangeRestoreInfoSelectors(sel, opts)
// if no selector flags were specified, get all data in the service.
if len(sel.Scopes()) == 0 {
sel.Include(sel.AllData())
}
restoreDest := control.DefaultRestoreDestination(common.SimpleDateTime)
ro, err := r.NewRestore(ctx, backupID, sel.Selector, restoreDest)
ro, err := r.NewRestore(ctx, backupID, sel.Selector, dest)
if err != nil {
return Only(ctx, errors.Wrap(err, "Failed to initialize Exchange restore"))
}
ds, err := ro.Run(ctx)
if err != nil {
if errors.Is(err, kopia.ErrNotFound) {
return Only(ctx, errors.Errorf("Backup or backup details missing for id %s", backupID))
}
return Only(ctx, errors.Wrap(err, "Failed to run Exchange restore"))
}

View File

@ -10,6 +10,7 @@ import (
. "github.com/alcionai/corso/src/cli/print"
"github.com/alcionai/corso/src/cli/utils"
"github.com/alcionai/corso/src/internal/common"
"github.com/alcionai/corso/src/internal/kopia"
"github.com/alcionai/corso/src/pkg/control"
"github.com/alcionai/corso/src/pkg/repository"
)
@ -152,23 +153,22 @@ func restoreOneDriveCmd(cmd *cobra.Command, args []string) error {
defer utils.CloseRepo(ctx, r)
dest := control.DefaultRestoreDestination(common.SimpleDateTimeOneDrive)
sel := utils.IncludeOneDriveRestoreDataSelectors(opts)
utils.FilterOneDriveRestoreInfoSelectors(sel, opts)
// if no selector flags were specified, get all data in the service.
if len(sel.Scopes()) == 0 {
sel.Include(sel.AllData())
}
restoreDest := control.DefaultRestoreDestination(common.SimpleDateTimeOneDrive)
ro, err := r.NewRestore(ctx, backupID, sel.Selector, restoreDest)
ro, err := r.NewRestore(ctx, backupID, sel.Selector, dest)
if err != nil {
return Only(ctx, errors.Wrap(err, "Failed to initialize OneDrive restore"))
}
ds, err := ro.Run(ctx)
if err != nil {
if errors.Is(err, kopia.ErrNotFound) {
return Only(ctx, errors.Errorf("Backup or backup details missing for id %s", backupID))
}
return Only(ctx, errors.Wrap(err, "Failed to run OneDrive restore"))
}

View File

@ -10,6 +10,7 @@ import (
. "github.com/alcionai/corso/src/cli/print"
"github.com/alcionai/corso/src/cli/utils"
"github.com/alcionai/corso/src/internal/common"
"github.com/alcionai/corso/src/internal/kopia"
"github.com/alcionai/corso/src/pkg/control"
"github.com/alcionai/corso/src/pkg/repository"
)
@ -153,23 +154,22 @@ func restoreSharePointCmd(cmd *cobra.Command, args []string) error {
defer utils.CloseRepo(ctx, r)
dest := control.DefaultRestoreDestination(common.SimpleDateTime)
sel := utils.IncludeSharePointRestoreDataSelectors(opts)
utils.FilterSharePointRestoreInfoSelectors(sel, opts)
// if no selector flags were specified, get all data in the service.
if len(sel.Scopes()) == 0 {
sel.Include(sel.AllData())
}
restoreDest := control.DefaultRestoreDestination(common.SimpleDateTimeOneDrive)
ro, err := r.NewRestore(ctx, backupID, sel.Selector, restoreDest)
ro, err := r.NewRestore(ctx, backupID, sel.Selector, dest)
if err != nil {
return Only(ctx, errors.Wrap(err, "Failed to initialize SharePoint restore"))
}
ds, err := ro.Run(ctx)
if err != nil {
if errors.Is(err, kopia.ErrNotFound) {
return Only(ctx, errors.Errorf("Backup or backup details missing for id %s", backupID))
}
return Only(ctx, errors.Wrap(err, "Failed to run SharePoint restore"))
}

View File

@ -138,7 +138,6 @@ func IncludeExchangeRestoreDataSelectors(opts ExchangeOpts) *selectors.ExchangeR
// either scope the request to a set of users
if lc+lcf+le+lef+lev+lec == 0 {
sel.Include(sel.AllData())
return sel
}

View File

@ -325,42 +325,36 @@ func (suite *ExchangeUtilsSuite) TestAddExchangeInclude() {
}{
{
name: "no inputs",
resources: empty,
folders: empty,
items: empty,
expectIncludeLen: 0,
},
{
name: "single inputs",
resources: single,
folders: single,
items: single,
expectIncludeLen: 1,
},
{
name: "multi inputs",
resources: multi,
folders: multi,
items: multi,
expectIncludeLen: 1,
},
{
name: "folder contains",
resources: empty,
folders: containsOnly,
items: empty,
expectIncludeLen: 1,
},
{
name: "folder prefixes",
resources: empty,
folders: prefixOnly,
items: empty,
expectIncludeLen: 1,
},
{
name: "folder prefixes and contains",
resources: empty,
folders: containsAndPrefix,
items: empty,
expectIncludeLen: 2,

View File

@ -84,7 +84,6 @@ func IncludeOneDriveRestoreDataSelectors(opts OneDriveOpts) *selectors.OneDriveR
// is specified
if lp+ln == 0 {
sel.Include(sel.AllData())
return sel
}

View File

@ -4,12 +4,12 @@ go 1.19
require (
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0
github.com/aws/aws-sdk-go v1.44.174
github.com/aws/aws-sdk-go v1.44.176
github.com/aws/aws-xray-sdk-go v1.8.0
github.com/google/uuid v1.3.0
github.com/hashicorp/go-multierror v1.1.1
github.com/kopia/kopia v0.12.0
github.com/microsoft/kiota-abstractions-go v0.15.1
github.com/microsoft/kiota-abstractions-go v0.15.2
github.com/microsoft/kiota-authentication-azure-go v0.5.0
github.com/microsoft/kiota-http-go v0.11.0
github.com/microsoft/kiota-serialization-json-go v0.7.2

View File

@ -58,8 +58,8 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRF
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY3JY=
github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
github.com/aws/aws-sdk-go v1.44.174 h1:9lR4a6MKQW/t6YCG0ZKAt1GAkjdEPP8sWch/pfcuR0c=
github.com/aws/aws-sdk-go v1.44.174/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/aws/aws-sdk-go v1.44.176 h1:mxcfI3IWHemX+5fEKt5uqIS/hdbaR7qzGfJYo5UyjJE=
github.com/aws/aws-sdk-go v1.44.176/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/aws/aws-xray-sdk-go v1.8.0 h1:0xncHZ588wB/geLjbM/esoW3FOEThWy2TJyb4VXfLFY=
github.com/aws/aws-xray-sdk-go v1.8.0/go.mod h1:7LKe47H+j3evfvS1+q0wzpoaGXGrF3mUsfM+thqVO+A=
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
@ -257,8 +257,8 @@ github.com/matttproud/golang_protobuf_extensions v1.0.2 h1:hAHbPm5IJGijwng3PWk09
github.com/matttproud/golang_protobuf_extensions v1.0.2/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d h1:5PJl274Y63IEHC+7izoQE9x6ikvDFZS2mDVS3drnohI=
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
github.com/microsoft/kiota-abstractions-go v0.15.1 h1:RgN8h9Z3AoFav1K4ODVSkmA8Es933hTlAWNesll1G5U=
github.com/microsoft/kiota-abstractions-go v0.15.1/go.mod h1:YqOu8G6bZTG0eCIWrmEny8PaF750uaw7tLFac4psf+4=
github.com/microsoft/kiota-abstractions-go v0.15.2 h1:Pp78BbqPvkF2mAMH0Ph37ymwfSH7uF9iYfY1fZ8g630=
github.com/microsoft/kiota-abstractions-go v0.15.2/go.mod h1:RT/s9sCzg49i4iO7e2qhyWmX+DlJDgC0P+Wp8fKQQfo=
github.com/microsoft/kiota-authentication-azure-go v0.5.0 h1:RVA/tTgMnDIN3u4qPZtvYvVRsQDOFkd3yvi6KXjZJko=
github.com/microsoft/kiota-authentication-azure-go v0.5.0/go.mod h1:1Io6h+88FlDRmrajdjSnXPz8oyObUVjNuQZLhrF9kQk=
github.com/microsoft/kiota-http-go v0.11.0 h1:0K0y/wZcTvEEX2Xdj5tngJqknqYQpArLdtjB/fo88Dc=

View File

@ -87,18 +87,6 @@ func newService(creds account.M365Config) (*graph.Service, error) {
return graph.NewService(adapter), nil
}
func (c Client) Contacts() Contacts {
return Contacts{c}
}
func (c Client) Events() Events {
return Events{c}
}
func (c Client) Mail() Mail {
return Mail{c}
}
// ---------------------------------------------------------------------------
// helper funcs
// ---------------------------------------------------------------------------

View File

@ -17,6 +17,11 @@ import (
// controller
// ---------------------------------------------------------------------------
func (c Client) Contacts() Contacts {
return Contacts{c}
}
// Contacts is an interface-compliant provider of the client.
type Contacts struct {
Client
}
@ -147,6 +152,30 @@ func (c Contacts) EnumerateContainers(
return errs.ErrorOrNil()
}
// ---------------------------------------------------------------------------
// item pager
// ---------------------------------------------------------------------------
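// compile-time check that contactPager satisfies the itemPager interface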
var _ itemPager = &contactPager{}
type contactPager struct {
gs graph.Servicer
builder *users.ItemContactFoldersItemContactsDeltaRequestBuilder
options *users.ItemContactFoldersItemContactsDeltaRequestBuilderGetRequestConfiguration
}
func (p *contactPager) getPage(ctx context.Context) (pageLinker, error) {
return p.builder.Get(ctx, p.options)
}
func (p *contactPager) setNext(nextLink string) {
p.builder = users.NewItemContactFoldersItemContactsDeltaRequestBuilder(nextLink, p.gs.Adapter())
}
func (p *contactPager) valuesIn(pl pageLinker) ([]getIDAndAddtler, error) {
return toValues[models.Contactable](pl)
}
func (c Contacts) GetAddedAndRemovedItemIDs(
ctx context.Context,
user, directoryID, oldDelta string,
@ -158,9 +187,6 @@ func (c Contacts) GetAddedAndRemovedItemIDs(
var (
errs *multierror.Error
ids []string
removedIDs []string
deltaURL string
resetDelta bool
)
@ -169,63 +195,17 @@ func (c Contacts) GetAddedAndRemovedItemIDs(
return nil, nil, DeltaUpdate{}, errors.Wrap(err, "getting query options")
}
getIDs := func(builder *users.ItemContactFoldersItemContactsDeltaRequestBuilder) error {
for {
resp, err := builder.Get(ctx, options)
if err != nil {
if err := graph.IsErrDeletedInFlight(err); err != nil {
return err
}
if err := graph.IsErrInvalidDelta(err); err != nil {
return err
}
return errors.Wrap(err, support.ConnectorStackErrorTrace(err))
}
for _, item := range resp.GetValue() {
if item.GetId() == nil {
errs = multierror.Append(
errs,
errors.Errorf("item with nil ID in folder %s", directoryID),
)
// TODO(ashmrtn): Handle fail-fast.
continue
}
if item.GetAdditionalData()[graph.AddtlDataRemoved] == nil {
ids = append(ids, *item.GetId())
} else {
removedIDs = append(removedIDs, *item.GetId())
}
}
delta := resp.GetOdataDeltaLink()
if delta != nil && len(*delta) > 0 {
deltaURL = *delta
}
nextLink := resp.GetOdataNextLink()
if nextLink == nil || len(*nextLink) == 0 {
break
}
builder = users.NewItemContactFoldersItemContactsDeltaRequestBuilder(*nextLink, service.Adapter())
}
return nil
}
if len(oldDelta) > 0 {
err := getIDs(users.NewItemContactFoldersItemContactsDeltaRequestBuilder(oldDelta, service.Adapter()))
builder := users.NewItemContactFoldersItemContactsDeltaRequestBuilder(oldDelta, service.Adapter())
pgr := &contactPager{service, builder, options}
added, removed, deltaURL, err := getItemsAddedAndRemovedFromContainer(ctx, pgr)
// note: happy path, not the error condition
if err == nil {
return ids, removedIDs, DeltaUpdate{deltaURL, false}, errs.ErrorOrNil()
return added, removed, DeltaUpdate{deltaURL, false}, errs.ErrorOrNil()
}
// only return on error if it is NOT a delta issue.
// otherwise we'll retry the call with the regular builder
// on bad deltas we retry the call with the regular builder
if graph.IsErrInvalidDelta(err) == nil {
return nil, nil, DeltaUpdate{}, err
}
@ -234,15 +214,13 @@ func (c Contacts) GetAddedAndRemovedItemIDs(
errs = nil
}
builder := service.Client().
UsersById(user).
ContactFoldersById(directoryID).
Contacts().
Delta()
builder := service.Client().UsersById(user).ContactFoldersById(directoryID).Contacts().Delta()
pgr := &contactPager{service, builder, options}
if err := getIDs(builder); err != nil {
added, removed, deltaURL, err := getItemsAddedAndRemovedFromContainer(ctx, pgr)
if err != nil {
return nil, nil, DeltaUpdate{}, err
}
return ids, removedIDs, DeltaUpdate{deltaURL, resetDelta}, errs.ErrorOrNil()
return added, removed, DeltaUpdate{deltaURL, resetDelta}, errs.ErrorOrNil()
}

View File

@ -18,6 +18,11 @@ import (
// controller
// ---------------------------------------------------------------------------
func (c Client) Events() Events {
return Events{c}
}
// Events is an interface-compliant provider of the client.
type Events struct {
Client
}
@ -124,6 +129,39 @@ func (c Events) EnumerateContainers(
return errs.ErrorOrNil()
}
// ---------------------------------------------------------------------------
// item pager
// ---------------------------------------------------------------------------
type eventWrapper struct {
models.EventCollectionResponseable
}
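// events have no delta endpoint, so the wrapper fills the pageLinker
// contract by always reporting a nil delta link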
func (ew eventWrapper) GetOdataDeltaLink() *string {
return nil
}
var _ itemPager = &eventPager{}
type eventPager struct {
gs graph.Servicer
builder *users.ItemCalendarsItemEventsRequestBuilder
options *users.ItemCalendarsItemEventsRequestBuilderGetRequestConfiguration
}
func (p *eventPager) getPage(ctx context.Context) (pageLinker, error) {
resp, err := p.builder.Get(ctx, p.options)
return eventWrapper{resp}, err
}
func (p *eventPager) setNext(nextLink string) {
p.builder = users.NewItemCalendarsItemEventsRequestBuilder(nextLink, p.gs.Adapter())
}
func (p *eventPager) valuesIn(pl pageLinker) ([]getIDAndAddtler, error) {
return toValues[models.Eventable](pl)
}
func (c Events) GetAddedAndRemovedItemIDs(
ctx context.Context,
user, calendarID, oldDelta string,
@ -133,10 +171,7 @@ func (c Events) GetAddedAndRemovedItemIDs(
return nil, nil, DeltaUpdate{}, err
}
var (
errs *multierror.Error
ids []string
)
var errs *multierror.Error
options, err := optionsForEventsByCalendar([]string{"id"})
if err != nil {
@ -144,41 +179,15 @@ func (c Events) GetAddedAndRemovedItemIDs(
}
builder := service.Client().UsersById(user).CalendarsById(calendarID).Events()
pgr := &eventPager{service, builder, options}
for {
resp, err := builder.Get(ctx, options)
if err != nil {
if err := graph.IsErrDeletedInFlight(err); err != nil {
return nil, nil, DeltaUpdate{}, err
}
return nil, nil, DeltaUpdate{}, errors.Wrap(err, support.ConnectorStackErrorTrace(err))
}
for _, item := range resp.GetValue() {
if item.GetId() == nil {
errs = multierror.Append(
errs,
errors.Errorf("event with nil ID in calendar %s", calendarID),
)
// TODO(ashmrtn): Handle fail-fast.
continue
}
ids = append(ids, *item.GetId())
}
nextLink := resp.GetOdataNextLink()
if nextLink == nil || len(*nextLink) == 0 {
break
}
builder = users.NewItemCalendarsItemEventsRequestBuilder(*nextLink, service.Adapter())
added, _, _, err := getItemsAddedAndRemovedFromContainer(ctx, pgr)
if err != nil {
return nil, nil, DeltaUpdate{}, err
}
// Events don't have a delta endpoint so just return an empty string.
return ids, nil, DeltaUpdate{}, errs.ErrorOrNil()
return added, nil, DeltaUpdate{}, errs.ErrorOrNil()
}
// ---------------------------------------------------------------------------

View File

@ -17,6 +17,11 @@ import (
// controller
// ---------------------------------------------------------------------------
func (c Client) Mail() Mail {
return Mail{c}
}
// Mail is an interface-compliant provider of the client.
type Mail struct {
Client
}
@ -145,6 +150,30 @@ func (c Mail) EnumerateContainers(
return errs.ErrorOrNil()
}
// ---------------------------------------------------------------------------
// item pager
// ---------------------------------------------------------------------------
var _ itemPager = &mailPager{}
type mailPager struct {
gs graph.Servicer
builder *users.ItemMailFoldersItemMessagesDeltaRequestBuilder
options *users.ItemMailFoldersItemMessagesDeltaRequestBuilderGetRequestConfiguration
}
func (p *mailPager) getPage(ctx context.Context) (pageLinker, error) {
return p.builder.Get(ctx, p.options)
}
func (p *mailPager) setNext(nextLink string) {
p.builder = users.NewItemMailFoldersItemMessagesDeltaRequestBuilder(nextLink, p.gs.Adapter())
}
func (p *mailPager) valuesIn(pl pageLinker) ([]getIDAndAddtler, error) {
return toValues[models.Messageable](pl)
}
func (c Mail) GetAddedAndRemovedItemIDs(
ctx context.Context,
user, directoryID, oldDelta string,
@ -156,8 +185,6 @@ func (c Mail) GetAddedAndRemovedItemIDs(
var (
errs *multierror.Error
ids []string
removedIDs []string
deltaURL string
resetDelta bool
)
@ -167,63 +194,17 @@ func (c Mail) GetAddedAndRemovedItemIDs(
return nil, nil, DeltaUpdate{}, errors.Wrap(err, "getting query options")
}
getIDs := func(builder *users.ItemMailFoldersItemMessagesDeltaRequestBuilder) error {
for {
resp, err := builder.Get(ctx, options)
if err != nil {
if err := graph.IsErrDeletedInFlight(err); err != nil {
return err
}
if err := graph.IsErrInvalidDelta(err); err != nil {
return err
}
return errors.Wrap(err, support.ConnectorStackErrorTrace(err))
}
for _, item := range resp.GetValue() {
if item.GetId() == nil {
errs = multierror.Append(
errs,
errors.Errorf("item with nil ID in folder %s", directoryID),
)
// TODO(ashmrtn): Handle fail-fast.
continue
}
if item.GetAdditionalData()[graph.AddtlDataRemoved] == nil {
ids = append(ids, *item.GetId())
} else {
removedIDs = append(removedIDs, *item.GetId())
}
}
delta := resp.GetOdataDeltaLink()
if delta != nil && len(*delta) > 0 {
deltaURL = *delta
}
nextLink := resp.GetOdataNextLink()
if nextLink == nil || len(*nextLink) == 0 {
break
}
builder = users.NewItemMailFoldersItemMessagesDeltaRequestBuilder(*nextLink, service.Adapter())
}
return nil
}
if len(oldDelta) > 0 {
err := getIDs(users.NewItemMailFoldersItemMessagesDeltaRequestBuilder(oldDelta, service.Adapter()))
builder := users.NewItemMailFoldersItemMessagesDeltaRequestBuilder(oldDelta, service.Adapter())
pgr := &mailPager{service, builder, options}
added, removed, deltaURL, err := getItemsAddedAndRemovedFromContainer(ctx, pgr)
// note: happy path, not the error condition
if err == nil {
return ids, removedIDs, DeltaUpdate{deltaURL, false}, errs.ErrorOrNil()
return added, removed, DeltaUpdate{deltaURL, false}, errs.ErrorOrNil()
}
// only return on error if it is NOT a delta issue.
// otherwise we'll retry the call with the regular builder
// on bad deltas we retry the call with the regular builder
if graph.IsErrInvalidDelta(err) == nil {
return nil, nil, DeltaUpdate{}, err
}
@ -233,10 +214,12 @@ func (c Mail) GetAddedAndRemovedItemIDs(
}
builder := service.Client().UsersById(user).MailFoldersById(directoryID).Messages().Delta()
pgr := &mailPager{service, builder, options}
if err := getIDs(builder); err != nil {
added, removed, deltaURL, err := getItemsAddedAndRemovedFromContainer(ctx, pgr)
if err != nil {
return nil, nil, DeltaUpdate{}, err
}
return ids, removedIDs, DeltaUpdate{deltaURL, resetDelta}, errs.ErrorOrNil()
return added, removed, DeltaUpdate{deltaURL, resetDelta}, errs.ErrorOrNil()
}

View File

@ -0,0 +1,126 @@
package api
import (
"context"
"github.com/pkg/errors"
"github.com/alcionai/corso/src/internal/connector/graph"
"github.com/alcionai/corso/src/internal/connector/support"
)
// ---------------------------------------------------------------------------
// generic handler for paging item ids in a container
// ---------------------------------------------------------------------------
type itemPager interface {
getPage(context.Context) (pageLinker, error)
setNext(string)
valuesIn(pageLinker) ([]getIDAndAddtler, error)
}
type pageLinker interface {
GetOdataDeltaLink() *string
GetOdataNextLink() *string
}
type getIDAndAddtler interface {
GetId() *string
GetAdditionalData() map[string]any
}
// uses a models interface compliant with { GetValue() []T }
// to transform its results into a slice of getIDAndAddtler interfaces.
// Generics are used here to handle the variation of msoft interfaces
// that all _almost_ comply with GetValue, but all return a different
// interface.
func toValues[T any](a any) ([]getIDAndAddtler, error) {
gv, ok := a.(interface{ GetValue() []T })
if !ok {
return nil, errors.Errorf("response of type [%T] does not comply with the GetValue() interface", a)
}
items := gv.GetValue()
r := make([]getIDAndAddtler, 0, len(items))
for _, item := range items {
var a any = item
ri, ok := a.(getIDAndAddtler)
if !ok {
return nil, errors.Errorf("item of type [%T] does not comply with the getIDAndAddtler interface", item)
}
r = append(r, ri)
}
return r, nil
}
// generic controller for retrieving all item ids in a container.
func getItemsAddedAndRemovedFromContainer(
ctx context.Context,
pager itemPager,
) ([]string, []string, string, error) {
var (
addedIDs = []string{}
removedIDs = []string{}
deltaURL string
)
for {
// get the next page of data, check for standard errors
resp, err := pager.getPage(ctx)
if err != nil {
if err := graph.IsErrDeletedInFlight(err); err != nil {
return nil, nil, deltaURL, err
}
if err := graph.IsErrInvalidDelta(err); err != nil {
return nil, nil, deltaURL, err
}
return nil, nil, deltaURL, errors.Wrap(err, support.ConnectorStackErrorTrace(err))
}
// each category type responds with a different interface, but all
// of them comply with GetValue, which is where we'll get our item data.
items, err := pager.valuesIn(resp)
if err != nil {
return nil, nil, "", err
}
// iterate through the items in the page
for _, item := range items {
// if the additional data contains a `@removed` key, the value will either
// be 'changed' or 'deleted'. We don't really care about the cause: both
// cases are handled the same way in storage.
if item.GetAdditionalData()[graph.AddtlDataRemoved] == nil {
addedIDs = append(addedIDs, *item.GetId())
} else {
removedIDs = append(removedIDs, *item.GetId())
}
}
// the deltaLink is kind of like a cursor for overall data state.
// once we run through pages of nextLinks, the last query will
// produce a deltaLink instead (if supported), which we'll use on
// the next backup to only get the changes since this run.
delta := resp.GetOdataDeltaLink()
if delta != nil && len(*delta) > 0 {
deltaURL = *delta
}
// the nextLink is our page cursor within this query.
// if we have more data to retrieve, we'll have a
// nextLink instead of a deltaLink.
nextLink := resp.GetOdataNextLink()
if nextLink == nil || len(*nextLink) == 0 {
break
}
pager.setNext(*nextLink)
}
return addedIDs, removedIDs, deltaURL, nil
}
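
For reference, a minimal sketch (not part of this commit) of an in-memory pager satisfying the itemPager interface above, e.g. for unit-testing the generic controller without any Graph calls; fakeItem, fakePage, and fakePager are hypothetical names and assume the same unexported package types:

type fakeItem struct {
	id    string
	addtl map[string]any
}

func (f fakeItem) GetId() *string                    { return &f.id }
func (f fakeItem) GetAdditionalData() map[string]any { return f.addtl }

type fakePage struct {
	next, delta *string
	items       []getIDAndAddtler
}

func (p fakePage) GetOdataNextLink() *string  { return p.next }
func (p fakePage) GetOdataDeltaLink() *string { return p.delta }

var _ itemPager = &fakePager{}

type fakePager struct {
	pages []fakePage
	page  int
}

func (p *fakePager) getPage(context.Context) (pageLinker, error) {
	return p.pages[p.page], nil
}

func (p *fakePager) setNext(string) { p.page++ }

func (p *fakePager) valuesIn(pl pageLinker) ([]getIDAndAddtler, error) {
	return pl.(fakePage).items, nil
}

Feeding such a pager to getItemsAddedAndRemovedFromContainer exercises the added/removed split and the nextLink/deltaLink cursor handling end to end.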

View File

@ -19,24 +19,26 @@ import (
// mocks and helpers
// ---------------------------------------------------------------------------
var _ graph.CachedContainer = &mockContainer{}
type mockContainer struct {
id *string
name *string
parentID *string
id *string
displayName *string
parentID *string
p *path.Builder
}
//nolint:revive
func (m mockContainer) GetId() *string {
return m.id
}
func (m mockContainer) GetDisplayName() *string {
return m.name
}
func (m mockContainer) GetId() *string { return m.id }
//nolint:revive
func (m mockContainer) GetParentFolderId() *string {
return m.parentID
func (m mockContainer) GetParentFolderId() *string { return m.parentID }
func (m mockContainer) GetDisplayName() *string { return m.displayName }
func (m mockContainer) Path() *path.Builder { return m.p }
func (m mockContainer) SetPath(p *path.Builder) {}
func strPtr(s string) *string {
return &s
}
// ---------------------------------------------------------------------------
@ -67,45 +69,45 @@ var (
{
name: "NilID",
c: mockContainer{
id: nil,
name: &testName,
parentID: &testParentID,
id: nil,
displayName: &testName,
parentID: &testParentID,
},
check: assert.Error,
},
{
name: "NilDisplayName",
c: mockContainer{
id: &testID,
name: nil,
parentID: &testParentID,
id: &testID,
displayName: nil,
parentID: &testParentID,
},
check: assert.Error,
},
{
name: "EmptyID",
c: mockContainer{
id: &emptyString,
name: &testName,
parentID: &testParentID,
id: &emptyString,
displayName: &testName,
parentID: &testParentID,
},
check: assert.Error,
},
{
name: "EmptyDisplayName",
c: mockContainer{
id: &testID,
name: &emptyString,
parentID: &testParentID,
id: &testID,
displayName: &emptyString,
parentID: &testParentID,
},
check: assert.Error,
},
{
name: "AllValues",
c: mockContainer{
id: &testID,
name: &testName,
parentID: &testParentID,
id: &testID,
displayName: &testName,
parentID: &testParentID,
},
check: assert.NoError,
},
@ -125,18 +127,18 @@ func (suite *FolderCacheUnitSuite) TestCheckRequiredValues() {
{
name: "NilParentFolderID",
c: mockContainer{
id: &testID,
name: &testName,
parentID: nil,
id: &testID,
displayName: &testName,
parentID: nil,
},
check: assert.Error,
},
{
name: "EmptyParentFolderID",
c: mockContainer{
id: &testID,
name: &testName,
parentID: &emptyString,
id: &testID,
displayName: &testName,
parentID: &emptyString,
},
check: assert.Error,
},
@ -161,9 +163,9 @@ func (suite *FolderCacheUnitSuite) TestAddFolder() {
name: "NoParentNoPath",
cf: graph.NewCacheFolder(
&mockContainer{
id: &testID,
name: &testName,
parentID: nil,
id: &testID,
displayName: &testName,
parentID: nil,
},
nil,
),
@ -173,9 +175,9 @@ func (suite *FolderCacheUnitSuite) TestAddFolder() {
name: "NoParentPath",
cf: graph.NewCacheFolder(
&mockContainer{
id: &testID,
name: &testName,
parentID: nil,
id: &testID,
displayName: &testName,
parentID: nil,
},
path.Builder{}.Append("foo"),
),
@ -185,9 +187,9 @@ func (suite *FolderCacheUnitSuite) TestAddFolder() {
name: "NoName",
cf: graph.NewCacheFolder(
&mockContainer{
id: &testID,
name: nil,
parentID: &testParentID,
id: &testID,
displayName: nil,
parentID: &testParentID,
},
path.Builder{}.Append("foo"),
),
@ -197,9 +199,9 @@ func (suite *FolderCacheUnitSuite) TestAddFolder() {
name: "NoID",
cf: graph.NewCacheFolder(
&mockContainer{
id: nil,
name: &testName,
parentID: &testParentID,
id: nil,
displayName: &testName,
parentID: &testParentID,
},
path.Builder{}.Append("foo"),
),
@ -209,9 +211,9 @@ func (suite *FolderCacheUnitSuite) TestAddFolder() {
name: "NoPath",
cf: graph.NewCacheFolder(
&mockContainer{
id: &testID,
name: &testName,
parentID: &testParentID,
id: &testID,
displayName: &testName,
parentID: &testParentID,
},
nil,
),

View File

@ -175,7 +175,6 @@ func DataCollections(
var (
user = selector.DiscreteOwner
scopes = eb.DiscreteScopes([]string{user})
collections = []data.Collection{}
errs error
)
@ -185,7 +184,7 @@ func DataCollections(
return nil, err
}
for _, scope := range scopes {
for _, scope := range eb.Scopes() {
dps := cdps[scope.Category().PathType()]
dcs, err := createCollections(

View File

@ -525,14 +525,16 @@ func (suite *DataCollectionsIntegrationSuite) TestEventsSerializationRegression(
expected: DefaultCalendar,
scope: selectors.NewExchangeBackup(users).EventCalendars(
[]string{DefaultCalendar},
selectors.PrefixMatch())[0],
selectors.PrefixMatch(),
)[0],
},
{
name: "Birthday Calendar",
expected: "Birthdays",
scope: selectors.NewExchangeBackup(users).EventCalendars(
[]string{"Birthdays"},
selectors.PrefixMatch())[0],
selectors.PrefixMatch(),
)[0],
},
}

View File

@ -95,18 +95,18 @@ func filterContainersAndFillCollections(
added, removed, newDelta, err := getter.GetAddedAndRemovedItemIDs(ctx, qp.ResourceOwner, cID, prevDelta)
if err != nil {
// note == nil check; only catches non-inFlight error cases.
if graph.IsErrDeletedInFlight(err) == nil {
errs = support.WrapAndAppend(qp.ResourceOwner, err, errs)
} else {
// race conditions happen, containers might get deleted while
// this process is in flight. If that happens, force the collection
// to reset which will prevent any old items from being retained in
// storage. If the container (or its children) are sill missing
// on the next backup, they'll get tombstoned.
newDelta = api.DeltaUpdate{Reset: true}
continue
}
continue
// race conditions happen, containers might get deleted while
// this process is in flight. If that happens, force the collection
// to reset. This prevents any old items from being retained in
// storage. If the container (or its children) are still missing
// on the next backup, they'll get tombstoned.
newDelta = api.DeltaUpdate{Reset: true}
}
if len(newDelta.URL) > 0 {

View File

@ -0,0 +1,677 @@
package exchange
import (
"context"
"testing"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"github.com/alcionai/corso/src/internal/common"
"github.com/alcionai/corso/src/internal/connector/exchange/api"
"github.com/alcionai/corso/src/internal/connector/graph"
"github.com/alcionai/corso/src/internal/connector/support"
"github.com/alcionai/corso/src/internal/data"
"github.com/alcionai/corso/src/internal/tester"
"github.com/alcionai/corso/src/pkg/account"
"github.com/alcionai/corso/src/pkg/control"
"github.com/alcionai/corso/src/pkg/path"
"github.com/alcionai/corso/src/pkg/selectors"
)
// ---------------------------------------------------------------------------
// mocks
// ---------------------------------------------------------------------------
var _ addedAndRemovedItemIDsGetter = &mockGetter{}
type (
mockGetter map[string]mockGetterResults
mockGetterResults struct {
added []string
removed []string
newDelta api.DeltaUpdate
err error
}
)
func (mg mockGetter) GetAddedAndRemovedItemIDs(
ctx context.Context,
userID, cID, prevDelta string,
) (
[]string,
[]string,
api.DeltaUpdate,
error,
) {
results, ok := mg[cID]
if !ok {
return nil, nil, api.DeltaUpdate{}, errors.New("mock not found for " + cID)
}
return results.added, results.removed, results.newDelta, results.err
}
var _ graph.ContainerResolver = &mockResolver{}
type (
mockResolver struct {
items []graph.CachedContainer
}
)
func newMockResolver(items ...mockContainer) mockResolver {
is := make([]graph.CachedContainer, 0, len(items))
for _, i := range items {
is = append(is, i)
}
return mockResolver{items: is}
}
func (m mockResolver) Items() []graph.CachedContainer {
return m.items
}
func (m mockResolver) AddToCache(context.Context, graph.Container) error { return nil }
func (m mockResolver) IDToPath(context.Context, string) (*path.Builder, error) { return nil, nil }
func (m mockResolver) PathInCache(string) (string, bool) { return "", false }
func (m mockResolver) Populate(context.Context, string, ...string) error { return nil }
// ---------------------------------------------------------------------------
// tests
// ---------------------------------------------------------------------------
type ServiceIteratorsSuite struct {
suite.Suite
creds account.M365Config
}
func TestServiceIteratorsSuite(t *testing.T) {
suite.Run(t, new(ServiceIteratorsSuite))
}
func (suite *ServiceIteratorsSuite) SetupSuite() {
a := tester.NewMockM365Account(suite.T())
m365, err := a.M365Config()
require.NoError(suite.T(), err)
suite.creds = m365
}
func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections() {
var (
userID = "user_id"
qp = graph.QueryParams{
Category: path.EmailCategory, // doesn't matter which one we use.
ResourceOwner: userID,
Credentials: suite.creds,
}
statusUpdater = func(*support.ConnectorOperationStatus) {}
allScope = selectors.NewExchangeBackup(nil).MailFolders(selectors.Any())[0]
dps = DeltaPaths{} // incrementals are tested separately
commonResult = mockGetterResults{
added: []string{"a1", "a2", "a3"},
removed: []string{"r1", "r2", "r3"},
newDelta: api.DeltaUpdate{URL: "delta_url"},
}
errorResult = mockGetterResults{
added: []string{"a1", "a2", "a3"},
removed: []string{"r1", "r2", "r3"},
newDelta: api.DeltaUpdate{URL: "delta_url"},
err: assert.AnError,
}
deletedInFlightResult = mockGetterResults{
added: []string{"a1", "a2", "a3"},
removed: []string{"r1", "r2", "r3"},
newDelta: api.DeltaUpdate{URL: "delta_url"},
err: graph.ErrDeletedInFlight{Err: *common.EncapsulateError(assert.AnError)},
}
container1 = mockContainer{
id: strPtr("1"),
displayName: strPtr("display_name_1"),
p: path.Builder{}.Append("display_name_1"),
}
container2 = mockContainer{
id: strPtr("2"),
displayName: strPtr("display_name_2"),
p: path.Builder{}.Append("display_name_2"),
}
)
table := []struct {
name string
getter mockGetter
resolver graph.ContainerResolver
scope selectors.ExchangeScope
failFast bool
expectErr assert.ErrorAssertionFunc
expectNewColls int
expectMetadataColls int
expectDoNotMergeColls int
}{
{
name: "happy path, one container",
getter: map[string]mockGetterResults{
"1": commonResult,
},
resolver: newMockResolver(container1),
scope: allScope,
expectErr: assert.NoError,
expectNewColls: 1,
expectMetadataColls: 1,
},
{
name: "happy path, many containers",
getter: map[string]mockGetterResults{
"1": commonResult,
"2": commonResult,
},
resolver: newMockResolver(container1, container2),
scope: allScope,
expectErr: assert.NoError,
expectNewColls: 2,
expectMetadataColls: 1,
},
{
name: "happy path, many containers, same display name",
getter: map[string]mockGetterResults{
"1": commonResult,
"2": commonResult,
},
resolver: newMockResolver(container1, container2),
scope: allScope,
expectErr: assert.NoError,
expectNewColls: 2,
expectMetadataColls: 1,
},
{
name: "no containers pass scope",
getter: map[string]mockGetterResults{
"1": commonResult,
"2": commonResult,
},
resolver: newMockResolver(container1, container2),
scope: selectors.NewExchangeBackup(nil).MailFolders(selectors.None())[0],
expectErr: assert.NoError,
expectNewColls: 0,
expectMetadataColls: 1,
},
{
name: "err: deleted in flight",
getter: map[string]mockGetterResults{
"1": deletedInFlightResult,
},
resolver: newMockResolver(container1),
scope: allScope,
expectErr: assert.NoError,
expectNewColls: 1,
expectMetadataColls: 1,
expectDoNotMergeColls: 1,
},
{
name: "err: other error",
getter: map[string]mockGetterResults{
"1": errorResult,
},
resolver: newMockResolver(container1),
scope: allScope,
expectErr: assert.Error,
expectNewColls: 0,
expectMetadataColls: 1,
},
{
name: "half collections error: deleted in flight",
getter: map[string]mockGetterResults{
"1": deletedInFlightResult,
"2": commonResult,
},
resolver: newMockResolver(container1, container2),
scope: allScope,
expectErr: assert.NoError,
expectNewColls: 2,
expectMetadataColls: 1,
expectDoNotMergeColls: 1,
},
{
name: "half collections error: other error",
getter: map[string]mockGetterResults{
"1": errorResult,
"2": commonResult,
},
resolver: newMockResolver(container1, container2),
scope: allScope,
expectErr: assert.Error,
expectNewColls: 1,
expectMetadataColls: 1,
},
{
name: "half collections error: deleted in flight, fail fast",
getter: map[string]mockGetterResults{
"1": deletedInFlightResult,
"2": commonResult,
},
resolver: newMockResolver(container1, container2),
scope: allScope,
failFast: true,
expectErr: assert.NoError,
expectNewColls: 2,
expectMetadataColls: 1,
expectDoNotMergeColls: 1,
},
{
name: "half collections error: other error, fail fast",
getter: map[string]mockGetterResults{
"1": errorResult,
"2": commonResult,
},
resolver: newMockResolver(container1, container2),
scope: allScope,
failFast: true,
expectErr: assert.Error,
expectNewColls: 0,
expectMetadataColls: 0,
},
}
for _, test := range table {
suite.T().Run(test.name, func(t *testing.T) {
ctx, flush := tester.NewContext()
defer flush()
collections := map[string]data.Collection{}
err := filterContainersAndFillCollections(
ctx,
qp,
test.getter,
collections,
statusUpdater,
test.resolver,
test.scope,
dps,
control.Options{FailFast: test.failFast},
)
test.expectErr(t, err)
// collection assertions
deleteds, news, metadatas, doNotMerges := 0, 0, 0, 0
for _, c := range collections {
if c.FullPath().Service() == path.ExchangeMetadataService {
metadatas++
continue
}
if c.State() == data.DeletedState {
deleteds++
}
if c.State() == data.NewState {
news++
}
if c.DoNotMergeItems() {
doNotMerges++
}
}
assert.Zero(t, deleteds, "deleted collections")
assert.Equal(t, test.expectNewColls, news, "new collections")
assert.Equal(t, test.expectMetadataColls, metadatas, "metadata collections")
assert.Equal(t, test.expectDoNotMergeColls, doNotMerges, "doNotMerge collections")
// items in collections assertions
for k, expect := range test.getter {
coll := collections[k]
if coll == nil {
continue
}
exColl, ok := coll.(*Collection)
require.True(t, ok, "collection is an *exchange.Collection")
assert.ElementsMatch(t, expect.added, exColl.added, "added items")
assert.ElementsMatch(t, expect.removed, exColl.removed, "removed items")
}
})
}
}
func (suite *ServiceIteratorsSuite) TestFilterContainersAndFillCollections_incrementals() {
var (
userID = "user_id"
tenantID = suite.creds.AzureTenantID
cat = path.EmailCategory // doesn't matter which one we use.
qp = graph.QueryParams{
Category: cat,
ResourceOwner: userID,
Credentials: suite.creds,
}
statusUpdater = func(*support.ConnectorOperationStatus) {}
allScope = selectors.NewExchangeBackup(nil).MailFolders(selectors.Any())[0]
commonResults = mockGetterResults{
added: []string{"added"},
newDelta: api.DeltaUpdate{URL: "new_delta_url"},
}
expiredResults = mockGetterResults{
added: []string{"added"},
newDelta: api.DeltaUpdate{
URL: "new_delta_url",
Reset: true,
},
}
)
prevPath := func(t *testing.T, at ...string) path.Path {
p, err := path.Builder{}.
Append(at...).
ToDataLayerExchangePathForCategory(tenantID, userID, cat, false)
require.NoError(t, err)
return p
}
type endState struct {
state data.CollectionState
doNotMerge bool
}
table := []struct {
name string
getter mockGetter
resolver graph.ContainerResolver
dps DeltaPaths
expect map[string]endState
}{
{
name: "new container",
getter: map[string]mockGetterResults{
"1": commonResults,
},
resolver: newMockResolver(mockContainer{
id: strPtr("1"),
displayName: strPtr("new"),
p: path.Builder{}.Append("1", "new"),
}),
dps: DeltaPaths{},
expect: map[string]endState{
"1": {data.NewState, false},
},
},
{
name: "not moved container",
getter: map[string]mockGetterResults{
"1": commonResults,
},
resolver: newMockResolver(mockContainer{
id: strPtr("1"),
displayName: strPtr("not_moved"),
p: path.Builder{}.Append("1", "not_moved"),
}),
dps: DeltaPaths{
"1": DeltaPath{
delta: "old_delta_url",
path: prevPath(suite.T(), "1", "not_moved").String(),
},
},
expect: map[string]endState{
"1": {data.NotMovedState, false},
},
},
{
name: "moved container",
getter: map[string]mockGetterResults{
"1": commonResults,
},
resolver: newMockResolver(mockContainer{
id: strPtr("1"),
displayName: strPtr("moved"),
p: path.Builder{}.Append("1", "moved"),
}),
dps: DeltaPaths{
"1": DeltaPath{
delta: "old_delta_url",
path: prevPath(suite.T(), "1", "prev").String(),
},
},
expect: map[string]endState{
"1": {data.MovedState, false},
},
},
{
name: "deleted container",
getter: map[string]mockGetterResults{},
resolver: newMockResolver(),
dps: DeltaPaths{
"1": DeltaPath{
delta: "old_delta_url",
path: prevPath(suite.T(), "1", "deleted").String(),
},
},
expect: map[string]endState{
"1": {data.DeletedState, false},
},
},
{
name: "one deleted, one new",
getter: map[string]mockGetterResults{
"2": commonResults,
},
resolver: newMockResolver(mockContainer{
id: strPtr("2"),
displayName: strPtr("new"),
p: path.Builder{}.Append("2", "new"),
}),
dps: DeltaPaths{
"1": DeltaPath{
delta: "old_delta_url",
path: prevPath(suite.T(), "1", "deleted").String(),
},
},
expect: map[string]endState{
"1": {data.DeletedState, false},
"2": {data.NewState, false},
},
},
{
name: "one deleted, one new, same path",
getter: map[string]mockGetterResults{
"2": commonResults,
},
resolver: newMockResolver(mockContainer{
id: strPtr("2"),
displayName: strPtr("same"),
p: path.Builder{}.Append("2", "same"),
}),
dps: DeltaPaths{
"1": DeltaPath{
delta: "old_delta_url",
path: prevPath(suite.T(), "1", "same").String(),
},
},
expect: map[string]endState{
"1": {data.DeletedState, false},
"2": {data.NewState, false},
},
},
{
name: "one moved, one new, same path",
getter: map[string]mockGetterResults{
"1": commonResults,
"2": commonResults,
},
resolver: newMockResolver(
mockContainer{
id: strPtr("1"),
displayName: strPtr("moved"),
p: path.Builder{}.Append("1", "moved"),
},
mockContainer{
id: strPtr("2"),
displayName: strPtr("prev"),
p: path.Builder{}.Append("2", "prev"),
},
),
dps: DeltaPaths{
"1": DeltaPath{
delta: "old_delta_url",
path: prevPath(suite.T(), "1", "prev").String(),
},
},
expect: map[string]endState{
"1": {data.MovedState, false},
"2": {data.NewState, false},
},
},
{
name: "bad previous path strings",
getter: map[string]mockGetterResults{
"1": commonResults,
},
resolver: newMockResolver(mockContainer{
id: strPtr("1"),
displayName: strPtr("not_moved"),
p: path.Builder{}.Append("1", "not_moved"),
}),
dps: DeltaPaths{
"1": DeltaPath{
delta: "old_delta_url",
path: "1/fnords/mc/smarfs",
},
"2": DeltaPath{
delta: "old_delta_url",
path: "2/fnords/mc/smarfs",
},
},
expect: map[string]endState{
"1": {data.NewState, false},
},
},
{
name: "delta expiration",
getter: map[string]mockGetterResults{
"1": expiredResults,
},
resolver: newMockResolver(mockContainer{
id: strPtr("1"),
displayName: strPtr("same"),
p: path.Builder{}.Append("1", "same"),
}),
dps: DeltaPaths{
"1": DeltaPath{
delta: "old_delta_url",
path: prevPath(suite.T(), "1", "same").String(),
},
},
expect: map[string]endState{
"1": {data.NotMovedState, true},
},
},
{
name: "a little bit of everything",
getter: map[string]mockGetterResults{
"1": commonResults, // new
"2": commonResults, // notMoved
"3": commonResults, // moved
"4": expiredResults, // moved
// "5" gets deleted
},
resolver: newMockResolver(
mockContainer{
id: strPtr("1"),
displayName: strPtr("new"),
p: path.Builder{}.Append("1", "new"),
},
mockContainer{
id: strPtr("2"),
displayName: strPtr("not_moved"),
p: path.Builder{}.Append("2", "not_moved"),
},
mockContainer{
id: strPtr("3"),
displayName: strPtr("moved"),
p: path.Builder{}.Append("3", "moved"),
},
mockContainer{
id: strPtr("4"),
displayName: strPtr("moved"),
p: path.Builder{}.Append("4", "moved"),
},
),
dps: DeltaPaths{
"2": DeltaPath{
delta: "old_delta_url",
path: prevPath(suite.T(), "2", "not_moved").String(),
},
"3": DeltaPath{
delta: "old_delta_url",
path: prevPath(suite.T(), "3", "prev").String(),
},
"4": DeltaPath{
delta: "old_delta_url",
path: prevPath(suite.T(), "4", "prev").String(),
},
"5": DeltaPath{
delta: "old_delta_url",
path: prevPath(suite.T(), "5", "deleted").String(),
},
},
expect: map[string]endState{
"1": {data.NewState, false},
"2": {data.NotMovedState, false},
"3": {data.MovedState, false},
"4": {data.MovedState, true},
"5": {data.DeletedState, false},
},
},
}
for _, test := range table {
suite.T().Run(test.name, func(t *testing.T) {
ctx, flush := tester.NewContext()
defer flush()
collections := map[string]data.Collection{}
err := filterContainersAndFillCollections(
ctx,
qp,
test.getter,
collections,
statusUpdater,
test.resolver,
allScope,
test.dps,
control.Options{},
)
assert.NoError(t, err)
metadatas := 0
for _, c := range collections {
p := c.FullPath()
if p == nil {
p = c.PreviousPath()
}
require.NotNil(t, p)
if p.Service() == path.ExchangeMetadataService {
metadatas++
continue
}
p0 := p.Folders()[0]
expect, ok := test.expect[p0]
assert.True(t, ok, "collection is expected in result")
assert.Equalf(t, expect.state, c.State(), "collection %s state", p0)
assert.Equalf(t, expect.doNotMerge, c.DoNotMergeItems(), "collection %s DoNotMergeItems", p0)
}
assert.Equal(t, 1, metadatas, "metadata collections")
})
}
}

View File

@ -24,7 +24,7 @@ func TestGraphUnitSuite(t *testing.T) {
func (suite *GraphUnitSuite) SetupSuite() {
t := suite.T()
a := tester.NewM365Account(t)
a := tester.NewMockM365Account(t)
m365, err := a.M365Config()
require.NoError(t, err)

View File

@ -167,30 +167,6 @@ func (suite *DisconnectedGraphConnectorSuite) TestGraphConnector_ErrorChecking()
}
}
func (suite *DisconnectedGraphConnectorSuite) TestRestoreFailsBadService() {
ctx, flush := tester.NewContext()
defer flush()
var (
t = suite.T()
acct = tester.NewM365Account(t)
dest = tester.DefaultTestRestoreDestination()
gc = GraphConnector{wg: &sync.WaitGroup{}}
sel = selectors.Selector{
Service: selectors.ServiceUnknown,
}
)
deets, err := gc.RestoreDataCollections(ctx, acct, sel, dest, nil)
assert.Error(t, err)
assert.NotNil(t, deets)
status := gc.AwaitStatus()
assert.Equal(t, 0, status.ObjectCount)
assert.Equal(t, 0, status.FolderCount)
assert.Equal(t, 0, status.Successful)
}
func (suite *DisconnectedGraphConnectorSuite) TestVerifyBackupInputs() {
users := []string{
"elliotReid@someHospital.org",

View File

@ -212,6 +212,29 @@ func (suite *GraphConnectorIntegrationSuite) TestSetTenantSites() {
}
}
func (suite *GraphConnectorIntegrationSuite) TestRestoreFailsBadService() {
ctx, flush := tester.NewContext()
defer flush()
var (
t = suite.T()
acct = tester.NewM365Account(t)
dest = tester.DefaultTestRestoreDestination()
sel = selectors.Selector{
Service: selectors.ServiceUnknown,
}
)
deets, err := suite.connector.RestoreDataCollections(ctx, acct, sel, dest, nil)
assert.Error(t, err)
assert.NotNil(t, deets)
status := suite.connector.AwaitStatus()
assert.Equal(t, 0, status.ObjectCount)
assert.Equal(t, 0, status.FolderCount)
assert.Equal(t, 0, status.Successful)
}
func (suite *GraphConnectorIntegrationSuite) TestEmptyCollections() {
dest := tester.DefaultTestRestoreDestination()
table := []struct {

View File

@ -94,7 +94,7 @@ func driveItemReader(
// doesn't have its size value updated as a side effect of creation,
// and kiota drops any SetSize update.
func oneDriveItemInfo(di models.DriveItemable, itemSize int64) *details.OneDriveInfo {
email := ""
var email, parent string
if di.GetCreatedBy().GetUser() != nil {
// User is sometimes not available when created via some
@ -105,13 +105,21 @@ func oneDriveItemInfo(di models.DriveItemable, itemSize int64) *details.OneDrive
}
}
if di.GetParentReference() != nil {
if di.GetParentReference().GetName() != nil {
// EndPoint is not always populated from external apps
parent = *di.GetParentReference().GetName()
}
}
return &details.OneDriveInfo{
ItemType: details.OneDriveItem,
ItemName: *di.GetName(),
Created: *di.GetCreatedDateTime(),
Modified: *di.GetLastModifiedDateTime(),
Size: itemSize,
Owner: email,
ItemType: details.OneDriveItem,
ItemName: *di.GetName(),
Created: *di.GetCreatedDateTime(),
Modified: *di.GetLastModifiedDateTime(),
DriveName: parent,
Size: itemSize,
Owner: email,
}
}

View File

@ -25,6 +25,12 @@ type SharePointCollectionSuite struct {
}
func TestSharePointCollectionSuite(t *testing.T) {
tester.RunOnAny(
t,
tester.CorsoCITests,
tester.CorsoGraphConnectorTests,
tester.CorsoGraphConnectorSharePointTests)
suite.Run(t, new(SharePointCollectionSuite))
}

View File

@ -32,6 +32,13 @@ type Reason struct {
Category path.CategoryType
}
func (r Reason) TagKeys() []string {
return []string{
r.ResourceOwner,
serviceCatString(r.Service, r.Category),
}
}
type ManifestEntry struct {
*snapshot.Manifest
// Reason contains the ResourceOwners and Service/Categories that caused this
@ -44,6 +51,13 @@ type ManifestEntry struct {
Reasons []Reason
}
func (me ManifestEntry) GetTag(key string) (string, bool) {
k, _ := makeTagKV(key)
v, ok := me.Tags[k]
return v, ok
}
type snapshotManager interface {
FindManifests(
ctx context.Context,
@ -68,6 +82,10 @@ func MakeServiceCat(s path.ServiceType, c path.CategoryType) (string, ServiceCat
return serviceCatString(s, c), ServiceCat{s, c}
}
// TODO(ashmrtn): Remove in a future PR.
//
//nolint:unused
//lint:ignore U1000 will be removed in future PR.
func serviceCatTag(p path.Path) string {
return serviceCatString(p.Service(), p.Category())
}
@ -82,13 +100,17 @@ func serviceCatString(s path.ServiceType, c path.CategoryType) string {
// Returns the normalized key plus a default value. If you're embedding a
// key-only tag, the returned default value must be used instead of an
// empty string.
func MakeTagKV(k string) (string, string) {
func makeTagKV(k string) (string, string) {
return userTagPrefix + k, defaultTagValue
}
// tagsFromStrings returns a map[string]string with tags for all ownersCats
// passed in. Currently uses placeholder values for each tag because there can
// be multiple instances of resource owners and categories in a single snapshot.
// TODO(ashmrtn): Remove in future PR.
//
//nolint:unused
//lint:ignore U1000 will be removed in future PR.
func tagsFromStrings(oc *OwnersCats) map[string]string {
if oc == nil {
return map[string]string{}
@ -97,12 +119,12 @@ func tagsFromStrings(oc *OwnersCats) map[string]string {
res := make(map[string]string, len(oc.ServiceCats)+len(oc.ResourceOwners))
for k := range oc.ServiceCats {
tk, tv := MakeTagKV(k)
tk, tv := makeTagKV(k)
res[tk] = tv
}
for k := range oc.ResourceOwners {
tk, tv := MakeTagKV(k)
tk, tv := makeTagKV(k)
res[tk] = tv
}
@ -188,24 +210,17 @@ func fetchPrevManifests(
ctx context.Context,
sm snapshotManager,
foundMans map[manifest.ID]*ManifestEntry,
serviceCat ServiceCat,
resourceOwner string,
reason Reason,
tags map[string]string,
) ([]*ManifestEntry, error) {
tags = normalizeTagKVs(tags)
serviceCatKey, _ := MakeServiceCat(serviceCat.Service, serviceCat.Category)
allTags := normalizeTagKVs(map[string]string{
serviceCatKey: "",
resourceOwner: "",
})
allTags := map[string]string{}
for _, k := range reason.TagKeys() {
allTags[k] = ""
}
maps.Copy(allTags, tags)
reason := Reason{
ResourceOwner: resourceOwner,
Service: serviceCat.Service,
Category: serviceCat.Category,
}
allTags = normalizeTagKVs(allTags)
metas, err := sm.FindManifests(ctx, allTags)
if err != nil {
@ -275,59 +290,54 @@ func fetchPrevManifests(
func fetchPrevSnapshotManifests(
ctx context.Context,
sm snapshotManager,
oc *OwnersCats,
reasons []Reason,
tags map[string]string,
) []*ManifestEntry {
if oc == nil {
return nil
}
mans := map[manifest.ID]*ManifestEntry{}
// For each serviceCat/resource owner pair that we will be backing up, see if
// there's a previous incomplete snapshot and/or a previous complete snapshot
// we can pass in. Can be expanded to return more than the most recent
// snapshots, but may require more memory at runtime.
for _, serviceCat := range oc.ServiceCats {
for resourceOwner := range oc.ResourceOwners {
found, err := fetchPrevManifests(
ctx,
sm,
mans,
serviceCat,
resourceOwner,
tags,
for _, reason := range reasons {
found, err := fetchPrevManifests(
ctx,
sm,
mans,
reason,
tags,
)
if err != nil {
logger.Ctx(ctx).Warnw(
"fetching previous snapshot manifests for service/category/resource owner",
"error",
err,
"service",
reason.Service.String(),
"category",
reason.Category.String(),
)
if err != nil {
logger.Ctx(ctx).Warnw(
"fetching previous snapshot manifests for service/category/resource owner",
"error",
err,
"service/category",
serviceCat,
)
// Snapshot can still complete fine, just not as efficient.
// Snapshot can still complete fine, just not as efficient.
continue
}
// If we found more recent snapshots then add them.
for _, m := range found {
man := mans[m.ID]
if man == nil {
mans[m.ID] = m
continue
}
// If we found more recent snapshots then add them.
for _, m := range found {
found := mans[m.ID]
if found == nil {
mans[m.ID] = m
continue
}
// If the manifest already exists and it's incomplete then we should
// merge the reasons for consistency. This will become easier to handle
// once we update how checkpoint manifests are tagged.
if len(found.IncompleteReason) == 0 {
continue
}
found.Reasons = append(found.Reasons, m.Reasons...)
// If the manifest already exists and it's incomplete then we should
// merge the reasons for consistency. This will become easier to handle
// once we update how checkpoint manifests are tagged.
if len(man.IncompleteReason) == 0 {
continue
}
man.Reasons = append(man.Reasons, m.Reasons...)
}
}
@ -343,7 +353,7 @@ func normalizeTagKVs(tags map[string]string) map[string]string {
t2 := make(map[string]string, len(tags))
for k, v := range tags {
mk, mv := MakeTagKV(k)
mk, mv := makeTagKV(k)
if len(v) == 0 {
v = mv
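
As a sketch of how the Reason-based tags compose after this refactor (assuming the userTagPrefix and defaultTagValue constants from this package, whose values aren't shown in the diff): a Reason expands into key-only tags, normalizeTagKVs fills in the defaults, and GetTag hides the prefixing from callers.

reason := Reason{
	ResourceOwner: "user1",
	Service:       path.ExchangeService,
	Category:      path.EmailCategory,
}

// key-only tags: values left empty on purpose
tags := map[string]string{}
for _, k := range reason.TagKeys() {
	tags[k] = ""
}

// normalizeTagKVs prefixes each key via makeTagKV and swaps empty
// values for defaultTagValue before the FindManifests query
tags = normalizeTagKVs(tags)

// a returned ManifestEntry can then be queried without knowing the
// prefix, since GetTag applies makeTagKV internally:
// v, ok := manifestEntry.GetTag("user1")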

View File

@ -29,39 +29,60 @@ var (
testID2 = manifest.ID("snap2")
testID3 = manifest.ID("snap3")
testMail = path.ExchangeService.String() + path.EmailCategory.String()
testMailServiceCat = ServiceCat{
Service: path.ExchangeService,
Category: path.EmailCategory,
}
testEvents = path.ExchangeService.String() + path.EventsCategory.String()
testEventsServiceCat = ServiceCat{
Service: path.ExchangeService,
Category: path.EventsCategory,
}
testMail = path.ExchangeService.String() + path.EmailCategory.String()
testEvents = path.ExchangeService.String() + path.EventsCategory.String()
testUser1 = "user1"
testUser2 = "user2"
testUser3 = "user3"
testAllUsersAllCats = &OwnersCats{
ResourceOwners: map[string]struct{}{
testUser1: {},
testUser2: {},
testUser3: {},
testAllUsersAllCats = []Reason{
{
ResourceOwner: testUser1,
Service: path.ExchangeService,
Category: path.EmailCategory,
},
ServiceCats: map[string]ServiceCat{
testMail: testMailServiceCat,
testEvents: testEventsServiceCat,
{
ResourceOwner: testUser1,
Service: path.ExchangeService,
Category: path.EventsCategory,
},
{
ResourceOwner: testUser2,
Service: path.ExchangeService,
Category: path.EmailCategory,
},
{
ResourceOwner: testUser2,
Service: path.ExchangeService,
Category: path.EventsCategory,
},
{
ResourceOwner: testUser3,
Service: path.ExchangeService,
Category: path.EmailCategory,
},
{
ResourceOwner: testUser3,
Service: path.ExchangeService,
Category: path.EventsCategory,
},
}
testAllUsersMail = &OwnersCats{
ResourceOwners: map[string]struct{}{
testUser1: {},
testUser2: {},
testUser3: {},
testAllUsersMail = []Reason{
{
ResourceOwner: testUser1,
Service: path.ExchangeService,
Category: path.EmailCategory,
},
ServiceCats: map[string]ServiceCat{
testMail: testMailServiceCat,
{
ResourceOwner: testUser2,
Service: path.ExchangeService,
Category: path.EmailCategory,
},
{
ResourceOwner: testUser3,
Service: path.ExchangeService,
Category: path.EmailCategory,
},
}
)
@ -87,7 +108,7 @@ func newManifestInfo(
structTags := make(map[string]struct{}, len(tags))
for _, t := range tags {
tk, _ := MakeTagKV(t)
tk, _ := makeTagKV(t)
structTags[tk] = struct{}{}
}
@ -176,7 +197,7 @@ func TestSnapshotFetchUnitSuite(t *testing.T) {
func (suite *SnapshotFetchUnitSuite) TestFetchPrevSnapshots() {
table := []struct {
name string
input *OwnersCats
input []Reason
data []manifestInfo
// Use this to denote which manifests in data should be expected. Allows
// defining data in a table while not repeating things between data and
@ -813,7 +834,7 @@ func (suite *SnapshotFetchUnitSuite) TestFetchPrevSnapshots_customTags() {
table := []struct {
name string
input *OwnersCats
input []Reason
tags map[string]string
// Use this to denote which manifests in data should be expected. Allows
// defining data in a table while not repeating things between data and

View File

@ -119,8 +119,6 @@ func (w Wrapper) BackupCollections(
ctx context.Context,
previousSnapshots []IncrementalBase,
collections []data.Collection,
service path.ServiceType,
oc *OwnersCats,
tags map[string]string,
buildTreeWithBase bool,
) (*BackupStats, *details.Builder, map[string]path.Path, error) {
@ -158,7 +156,6 @@ func (w Wrapper) BackupCollections(
ctx,
previousSnapshots,
dirTree,
oc,
tags,
progress,
)
@ -173,7 +170,6 @@ func (w Wrapper) makeSnapshotWithRoot(
ctx context.Context,
prevSnapEntries []IncrementalBase,
root fs.Directory,
oc *OwnersCats,
addlTags map[string]string,
progress *corsoProgress,
) (*BackupStats, error) {
@ -231,9 +227,10 @@ func (w Wrapper) makeSnapshotWithRoot(
return err
}
man.Tags = tagsFromStrings(oc)
man.Tags = map[string]string{}
for k, v := range addlTags {
mk, mv := MakeTagKV(k)
mk, mv := makeTagKV(k)
if len(v) == 0 {
v = mv
@ -442,12 +439,12 @@ func (w Wrapper) DeleteSnapshot(
// normalized inside the func using MakeTagKV.
func (w Wrapper) FetchPrevSnapshotManifests(
ctx context.Context,
oc *OwnersCats,
reasons []Reason,
tags map[string]string,
) ([]*ManifestEntry, error) {
if w.c == nil {
return nil, errors.WithStack(errNotConnected)
}
return fetchPrevSnapshotManifests(ctx, w.c, oc, tags), nil
return fetchPrevSnapshotManifests(ctx, w.c, reasons, tags), nil
}

View File

@ -207,39 +207,35 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections() {
),
}
k, v := MakeServiceCat(path.ExchangeService, path.EmailCategory)
oc := &OwnersCats{
ResourceOwners: map[string]struct{}{
testUser: {},
},
ServiceCats: map[string]ServiceCat{
k: v,
},
}
// tags that are expected to populate as a side effect
// of the backup process.
baseTagKeys := []string{
serviceCatTag(suite.testPath1),
suite.testPath1.ResourceOwner(),
serviceCatTag(suite.testPath2),
suite.testPath2.ResourceOwner(),
}
// tags that are supplied by the caller.
customTags := map[string]string{
// tags that are supplied by the caller. This includes basic tags to support
// lookups and extra tags the caller may want to apply.
tags := map[string]string{
"fnords": "smarf",
"brunhilda": "",
}
expectedTags := map[string]string{}
for _, k := range baseTagKeys {
tk, tv := MakeTagKV(k)
expectedTags[tk] = tv
reasons := []Reason{
{
ResourceOwner: suite.testPath1.ResourceOwner(),
Service: suite.testPath1.Service(),
Category: suite.testPath1.Category(),
},
{
ResourceOwner: suite.testPath2.ResourceOwner(),
Service: suite.testPath2.Service(),
Category: suite.testPath2.Category(),
},
}
maps.Copy(expectedTags, normalizeTagKVs(customTags))
for _, r := range reasons {
for _, k := range r.TagKeys() {
tags[k] = ""
}
}
expectedTags := map[string]string{}
maps.Copy(expectedTags, normalizeTagKVs(tags))
table := []struct {
name string
@ -266,9 +262,7 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections() {
suite.ctx,
prevSnaps,
collections,
path.ExchangeService,
oc,
customTags,
tags,
true,
)
assert.NoError(t, err)
@ -325,14 +319,15 @@ func (suite *KopiaIntegrationSuite) TestRestoreAfterCompressionChange() {
w := &Wrapper{k}
mapK, mapV := MakeServiceCat(path.ExchangeService, path.EmailCategory)
oc := &OwnersCats{
ResourceOwners: map[string]struct{}{
testUser: {},
},
ServiceCats: map[string]ServiceCat{
mapK: mapV,
},
tags := map[string]string{}
reason := Reason{
ResourceOwner: testUser,
Service: path.ExchangeService,
Category: path.EmailCategory,
}
for _, k := range reason.TagKeys() {
tags[k] = ""
}
dc1 := mockconnector.NewMockExchangeCollection(suite.testPath1, 1)
@ -348,9 +343,7 @@ func (suite *KopiaIntegrationSuite) TestRestoreAfterCompressionChange() {
ctx,
nil,
[]data.Collection{dc1, dc2},
path.ExchangeService,
oc,
nil,
tags,
true,
)
require.NoError(t, err)
@ -380,14 +373,15 @@ func (suite *KopiaIntegrationSuite) TestRestoreAfterCompressionChange() {
func (suite *KopiaIntegrationSuite) TestBackupCollections_ReaderError() {
t := suite.T()
k, v := MakeServiceCat(path.ExchangeService, path.EmailCategory)
oc := &OwnersCats{
ResourceOwners: map[string]struct{}{
testUser: {},
},
ServiceCats: map[string]ServiceCat{
k: v,
},
tags := map[string]string{}
reason := Reason{
ResourceOwner: testUser,
Service: path.ExchangeService,
Category: path.EmailCategory,
}
for _, k := range reason.TagKeys() {
tags[k] = ""
}
collections := []data.Collection{
@ -431,9 +425,7 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_ReaderError() {
suite.ctx,
nil,
collections,
path.ExchangeService,
oc,
nil,
tags,
true,
)
require.NoError(t, err)
@ -477,8 +469,6 @@ func (suite *KopiaIntegrationSuite) TestBackupCollectionsHandlesNoCollections()
ctx,
nil,
test.collections,
path.UnknownService,
&OwnersCats{},
nil,
true,
)
@ -622,23 +612,22 @@ func (suite *KopiaSimpleRepoIntegrationSuite) SetupTest() {
collections = append(collections, collection)
}
k, v := MakeServiceCat(path.ExchangeService, path.EmailCategory)
oc := &OwnersCats{
ResourceOwners: map[string]struct{}{
testUser: {},
},
ServiceCats: map[string]ServiceCat{
k: v,
},
tags := map[string]string{}
reason := Reason{
ResourceOwner: testUser,
Service: path.ExchangeService,
Category: path.EmailCategory,
}
for _, k := range reason.TagKeys() {
tags[k] = ""
}
stats, deets, _, err := suite.w.BackupCollections(
suite.ctx,
nil,
collections,
path.ExchangeService,
oc,
nil,
tags,
false,
)
require.NoError(t, err)

View File

@ -6,6 +6,7 @@ import (
"github.com/google/uuid"
multierror "github.com/hashicorp/go-multierror"
"github.com/kopia/kopia/repo/manifest"
"github.com/pkg/errors"
"github.com/alcionai/corso/src/internal/common"
@ -114,7 +115,7 @@ func (op *BackupOperation) Run(ctx context.Context) (err error) {
tenantID = op.account.ID()
startTime = time.Now()
detailsStore = streamstore.New(op.kopia, tenantID, op.Selectors.PathService())
oc = selectorToOwnersCats(op.Selectors)
reasons = selectorToReasons(op.Selectors)
uib = useIncrementalBackup(op.Selectors, op.Options)
)
@ -150,7 +151,14 @@ func (op *BackupOperation) Run(ctx context.Context) (err error) {
}
}()
mans, mdColls, err := produceManifestsAndMetadata(ctx, op.kopia, op.store, oc, tenantID, uib)
mans, mdColls, canUseMetaData, err := produceManifestsAndMetadata(
ctx,
op.kopia,
op.store,
reasons,
tenantID,
uib,
)
if err != nil {
opStats.readErr = errors.Wrap(err, "connecting to M365")
return opStats.readErr
@ -172,12 +180,11 @@ func (op *BackupOperation) Run(ctx context.Context) (err error) {
ctx,
op.kopia,
tenantID,
op.Selectors,
oc,
reasons,
mans,
cs,
op.Results.BackupID,
uib)
uib && canUseMetaData)
if err != nil {
opStats.writeErr = errors.Wrap(err, "backing up service data")
return opStats.writeErr
@ -200,8 +207,8 @@ func (op *BackupOperation) Run(ctx context.Context) (err error) {
return opStats.writeErr
}
// TODO: should always be 1, since backups are 1:1 with resourceOwners now.
opStats.resourceCount = len(data.ResourceOwnerSet(cs))
// should always be 1, since backups are 1:1 with resourceOwners.
opStats.resourceCount = 1
opStats.started = true
opStats.gc = gc.AwaitStatus()
@ -211,7 +218,7 @@ func (op *BackupOperation) Run(ctx context.Context) (err error) {
// checker to see if conditions are correct for incremental backup behavior such as
// retrieving metadata like delta tokens and previous paths.
func useIncrementalBackup(sel selectors.Selector, opts control.Options) bool {
return opts.EnabledFeatures.ExchangeIncrementals && sel.Service == selectors.ServiceExchange
return !opts.ToggleFeatures.DisableIncrementals
}
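
With the service gate gone, every backup gets incremental behavior unless the caller turns it off. A small in-package sketch, with a placeholder mailbox address:

```go
sel := selectors.NewExchangeBackup([]string{"alice@contoso.com"}).Selector

opts := control.Defaults()
_ = useIncrementalBackup(sel, opts) // true: incrementals are on by default

opts.ToggleFeatures.DisableIncrementals = true
_ = useIncrementalBackup(sel, opts) // false: forces a full backup
```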
// ---------------------------------------------------------------------------
@ -245,22 +252,60 @@ type backuper interface {
ctx context.Context,
bases []kopia.IncrementalBase,
cs []data.Collection,
service path.ServiceType,
oc *kopia.OwnersCats,
tags map[string]string,
buildTreeWithBase bool,
) (*kopia.BackupStats, *details.Builder, map[string]path.Path, error)
}
func verifyDistinctBases(mans []*kopia.ManifestEntry) error {
var (
errs *multierror.Error
reasons = map[string]manifest.ID{}
)
for _, man := range mans {
// Incomplete snapshots are used only for kopia-assisted incrementals. The
// fact that we need this check here makes it seem like this should live in
// the kopia code. However, keeping it here allows for better debugging as
// the kopia code only has access to a path builder which means it cannot
// remove the resource owner from the error/log output. That is also below
// the point where we decide if we should do a full backup or an
// incremental.
if len(man.IncompleteReason) > 0 {
continue
}
for _, reason := range man.Reasons {
reasonKey := reason.ResourceOwner + reason.Service.String() + reason.Category.String()
if b, ok := reasons[reasonKey]; ok {
errs = multierror.Append(errs, errors.Errorf(
"multiple base snapshots source data for %s %s. IDs: %s, %s",
reason.Service.String(),
reason.Category.String(),
b,
man.ID,
))
continue
}
reasons[reasonKey] = man.ID
}
}
return errs.ErrorOrNil()
}
// calls kopia to retrieve prior backup manifests and metadata collections to supply backup heuristics.
func produceManifestsAndMetadata(
ctx context.Context,
kw *kopia.Wrapper,
sw *store.Wrapper,
oc *kopia.OwnersCats,
reasons []kopia.Reason,
tenantID string,
getMetadata bool,
) ([]*kopia.ManifestEntry, []data.Collection, error) {
) ([]*kopia.ManifestEntry, []data.Collection, bool, error) {
var (
metadataFiles = graph.AllMetadataFileNames()
collections []data.Collection
@ -268,14 +313,30 @@ func produceManifestsAndMetadata(
ms, err := kw.FetchPrevSnapshotManifests(
ctx,
oc,
reasons,
map[string]string{kopia.TagBackupCategory: ""})
if err != nil {
return nil, nil, err
return nil, nil, false, err
}
if !getMetadata {
return ms, nil, nil
return ms, nil, false, nil
}
// We only need to check that we have 1:1 reason:base if we're doing an
// incremental with associated metadata. This ensures that we're only sourcing
// data from a single Point-In-Time (base) for each incremental backup.
//
// TODO(ashmrtn): This may need updating if we start sourcing item backup
// details from previous snapshots when using kopia-assisted incrementals.
if err := verifyDistinctBases(ms); err != nil {
logger.Ctx(ctx).Warnw(
"base snapshot collision, falling back to full backup",
"error",
err,
)
return ms, nil, false, nil
}
for _, man := range ms {
@ -283,18 +344,51 @@ func produceManifestsAndMetadata(
continue
}
bID, ok := man.GetTag(kopia.TagBackupID)
if !ok {
return nil, nil, false, errors.New("snapshot manifest missing backup ID")
}
dID, _, err := sw.GetDetailsIDFromBackupID(ctx, model.StableID(bID))
if err != nil {
// if no backup exists for any of the complete manifests, we want
// to fall back to a complete backup.
if errors.Is(err, kopia.ErrNotFound) {
logger.Ctx(ctx).Infow(
"backup missing, falling back to full backup",
"backup_id", bID)
return ms, nil, false, nil
}
return nil, nil, false, errors.Wrap(err, "retrieving prior backup data")
}
// if no detailsID exists for any of the complete manifests, we want
// to fall back to a complete backup. This is a temporary prevention
// mechanism to keep backups from falling into a perpetually bad state.
// This makes an assumption that the ID points to a populated set of
// details; we aren't doing the work to look them up.
if len(dID) == 0 {
logger.Ctx(ctx).Infow(
"backup missing details ID, falling back to full backup",
"backup_id", bID)
return ms, nil, false, nil
}
colls, err := collectMetadata(ctx, kw, man, metadataFiles, tenantID)
if err != nil && !errors.Is(err, kopia.ErrNotFound) {
// prior metadata isn't guaranteed to exist.
// if it doesn't, we'll just have to do a
// full backup for that data.
return nil, nil, err
return nil, nil, false, err
}
collections = append(collections, colls...)
}
return ms, collections, err
return ms, collections, true, err
}
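
Read together with Run, the new boolean gives a single downgrade point. A sketch restating the call site from the hunks above (no new behavior):

```go
mans, mdColls, canUseMetadata, err := produceManifestsAndMetadata(
	ctx, op.kopia, op.store, reasons, tenantID, uib)
if err != nil {
	return errors.Wrap(err, "connecting to M365")
}

// Base collisions, missing backup models, and missing details IDs all land
// here as canUseMetadata == false. The manifests still ride along, but the
// snapshot is built without treating them as a trusted metadata source.
isIncremental := uib && canUseMetadata
```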
func collectMetadata(
@ -335,28 +429,28 @@ func collectMetadata(
return dcs, nil
}
func selectorToOwnersCats(sel selectors.Selector) *kopia.OwnersCats {
func selectorToReasons(sel selectors.Selector) []kopia.Reason {
service := sel.PathService()
oc := &kopia.OwnersCats{
ResourceOwners: map[string]struct{}{},
ServiceCats: map[string]kopia.ServiceCat{},
}
oc.ResourceOwners[sel.DiscreteOwner] = struct{}{}
reasons := []kopia.Reason{}
pcs, err := sel.PathCategories()
if err != nil {
return &kopia.OwnersCats{}
// This is technically safe, it's just that the resulting backup won't be
// usable as a base for future incremental backups.
return nil
}
for _, sl := range [][]path.CategoryType{pcs.Includes, pcs.Filters} {
for _, cat := range sl {
k, v := kopia.MakeServiceCat(service, cat)
oc.ServiceCats[k] = v
reasons = append(reasons, kopia.Reason{
ResourceOwner: sel.DiscreteOwner,
Service: service,
Category: cat,
})
}
}
return oc
return reasons
}
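
For example, a single-user Exchange selector fans out into one Reason per included category. The exact category set behind AllData is an assumption here, and the owner address is a placeholder:

```go
sel := selectors.NewExchangeBackup([]string{"alice@contoso.com"})
sel.Include(sel.AllData())

// likely yields, for the one discrete owner:
//   {alice@contoso.com, exchange, email}
//   {alice@contoso.com, exchange, contacts}
//   {alice@contoso.com, exchange, events}
reasons := selectorToReasons(sel.Selector)
```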
func builderFromReason(tenant string, r kopia.Reason) (*path.Builder, error) {
@ -387,8 +481,7 @@ func consumeBackupDataCollections(
ctx context.Context,
bu backuper,
tenantID string,
sel selectors.Selector,
oc *kopia.OwnersCats,
reasons []kopia.Reason,
mans []*kopia.ManifestEntry,
cs []data.Collection,
backupID model.StableID,
@ -406,6 +499,12 @@ func consumeBackupDataCollections(
kopia.TagBackupCategory: "",
}
for _, reason := range reasons {
for _, k := range reason.TagKeys() {
tags[k] = ""
}
}
bases := make([]kopia.IncrementalBase, 0, len(mans))
for _, m := range mans {
@ -426,7 +525,7 @@ func consumeBackupDataCollections(
})
}
return bu.BackupCollections(ctx, bases, cs, sel.PathService(), oc, tags, isIncremental)
return bu.BackupCollections(ctx, bases, cs, tags, isIncremental)
}
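
The TagKeys loop above replaces the hand-built baseTagKeys list seen in the wrapper tests. A hypothetical shape for Reason.TagKeys, inferred from those tests (one resource owner key plus one service+category key); the real implementation lives in the kopia package:

```go
// hypothetical sketch, not the actual kopia package code
func (r Reason) TagKeys() []string {
	return []string{
		r.ResourceOwner,
		r.Service.String() + r.Category.String(),
	}
}
```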
func matchesReason(reasons []kopia.Reason, p path.Path) bool {
@ -464,8 +563,10 @@ func mergeDetails(
continue
}
k, _ := kopia.MakeTagKV(kopia.TagBackupID)
bID := man.Tags[k]
bID, ok := man.GetTag(kopia.TagBackupID)
if !ok {
return errors.Errorf("no backup ID in snapshot manifest with ID %s", man.ID)
}
_, baseDeets, err := getBackupAndDetailsFromID(
ctx,

View File

@ -52,7 +52,7 @@ func prepNewTestBackupOp(
ctx context.Context,
bus events.Eventer,
sel selectors.Selector,
featureFlags control.FeatureFlags,
featureToggles control.Toggles,
) (BackupOperation, account.Account, *kopia.Wrapper, *kopia.ModelStore, func()) {
//revive:enable:context-as-argument
acct := tester.NewM365Account(t)
@ -90,7 +90,7 @@ func prepNewTestBackupOp(
ms.Close(ctx)
}
bo := newTestBackupOp(t, ctx, kw, ms, acct, sel, bus, featureFlags, closer)
bo := newTestBackupOp(t, ctx, kw, ms, acct, sel, bus, featureToggles, closer)
return bo, acct, kw, ms, closer
}
@ -109,7 +109,7 @@ func newTestBackupOp(
acct account.Account,
sel selectors.Selector,
bus events.Eventer,
featureFlags control.FeatureFlags,
featureToggles control.Toggles,
closer func(),
) BackupOperation {
//revive:enable:context-as-argument
@ -118,7 +118,7 @@ func newTestBackupOp(
opts = control.Options{}
)
opts.EnabledFeatures = featureFlags
opts.ToggleFeatures = featureToggles
bo, err := NewBackupOperation(ctx, opts, kw, sw, acct, sel, bus)
if !assert.NoError(t, err) {
@ -176,21 +176,27 @@ func checkBackupIsInManifests(
for _, category := range categories {
t.Run(category.String(), func(t *testing.T) {
var (
sck, scv = kopia.MakeServiceCat(sel.PathService(), category)
oc = &kopia.OwnersCats{
ResourceOwners: map[string]struct{}{resourceOwner: {}},
ServiceCats: map[string]kopia.ServiceCat{sck: scv},
reasons = []kopia.Reason{
{
ResourceOwner: resourceOwner,
Service: sel.PathService(),
Category: category,
},
}
tags = map[string]string{kopia.TagBackupCategory: ""}
found bool
)
mans, err := kw.FetchPrevSnapshotManifests(ctx, oc, tags)
mans, err := kw.FetchPrevSnapshotManifests(ctx, reasons, tags)
require.NoError(t, err)
for _, man := range mans {
tk, _ := kopia.MakeTagKV(kopia.TagBackupID)
if man.Tags[tk] == string(bo.Results.BackupID) {
bID, ok := man.GetTag(kopia.TagBackupID)
if !assert.Truef(t, ok, "snapshot manifest %s missing backup ID tag", man.ID) {
continue
}
if bID == string(bo.Results.BackupID) {
found = true
break
}
@ -554,7 +560,7 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_exchange() {
var (
mb = evmock.NewBus()
sel = test.selector().Selector
ffs = control.FeatureFlags{ExchangeIncrementals: test.runIncremental}
ffs = control.Toggles{}
)
bo, acct, kw, ms, closer := prepNewTestBackupOp(t, ctx, mb, sel, ffs)
@ -630,7 +636,7 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_exchangeIncrementals() {
var (
t = suite.T()
acct = tester.NewM365Account(t)
ffs = control.FeatureFlags{ExchangeIncrementals: true}
ffs = control.Toggles{}
mb = evmock.NewBus()
now = common.Now()
users = []string{suite.user}
@ -1010,7 +1016,7 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_oneDrive() {
sel.Include(sel.AllData())
bo, _, _, _, closer := prepNewTestBackupOp(t, ctx, mb, sel.Selector, control.FeatureFlags{})
bo, _, _, _, closer := prepNewTestBackupOp(t, ctx, mb, sel.Selector, control.Toggles{})
defer closer()
runAndCheckBackup(t, ctx, &bo, mb)
@ -1032,7 +1038,7 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_sharePoint() {
sel.Include(sel.AllData())
bo, _, _, _, closer := prepNewTestBackupOp(t, ctx, mb, sel.Selector, control.FeatureFlags{})
bo, _, _, _, closer := prepNewTestBackupOp(t, ctx, mb, sel.Selector, control.Toggles{})
defer closer()
runAndCheckBackup(t, ctx, &bo, mb)

View File

@ -61,8 +61,6 @@ type mockBackuper struct {
checkFunc func(
bases []kopia.IncrementalBase,
cs []data.Collection,
service path.ServiceType,
oc *kopia.OwnersCats,
tags map[string]string,
buildTreeWithBase bool,
)
@ -72,13 +70,11 @@ func (mbu mockBackuper) BackupCollections(
ctx context.Context,
bases []kopia.IncrementalBase,
cs []data.Collection,
service path.ServiceType,
oc *kopia.OwnersCats,
tags map[string]string,
buildTreeWithBase bool,
) (*kopia.BackupStats, *details.Builder, map[string]path.Path, error) {
if mbu.checkFunc != nil {
mbu.checkFunc(bases, cs, service, oc, tags, buildTreeWithBase)
mbu.checkFunc(bases, cs, tags, buildTreeWithBase)
}
return &kopia.BackupStats{}, &details.Builder{}, nil, nil
@ -280,14 +276,20 @@ func makeDetailsEntry(
return res
}
// TODO(ashmrtn): This should belong to some code that lives in the kopia
// package that is only compiled when running tests.
func makeKopiaTagKey(k string) string {
return "tag:" + k
}
func makeManifest(t *testing.T, backupID model.StableID, incompleteReason string) *snapshot.Manifest {
t.Helper()
backupIDTagKey, _ := kopia.MakeTagKV(kopia.TagBackupID)
tagKey := makeKopiaTagKey(kopia.TagBackupID)
return &snapshot.Manifest{
Tags: map[string]string{
backupIDTagKey: string(backupID),
tagKey: string(backupID),
},
IncompleteReason: incompleteReason,
}
@ -386,6 +388,137 @@ func (suite *BackupOpSuite) TestBackupOperation_PersistResults() {
}
}
func (suite *BackupOpSuite) TestBackupOperation_VerifyDistinctBases() {
const user = "a-user"
table := []struct {
name string
input []*kopia.ManifestEntry
errCheck assert.ErrorAssertionFunc
}{
{
name: "SingleManifestMultipleReasons",
input: []*kopia.ManifestEntry{
{
Manifest: &snapshot.Manifest{
ID: "id1",
},
Reasons: []kopia.Reason{
{
ResourceOwner: user,
Service: path.ExchangeService,
Category: path.EmailCategory,
},
{
ResourceOwner: user,
Service: path.ExchangeService,
Category: path.EventsCategory,
},
},
},
},
errCheck: assert.NoError,
},
{
name: "MultipleManifestsDistinctReason",
input: []*kopia.ManifestEntry{
{
Manifest: &snapshot.Manifest{
ID: "id1",
},
Reasons: []kopia.Reason{
{
ResourceOwner: user,
Service: path.ExchangeService,
Category: path.EmailCategory,
},
},
},
{
Manifest: &snapshot.Manifest{
ID: "id2",
},
Reasons: []kopia.Reason{
{
ResourceOwner: user,
Service: path.ExchangeService,
Category: path.EventsCategory,
},
},
},
},
errCheck: assert.NoError,
},
{
name: "MultipleManifestsSameReason",
input: []*kopia.ManifestEntry{
{
Manifest: &snapshot.Manifest{
ID: "id1",
},
Reasons: []kopia.Reason{
{
ResourceOwner: user,
Service: path.ExchangeService,
Category: path.EmailCategory,
},
},
},
{
Manifest: &snapshot.Manifest{
ID: "id2",
},
Reasons: []kopia.Reason{
{
ResourceOwner: user,
Service: path.ExchangeService,
Category: path.EmailCategory,
},
},
},
},
errCheck: assert.Error,
},
{
name: "MultipleManifestsSameReasonOneIncomplete",
input: []*kopia.ManifestEntry{
{
Manifest: &snapshot.Manifest{
ID: "id1",
},
Reasons: []kopia.Reason{
{
ResourceOwner: user,
Service: path.ExchangeService,
Category: path.EmailCategory,
},
},
},
{
Manifest: &snapshot.Manifest{
ID: "id2",
IncompleteReason: "checkpoint",
},
Reasons: []kopia.Reason{
{
ResourceOwner: user,
Service: path.ExchangeService,
Category: path.EmailCategory,
},
},
},
},
errCheck: assert.NoError,
},
}
for _, test := range table {
suite.T().Run(test.name, func(t *testing.T) {
test.errCheck(t, verifyDistinctBases(test.input))
})
}
}
func (suite *BackupOpSuite) TestBackupOperation_CollectMetadata() {
var (
tenant = "a-tenant"
@ -542,8 +675,6 @@ func (suite *BackupOpSuite) TestBackupOperation_ConsumeBackupDataCollections_Pat
manifest2 = &snapshot.Manifest{
ID: "id2",
}
sel = selectors.NewExchangeBackup([]string{resourceOwner}).Selector
)
table := []struct {
@ -637,8 +768,6 @@ func (suite *BackupOpSuite) TestBackupOperation_ConsumeBackupDataCollections_Pat
checkFunc: func(
bases []kopia.IncrementalBase,
cs []data.Collection,
service path.ServiceType,
oc *kopia.OwnersCats,
tags map[string]string,
buildTreeWithBase bool,
) {
@ -651,7 +780,6 @@ func (suite *BackupOpSuite) TestBackupOperation_ConsumeBackupDataCollections_Pat
ctx,
mbu,
tenant,
sel,
nil,
test.inputMan,
nil,

View File

@ -150,7 +150,6 @@ func (op *RestoreOperation) Run(ctx context.Context) (restoreDetails *details.De
events.BackupID: op.BackupID,
events.BackupCreateTime: bup.CreationTime,
events.RestoreID: opStats.restoreID,
// TODO: restore options,
},
)

View File

@ -77,8 +77,6 @@ func (ss *streamStore) WriteBackupDetails(
ctx,
nil,
[]data.Collection{dc},
ss.service,
nil,
nil,
false,
)

View File

@ -31,3 +31,19 @@ func NewM365Account(t *testing.T) account.Account {
return acc
}
func NewMockM365Account(t *testing.T) account.Account {
acc, err := account.NewAccount(
account.ProviderM365,
account.M365Config{
M365: credentials.M365{
AzureClientID: "12345",
AzureClientSecret: "abcde",
},
AzureTenantID: "09876",
},
)
require.NoError(t, err, "initializing mock account")
return acc
}
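
A usage sketch: unit tests that only need a structurally valid account can skip real credentials entirely (the test name is illustrative):

```go
func TestAccountPlumbing_Unit(t *testing.T) {
	acct := tester.NewMockM365Account(t) // fake IDs, no env vars required

	// exercise code that validates or stores the account without
	// contacting graph.
	_ = acct
}
```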

View File

@ -8,6 +8,17 @@ import (
"github.com/stretchr/testify/require"
)
// M365TenantID returns a tenantID string representing the azureTenantID described
// by the env var AZURE_TENANT_ID, the corso_test.toml config
// file, or the default value (in that order of priority). The default is a
// last-attempt fallback that will only work on alcion's testing org.
func M365TenantID(t *testing.T) string {
cfg, err := readTestConfig()
require.NoError(t, err, "retrieving m365 user id from test configuration")
return cfg[TestCfgAzureTenantID]
}
// M365UserID returns a userID string representing the m365UserID described
// by the env var CORSO_M365_TEST_USER_ID, the corso_test.toml config
// file, or the default value (in that order of priority). The default is a

View File

@ -9,6 +9,8 @@ import (
"github.com/alcionai/corso/src/pkg/storage"
)
const testRepoRootPrefix = "corso_integration_test/"
var AWSStorageCredEnvs = []string{
credentials.AWSAccessKeyID,
credentials.AWSSecretAccessKey,
@ -31,7 +33,7 @@ func NewPrefixedS3Storage(t *testing.T) storage.Storage {
storage.ProviderS3,
storage.S3Config{
Bucket: cfg[TestCfgBucket],
Prefix: t.Name() + "-" + now,
Prefix: testRepoRootPrefix + t.Name() + "-" + now,
},
storage.CommonConfig{
Corso: credentials.GetCorso(),

View File

@ -28,8 +28,8 @@ type Backup struct {
// Status of the operation
Status string `json:"status"`
// Selectors used in this operation
Selectors selectors.Selector `json:"selectors"`
// Selector used in this operation
Selector selectors.Selector `json:"selectors"`
// stats are embedded so that the values appear as top-level properties
stats.Errs
@ -58,7 +58,7 @@ func New(
SnapshotID: snapshotID,
DetailsID: detailsID,
Status: status,
Selectors: selector,
Selector: selector,
ReadWrites: rw,
StartAndEndTime: se,
}
@ -89,14 +89,13 @@ func PrintAll(ctx context.Context, bs []*Backup) {
}
type Printable struct {
ID model.StableID `json:"id"`
ErrorCount int `json:"errorCount"`
StartedAt time.Time `json:"started at"`
Status string `json:"status"`
Version string `json:"version"`
Selectors selectors.Printable `json:"selectors"`
BytesRead int64 `json:"bytesRead"`
BytesUploaded int64 `json:"bytesUploaded"`
ID model.StableID `json:"id"`
ErrorCount int `json:"errorCount"`
StartedAt time.Time `json:"started at"`
Status string `json:"status"`
Version string `json:"version"`
BytesRead int64 `json:"bytesRead"`
BytesUploaded int64 `json:"bytesUploaded"`
}
// MinimumPrintable reduces the Backup to its minimally printable details.
@ -107,7 +106,6 @@ func (b Backup) MinimumPrintable() any {
StartedAt: b.StartedAt,
Status: b.Status,
Version: "0",
Selectors: b.Selectors.ToPrintable(),
BytesRead: b.BytesRead,
BytesUploaded: b.BytesUploaded,
}
@ -120,7 +118,7 @@ func (b Backup) Headers() []string {
"Started At",
"ID",
"Status",
"Selectors",
"Resource Owner",
}
}
@ -134,6 +132,6 @@ func (b Backup) Values() []string {
common.FormatTabularDisplayTime(b.StartedAt),
string(b.ID),
status,
b.Selectors.ToPrintable().Resources(),
b.Selector.DiscreteOwner,
}
}

View File

@ -25,7 +25,7 @@ func TestBackupSuite(t *testing.T) {
}
func stubBackup(t time.Time) backup.Backup {
sel := selectors.NewExchangeBackup(selectors.Any())
sel := selectors.NewExchangeBackup([]string{"test"})
sel.Include(sel.AllData())
return backup.Backup{
@ -39,7 +39,7 @@ func stubBackup(t time.Time) backup.Backup {
SnapshotID: "snapshot",
DetailsID: "details",
Status: "status",
Selectors: sel.Selector,
Selector: sel.Selector,
Errs: stats.Errs{
ReadErrors: errors.New("1"),
WriteErrors: errors.New("1"),
@ -66,7 +66,7 @@ func (suite *BackupSuite) TestBackup_HeadersValues() {
"Started At",
"ID",
"Status",
"Selectors",
"Resource Owner",
}
hs := b.Headers()
assert.Equal(t, expectHs, hs)
@ -76,7 +76,7 @@ func (suite *BackupSuite) TestBackup_HeadersValues() {
nowFmt,
"id",
"status (2 errors)",
selectors.All,
"test",
}
vs := b.Values()
@ -96,11 +96,6 @@ func (suite *BackupSuite) TestBackup_MinimumPrintable() {
assert.Equal(t, 2, result.ErrorCount, "error count")
assert.Equal(t, now, result.StartedAt, "started at")
assert.Equal(t, b.Status, result.Status, "status")
bselp := b.Selectors.ToPrintable()
assert.Equal(t, bselp, result.Selectors, "selectors")
assert.Equal(t, bselp.Resources(), result.Selectors.Resources(), "selector resources")
assert.Equal(t, b.BytesRead, result.BytesRead, "size")
assert.Equal(t, b.BytesUploaded, result.BytesUploaded, "stored size")
}

View File

@ -229,8 +229,7 @@ func (d *Details) addFolder(folder folderEntry) {
// DetailsEntry describes a single item stored in a Backup
type DetailsEntry struct {
// TODO: `RepoRef` is currently the full path to the item in Kopia
// This can be optimized.
// RepoRef is the full storage path of the item in Kopia
RepoRef string `json:"repoRef"`
ShortRef string `json:"shortRef"`
ParentRef string `json:"parentRef,omitempty"`
@ -520,6 +519,7 @@ func (i *SharePointInfo) UpdateParentPath(newPath path.Path) error {
type OneDriveInfo struct {
Created time.Time `json:"created,omitempty"`
ItemName string `json:"itemName"`
DriveName string `json:"driveName"`
ItemType ItemType `json:"itemType,omitempty"`
Modified time.Time `json:"modified,omitempty"`
Owner string `json:"owner,omitempty"`

View File

@ -6,17 +6,17 @@ import (
// Options holds the optional configurations for a process
type Options struct {
Collision CollisionPolicy `json:"-"`
DisableMetrics bool `json:"disableMetrics"`
FailFast bool `json:"failFast"`
EnabledFeatures FeatureFlags `json:"enabledFeatures"`
Collision CollisionPolicy `json:"-"`
DisableMetrics bool `json:"disableMetrics"`
FailFast bool `json:"failFast"`
ToggleFeatures Toggles `json:"ToggleFeatures"`
}
// Defaults provides an Options with the default values set.
func Defaults() Options {
return Options{
FailFast: true,
EnabledFeatures: FeatureFlags{},
FailFast: true,
ToggleFeatures: Toggles{},
}
}
@ -63,11 +63,15 @@ func DefaultRestoreDestination(timeFormat common.TimeFormat) RestoreDestination
}
// ---------------------------------------------------------------------------
// Feature Flags
// Feature Flags and Toggles
// ---------------------------------------------------------------------------
type FeatureFlags struct {
// ExchangeIncrementals allow for re-use of delta links when backing up
// exchange data, reducing the amount of data pulled from graph.
ExchangeIncrementals bool `json:"incrementals,omitempty"`
// Toggles allows callers to force corso to behave in ways that deviate from
// the default expectations by turning on or shutting off certain features.
// The default state for every toggle is false; toggles are only turned on
// if specified by the caller.
type Toggles struct {
// DisableIncrementals prevents backups from using incremental lookups,
// forcing a new, complete backup of all data regardless of prior state.
DisableIncrementals bool `json:"exchangeIncrementals,omitempty"`
}
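
A serialization sketch; note that in this hunk the DisableIncrementals field still carries the older `exchangeIncrementals` json tag:

```go
opts := control.Defaults()
opts.ToggleFeatures.DisableIncrementals = true

bs, _ := json.Marshal(opts)
fmt.Println(string(bs))
// {"disableMetrics":false,"failFast":true,"ToggleFeatures":{"exchangeIncrementals":true}}
```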

View File

@ -306,7 +306,7 @@ func (r repository) BackupDetails(ctx context.Context, backupID string) (*detail
deets, err := streamstore.New(
r.dataLayer,
r.Account.ID(),
b.Selectors.PathService()).ReadBackupDetails(ctx, dID)
b.Selector.PathService()).ReadBackupDetails(ctx, dID)
if err != nil {
return nil, nil, err
}

View File

@ -38,7 +38,6 @@ type (
var (
_ Reducer = &ExchangeRestore{}
_ printabler = &ExchangeRestore{}
_ pathCategorier = &ExchangeRestore{}
)
@ -110,11 +109,6 @@ func (sr ExchangeRestore) SplitByResourceOwner(users []string) []ExchangeRestore
return ss
}
// Printable creates the minimized display of a selector, formatted for human readability.
func (s exchange) Printable() Printable {
return toPrintable[ExchangeScope](s.Selector)
}
// PathCategories produces the aggregation of discrete users described by each type of scope.
func (s exchange) PathCategories() selectorPathCategories {
return selectorPathCategories{
@ -194,21 +188,6 @@ func (s *exchange) Scopes() []ExchangeScope {
return scopes[ExchangeScope](s.Selector)
}
// DiscreteScopes retrieves the list of exchangeScopes in the selector.
// If any Include scope's User category is set to Any, replaces that
// scope's value with the list of userPNs instead.
func (s *exchange) DiscreteScopes(userPNs []string) []ExchangeScope {
scopes := discreteScopes[ExchangeScope](s.Includes, ExchangeUser, userPNs)
ss := make([]ExchangeScope, 0, len(scopes))
for _, scope := range scopes {
ss = append(ss, ExchangeScope(scope))
}
return ss
}
type ExchangeItemScopeConstructor func([]string, []string, ...option) []ExchangeScope
// -------------------

View File

@ -479,49 +479,6 @@ func (suite *ExchangeSelectorSuite) TestExchangeBackup_Scopes() {
}
}
func (suite *ExchangeSelectorSuite) TestExchangeBackup_DiscreteScopes() {
usrs := []string{"u1", "u2"}
table := []struct {
name string
include []string
discrete []string
expect []string
}{
{
name: "any user",
include: Any(),
discrete: usrs,
expect: usrs,
},
{
name: "discrete user",
include: []string{"u3"},
discrete: usrs,
expect: []string{"u3"},
},
{
name: "nil discrete slice",
include: Any(),
discrete: nil,
expect: Any(),
},
}
for _, test := range table {
suite.T().Run(test.name, func(t *testing.T) {
// todo: remove discreteScopes
// eb := NewExchangeBackup(test.include)
// eb.Include(eb.AllData())
// scopes := eb.DiscreteScopes(test.discrete)
// for _, sc := range scopes {
// users := sc.Get(ExchangeUser)
// assert.Equal(t, test.expect, users)
// }
})
}
}
func (suite *ExchangeSelectorSuite) TestExchangeScope_Category() {
table := []struct {
is exchangeCategory
@ -1144,7 +1101,7 @@ func (suite *ExchangeSelectorSuite) TestPasses() {
)
var (
es = NewExchangeRestore(Any()) // TODO: move into test and compose with each test value
es = NewExchangeRestore(Any())
otherMail = setScopesToDefault(es.Mails(Any(), []string{"smarf"}))
mail = setScopesToDefault(es.Mails(Any(), []string{mid}))
noMail = setScopesToDefault(es.Mails(Any(), None()))
@ -1186,7 +1143,7 @@ func (suite *ExchangeSelectorSuite) TestContains() {
target := "fnords"
var (
es = NewExchangeRestore(Any()) // TODO: move into test and compose with each test value
es = NewExchangeRestore(Any())
noMail = setScopesToDefault(es.Mails(None(), None()))
does = setScopesToDefault(es.Mails(Any(), []string{target}))
doesNot = setScopesToDefault(es.Mails(Any(), []string{"smarf"}))
@ -1221,7 +1178,7 @@ func (suite *ExchangeSelectorSuite) TestContains() {
func (suite *ExchangeSelectorSuite) TestIsAny() {
var (
es = NewExchangeRestore(Any()) // TODO: move into test and compose with each test value
es = NewExchangeRestore(Any())
specificMail = setScopesToDefault(es.Mails(Any(), []string{"email"}))
anyMail = setScopesToDefault(es.Mails(Any(), Any()))
)

View File

@ -160,10 +160,6 @@ func stubSelector(resourceOwners []string) mockSel {
}
}
func (s mockSel) Printable() Printable {
return toPrintable[mockScope](s.Selector)
}
// ---------------------------------------------------------------------------
// helper funcs
// ---------------------------------------------------------------------------

View File

@ -37,7 +37,6 @@ type (
var (
_ Reducer = &OneDriveRestore{}
_ printabler = &OneDriveRestore{}
_ pathCategorier = &OneDriveRestore{}
)
@ -109,11 +108,6 @@ func (s OneDriveRestore) SplitByResourceOwner(users []string) []OneDriveRestore
return ss
}
// Printable creates the minimized display of a selector, formatted for human readability.
func (s oneDrive) Printable() Printable {
return toPrintable[OneDriveScope](s.Selector)
}
// PathCategories produces the aggregation of discrete users described by each type of scope.
func (s oneDrive) PathCategories() selectorPathCategories {
return selectorPathCategories{
@ -187,21 +181,6 @@ func (s *oneDrive) Scopes() []OneDriveScope {
return scopes[OneDriveScope](s.Selector)
}
// DiscreteScopes retrieves the list of oneDriveScopes in the selector.
// If any Include scope's User category is set to Any, replaces that
// scope's value with the list of userPNs instead.
func (s *oneDrive) DiscreteScopes(userPNs []string) []OneDriveScope {
scopes := discreteScopes[OneDriveScope](s.Includes, OneDriveUser, userPNs)
ss := make([]OneDriveScope, 0, len(scopes))
for _, scope := range scopes {
ss = append(ss, OneDriveScope(scope))
}
return ss
}
// -------------------
// Scope Factories

View File

@ -39,49 +39,6 @@ func (suite *OneDriveSelectorSuite) TestToOneDriveBackup() {
assert.NotZero(t, ob.Scopes())
}
func (suite *OneDriveSelectorSuite) TestOneDriveBackup_DiscreteScopes() {
usrs := []string{"u1", "u2"}
table := []struct {
name string
include []string
discrete []string
expect []string
}{
{
name: "any user",
include: Any(),
discrete: usrs,
expect: usrs,
},
{
name: "discrete user",
include: []string{"u3"},
discrete: usrs,
expect: []string{"u3"},
},
{
name: "nil discrete slice",
include: Any(),
discrete: nil,
expect: Any(),
},
}
for _, test := range table {
suite.T().Run(test.name, func(t *testing.T) {
// todo: remove discreteScopes
// eb := NewOneDriveBackup(test.include)
// eb.Include(eb.AllData())
// scopes := eb.DiscreteScopes(test.discrete)
// for _, sc := range scopes {
// users := sc.Get(OneDriveUser)
// assert.Equal(t, test.expect, users)
// }
})
}
}
func (suite *OneDriveSelectorSuite) TestOneDriveSelector_AllData() {
t := suite.T()

View File

@ -393,33 +393,6 @@ func (suite *SelectorScopesSuite) TestMatchesPathValues() {
}
}
func (suite *SelectorScopesSuite) TestAddToSet() {
t := suite.T()
set := []string{}
set = addToSet(set, []string{})
assert.Len(t, set, 0)
set = addToSet(set, []string{"a"})
assert.Len(t, set, 1)
assert.Equal(t, set[0], "a")
set = addToSet(set, []string{"a"})
assert.Len(t, set, 1)
set = addToSet(set, []string{"a", "b"})
assert.Len(t, set, 2)
assert.Equal(t, set[0], "a")
assert.Equal(t, set[1], "b")
set = addToSet(set, []string{"c", "d"})
assert.Len(t, set, 4)
assert.Equal(t, set[0], "a")
assert.Equal(t, set[1], "b")
assert.Equal(t, set[2], "c")
assert.Equal(t, set[3], "d")
}
func (suite *SelectorScopesSuite) TestClean() {
table := []struct {
name string

View File

@ -3,11 +3,9 @@ package selectors
import (
"context"
"encoding/json"
"fmt"
"strings"
"github.com/pkg/errors"
"golang.org/x/exp/maps"
"github.com/alcionai/corso/src/pkg/backup/details"
"github.com/alcionai/corso/src/pkg/filters"
@ -183,11 +181,6 @@ func splitByResourceOwner[T scopeT, C categoryT](s Selector, allOwners []string,
for _, ro := range targets {
c := s
c.DiscreteOwner = ro
// TODO: when the rootCat gets removed from the scopes, we can remove this
c.Includes = discreteScopes[T](s.Includes, rootCat, []string{ro})
c.Filters = discreteScopes[T](s.Filters, rootCat, []string{ro})
ss = append(ss, c)
}
@ -222,7 +215,6 @@ func appendScopes[T scopeT](to []scope, scopes ...[]T) []scope {
}
// scopes retrieves the list of scopes in the selector.
// future TODO: if Includes is nil, return filters.
func scopes[T scopeT](s Selector) []T {
scopes := []T{}
@ -233,38 +225,6 @@ func scopes[T scopeT](s Selector) []T {
return scopes
}
// discreteScopes retrieves the list of scopes in the selector.
// for any scope in the `Includes` set, if scope.IsAny(rootCat),
// then that category's value is replaced with the provided set of
// discrete identifiers.
// If discreteIDs is an empty slice, returns the normal scopes(s).
// future TODO: if Includes is nil, return filters.
func discreteScopes[T scopeT, C categoryT](
scopes []scope,
rootCat C,
discreteIDs []string,
) []scope {
sl := []scope{}
if len(discreteIDs) == 0 {
return scopes
}
for _, v := range scopes {
t := T(v)
if isAnyTarget(t, rootCat) {
w := maps.Clone(t)
set(w, rootCat, discreteIDs)
t = w
}
sl = append(sl, scope(t))
}
return sl
}
// Returns the path.ServiceType matching the selector service.
func (s Selector) PathService() path.ServiceType {
return serviceToPathType[s.Service]
@ -318,128 +278,6 @@ func selectorAsIface[T any](s Selector) (T, error) {
return t, err
}
// ---------------------------------------------------------------------------
// Printing Selectors for Human Reading
// ---------------------------------------------------------------------------
type Printable struct {
ResourceOwners []string `json:"resourceOwners"`
Service string `json:"service"`
Excludes map[string][]string `json:"excludes,omitempty"`
Filters map[string][]string `json:"filters,omitempty"`
Includes map[string][]string `json:"includes,omitempty"`
}
type printabler interface {
Printable() Printable
}
// ToPrintable creates the minimized display of a selector, formatted for human readability.
func (s Selector) ToPrintable() Printable {
p, err := selectorAsIface[printabler](s)
if err != nil {
return Printable{}
}
return p.Printable()
}
// toPrintable creates the minimized display of a selector, formatted for human readability.
func toPrintable[T scopeT](s Selector) Printable {
return Printable{
ResourceOwners: s.DiscreteResourceOwners(),
Service: s.Service.String(),
Excludes: toResourceTypeMap[T](s.Excludes),
Filters: toResourceTypeMap[T](s.Filters),
Includes: toResourceTypeMap[T](s.Includes),
}
}
// Resources generates a tabular-readable output of the resources in Printable.
// Only the first (arbitrarily picked) resource is displayed. All others are
// simply counted. If no inclusions exist, uses Filters. If no filters exist,
// defaults to "None".
// Resource refers to the top-level entity in the service. User for Exchange,
// Site for sharepoint, etc.
func (p Printable) Resources() string {
s := resourcesShortFormat(p.ResourceOwners)
if len(s) == 0 {
s = "None"
}
if s == AnyTgt {
s = "All"
}
return s
}
// returns a string with the resources in the map. Shortened to the first resource key,
// plus, if more exist, " (len-1 more)"
func resourcesShortFormat(ros []string) string {
switch len(ros) {
case 0:
return ""
case 1:
return ros[0]
default:
return fmt.Sprintf("%s (%d more)", ros[0], len(ros)-1)
}
}
// Transforms the slice to a single map.
// Keys are each service's rootCat value.
// Values are the set of all scopeKeyDataTypes for the resource.
func toResourceTypeMap[T scopeT](s []scope) map[string][]string {
if len(s) == 0 {
return nil
}
r := make(map[string][]string)
for _, sc := range s {
t := T(sc)
res := sc[t.categorizer().rootCat().String()]
k := res.Target
if res.Target == AnyTgt {
k = All
}
for _, sk := range split(k) {
r[sk] = addToSet(r[sk], split(sc[scopeKeyDataType].Target))
}
}
return r
}
// returns v if set is empty,
// unions v with set, otherwise.
func addToSet(set []string, v []string) []string {
if len(set) == 0 {
return v
}
for _, vv := range v {
var matched bool
for _, s := range set {
if vv == s {
matched = true
break
}
}
if !matched {
set = append(set, vv)
}
}
return set
}
// ---------------------------------------------------------------------------
// helpers
// ---------------------------------------------------------------------------

View File

@ -1,7 +1,6 @@
package selectors
import (
"strings"
"testing"
"github.com/stretchr/testify/assert"
@ -32,127 +31,6 @@ func (suite *SelectorSuite) TestBadCastErr() {
assert.Error(suite.T(), err)
}
func (suite *SelectorSuite) TestPrintable() {
t := suite.T()
sel := stubSelector(Any())
p := sel.Printable()
assert.Equal(t, sel.Service.String(), p.Service)
assert.Equal(t, 1, len(p.Excludes))
assert.Equal(t, 1, len(p.Filters))
assert.Equal(t, 1, len(p.Includes))
}
func (suite *SelectorSuite) TestPrintable_IncludedResources() {
table := []struct {
name string
resourceOwners []string
expect func(string) bool
reason string
}{
{
name: "distinct",
resourceOwners: []string{"foo", "smarf", "fnords"},
expect: func(s string) bool {
return strings.HasSuffix(s, "(2 more)")
},
reason: "should end with (2 more)",
},
{
name: "distinct",
resourceOwners: nil,
expect: func(s string) bool {
return strings.HasSuffix(s, "None")
},
reason: "no resource owners should produce None",
},
}
for _, test := range table {
suite.T().Run(test.name, func(t *testing.T) {
sel := stubSelector(test.resourceOwners)
stubWithResource := func(resource string) scope {
ss := stubScope("")
ss[rootCatStub.String()] = filterize(scopeConfig{}, resource)
return scope(ss)
}
sel.Includes = []scope{}
sel.Filters = []scope{}
for _, ro := range test.resourceOwners {
sel.Includes = append(sel.Includes, stubWithResource(ro))
sel.Filters = append(sel.Filters, stubWithResource(ro))
}
})
}
}
func (suite *SelectorSuite) TestToResourceTypeMap() {
table := []struct {
name string
input []scope
expect map[string][]string
}{
{
name: "single scope",
input: []scope{scope(stubScope(""))},
expect: map[string][]string{
"All": {rootCatStub.String()},
},
},
{
name: "disjoint resources",
input: []scope{
scope(stubScope("")),
{
rootCatStub.String(): filterize(scopeConfig{}, "smarf"),
scopeKeyDataType: filterize(scopeConfig{}, unknownCatStub.String()),
},
},
expect: map[string][]string{
"All": {rootCatStub.String()},
"smarf": {unknownCatStub.String()},
},
},
{
name: "multiple resources",
input: []scope{
scope(stubScope("")),
{
rootCatStub.String(): filterize(scopeConfig{}, join("smarf", "fnords")),
scopeKeyDataType: filterize(scopeConfig{}, unknownCatStub.String()),
},
},
expect: map[string][]string{
"All": {rootCatStub.String()},
"smarf": {unknownCatStub.String()},
"fnords": {unknownCatStub.String()},
},
},
{
name: "disjoint types",
input: []scope{
scope(stubScope("")),
{
rootCatStub.String(): filterize(scopeConfig{}, AnyTgt),
scopeKeyDataType: filterize(scopeConfig{}, "other"),
},
},
expect: map[string][]string{
"All": {rootCatStub.String(), "other"},
},
},
}
for _, test := range table {
suite.T().Run(test.name, func(t *testing.T) {
rtm := toResourceTypeMap[mockScope](test.input)
assert.Equal(t, test.expect, rtm)
})
}
}
func (suite *SelectorSuite) TestResourceOwnersIn() {
rootCat := rootCatStub.String()

View File

@ -35,7 +35,6 @@ type (
var (
_ Reducer = &SharePointRestore{}
_ printabler = &SharePointRestore{}
_ pathCategorier = &SharePointRestore{}
)
@ -107,11 +106,6 @@ func (s SharePointRestore) SplitByResourceOwner(users []string) []SharePointRest
return ss
}
// Printable creates the minimized display of a selector, formatted for human readability.
func (s sharePoint) Printable() Printable {
return toPrintable[SharePointScope](s.Selector)
}
// PathCategories produces the aggregation of discrete users described by each type of scope.
func (s sharePoint) PathCategories() selectorPathCategories {
return selectorPathCategories{
@ -185,21 +179,6 @@ func (s *sharePoint) Scopes() []SharePointScope {
return scopes[SharePointScope](s.Selector)
}
// DiscreteScopes retrieves the list of sharePointScopes in the selector.
// If any Include scope's Site category is set to Any, replaces that
// scope's value with the list of siteIDs instead.
func (s *sharePoint) DiscreteScopes(siteIDs []string) []SharePointScope {
scopes := discreteScopes[SharePointScope](s.Includes, SharePointSite, siteIDs)
ss := make([]SharePointScope, 0, len(scopes))
for _, scope := range scopes {
ss = append(ss, SharePointScope(scope))
}
return ss
}
// -------------------
// Scope Factories

View File

@ -37,49 +37,6 @@ func (suite *SharePointSelectorSuite) TestToSharePointBackup() {
assert.NotZero(t, ob.Scopes())
}
func (suite *SharePointSelectorSuite) TestSharePointBackup_DiscreteScopes() {
sites := []string{"s1", "s2"}
table := []struct {
name string
include []string
discrete []string
expect []string
}{
{
name: "any site",
include: Any(),
discrete: sites,
expect: sites,
},
{
name: "discrete sitet",
include: []string{"s3"},
discrete: sites,
expect: []string{"s3"},
},
{
name: "nil discrete slice",
include: Any(),
discrete: nil,
expect: Any(),
},
}
for _, test := range table {
suite.T().Run(test.name, func(t *testing.T) {
// todo: remove discreteScopes
// eb := NewSharePointBackup(test.include)
// eb.Include(eb.AllData())
// scopes := eb.DiscreteScopes(test.discrete)
// for _, sc := range scopes {
// sites := sc.Get(SharePointSite)
// assert.Equal(t, test.expect, sites)
// }
})
}
}
func (suite *SharePointSelectorSuite) TestSharePointSelector_AllData() {
t := suite.T()
@ -368,7 +325,7 @@ func (suite *SharePointSelectorSuite) TestSharePointCategory_PathValues() {
func (suite *SharePointSelectorSuite) TestSharePointScope_MatchesInfo() {
var (
ods = NewSharePointRestore(nil) // TODO: move into test
ods = NewSharePointRestore(nil)
host = "www.website.com"
pth = "/foo"
url = host + pth

View File

@ -156,7 +156,7 @@ Your first backup may take some time if your mailbox is large.
There will be progress indicators as the backup runs and, on completion, you should see output similar to:
```text
Started At ID Status Selectors
Started At ID Status Resource Owner
2022-10-20T18:28:53Z d8cd833a-fc63-4872-8981-de5c08e0661b Completed (0 errors) alice@contoso.com
```
@ -195,7 +195,7 @@ docker run --env-file $HOME/.corso/corso.env \\
</Tabs>
```text
Started At ID Status Selectors
Started At ID Status Resource Owner
2022-10-20T18:28:53Z d8cd833a-fc63-4872-8981-de5c08e0661b Completed (0 errors) alice@contoso.com
2022-10-20T18:40:45Z 391ceeb3-b44d-4365-9a8e-8a8e1315b565 Completed (0 errors) alice@contoso.com
...

View File

@ -16,7 +16,7 @@
"animate.css": "^4.1.1",
"clsx": "^1.2.1",
"docusaurus-plugin-image-zoom": "^0.1.1",
"docusaurus-plugin-sass": "^0.2.2",
"docusaurus-plugin-sass": "^0.2.3",
"feather-icons": "^4.29.0",
"jarallax": "^2.1.3",
"mdx-mermaid": "^1.3.2",
@ -32,7 +32,7 @@
"@docusaurus/module-type-aliases": "2.2.0",
"@iconify/react": "^4.0.1",
"autoprefixer": "^10.4.13",
"postcss": "^8.4.20",
"postcss": "^8.4.21",
"tailwindcss": "^3.2.4"
}
},
@ -6451,10 +6451,9 @@
}
},
"node_modules/docusaurus-plugin-sass": {
"version": "0.2.2",
"resolved": "https://registry.npmjs.org/docusaurus-plugin-sass/-/docusaurus-plugin-sass-0.2.2.tgz",
"integrity": "sha512-ZZBpj3PrhGpYE2kAnkZB9NRwy/CDi4rGun1oec6PYR8YvGzqxYGtXvLgHi6FFbu8/N483klk8udqyYMh6Ted+A==",
"license": "MIT",
"version": "0.2.3",
"resolved": "https://registry.npmjs.org/docusaurus-plugin-sass/-/docusaurus-plugin-sass-0.2.3.tgz",
"integrity": "sha512-FbaE06K8NF8SPUYTwiG+83/jkXrwHJ/Afjqz3SUIGon6QvFwSSoKOcoxGQmUBnjTOk+deUONDx8jNWsegFJcBQ==",
"dependencies": {
"sass-loader": "^10.1.1"
},
@ -10077,9 +10076,9 @@
}
},
"node_modules/postcss": {
"version": "8.4.20",
"resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.20.tgz",
"integrity": "sha512-6Q04AXR1212bXr5fh03u8aAwbLxAQNGQ/Q1LNa0VfOI06ZAlhPHtQvE4OIdpj4kLThXilalPnmDSOD65DcHt+g==",
"version": "8.4.21",
"resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.21.tgz",
"integrity": "sha512-tP7u/Sn/dVxK2NnruI4H9BG+x+Wxz6oeZ1cJ8P6G/PZY0IKk4k/63TDsQf2kQq3+qoJeLm2kIBUNlZe3zgb4Zg==",
"funding": [
{
"type": "opencollective",
@ -18997,9 +18996,9 @@
}
},
"docusaurus-plugin-sass": {
"version": "0.2.2",
"resolved": "https://registry.npmjs.org/docusaurus-plugin-sass/-/docusaurus-plugin-sass-0.2.2.tgz",
"integrity": "sha512-ZZBpj3PrhGpYE2kAnkZB9NRwy/CDi4rGun1oec6PYR8YvGzqxYGtXvLgHi6FFbu8/N483klk8udqyYMh6Ted+A==",
"version": "0.2.3",
"resolved": "https://registry.npmjs.org/docusaurus-plugin-sass/-/docusaurus-plugin-sass-0.2.3.tgz",
"integrity": "sha512-FbaE06K8NF8SPUYTwiG+83/jkXrwHJ/Afjqz3SUIGon6QvFwSSoKOcoxGQmUBnjTOk+deUONDx8jNWsegFJcBQ==",
"requires": {
"sass-loader": "^10.1.1"
}
@ -21384,9 +21383,9 @@
"integrity": "sha512-Wb4p1J4zyFTbM+u6WuO4XstYx4Ky9Cewe4DWrel7B0w6VVICvPwdOpotjzcf6eD8TsckVnIMNONQyPIUFOUbCQ=="
},
"postcss": {
"version": "8.4.20",
"resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.20.tgz",
"integrity": "sha512-6Q04AXR1212bXr5fh03u8aAwbLxAQNGQ/Q1LNa0VfOI06ZAlhPHtQvE4OIdpj4kLThXilalPnmDSOD65DcHt+g==",
"version": "8.4.21",
"resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.21.tgz",
"integrity": "sha512-tP7u/Sn/dVxK2NnruI4H9BG+x+Wxz6oeZ1cJ8P6G/PZY0IKk4k/63TDsQf2kQq3+qoJeLm2kIBUNlZe3zgb4Zg==",
"requires": {
"nanoid": "^3.3.4",
"picocolors": "^1.0.0",

View File

@ -22,7 +22,7 @@
"animate.css": "^4.1.1",
"clsx": "^1.2.1",
"docusaurus-plugin-image-zoom": "^0.1.1",
"docusaurus-plugin-sass": "^0.2.2",
"docusaurus-plugin-sass": "^0.2.3",
"feather-icons": "^4.29.0",
"jarallax": "^2.1.3",
"mdx-mermaid": "^1.3.2",
@ -38,7 +38,7 @@
"@docusaurus/module-type-aliases": "2.2.0",
"@iconify/react": "^4.0.1",
"autoprefixer": "^10.4.13",
"postcss": "^8.4.20",
"postcss": "^8.4.21",
"tailwindcss": "^3.2.4"
},
"browserslist": {