diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 7083e885b..170a63357 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -41,7 +41,7 @@ jobs: working-directory: src steps: - uses: actions/checkout@v3 - + # single setup and sum cache handling here. # the results will cascade onto both testing and linting. - name: Setup Golang with cache diff --git a/.gitignore b/.gitignore index 46f5189b8..911d91a10 100644 --- a/.gitignore +++ b/.gitignore @@ -20,6 +20,9 @@ .corso_test.toml .corso.toml +# Logging +.corso.log + # Build directories /bin /docker/bin diff --git a/CHANGELOG.md b/CHANGELOG.md index 1b18a4d04..7c029b8f9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,23 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Added - Document Corso's fault-tolerance and restartability features +- Add retries on timeouts and status code 500 for Exchange +- Increase page size preference for delta requests for Exchange to reduce number of roundtrips +- OneDrive file/folder permissions can now be backed up and restored +- Add `--restore-permissions` flag to toggle restoration of OneDrive permissions +- Add versions to backups so that we can understand/handle older backup formats + +### Fixed +- Backing up a calendar that has the same name as the default calendar +- Added additional backoff-retry to all OneDrive queries. +- Users with `null` userType values are no longer excluded from user queries. + +### Known Issues + +- When the same user has permissions to a file and the containing + folder, we only restore folder level permissions for the user and no + separate file only permission is restored. +- Link shares are not restored ## [v0.2.0] (alpha) - 2023-1-29 @@ -18,7 +35,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Check if the user specified for an exchange backup operation has a mailbox. 
### Changed - +- Item.Attachments are disabled from being restored for the patching of ([#2353](https://github.com/alcionai/corso/issues/2353)) - BetaClient introduced. Enables Corso to be able to interact with SharePoint Page objects. Package located `/internal/connector/graph/betasdk` - Handle case where user's drive has not been initialized - Inline attachments (e.g. copy/paste ) are discovered and backed up correctly ([#2163](https://github.com/alcionai/corso/issues/2163)) diff --git a/src/cli/backup/onedrive.go b/src/cli/backup/onedrive.go index 60a055dce..9dfb20b79 100644 --- a/src/cli/backup/onedrive.go +++ b/src/cli/backup/onedrive.go @@ -79,6 +79,7 @@ func addOneDriveCommands(cmd *cobra.Command) *cobra.Command { switch cmd.Use { case createCommand: c, fs = utils.AddCommand(cmd, oneDriveCreateCmd()) + options.AddFeatureToggle(cmd, options.EnablePermissionsBackup()) c.Use = c.Use + " " + oneDriveServiceCommandCreateUseSuffix c.Example = oneDriveServiceCommandCreateExamples diff --git a/src/cli/backup/onedrive_integration_test.go b/src/cli/backup/onedrive_integration_test.go index e24cba34f..05231fd11 100644 --- a/src/cli/backup/onedrive_integration_test.go +++ b/src/cli/backup/onedrive_integration_test.go @@ -72,7 +72,13 @@ func (suite *NoBackupOneDriveIntegrationSuite) SetupSuite() { suite.m365UserID = tester.M365UserID(t) // init the repo first - suite.repo, err = repository.Initialize(ctx, suite.acct, suite.st, control.Options{}) + suite.repo, err = repository.Initialize( + ctx, + suite.acct, + suite.st, + control.Options{ + ToggleFeatures: control.Toggles{EnablePermissionsBackup: true}, + }) require.NoError(t, err) } @@ -152,7 +158,13 @@ func (suite *BackupDeleteOneDriveIntegrationSuite) SetupSuite() { defer flush() // init the repo first - suite.repo, err = repository.Initialize(ctx, suite.acct, suite.st, control.Options{}) + suite.repo, err = repository.Initialize( + ctx, + suite.acct, + suite.st, + control.Options{ + ToggleFeatures: 
control.Toggles{EnablePermissionsBackup: true}, + }) require.NoError(t, err) m365UserID := tester.M365UserID(t) diff --git a/src/cli/cli.go b/src/cli/cli.go index b67663d06..77a03b1a7 100644 --- a/src/cli/cli.go +++ b/src/cli/cli.go @@ -6,7 +6,9 @@ import ( "regexp" "strings" + "github.com/alcionai/clues" "github.com/spf13/cobra" + "golang.org/x/exp/slices" "github.com/alcionai/corso/src/cli/backup" "github.com/alcionai/corso/src/cli/config" @@ -50,6 +52,13 @@ func preRun(cc *cobra.Command, args []string) error { flagSl = append(flagSl, f) } + avoidTheseCommands := []string{ + "corso", "env", "help", "backup", "details", "list", "restore", "delete", "repo", "init", "connect", + } + if len(logger.LogFile) > 0 && !slices.Contains(avoidTheseCommands, cc.Use) { + print.Info(cc.Context(), "Logging to file: "+logger.LogFile) + } + log.Infow("cli command", "command", cc.CommandPath(), "flags", flagSl, "version", version.CurrentVersion()) return nil @@ -121,6 +130,9 @@ func Handle() { }() if err := corsoCmd.ExecuteContext(ctx); err != nil { + logger.Ctx(ctx). + With("err", err). + Errorw("cli execution", clues.InErr(err).Slice()...) 
os.Exit(1) } } diff --git a/src/cli/options/options.go b/src/cli/options/options.go index 4988c29ca..2b423836c 100644 --- a/src/cli/options/options.go +++ b/src/cli/options/options.go @@ -11,17 +11,11 @@ import ( func Control() control.Options { opt := control.Defaults() - if fastFail { - opt.FailFast = true - } - - if noStats { - opt.DisableMetrics = true - } - - if disableIncrementals { - opt.ToggleFeatures.DisableIncrementals = true - } + opt.FailFast = fastFail + opt.DisableMetrics = noStats + opt.RestorePermissions = restorePermissions + opt.ToggleFeatures.DisableIncrementals = disableIncrementals + opt.ToggleFeatures.EnablePermissionsBackup = enablePermissionsBackup return opt } @@ -31,8 +25,9 @@ func Control() control.Options { // --------------------------------------------------------------------------- var ( - fastFail bool - noStats bool + fastFail bool + noStats bool + restorePermissions bool ) // AddOperationFlags adds command-local operation flags @@ -49,11 +44,22 @@ func AddGlobalOperationFlags(cmd *cobra.Command) { fs.BoolVar(&noStats, "no-stats", false, "disable anonymous usage statistics gathering") } +// AddRestorePermissionsFlag adds OneDrive flag for restoring permissions +func AddRestorePermissionsFlag(cmd *cobra.Command) { + fs := cmd.Flags() + fs.BoolVar(&restorePermissions, "restore-permissions", false, "Restore permissions for files and folders") + // TODO: reveal this flag once backing up permissions becomes default + cobra.CheckErr(fs.MarkHidden("restore-permissions")) +} + // --------------------------------------------------------------------------- // Feature Flags // --------------------------------------------------------------------------- -var disableIncrementals bool +var ( + disableIncrementals bool + enablePermissionsBackup bool +) type exposeFeatureFlag func(*pflag.FlagSet) @@ -78,3 +84,16 @@ func DisableIncrementals() func(*pflag.FlagSet) { cobra.CheckErr(fs.MarkHidden("disable-incrementals")) } } + +// Adds the hidden 
'--enable-permissions-backup' cli flag which, when +// set, enables backing up permissions. +func EnablePermissionsBackup() func(*pflag.FlagSet) { + return func(fs *pflag.FlagSet) { + fs.BoolVar( + &enablePermissionsBackup, + "enable-permissions-backup", + false, + "Enable backing up item permissions for OneDrive") + cobra.CheckErr(fs.MarkHidden("enable-permissions-backup")) + } +} diff --git a/src/cli/restore/onedrive.go b/src/cli/restore/onedrive.go index 526db414b..bd8dc7816 100644 --- a/src/cli/restore/onedrive.go +++ b/src/cli/restore/onedrive.go @@ -63,6 +63,9 @@ func addOneDriveCommands(cmd *cobra.Command) *cobra.Command { utils.FileFN, nil, "Restore items by file name or ID") + // permissions restore flag + options.AddRestorePermissionsFlag(c) + // onedrive info flags fs.StringVar( @@ -97,6 +100,9 @@ const ( oneDriveServiceCommandRestoreExamples = `# Restore file with ID 98765abcdef corso restore onedrive --backup 1234abcd-12ab-cd34-56de-1234abcd --file 98765abcdef +# Restore file with ID 98765abcdef along with its associated permissions +corso restore onedrive --backup 1234abcd-12ab-cd34-56de-1234abcd --file 98765abcdef --restore-permissions + # Restore Alice's file named "FY2021 Planning.xlsx in "Documents/Finance Reports" from a specific backup corso restore onedrive --backup 1234abcd-12ab-cd34-56de-1234abcd \ --user alice@example.com --file "FY2021 Planning.xlsx" --folder "Documents/Finance Reports" diff --git a/src/cmd/factory/impl/common.go b/src/cmd/factory/impl/common.go index 0ea6835dd..78a5dca0e 100644 --- a/src/cmd/factory/impl/common.go +++ b/src/cmd/factory/impl/common.go @@ -16,6 +16,7 @@ import ( "github.com/alcionai/corso/src/internal/connector/mockconnector" "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/pkg/account" + "github.com/alcionai/corso/src/pkg/backup" "github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/credentials" @@ -50,6 
+51,7 @@ func generateAndRestoreItems( tenantID, userID, destFldr string, howMany int, dbf dataBuilderFunc, + opts control.Options, ) (*details.Details, error) { items := make([]item, 0, howMany) @@ -74,7 +76,7 @@ func generateAndRestoreItems( items: items, }} - // TODO: fit the desination to the containers + // TODO: fit the destination to the containers dest := control.DefaultRestoreDestination(common.SimpleTimeTesting) dest.ContainerName = destFldr @@ -90,7 +92,7 @@ func generateAndRestoreItems( Infof(ctx, "Generating %d %s items in %s\n", howMany, cat, Destination) - return gc.RestoreDataCollections(ctx, acct, sel, dest, dataColls) + return gc.RestoreDataCollections(ctx, backup.Version, acct, sel, dest, opts, dataColls) } // ------------------------------------------------------------------------------------------ diff --git a/src/cmd/factory/impl/exchange.go b/src/cmd/factory/impl/exchange.go index 26f7eef09..39e3c13a1 100644 --- a/src/cmd/factory/impl/exchange.go +++ b/src/cmd/factory/impl/exchange.go @@ -6,6 +6,7 @@ import ( . 
"github.com/alcionai/corso/src/cli/print" "github.com/alcionai/corso/src/cli/utils" "github.com/alcionai/corso/src/internal/connector/mockconnector" + "github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/selectors" ) @@ -67,6 +68,7 @@ func handleExchangeEmailFactory(cmd *cobra.Command, args []string) error { subject, body, body, now, now, now, now) }, + control.Options{}, ) if err != nil { return Only(ctx, err) @@ -107,6 +109,7 @@ func handleExchangeCalendarEventFactory(cmd *cobra.Command, args []string) error User, subject, body, body, now, now, false) }, + control.Options{}, ) if err != nil { return Only(ctx, err) @@ -152,6 +155,7 @@ func handleExchangeContactFactory(cmd *cobra.Command, args []string) error { "123-456-7890", ) }, + control.Options{}, ) if err != nil { return Only(ctx, err) diff --git a/src/go.mod b/src/go.mod index a8054ab0e..bf43f6494 100644 --- a/src/go.mod +++ b/src/go.mod @@ -4,8 +4,8 @@ go 1.19 require ( github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0 - github.com/alcionai/clues v0.0.0-20230120231953-1cf61dbafc40 - github.com/aws/aws-sdk-go v1.44.190 + github.com/alcionai/clues v0.0.0-20230131232239-cee86233b005 + github.com/aws/aws-sdk-go v1.44.192 github.com/aws/aws-xray-sdk-go v1.8.0 github.com/google/uuid v1.3.0 github.com/hashicorp/go-multierror v1.1.1 @@ -71,7 +71,6 @@ require ( github.com/hashicorp/errwrap v1.0.0 // indirect github.com/hashicorp/golang-lru v0.5.4 // indirect github.com/inconshreveable/mousetrap v1.0.1 // indirect - github.com/inhies/go-bytesize v0.0.0-20220417184213-4913239db9cf github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/compress v1.15.12 // indirect diff --git a/src/go.sum b/src/go.sum index 72af64b2d..17d6f25bd 100644 --- a/src/go.sum +++ b/src/go.sum @@ -52,8 +52,8 @@ github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1o 
github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4= github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8= github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo= -github.com/alcionai/clues v0.0.0-20230120231953-1cf61dbafc40 h1:bvAwz0dcJeIyRjudVyzmmawOvc4SqlSerKd0B4dh0yw= -github.com/alcionai/clues v0.0.0-20230120231953-1cf61dbafc40/go.mod h1:UlAs8jkWIpsOMakiC8NxPgQQVQRdvyf1hYMszlYYLb4= +github.com/alcionai/clues v0.0.0-20230131232239-cee86233b005 h1:eTgICcmcydEWG8J+hgnidf0pzujV3Gd2XqmknykZkzA= +github.com/alcionai/clues v0.0.0-20230131232239-cee86233b005/go.mod h1:UlAs8jkWIpsOMakiC8NxPgQQVQRdvyf1hYMszlYYLb4= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= @@ -62,8 +62,8 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5 github.com/alessio/shellescape v1.4.1 h1:V7yhSDDn8LP4lc4jS8pFkt0zCnzVJlG5JXy9BVKJUX0= github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY3JY= github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= -github.com/aws/aws-sdk-go v1.44.190 h1:QC+Pf/Ooj7Waf2obOPZbIQOqr00hy4h54j3ZK9mvHcc= -github.com/aws/aws-sdk-go v1.44.190/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go v1.44.192 h1:KL54vCxRd5v5XBGjnF3FelzXXwl+aWHDmDTihFmRNgM= +github.com/aws/aws-sdk-go v1.44.192/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/aws/aws-xray-sdk-go v1.8.0 h1:0xncHZ588wB/geLjbM/esoW3FOEThWy2TJyb4VXfLFY= github.com/aws/aws-xray-sdk-go v1.8.0/go.mod 
h1:7LKe47H+j3evfvS1+q0wzpoaGXGrF3mUsfM+thqVO+A= github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= @@ -209,8 +209,6 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1: github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc= github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/inhies/go-bytesize v0.0.0-20220417184213-4913239db9cf h1:FtEj8sfIcaaBfAKrE1Cwb61YDtYq9JxChK1c7AKce7s= -github.com/inhies/go-bytesize v0.0.0-20220417184213-4913239db9cf/go.mod h1:yrqSXGoD/4EKfF26AOGzscPOgTTJcyAwM2rpixWT+t4= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= diff --git a/src/internal/connector/data_collections.go b/src/internal/connector/data_collections.go index 7d187a854..410a05462 100644 --- a/src/internal/connector/data_collections.go +++ b/src/internal/connector/data_collections.go @@ -83,7 +83,7 @@ func (gc *GraphConnector) DataCollections( return colls, excludes, nil case selectors.ServiceOneDrive: - return gc.OneDriveDataCollections(ctx, sels, ctrlOpts) + return gc.OneDriveDataCollections(ctx, sels, metadata, ctrlOpts) case selectors.ServiceSharePoint: colls, excludes, err := sharepoint.DataCollections( @@ -182,6 +182,7 @@ func (fm odFolderMatcher) Matches(dir string) bool { func (gc *GraphConnector) OneDriveDataCollections( ctx context.Context, selector selectors.Selector, + metadata []data.Collection, ctrlOpts control.Options, ) ([]data.Collection, map[string]struct{}, error) { odb, err := selector.ToOneDriveBackup() @@ -209,7 +210,7 @@ func (gc 
*GraphConnector) OneDriveDataCollections( gc.Service, gc.UpdateStatus, ctrlOpts, - ).Get(ctx) + ).Get(ctx, metadata) if err != nil { return nil, nil, support.WrapAndAppend(user, err, errs) } diff --git a/src/internal/connector/discovery/api/beta_service.go b/src/internal/connector/discovery/api/beta_service.go index df2b1533b..0208ace69 100644 --- a/src/internal/connector/discovery/api/beta_service.go +++ b/src/internal/connector/discovery/api/beta_service.go @@ -1,32 +1,33 @@ package api import ( - "github.com/alcionai/corso/src/internal/connector/graph/betasdk" absser "github.com/microsoft/kiota-abstractions-go/serialization" msgraphsdk "github.com/microsoftgraph/msgraph-sdk-go" "github.com/pkg/errors" + + "github.com/alcionai/corso/src/internal/connector/graph/betasdk" ) // Service wraps BetaClient's functionality. // Abstraction created to comply loosely with graph.Servicer // methods for ease of switching between v1.0 and beta connnectors -type Service struct { +type BetaService struct { client *betasdk.BetaClient } -func (s Service) Client() *betasdk.BetaClient { +func (s BetaService) Client() *betasdk.BetaClient { return s.client } -func NewBetaService(adpt *msgraphsdk.GraphRequestAdapter) *Service { - return &Service{ +func NewBetaService(adpt *msgraphsdk.GraphRequestAdapter) *BetaService { + return &BetaService{ client: betasdk.NewBetaClient(adpt), } } // Seraialize writes an M365 parsable object into a byte array using the built-in // application/json writer within the adapter. -func (s Service) Serialize(object absser.Parsable) ([]byte, error) { +func (s BetaService) Serialize(object absser.Parsable) ([]byte, error) { writer, err := s.client.Adapter(). GetSerializationWriterFactory(). 
GetSerializationWriter("application/json") diff --git a/src/internal/connector/discovery/api/users.go b/src/internal/connector/discovery/api/users.go index ff41ee06f..c08297a9e 100644 --- a/src/internal/connector/discovery/api/users.go +++ b/src/internal/connector/discovery/api/users.go @@ -3,6 +3,7 @@ package api import ( "context" + absser "github.com/microsoft/kiota-abstractions-go" msgraphgocore "github.com/microsoftgraph/msgraph-sdk-go-core" "github.com/microsoftgraph/msgraph-sdk-go/models" "github.com/microsoftgraph/msgraph-sdk-go/users" @@ -58,14 +59,27 @@ const ( // require more fine-tuned controls in the future. // https://stackoverflow.com/questions/64044266/error-message-unsupported-or-invalid-query-filter-clause-specified-for-property // +// ne 'Guest' ensures we don't filter out users where userType = null, which can happen +// for user accounts created prior to 2014. In order to use the `ne` comparator, we +// MUST include $count=true and the ConsistencyLevel: eventual header. +// https://stackoverflow.com/questions/49340485/how-to-filter-users-by-usertype-null +// //nolint:lll -var userFilterNoGuests = "onPremisesSyncEnabled eq true OR userType eq 'Member'" +var userFilterNoGuests = "onPremisesSyncEnabled eq true OR userType ne 'Guest'" + +// I can't believe I have to do this. 
+var t = true func userOptions(fs *string) *users.UsersRequestBuilderGetRequestConfiguration { + headers := absser.NewRequestHeaders() + headers.Add("ConsistencyLevel", "eventual") + return &users.UsersRequestBuilderGetRequestConfiguration{ + Headers: headers, QueryParameters: &users.UsersRequestBuilderGetQueryParameters{ Select: []string{userSelectID, userSelectPrincipalName, userSelectDisplayName}, Filter: fs, + Count: &t, }, } } @@ -77,7 +91,13 @@ func (c Users) GetAll(ctx context.Context) ([]models.Userable, error) { return nil, err } - resp, err := service.Client().Users().Get(ctx, userOptions(&userFilterNoGuests)) + var resp models.UserCollectionResponseable + + err = graph.RunWithRetry(func() error { + resp, err = service.Client().Users().Get(ctx, userOptions(&userFilterNoGuests)) + return err + }) + if err != nil { return nil, support.ConnectorStackErrorTraceWrap(err, "getting all users") } @@ -114,22 +134,37 @@ func (c Users) GetAll(ctx context.Context) ([]models.Userable, error) { } func (c Users) GetByID(ctx context.Context, userID string) (models.Userable, error) { - user, err := c.stable.Client().UsersById(userID).Get(ctx, nil) + var ( + resp models.Userable + err error + ) + + err = graph.RunWithRetry(func() error { + resp, err = c.stable.Client().UsersById(userID).Get(ctx, nil) + return err + }) + if err != nil { return nil, support.ConnectorStackErrorTraceWrap(err, "getting user by id") } - return user, nil + return resp, err } func (c Users) GetInfo(ctx context.Context, userID string) (*UserInfo, error) { // Assume all services are enabled // then filter down to only services the user has enabled - userInfo := newUserInfo() + var ( + err error + userInfo = newUserInfo() + ) // TODO: OneDrive + err = graph.RunWithRetry(func() error { + _, err = c.stable.Client().UsersById(userID).MailFolders().Get(ctx, nil) + return err + }) - _, err := c.stable.Client().UsersById(userID).MailFolders().Get(ctx, nil) if err != nil { if 
!graph.IsErrExchangeMailFolderNotFound(err) { return nil, support.ConnectorStackErrorTraceWrap(err, "getting user's exchange mailfolders") diff --git a/src/internal/connector/exchange/api/contacts.go b/src/internal/connector/exchange/api/contacts.go index 0db1e964c..ac4afeb34 100644 --- a/src/internal/connector/exchange/api/contacts.go +++ b/src/internal/connector/exchange/api/contacts.go @@ -5,6 +5,7 @@ import ( "fmt" "time" + "github.com/alcionai/clues" "github.com/hashicorp/go-multierror" "github.com/microsoft/kiota-abstractions-go/serialization" kioser "github.com/microsoft/kiota-serialization-json-go" @@ -16,6 +17,7 @@ import ( "github.com/alcionai/corso/src/internal/connector/graph/api" "github.com/alcionai/corso/src/internal/connector/support" "github.com/alcionai/corso/src/pkg/backup/details" + "github.com/alcionai/corso/src/pkg/selectors" ) // --------------------------------------------------------------------------- @@ -61,7 +63,16 @@ func (c Contacts) GetItem( ctx context.Context, user, itemID string, ) (serialization.Parsable, *details.ExchangeInfo, error) { - cont, err := c.stable.Client().UsersById(user).ContactsById(itemID).Get(ctx, nil) + var ( + cont models.Contactable + err error + ) + + err = graph.RunWithRetry(func() error { + cont, err = c.stable.Client().UsersById(user).ContactsById(itemID).Get(ctx, nil) + return err + }) + if err != nil { return nil, nil, err } @@ -81,7 +92,14 @@ func (c Contacts) GetAllContactFolderNamesForUser( return nil, err } - return c.stable.Client().UsersById(user).ContactFolders().Get(ctx, options) + var resp models.ContactFolderCollectionResponseable + + err = graph.RunWithRetry(func() error { + resp, err = c.stable.Client().UsersById(user).ContactFolders().Get(ctx, options) + return err + }) + + return resp, err } func (c Contacts) GetContainerByID( @@ -93,10 +111,14 @@ func (c Contacts) GetContainerByID( return nil, errors.Wrap(err, "options for contact folder") } - return c.stable.Client(). - UsersById(userID). 
- ContactFoldersById(dirID). - Get(ctx, ofcf) + var resp models.ContactFolderable + + err = graph.RunWithRetry(func() error { + resp, err = c.stable.Client().UsersById(userID).ContactFoldersById(dirID).Get(ctx, ofcf) + return err + }) + + return resp, err } // EnumerateContainers iterates through all of the users current @@ -117,6 +139,7 @@ func (c Contacts) EnumerateContainers( var ( errs *multierror.Error + resp models.ContactFolderCollectionResponseable fields = []string{"displayName", "parentFolderId"} ) @@ -131,7 +154,11 @@ func (c Contacts) EnumerateContainers( ChildFolders() for { - resp, err := builder.Get(ctx, ofcf) + err = graph.RunWithRetry(func() error { + resp, err = builder.Get(ctx, ofcf) + return err + }) + if err != nil { return errors.Wrap(err, support.ConnectorStackErrorTrace(err)) } @@ -174,7 +201,17 @@ type contactPager struct { } func (p *contactPager) getPage(ctx context.Context) (api.DeltaPageLinker, error) { - return p.builder.Get(ctx, p.options) + var ( + resp api.DeltaPageLinker + err error + ) + + err = graph.RunWithRetry(func() error { + resp, err = p.builder.Get(ctx, p.options) + return err + }) + + return resp, err } func (p *contactPager) setNext(nextLink string) { @@ -199,6 +236,11 @@ func (c Contacts) GetAddedAndRemovedItemIDs( resetDelta bool ) + ctx = clues.AddAll( + ctx, + "category", selectors.ExchangeContact, + "folder_id", directoryID) + options, err := optionsForContactFoldersItemDelta([]string{"parentFolderId"}) if err != nil { return nil, nil, DeltaUpdate{}, errors.Wrap(err, "getting query options") diff --git a/src/internal/connector/exchange/api/events.go b/src/internal/connector/exchange/api/events.go index e643c1f89..b9c16f319 100644 --- a/src/internal/connector/exchange/api/events.go +++ b/src/internal/connector/exchange/api/events.go @@ -5,6 +5,7 @@ import ( "fmt" "time" + "github.com/alcionai/clues" "github.com/hashicorp/go-multierror" "github.com/microsoft/kiota-abstractions-go/serialization" kioser 
"github.com/microsoft/kiota-serialization-json-go" @@ -19,6 +20,7 @@ import ( "github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/logger" "github.com/alcionai/corso/src/pkg/path" + "github.com/alcionai/corso/src/pkg/selectors" ) // --------------------------------------------------------------------------- @@ -73,7 +75,13 @@ func (c Events) GetContainerByID( return nil, errors.Wrap(err, "options for event calendar") } - cal, err := service.Client().UsersById(userID).CalendarsById(containerID).Get(ctx, ofc) + var cal models.Calendarable + + err = graph.RunWithRetry(func() error { + cal, err = service.Client().UsersById(userID).CalendarsById(containerID).Get(ctx, ofc) + return err + }) + if err != nil { return nil, err } @@ -86,12 +94,28 @@ func (c Events) GetItem( ctx context.Context, user, itemID string, ) (serialization.Parsable, *details.ExchangeInfo, error) { - event, err := c.stable.Client().UsersById(user).EventsById(itemID).Get(ctx, nil) + var ( + event models.Eventable + err error + ) + + err = graph.RunWithRetry(func() error { + event, err = c.stable.Client().UsersById(user).EventsById(itemID).Get(ctx, nil) + return err + }) + if err != nil { return nil, nil, err } - var errs *multierror.Error + var ( + errs *multierror.Error + options = &users.ItemEventsItemAttachmentsRequestBuilderGetRequestConfiguration{ + QueryParameters: &users.ItemEventsItemAttachmentsRequestBuilderGetQueryParameters{ + Expand: []string{"microsoft.graph.itemattachment/item"}, + }, + } + ) if *event.GetHasAttachments() || HasAttachments(event.GetBody()) { for count := 0; count < numberOfRetries; count++ { @@ -100,7 +124,7 @@ func (c Events) GetItem( UsersById(user). EventsById(itemID). Attachments(). 
- Get(ctx, nil) + Get(ctx, options) if err == nil { event.SetAttachments(attached.GetValue()) break @@ -128,7 +152,14 @@ func (c Client) GetAllCalendarNamesForUser( return nil, err } - return c.stable.Client().UsersById(user).Calendars().Get(ctx, options) + var resp models.CalendarCollectionResponseable + + err = graph.RunWithRetry(func() error { + resp, err = c.stable.Client().UsersById(user).Calendars().Get(ctx, options) + return err + }) + + return resp, err } // EnumerateContainers iterates through all of the users current @@ -147,7 +178,10 @@ func (c Events) EnumerateContainers( return err } - var errs *multierror.Error + var ( + resp models.CalendarCollectionResponseable + errs *multierror.Error + ) ofc, err := optionsForCalendars([]string{"name"}) if err != nil { @@ -157,7 +191,13 @@ func (c Events) EnumerateContainers( builder := service.Client().UsersById(userID).Calendars() for { - resp, err := builder.Get(ctx, ofc) + var err error + + err = graph.RunWithRetry(func() error { + resp, err = builder.Get(ctx, ofc) + return err + }) + if err != nil { return errors.Wrap(err, support.ConnectorStackErrorTrace(err)) } @@ -205,7 +245,16 @@ type eventPager struct { } func (p *eventPager) getPage(ctx context.Context) (api.DeltaPageLinker, error) { - resp, err := p.builder.Get(ctx, p.options) + var ( + resp api.DeltaPageLinker + err error + ) + + err = graph.RunWithRetry(func() error { + resp, err = p.builder.Get(ctx, p.options) + return err + }) + return resp, err } @@ -231,6 +280,11 @@ func (c Events) GetAddedAndRemovedItemIDs( errs *multierror.Error ) + ctx = clues.AddAll( + ctx, + "category", selectors.ExchangeEvent, + "calendar_id", calendarID) + if len(oldDelta) > 0 { builder := users.NewItemCalendarsItemEventsDeltaRequestBuilder(oldDelta, service.Adapter()) pgr := &eventPager{service, builder, nil} diff --git a/src/internal/connector/exchange/api/mail.go b/src/internal/connector/exchange/api/mail.go index bbac48a66..01a485fbb 100644 --- 
a/src/internal/connector/exchange/api/mail.go +++ b/src/internal/connector/exchange/api/mail.go @@ -5,6 +5,7 @@ import ( "fmt" "time" + "github.com/alcionai/clues" "github.com/hashicorp/go-multierror" "github.com/microsoft/kiota-abstractions-go/serialization" kioser "github.com/microsoft/kiota-serialization-json-go" @@ -17,6 +18,7 @@ import ( "github.com/alcionai/corso/src/internal/connector/support" "github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/logger" + "github.com/alcionai/corso/src/pkg/selectors" ) // --------------------------------------------------------------------------- @@ -95,7 +97,14 @@ func (c Mail) GetContainerByID( return nil, errors.Wrap(err, "options for mail folder") } - return service.Client().UsersById(userID).MailFoldersById(dirID).Get(ctx, ofmf) + var resp graph.Container + + err = graph.RunWithRetry(func() error { + resp, err = service.Client().UsersById(userID).MailFoldersById(dirID).Get(ctx, ofmf) + return err + }) + + return resp, err } // GetItem retrieves a Messageable item. 
If the item contains an attachment, that @@ -104,7 +113,16 @@ func (c Mail) GetItem( ctx context.Context, user, itemID string, ) (serialization.Parsable, *details.ExchangeInfo, error) { - mail, err := c.stable.Client().UsersById(user).MessagesById(itemID).Get(ctx, nil) + var ( + mail models.Messageable + err error + ) + + err = graph.RunWithRetry(func() error { + mail, err = c.stable.Client().UsersById(user).MessagesById(itemID).Get(ctx, nil) + return err + }) + if err != nil { return nil, nil, err } @@ -112,13 +130,18 @@ func (c Mail) GetItem( var errs *multierror.Error if *mail.GetHasAttachments() || HasAttachments(mail.GetBody()) { + options := &users.ItemMessagesItemAttachmentsRequestBuilderGetRequestConfiguration{ + QueryParameters: &users.ItemMessagesItemAttachmentsRequestBuilderGetQueryParameters{ + Expand: []string{"microsoft.graph.itemattachment/item"}, + }, + } for count := 0; count < numberOfRetries; count++ { attached, err := c.largeItem. Client(). UsersById(user). MessagesById(itemID). Attachments(). - Get(ctx, nil) + Get(ctx, options) if err == nil { mail.SetAttachments(attached.GetValue()) break @@ -154,6 +177,7 @@ func (c Mail) EnumerateContainers( } var ( + resp users.ItemMailFoldersDeltaResponseable errs *multierror.Error builder = service.Client(). UsersById(userID). 
@@ -162,7 +186,13 @@ func (c Mail) EnumerateContainers( ) for { - resp, err := builder.Get(ctx, nil) + var err error + + err = graph.RunWithRetry(func() error { + resp, err = builder.Get(ctx, nil) + return err + }) + if err != nil { return errors.Wrap(err, support.ConnectorStackErrorTrace(err)) } @@ -200,7 +230,17 @@ type mailPager struct { } func (p *mailPager) getPage(ctx context.Context) (api.DeltaPageLinker, error) { - return p.builder.Get(ctx, p.options) + var ( + page api.DeltaPageLinker + err error + ) + + err = graph.RunWithRetry(func() error { + page, err = p.builder.Get(ctx, p.options) + return err + }) + + return page, err } func (p *mailPager) setNext(nextLink string) { @@ -226,6 +266,11 @@ func (c Mail) GetAddedAndRemovedItemIDs( resetDelta bool ) + ctx = clues.AddAll( + ctx, + "category", selectors.ExchangeMail, + "folder_id", directoryID) + options, err := optionsForFolderMessagesDelta([]string{"isRead"}) if err != nil { return nil, nil, DeltaUpdate{}, errors.Wrap(err, "getting query options") diff --git a/src/internal/connector/exchange/api/options.go b/src/internal/connector/exchange/api/options.go index 49debf334..67725225f 100644 --- a/src/internal/connector/exchange/api/options.go +++ b/src/internal/connector/exchange/api/options.go @@ -3,6 +3,7 @@ package api import ( "fmt" + abstractions "github.com/microsoft/kiota-abstractions-go" "github.com/microsoftgraph/msgraph-sdk-go/users" ) @@ -53,6 +54,16 @@ var ( } ) +const ( + // headerKeyPrefer is used to set query preferences + headerKeyPrefer = "Prefer" + // maxPageSizeHeaderFmt is used to indicate max page size + // preferences + maxPageSizeHeaderFmt = "odata.maxpagesize=%d" + // deltaMaxPageSize is the max page size to use for delta queries + deltaMaxPageSize = 200 +) + // ----------------------------------------------------------------------- // exchange.Query Option Section // These functions can be used to filter a response on M365 @@ -71,8 +82,10 @@ func optionsForFolderMessagesDelta( 
requestParameters := &users.ItemMailFoldersItemMessagesDeltaRequestBuilderGetQueryParameters{ Select: selecting, } + options := &users.ItemMailFoldersItemMessagesDeltaRequestBuilderGetRequestConfiguration{ QueryParameters: requestParameters, + Headers: buildDeltaRequestHeaders(), } return options, nil @@ -218,6 +231,7 @@ func optionsForContactFoldersItemDelta( options := &users.ItemContactFoldersItemContactsDeltaRequestBuilderGetRequestConfiguration{ QueryParameters: requestParameters, + Headers: buildDeltaRequestHeaders(), } return options, nil @@ -275,3 +289,11 @@ func buildOptions(fields []string, allowed map[string]struct{}) ([]string, error return append(returnedOptions, fields...), nil } + +// buildDeltaRequestHeaders returns the headers we add to delta page requests +func buildDeltaRequestHeaders() *abstractions.RequestHeaders { + headers := abstractions.NewRequestHeaders() + headers.Add(headerKeyPrefer, fmt.Sprintf(maxPageSizeHeaderFmt, deltaMaxPageSize)) + + return headers +} diff --git a/src/internal/connector/exchange/api/shared.go b/src/internal/connector/exchange/api/shared.go index d89ce7411..e4d563e90 100644 --- a/src/internal/connector/exchange/api/shared.go +++ b/src/internal/connector/exchange/api/shared.go @@ -8,6 +8,7 @@ import ( "github.com/alcionai/corso/src/internal/connector/graph" "github.com/alcionai/corso/src/internal/connector/graph/api" "github.com/alcionai/corso/src/internal/connector/support" + "github.com/alcionai/corso/src/pkg/logger" ) // --------------------------------------------------------------------------- @@ -64,6 +65,9 @@ func getItemsAddedAndRemovedFromContainer( deltaURL string ) + itemCount := 0 + page := 0 + for { // get the next page of data, check for standard errors resp, err := pager.getPage(ctx) @@ -82,6 +86,14 @@ func getItemsAddedAndRemovedFromContainer( return nil, nil, "", err } + itemCount += len(items) + page++ + + // Log every ~1000 items (the page size we use is 200) + if page%5 == 0 { + 
logger.Ctx(ctx).Infow("queried items", "count", itemCount) + } + // iterate through the items in the page for _, item := range items { // if the additional data conains a `@removed` key, the value will either @@ -114,5 +126,7 @@ func getItemsAddedAndRemovedFromContainer( pager.setNext(nextLink) } + logger.Ctx(ctx).Infow("completed enumeration", "count", itemCount) + return addedIDs, removedIDs, deltaURL, nil } diff --git a/src/internal/connector/exchange/attachment.go b/src/internal/connector/exchange/attachment.go index 5cbce271c..94e6dbc6a 100644 --- a/src/internal/connector/exchange/attachment.go +++ b/src/internal/connector/exchange/attachment.go @@ -8,6 +8,7 @@ import ( "github.com/microsoftgraph/msgraph-sdk-go/models" "github.com/pkg/errors" + "github.com/alcionai/corso/src/internal/connector/support" "github.com/alcionai/corso/src/internal/connector/uploadsession" "github.com/alcionai/corso/src/pkg/logger" ) @@ -44,8 +45,11 @@ func uploadAttachment( attachment models.Attachmentable, ) error { logger.Ctx(ctx).Debugf("uploading attachment with size %d", *attachment.GetSize()) - attachmentType := attachmentType(attachment) + var ( + attachmentType = attachmentType(attachment) + err error + ) // Reference attachments that are inline() do not need to be recreated. The contents are part of the body. 
if attachmentType == models.REFERENCE_ATTACHMENTTYPE && attachment.GetIsInline() != nil && *attachment.GetIsInline() { @@ -53,6 +57,30 @@ func uploadAttachment( return nil } + // item Attachments to be skipped until the completion of Issue #2353 + if attachmentType == models.ITEM_ATTACHMENTTYPE { + prev := attachment + + attachment, err = support.ToItemAttachment(attachment) + if err != nil { + name := "" + if prev.GetName() != nil { + name = *prev.GetName() + } + + // TODO: Update to support PII protection + logger.Ctx(ctx).Infow("item attachment uploads are not supported ", + "err", err, + "attachment_name", name, + "attachment_type", attachmentType, + "internal_item_type", getItemAttachmentItemType(prev), + "attachment_id", *prev.GetId(), + ) + + return nil + } + } + // For Item/Reference attachments *or* file attachments < 3MB, use the attachments endpoint if attachmentType != models.FILE_ATTACHMENTTYPE || *attachment.GetSize() < largeAttachmentSize { err := uploader.uploadSmallAttachment(ctx, attachment) @@ -90,3 +118,19 @@ func uploadLargeAttachment(ctx context.Context, uploader attachmentUploadable, return nil } + +func getItemAttachmentItemType(query models.Attachmentable) string { + empty := "" + attachment, ok := query.(models.ItemAttachmentable) + + if !ok { + return empty + } + + item := attachment.GetItem() + if item.GetOdataType() == nil { + return empty + } + + return *item.GetOdataType() +} diff --git a/src/internal/connector/exchange/container_resolver_test.go b/src/internal/connector/exchange/container_resolver_test.go index d7c6651f1..be0704f46 100644 --- a/src/internal/connector/exchange/container_resolver_test.go +++ b/src/internal/connector/exchange/container_resolver_test.go @@ -501,10 +501,11 @@ func (suite *FolderCacheIntegrationSuite) TestCreateContainerDestination() { directoryCaches = make(map[path.CategoryType]graph.ContainerResolver) folderName = tester.DefaultTestRestoreDestination().ContainerName tests = []struct { - name string - 
pathFunc1 func(t *testing.T) path.Path - pathFunc2 func(t *testing.T) path.Path - category path.CategoryType + name string + pathFunc1 func(t *testing.T) path.Path + pathFunc2 func(t *testing.T) path.Path + category path.CategoryType + folderPrefix string }{ { name: "Mail Cache Test", @@ -587,6 +588,7 @@ func (suite *FolderCacheIntegrationSuite) TestCreateContainerDestination() { require.NoError(t, err) return aPath }, + folderPrefix: calendarOthersFolder, }, } ) @@ -617,8 +619,9 @@ func (suite *FolderCacheIntegrationSuite) TestCreateContainerDestination() { _, err = resolver.IDToPath(ctx, secondID) require.NoError(t, err) - _, ok := resolver.PathInCache(folderName) - require.True(t, ok) + p := stdpath.Join(test.folderPrefix, folderName) + _, ok := resolver.PathInCache(p) + require.True(t, ok, "looking for path in cache: %s", p) }) } } diff --git a/src/internal/connector/exchange/data_collections_test.go b/src/internal/connector/exchange/data_collections_test.go index 70f413239..07eef5e7a 100644 --- a/src/internal/connector/exchange/data_collections_test.go +++ b/src/internal/connector/exchange/data_collections_test.go @@ -537,9 +537,9 @@ func (suite *DataCollectionsIntegrationSuite) TestEventsSerializationRegression( }, { name: "Birthday Calendar", - expected: "Birthdays", + expected: calendarOthersFolder + "/Birthdays", scope: selectors.NewExchangeBackup(users).EventCalendars( - []string{"Birthdays"}, + []string{calendarOthersFolder + "/Birthdays"}, selectors.PrefixMatch(), )[0], }, diff --git a/src/internal/connector/exchange/event_calendar_cache.go b/src/internal/connector/exchange/event_calendar_cache.go index e497a272a..0377433ee 100644 --- a/src/internal/connector/exchange/event_calendar_cache.go +++ b/src/internal/connector/exchange/event_calendar_cache.go @@ -64,7 +64,15 @@ func (ecc *eventCalendarCache) Populate( return errors.Wrap(err, "initializing") } - err := ecc.enumer.EnumerateContainers(ctx, ecc.userID, "", ecc.addFolder) + err := 
ecc.enumer.EnumerateContainers( + ctx, + ecc.userID, + "", + func(cf graph.CacheFolder) error { + cf.SetPath(path.Builder{}.Append(calendarOthersFolder, *cf.GetDisplayName())) + return ecc.addFolder(cf) + }, + ) if err != nil { return errors.Wrap(err, "enumerating containers") } @@ -83,7 +91,7 @@ func (ecc *eventCalendarCache) AddToCache(ctx context.Context, f graph.Container return errors.Wrap(err, "validating container") } - temp := graph.NewCacheFolder(f, path.Builder{}.Append(*f.GetDisplayName())) + temp := graph.NewCacheFolder(f, path.Builder{}.Append(calendarOthersFolder, *f.GetDisplayName())) if err := ecc.addFolder(temp); err != nil { return errors.Wrap(err, "adding container") diff --git a/src/internal/connector/exchange/exchange_vars.go b/src/internal/connector/exchange/exchange_vars.go index e45de0bf0..988d20330 100644 --- a/src/internal/connector/exchange/exchange_vars.go +++ b/src/internal/connector/exchange/exchange_vars.go @@ -38,4 +38,5 @@ const ( rootFolderAlias = "msgfolderroot" DefaultContactFolder = "Contacts" DefaultCalendar = "Calendar" + calendarOthersFolder = "Other Calendars" ) diff --git a/src/internal/connector/exchange/restore_test.go b/src/internal/connector/exchange/restore_test.go index 9c32fd530..360d15266 100644 --- a/src/internal/connector/exchange/restore_test.go +++ b/src/internal/connector/exchange/restore_test.go @@ -175,6 +175,30 @@ func (suite *ExchangeRestoreSuite) TestRestoreExchangeObject() { return *folder.GetId() }, }, + { + name: "Test Mail: Item Attachment_Event", + bytes: mockconnector.GetMockMessageWithItemAttachmentEvent("Event Item Attachment"), + category: path.EmailCategory, + destination: func(t *testing.T, ctx context.Context) string { + folderName := "TestRestoreEventItemAttachment: " + common.FormatSimpleDateTime(now) + folder, err := suite.ac.Mail().CreateMailFolder(ctx, userID, folderName) + require.NoError(t, err) + + return *folder.GetId() + }, + }, + { // Restore will upload the Message without uploading 
the attachment + name: "Test Mail: Item Attachment_NestedEvent", + bytes: mockconnector.GetMockMessageWithNestedItemAttachmentEvent("Nested Item Attachment"), + category: path.EmailCategory, + destination: func(t *testing.T, ctx context.Context) string { + folderName := "TestRestoreNestedEventItemAttachment: " + common.FormatSimpleDateTime(now) + folder, err := suite.ac.Mail().CreateMailFolder(ctx, userID, folderName) + require.NoError(t, err) + + return *folder.GetId() + }, + }, { name: "Test Mail: One Large Attachment", bytes: mockconnector.GetMockMessageWithLargeAttachment("Restore Large Attachment"), @@ -266,7 +290,7 @@ func (suite *ExchangeRestoreSuite) TestRestoreExchangeObject() { userID, ) assert.NoError(t, err, support.ConnectorStackErrorTrace(err)) - assert.NotNil(t, info, "item info is populated") + assert.NotNil(t, info, "item info was not populated") assert.NoError(t, deleters[test.category].DeleteContainer(ctx, userID, destination)) }) } diff --git a/src/internal/connector/exchange/service_restore.go b/src/internal/connector/exchange/service_restore.go index e1144249a..e6fa592f7 100644 --- a/src/internal/connector/exchange/service_restore.go +++ b/src/internal/connector/exchange/service_restore.go @@ -189,23 +189,32 @@ func RestoreMailMessage( // 1st: No transmission // 2nd: Send Date // 3rd: Recv Date + svlep := make([]models.SingleValueLegacyExtendedPropertyable, 0) sv1 := models.NewSingleValueLegacyExtendedProperty() sv1.SetId(&valueID) sv1.SetValue(&enableValue) + svlep = append(svlep, sv1) - sv2 := models.NewSingleValueLegacyExtendedProperty() - sendPropertyValue := common.FormatLegacyTime(*clone.GetSentDateTime()) - sendPropertyTag := MailSendDateTimeOverrideProperty - sv2.SetId(&sendPropertyTag) - sv2.SetValue(&sendPropertyValue) + if clone.GetSentDateTime() != nil { + sv2 := models.NewSingleValueLegacyExtendedProperty() + sendPropertyValue := common.FormatLegacyTime(*clone.GetSentDateTime()) + sendPropertyTag := 
MailSendDateTimeOverrideProperty + sv2.SetId(&sendPropertyTag) + sv2.SetValue(&sendPropertyValue) - sv3 := models.NewSingleValueLegacyExtendedProperty() - recvPropertyValue := common.FormatLegacyTime(*clone.GetReceivedDateTime()) - recvPropertyTag := MailReceiveDateTimeOverriveProperty - sv3.SetId(&recvPropertyTag) - sv3.SetValue(&recvPropertyValue) + svlep = append(svlep, sv2) + } + + if clone.GetReceivedDateTime() != nil { + sv3 := models.NewSingleValueLegacyExtendedProperty() + recvPropertyValue := common.FormatLegacyTime(*clone.GetReceivedDateTime()) + recvPropertyTag := MailReceiveDateTimeOverriveProperty + sv3.SetId(&recvPropertyTag) + sv3.SetValue(&recvPropertyValue) + + svlep = append(svlep, sv3) + } - svlep := []models.SingleValueLegacyExtendedPropertyable{sv1, sv2, sv3} clone.SetSingleValueExtendedProperties(svlep) // Switch workflow based on collision policy @@ -248,10 +257,9 @@ func SendMailToBackStore( errs error ) - if *message.GetHasAttachments() { - attached = message.GetAttachments() - message.SetAttachments([]models.Attachmentable{}) - } + // Item.Attachments --> HasAttachments doesn't always have a value populated when deserialized + attached = message.GetAttachments() + message.SetAttachments([]models.Attachmentable{}) sentMessage, err := service.Client().UsersById(user).MailFoldersById(destination).Messages().Post(ctx, message, nil) if err != nil { @@ -637,7 +645,11 @@ func establishEventsRestoreLocation( user string, isNewCache bool, ) (string, error) { - cached, ok := ecc.PathInCache(folders[0]) + // Need to prefix with the "Other Calendars" folder so lookup happens properly. 
+ cached, ok := ecc.PathInCache(path.Builder{}.Append( + calendarOthersFolder, + folders[0], + ).String()) if ok { return cached, nil } diff --git a/src/internal/connector/graph/betasdk/beta_client.go b/src/internal/connector/graph/betasdk/beta_client.go index f33b110d6..795c85e2a 100644 --- a/src/internal/connector/graph/betasdk/beta_client.go +++ b/src/internal/connector/graph/betasdk/beta_client.go @@ -1,13 +1,14 @@ package betasdk import ( - i1a3c1a5501c5e41b7fd169f2d4c768dce9b096ac28fb5431bf02afcc57295411 "github.com/alcionai/corso/src/internal/connector/graph/betasdk/sites" absser "github.com/microsoft/kiota-abstractions-go" kioser "github.com/microsoft/kiota-abstractions-go/serialization" kform "github.com/microsoft/kiota-serialization-form-go" kw "github.com/microsoft/kiota-serialization-json-go" ktext "github.com/microsoft/kiota-serialization-text-go" msgraphsdk "github.com/microsoftgraph/msgraph-sdk-go" + + i1a3c1a5501c5e41b7fd169f2d4c768dce9b096ac28fb5431bf02afcc57295411 "github.com/alcionai/corso/src/internal/connector/graph/betasdk/sites" ) // BetaClient the main entry point of the SDK, exposes the configuration and the fluent API. diff --git a/src/internal/connector/graph/betasdk/models/horizontal_section_layout_type.go b/src/internal/connector/graph/betasdk/models/horizontal_section_layout_type.go index 43c2643a2..80e208ffe 100644 --- a/src/internal/connector/graph/betasdk/models/horizontal_section_layout_type.go +++ b/src/internal/connector/graph/betasdk/models/horizontal_section_layout_type.go @@ -1,52 +1,54 @@ package models + import ( - "errors" + "errors" ) + // Provides operations to call the remove method. 
type HorizontalSectionLayoutType int const ( - NONE_HORIZONTALSECTIONLAYOUTTYPE HorizontalSectionLayoutType = iota - ONECOLUMN_HORIZONTALSECTIONLAYOUTTYPE - TWOCOLUMNS_HORIZONTALSECTIONLAYOUTTYPE - THREECOLUMNS_HORIZONTALSECTIONLAYOUTTYPE - ONETHIRDLEFTCOLUMN_HORIZONTALSECTIONLAYOUTTYPE - ONETHIRDRIGHTCOLUMN_HORIZONTALSECTIONLAYOUTTYPE - FULLWIDTH_HORIZONTALSECTIONLAYOUTTYPE - UNKNOWNFUTUREVALUE_HORIZONTALSECTIONLAYOUTTYPE + NONE_HORIZONTALSECTIONLAYOUTTYPE HorizontalSectionLayoutType = iota + ONECOLUMN_HORIZONTALSECTIONLAYOUTTYPE + TWOCOLUMNS_HORIZONTALSECTIONLAYOUTTYPE + THREECOLUMNS_HORIZONTALSECTIONLAYOUTTYPE + ONETHIRDLEFTCOLUMN_HORIZONTALSECTIONLAYOUTTYPE + ONETHIRDRIGHTCOLUMN_HORIZONTALSECTIONLAYOUTTYPE + FULLWIDTH_HORIZONTALSECTIONLAYOUTTYPE + UNKNOWNFUTUREVALUE_HORIZONTALSECTIONLAYOUTTYPE ) func (i HorizontalSectionLayoutType) String() string { - return []string{"none", "oneColumn", "twoColumns", "threeColumns", "oneThirdLeftColumn", "oneThirdRightColumn", "fullWidth", "unknownFutureValue"}[i] + return []string{"none", "oneColumn", "twoColumns", "threeColumns", "oneThirdLeftColumn", "oneThirdRightColumn", "fullWidth", "unknownFutureValue"}[i] } func ParseHorizontalSectionLayoutType(v string) (interface{}, error) { - result := NONE_HORIZONTALSECTIONLAYOUTTYPE - switch v { - case "none": - result = NONE_HORIZONTALSECTIONLAYOUTTYPE - case "oneColumn": - result = ONECOLUMN_HORIZONTALSECTIONLAYOUTTYPE - case "twoColumns": - result = TWOCOLUMNS_HORIZONTALSECTIONLAYOUTTYPE - case "threeColumns": - result = THREECOLUMNS_HORIZONTALSECTIONLAYOUTTYPE - case "oneThirdLeftColumn": - result = ONETHIRDLEFTCOLUMN_HORIZONTALSECTIONLAYOUTTYPE - case "oneThirdRightColumn": - result = ONETHIRDRIGHTCOLUMN_HORIZONTALSECTIONLAYOUTTYPE - case "fullWidth": - result = FULLWIDTH_HORIZONTALSECTIONLAYOUTTYPE - case "unknownFutureValue": - result = UNKNOWNFUTUREVALUE_HORIZONTALSECTIONLAYOUTTYPE - default: - return 0, errors.New("Unknown HorizontalSectionLayoutType value: " + v) - } - 
return &result, nil + result := NONE_HORIZONTALSECTIONLAYOUTTYPE + switch v { + case "none": + result = NONE_HORIZONTALSECTIONLAYOUTTYPE + case "oneColumn": + result = ONECOLUMN_HORIZONTALSECTIONLAYOUTTYPE + case "twoColumns": + result = TWOCOLUMNS_HORIZONTALSECTIONLAYOUTTYPE + case "threeColumns": + result = THREECOLUMNS_HORIZONTALSECTIONLAYOUTTYPE + case "oneThirdLeftColumn": + result = ONETHIRDLEFTCOLUMN_HORIZONTALSECTIONLAYOUTTYPE + case "oneThirdRightColumn": + result = ONETHIRDRIGHTCOLUMN_HORIZONTALSECTIONLAYOUTTYPE + case "fullWidth": + result = FULLWIDTH_HORIZONTALSECTIONLAYOUTTYPE + case "unknownFutureValue": + result = UNKNOWNFUTUREVALUE_HORIZONTALSECTIONLAYOUTTYPE + default: + return 0, errors.New("Unknown HorizontalSectionLayoutType value: " + v) + } + return &result, nil } func SerializeHorizontalSectionLayoutType(values []HorizontalSectionLayoutType) []string { - result := make([]string, len(values)) - for i, v := range values { - result[i] = v.String() - } - return result + result := make([]string, len(values)) + for i, v := range values { + result[i] = v.String() + } + return result } diff --git a/src/internal/connector/graph/betasdk/models/meta_data_key_string_pair.go b/src/internal/connector/graph/betasdk/models/meta_data_key_string_pair.go index e7df06165..c79f17cfb 100644 --- a/src/internal/connector/graph/betasdk/models/meta_data_key_string_pair.go +++ b/src/internal/connector/graph/betasdk/models/meta_data_key_string_pair.go @@ -1,123 +1,134 @@ package models import ( - i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization" + i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization" ) -// MetaDataKeyStringPair +// MetaDataKeyStringPair type MetaDataKeyStringPair struct { - // Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well. 
- additionalData map[string]interface{} - // Key of the meta data. - key *string - // The OdataType property - odataType *string - // Value of the meta data. - value *string + // Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well. + additionalData map[string]interface{} + // Key of the meta data. + key *string + // The OdataType property + odataType *string + // Value of the meta data. + value *string } + // NewMetaDataKeyStringPair instantiates a new metaDataKeyStringPair and sets the default values. -func NewMetaDataKeyStringPair()(*MetaDataKeyStringPair) { - m := &MetaDataKeyStringPair{ - } - m.SetAdditionalData(make(map[string]interface{})); - return m +func NewMetaDataKeyStringPair() *MetaDataKeyStringPair { + m := &MetaDataKeyStringPair{} + m.SetAdditionalData(make(map[string]interface{})) + return m } + // CreateMetaDataKeyStringPairFromDiscriminatorValue creates a new instance of the appropriate class based on discriminator value -func CreateMetaDataKeyStringPairFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) { - return NewMetaDataKeyStringPair(), nil +func CreateMetaDataKeyStringPairFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) (i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) { + return NewMetaDataKeyStringPair(), nil } + // GetAdditionalData gets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well. 
-func (m *MetaDataKeyStringPair) GetAdditionalData()(map[string]interface{}) { - return m.additionalData +func (m *MetaDataKeyStringPair) GetAdditionalData() map[string]interface{} { + return m.additionalData } + // GetFieldDeserializers the deserialization information for the current model -func (m *MetaDataKeyStringPair) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) { - res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) - res["key"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { - val, err := n.GetStringValue() - if err != nil { - return err - } - if val != nil { - m.SetKey(val) - } - return nil - } - res["@odata.type"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { - val, err := n.GetStringValue() - if err != nil { - return err - } - if val != nil { - m.SetOdataType(val) - } - return nil - } - res["value"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { - val, err := n.GetStringValue() - if err != nil { - return err - } - if val != nil { - m.SetValue(val) - } - return nil - } - return res +func (m *MetaDataKeyStringPair) GetFieldDeserializers() map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { + res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error) + res["key"] = func(n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { + val, err := n.GetStringValue() + if err != nil { + return err + } + if val != nil { + m.SetKey(val) + } + return nil + } + res["@odata.type"] = func(n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { + val, err := n.GetStringValue() + if err != nil { + return err + } + if val != nil { + 
m.SetOdataType(val) + } + return nil + } + res["value"] = func(n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { + val, err := n.GetStringValue() + if err != nil { + return err + } + if val != nil { + m.SetValue(val) + } + return nil + } + return res } + // GetKey gets the key property value. Key of the meta data. -func (m *MetaDataKeyStringPair) GetKey()(*string) { - return m.key +func (m *MetaDataKeyStringPair) GetKey() *string { + return m.key } + // GetOdataType gets the @odata.type property value. The OdataType property -func (m *MetaDataKeyStringPair) GetOdataType()(*string) { - return m.odataType +func (m *MetaDataKeyStringPair) GetOdataType() *string { + return m.odataType } + // GetValue gets the value property value. Value of the meta data. -func (m *MetaDataKeyStringPair) GetValue()(*string) { - return m.value +func (m *MetaDataKeyStringPair) GetValue() *string { + return m.value } + // Serialize serializes information the current object -func (m *MetaDataKeyStringPair) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) { - { - err := writer.WriteStringValue("key", m.GetKey()) - if err != nil { - return err - } - } - { - err := writer.WriteStringValue("@odata.type", m.GetOdataType()) - if err != nil { - return err - } - } - { - err := writer.WriteStringValue("value", m.GetValue()) - if err != nil { - return err - } - } - { - err := writer.WriteAdditionalData(m.GetAdditionalData()) - if err != nil { - return err - } - } - return nil +func (m *MetaDataKeyStringPair) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter) error { + { + err := writer.WriteStringValue("key", m.GetKey()) + if err != nil { + return err + } + } + { + err := writer.WriteStringValue("@odata.type", m.GetOdataType()) + if err != nil { + return err + } + } + { + err := writer.WriteStringValue("value", m.GetValue()) + if err != nil { + return 
err + } + } + { + err := writer.WriteAdditionalData(m.GetAdditionalData()) + if err != nil { + return err + } + } + return nil } + // SetAdditionalData sets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well. -func (m *MetaDataKeyStringPair) SetAdditionalData(value map[string]interface{})() { - m.additionalData = value +func (m *MetaDataKeyStringPair) SetAdditionalData(value map[string]interface{}) { + m.additionalData = value } + // SetKey sets the key property value. Key of the meta data. -func (m *MetaDataKeyStringPair) SetKey(value *string)() { - m.key = value +func (m *MetaDataKeyStringPair) SetKey(value *string) { + m.key = value } + // SetOdataType sets the @odata.type property value. The OdataType property -func (m *MetaDataKeyStringPair) SetOdataType(value *string)() { - m.odataType = value +func (m *MetaDataKeyStringPair) SetOdataType(value *string) { + m.odataType = value } + // SetValue sets the value property value. Value of the meta data. 
-func (m *MetaDataKeyStringPair) SetValue(value *string)() { - m.value = value +func (m *MetaDataKeyStringPair) SetValue(value *string) { + m.value = value } diff --git a/src/internal/connector/graph/betasdk/models/meta_data_key_string_pairable.go b/src/internal/connector/graph/betasdk/models/meta_data_key_string_pairable.go index 49908469e..4168f4dce 100644 --- a/src/internal/connector/graph/betasdk/models/meta_data_key_string_pairable.go +++ b/src/internal/connector/graph/betasdk/models/meta_data_key_string_pairable.go @@ -1,17 +1,17 @@ package models import ( - i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization" + i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization" ) -// MetaDataKeyStringPairable +// MetaDataKeyStringPairable type MetaDataKeyStringPairable interface { - i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.AdditionalDataHolder - i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable - GetKey()(*string) - GetOdataType()(*string) - GetValue()(*string) - SetKey(value *string)() - SetOdataType(value *string)() - SetValue(value *string)() + i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.AdditionalDataHolder + i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable + GetKey() *string + GetOdataType() *string + GetValue() *string + SetKey(value *string) + SetOdataType(value *string) + SetValue(value *string) } diff --git a/src/internal/connector/graph/betasdk/models/page_layout_type.go b/src/internal/connector/graph/betasdk/models/page_layout_type.go index 0338a5c30..fce795760 100644 --- a/src/internal/connector/graph/betasdk/models/page_layout_type.go +++ b/src/internal/connector/graph/betasdk/models/page_layout_type.go @@ -1,40 +1,42 @@ package models + import ( - "errors" + "errors" ) + // Provides operations to call the remove method. 
type PageLayoutType int const ( - MICROSOFTRESERVED_PAGELAYOUTTYPE PageLayoutType = iota - ARTICLE_PAGELAYOUTTYPE - HOME_PAGELAYOUTTYPE - UNKNOWNFUTUREVALUE_PAGELAYOUTTYPE + MICROSOFTRESERVED_PAGELAYOUTTYPE PageLayoutType = iota + ARTICLE_PAGELAYOUTTYPE + HOME_PAGELAYOUTTYPE + UNKNOWNFUTUREVALUE_PAGELAYOUTTYPE ) func (i PageLayoutType) String() string { - return []string{"microsoftReserved", "article", "home", "unknownFutureValue"}[i] + return []string{"microsoftReserved", "article", "home", "unknownFutureValue"}[i] } func ParsePageLayoutType(v string) (interface{}, error) { - result := MICROSOFTRESERVED_PAGELAYOUTTYPE - switch v { - case "microsoftReserved": - result = MICROSOFTRESERVED_PAGELAYOUTTYPE - case "article": - result = ARTICLE_PAGELAYOUTTYPE - case "home": - result = HOME_PAGELAYOUTTYPE - case "unknownFutureValue": - result = UNKNOWNFUTUREVALUE_PAGELAYOUTTYPE - default: - return 0, errors.New("Unknown PageLayoutType value: " + v) - } - return &result, nil + result := MICROSOFTRESERVED_PAGELAYOUTTYPE + switch v { + case "microsoftReserved": + result = MICROSOFTRESERVED_PAGELAYOUTTYPE + case "article": + result = ARTICLE_PAGELAYOUTTYPE + case "home": + result = HOME_PAGELAYOUTTYPE + case "unknownFutureValue": + result = UNKNOWNFUTUREVALUE_PAGELAYOUTTYPE + default: + return 0, errors.New("Unknown PageLayoutType value: " + v) + } + return &result, nil } func SerializePageLayoutType(values []PageLayoutType) []string { - result := make([]string, len(values)) - for i, v := range values { - result[i] = v.String() - } - return result + result := make([]string, len(values)) + for i, v := range values { + result[i] = v.String() + } + return result } diff --git a/src/internal/connector/graph/betasdk/models/page_promotion_type.go b/src/internal/connector/graph/betasdk/models/page_promotion_type.go index a8cbcd058..e78ce63f0 100644 --- a/src/internal/connector/graph/betasdk/models/page_promotion_type.go +++ 
b/src/internal/connector/graph/betasdk/models/page_promotion_type.go @@ -1,40 +1,42 @@ package models + import ( - "errors" + "errors" ) + // Provides operations to call the remove method. type PagePromotionType int const ( - MICROSOFTRESERVED_PAGEPROMOTIONTYPE PagePromotionType = iota - PAGE_PAGEPROMOTIONTYPE - NEWSPOST_PAGEPROMOTIONTYPE - UNKNOWNFUTUREVALUE_PAGEPROMOTIONTYPE + MICROSOFTRESERVED_PAGEPROMOTIONTYPE PagePromotionType = iota + PAGE_PAGEPROMOTIONTYPE + NEWSPOST_PAGEPROMOTIONTYPE + UNKNOWNFUTUREVALUE_PAGEPROMOTIONTYPE ) func (i PagePromotionType) String() string { - return []string{"microsoftReserved", "page", "newsPost", "unknownFutureValue"}[i] + return []string{"microsoftReserved", "page", "newsPost", "unknownFutureValue"}[i] } func ParsePagePromotionType(v string) (interface{}, error) { - result := MICROSOFTRESERVED_PAGEPROMOTIONTYPE - switch v { - case "microsoftReserved": - result = MICROSOFTRESERVED_PAGEPROMOTIONTYPE - case "page": - result = PAGE_PAGEPROMOTIONTYPE - case "newsPost": - result = NEWSPOST_PAGEPROMOTIONTYPE - case "unknownFutureValue": - result = UNKNOWNFUTUREVALUE_PAGEPROMOTIONTYPE - default: - return 0, errors.New("Unknown PagePromotionType value: " + v) - } - return &result, nil + result := MICROSOFTRESERVED_PAGEPROMOTIONTYPE + switch v { + case "microsoftReserved": + result = MICROSOFTRESERVED_PAGEPROMOTIONTYPE + case "page": + result = PAGE_PAGEPROMOTIONTYPE + case "newsPost": + result = NEWSPOST_PAGEPROMOTIONTYPE + case "unknownFutureValue": + result = UNKNOWNFUTUREVALUE_PAGEPROMOTIONTYPE + default: + return 0, errors.New("Unknown PagePromotionType value: " + v) + } + return &result, nil } func SerializePagePromotionType(values []PagePromotionType) []string { - result := make([]string, len(values)) - for i, v := range values { - result[i] = v.String() - } - return result + result := make([]string, len(values)) + for i, v := range values { + result[i] = v.String() + } + return result } diff --git 
a/src/internal/connector/graph/betasdk/models/publication_facet.go b/src/internal/connector/graph/betasdk/models/publication_facet.go index 87e59d34b..860b88bf3 100644 --- a/src/internal/connector/graph/betasdk/models/publication_facet.go +++ b/src/internal/connector/graph/betasdk/models/publication_facet.go @@ -1,123 +1,134 @@ package models import ( - i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization" + i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization" ) -// PublicationFacet +// PublicationFacet type PublicationFacet struct { - // Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well. - additionalData map[string]interface{} - // The state of publication for this document. Either published or checkout. Read-only. - level *string - // The OdataType property - odataType *string - // The unique identifier for the version that is visible to the current caller. Read-only. - versionId *string + // Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well. + additionalData map[string]interface{} + // The state of publication for this document. Either published or checkout. Read-only. + level *string + // The OdataType property + odataType *string + // The unique identifier for the version that is visible to the current caller. Read-only. + versionId *string } + // NewPublicationFacet instantiates a new publicationFacet and sets the default values. 
-func NewPublicationFacet()(*PublicationFacet) { - m := &PublicationFacet{ - } - m.SetAdditionalData(make(map[string]interface{})); - return m +func NewPublicationFacet() *PublicationFacet { + m := &PublicationFacet{} + m.SetAdditionalData(make(map[string]interface{})) + return m } + // CreatePublicationFacetFromDiscriminatorValue creates a new instance of the appropriate class based on discriminator value -func CreatePublicationFacetFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) { - return NewPublicationFacet(), nil +func CreatePublicationFacetFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) (i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) { + return NewPublicationFacet(), nil } + // GetAdditionalData gets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well. 
-func (m *PublicationFacet) GetAdditionalData()(map[string]interface{}) { - return m.additionalData +func (m *PublicationFacet) GetAdditionalData() map[string]interface{} { + return m.additionalData } + // GetFieldDeserializers the deserialization information for the current model -func (m *PublicationFacet) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) { - res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) - res["level"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { - val, err := n.GetStringValue() - if err != nil { - return err - } - if val != nil { - m.SetLevel(val) - } - return nil - } - res["@odata.type"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { - val, err := n.GetStringValue() - if err != nil { - return err - } - if val != nil { - m.SetOdataType(val) - } - return nil - } - res["versionId"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { - val, err := n.GetStringValue() - if err != nil { - return err - } - if val != nil { - m.SetVersionId(val) - } - return nil - } - return res +func (m *PublicationFacet) GetFieldDeserializers() map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { + res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error) + res["level"] = func(n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { + val, err := n.GetStringValue() + if err != nil { + return err + } + if val != nil { + m.SetLevel(val) + } + return nil + } + res["@odata.type"] = func(n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { + val, err := n.GetStringValue() + if err != nil { + return err + } + if val != nil { + 
m.SetOdataType(val) + } + return nil + } + res["versionId"] = func(n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { + val, err := n.GetStringValue() + if err != nil { + return err + } + if val != nil { + m.SetVersionId(val) + } + return nil + } + return res } + // GetLevel gets the level property value. The state of publication for this document. Either published or checkout. Read-only. -func (m *PublicationFacet) GetLevel()(*string) { - return m.level +func (m *PublicationFacet) GetLevel() *string { + return m.level } + // GetOdataType gets the @odata.type property value. The OdataType property -func (m *PublicationFacet) GetOdataType()(*string) { - return m.odataType +func (m *PublicationFacet) GetOdataType() *string { + return m.odataType } + // GetVersionId gets the versionId property value. The unique identifier for the version that is visible to the current caller. Read-only. -func (m *PublicationFacet) GetVersionId()(*string) { - return m.versionId +func (m *PublicationFacet) GetVersionId() *string { + return m.versionId } + // Serialize serializes information the current object -func (m *PublicationFacet) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) { - { - err := writer.WriteStringValue("level", m.GetLevel()) - if err != nil { - return err - } - } - { - err := writer.WriteStringValue("@odata.type", m.GetOdataType()) - if err != nil { - return err - } - } - { - err := writer.WriteStringValue("versionId", m.GetVersionId()) - if err != nil { - return err - } - } - { - err := writer.WriteAdditionalData(m.GetAdditionalData()) - if err != nil { - return err - } - } - return nil +func (m *PublicationFacet) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter) error { + { + err := writer.WriteStringValue("level", m.GetLevel()) + if err != nil { + return err + } + } + { + err := 
writer.WriteStringValue("@odata.type", m.GetOdataType()) + if err != nil { + return err + } + } + { + err := writer.WriteStringValue("versionId", m.GetVersionId()) + if err != nil { + return err + } + } + { + err := writer.WriteAdditionalData(m.GetAdditionalData()) + if err != nil { + return err + } + } + return nil } + // SetAdditionalData sets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well. -func (m *PublicationFacet) SetAdditionalData(value map[string]interface{})() { - m.additionalData = value +func (m *PublicationFacet) SetAdditionalData(value map[string]interface{}) { + m.additionalData = value } + // SetLevel sets the level property value. The state of publication for this document. Either published or checkout. Read-only. -func (m *PublicationFacet) SetLevel(value *string)() { - m.level = value +func (m *PublicationFacet) SetLevel(value *string) { + m.level = value } + // SetOdataType sets the @odata.type property value. The OdataType property -func (m *PublicationFacet) SetOdataType(value *string)() { - m.odataType = value +func (m *PublicationFacet) SetOdataType(value *string) { + m.odataType = value } + // SetVersionId sets the versionId property value. The unique identifier for the version that is visible to the current caller. Read-only. 
-func (m *PublicationFacet) SetVersionId(value *string)() { - m.versionId = value +func (m *PublicationFacet) SetVersionId(value *string) { + m.versionId = value } diff --git a/src/internal/connector/graph/betasdk/models/publication_facetable.go b/src/internal/connector/graph/betasdk/models/publication_facetable.go index 20d82ccf8..4098c89b1 100644 --- a/src/internal/connector/graph/betasdk/models/publication_facetable.go +++ b/src/internal/connector/graph/betasdk/models/publication_facetable.go @@ -1,17 +1,17 @@ package models import ( - i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization" + i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization" ) -// PublicationFacetable +// PublicationFacetable type PublicationFacetable interface { - i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.AdditionalDataHolder - i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable - GetLevel()(*string) - GetOdataType()(*string) - GetVersionId()(*string) - SetLevel(value *string)() - SetOdataType(value *string)() - SetVersionId(value *string)() + i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.AdditionalDataHolder + i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable + GetLevel() *string + GetOdataType() *string + GetVersionId() *string + SetLevel(value *string) + SetOdataType(value *string) + SetVersionId(value *string) } diff --git a/src/internal/connector/graph/betasdk/models/reactions_facet.go b/src/internal/connector/graph/betasdk/models/reactions_facet.go index b298a9fe1..c971925dc 100644 --- a/src/internal/connector/graph/betasdk/models/reactions_facet.go +++ b/src/internal/connector/graph/betasdk/models/reactions_facet.go @@ -1,149 +1,162 @@ package models import ( - i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 
"github.com/microsoft/kiota-abstractions-go/serialization" + i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization" ) -// ReactionsFacet +// ReactionsFacet type ReactionsFacet struct { - // Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well. - additionalData map[string]interface{} - // Count of comments. - commentCount *int32 - // Count of likes. - likeCount *int32 - // The OdataType property - odataType *string - // Count of shares. - shareCount *int32 + // Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well. + additionalData map[string]interface{} + // Count of comments. + commentCount *int32 + // Count of likes. + likeCount *int32 + // The OdataType property + odataType *string + // Count of shares. + shareCount *int32 } + // NewReactionsFacet instantiates a new reactionsFacet and sets the default values. 
-func NewReactionsFacet()(*ReactionsFacet) { - m := &ReactionsFacet{ - } - m.SetAdditionalData(make(map[string]interface{})); - return m +func NewReactionsFacet() *ReactionsFacet { + m := &ReactionsFacet{} + m.SetAdditionalData(make(map[string]interface{})) + return m } + // CreateReactionsFacetFromDiscriminatorValue creates a new instance of the appropriate class based on discriminator value -func CreateReactionsFacetFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) { - return NewReactionsFacet(), nil +func CreateReactionsFacetFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) (i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) { + return NewReactionsFacet(), nil } + // GetAdditionalData gets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well. -func (m *ReactionsFacet) GetAdditionalData()(map[string]interface{}) { - return m.additionalData +func (m *ReactionsFacet) GetAdditionalData() map[string]interface{} { + return m.additionalData } + // GetCommentCount gets the commentCount property value. Count of comments. 
-func (m *ReactionsFacet) GetCommentCount()(*int32) { - return m.commentCount +func (m *ReactionsFacet) GetCommentCount() *int32 { + return m.commentCount } + // GetFieldDeserializers the deserialization information for the current model -func (m *ReactionsFacet) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) { - res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) - res["commentCount"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { - val, err := n.GetInt32Value() - if err != nil { - return err - } - if val != nil { - m.SetCommentCount(val) - } - return nil - } - res["likeCount"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { - val, err := n.GetInt32Value() - if err != nil { - return err - } - if val != nil { - m.SetLikeCount(val) - } - return nil - } - res["@odata.type"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { - val, err := n.GetStringValue() - if err != nil { - return err - } - if val != nil { - m.SetOdataType(val) - } - return nil - } - res["shareCount"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { - val, err := n.GetInt32Value() - if err != nil { - return err - } - if val != nil { - m.SetShareCount(val) - } - return nil - } - return res +func (m *ReactionsFacet) GetFieldDeserializers() map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { + res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error) + res["commentCount"] = func(n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { + val, err := n.GetInt32Value() + if err != nil { + return err + } + if val != nil { + m.SetCommentCount(val) + } + 
return nil + } + res["likeCount"] = func(n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { + val, err := n.GetInt32Value() + if err != nil { + return err + } + if val != nil { + m.SetLikeCount(val) + } + return nil + } + res["@odata.type"] = func(n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { + val, err := n.GetStringValue() + if err != nil { + return err + } + if val != nil { + m.SetOdataType(val) + } + return nil + } + res["shareCount"] = func(n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { + val, err := n.GetInt32Value() + if err != nil { + return err + } + if val != nil { + m.SetShareCount(val) + } + return nil + } + return res } + // GetLikeCount gets the likeCount property value. Count of likes. -func (m *ReactionsFacet) GetLikeCount()(*int32) { - return m.likeCount +func (m *ReactionsFacet) GetLikeCount() *int32 { + return m.likeCount } + // GetOdataType gets the @odata.type property value. The OdataType property -func (m *ReactionsFacet) GetOdataType()(*string) { - return m.odataType +func (m *ReactionsFacet) GetOdataType() *string { + return m.odataType } + // GetShareCount gets the shareCount property value. Count of shares. 
-func (m *ReactionsFacet) GetShareCount()(*int32) { - return m.shareCount +func (m *ReactionsFacet) GetShareCount() *int32 { + return m.shareCount } + // Serialize serializes information the current object -func (m *ReactionsFacet) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) { - { - err := writer.WriteInt32Value("commentCount", m.GetCommentCount()) - if err != nil { - return err - } - } - { - err := writer.WriteInt32Value("likeCount", m.GetLikeCount()) - if err != nil { - return err - } - } - { - err := writer.WriteStringValue("@odata.type", m.GetOdataType()) - if err != nil { - return err - } - } - { - err := writer.WriteInt32Value("shareCount", m.GetShareCount()) - if err != nil { - return err - } - } - { - err := writer.WriteAdditionalData(m.GetAdditionalData()) - if err != nil { - return err - } - } - return nil +func (m *ReactionsFacet) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter) error { + { + err := writer.WriteInt32Value("commentCount", m.GetCommentCount()) + if err != nil { + return err + } + } + { + err := writer.WriteInt32Value("likeCount", m.GetLikeCount()) + if err != nil { + return err + } + } + { + err := writer.WriteStringValue("@odata.type", m.GetOdataType()) + if err != nil { + return err + } + } + { + err := writer.WriteInt32Value("shareCount", m.GetShareCount()) + if err != nil { + return err + } + } + { + err := writer.WriteAdditionalData(m.GetAdditionalData()) + if err != nil { + return err + } + } + return nil } + // SetAdditionalData sets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well. 
-func (m *ReactionsFacet) SetAdditionalData(value map[string]interface{})() { - m.additionalData = value +func (m *ReactionsFacet) SetAdditionalData(value map[string]interface{}) { + m.additionalData = value } + // SetCommentCount sets the commentCount property value. Count of comments. -func (m *ReactionsFacet) SetCommentCount(value *int32)() { - m.commentCount = value +func (m *ReactionsFacet) SetCommentCount(value *int32) { + m.commentCount = value } + // SetLikeCount sets the likeCount property value. Count of likes. -func (m *ReactionsFacet) SetLikeCount(value *int32)() { - m.likeCount = value +func (m *ReactionsFacet) SetLikeCount(value *int32) { + m.likeCount = value } + // SetOdataType sets the @odata.type property value. The OdataType property -func (m *ReactionsFacet) SetOdataType(value *string)() { - m.odataType = value +func (m *ReactionsFacet) SetOdataType(value *string) { + m.odataType = value } + // SetShareCount sets the shareCount property value. Count of shares. -func (m *ReactionsFacet) SetShareCount(value *int32)() { - m.shareCount = value +func (m *ReactionsFacet) SetShareCount(value *int32) { + m.shareCount = value } diff --git a/src/internal/connector/graph/betasdk/models/reactions_facetable.go b/src/internal/connector/graph/betasdk/models/reactions_facetable.go index 4e5086047..acdefec37 100644 --- a/src/internal/connector/graph/betasdk/models/reactions_facetable.go +++ b/src/internal/connector/graph/betasdk/models/reactions_facetable.go @@ -1,19 +1,19 @@ package models import ( - i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization" + i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization" ) -// ReactionsFacetable +// ReactionsFacetable type ReactionsFacetable interface { - i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.AdditionalDataHolder - 
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable - GetCommentCount()(*int32) - GetLikeCount()(*int32) - GetOdataType()(*string) - GetShareCount()(*int32) - SetCommentCount(value *int32)() - SetLikeCount(value *int32)() - SetOdataType(value *string)() - SetShareCount(value *int32)() + i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.AdditionalDataHolder + i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable + GetCommentCount() *int32 + GetLikeCount() *int32 + GetOdataType() *string + GetShareCount() *int32 + SetCommentCount(value *int32) + SetLikeCount(value *int32) + SetOdataType(value *string) + SetShareCount(value *int32) } diff --git a/src/internal/connector/graph/betasdk/models/section_emphasis_type.go b/src/internal/connector/graph/betasdk/models/section_emphasis_type.go index 0016aec10..301ae839f 100644 --- a/src/internal/connector/graph/betasdk/models/section_emphasis_type.go +++ b/src/internal/connector/graph/betasdk/models/section_emphasis_type.go @@ -1,43 +1,45 @@ package models + import ( - "errors" + "errors" ) + // Provides operations to call the remove method. 
type SectionEmphasisType int const ( - NONE_SECTIONEMPHASISTYPE SectionEmphasisType = iota - NEUTRAL_SECTIONEMPHASISTYPE - SOFT_SECTIONEMPHASISTYPE - STRONG_SECTIONEMPHASISTYPE - UNKNOWNFUTUREVALUE_SECTIONEMPHASISTYPE + NONE_SECTIONEMPHASISTYPE SectionEmphasisType = iota + NEUTRAL_SECTIONEMPHASISTYPE + SOFT_SECTIONEMPHASISTYPE + STRONG_SECTIONEMPHASISTYPE + UNKNOWNFUTUREVALUE_SECTIONEMPHASISTYPE ) func (i SectionEmphasisType) String() string { - return []string{"none", "neutral", "soft", "strong", "unknownFutureValue"}[i] + return []string{"none", "neutral", "soft", "strong", "unknownFutureValue"}[i] } func ParseSectionEmphasisType(v string) (interface{}, error) { - result := NONE_SECTIONEMPHASISTYPE - switch v { - case "none": - result = NONE_SECTIONEMPHASISTYPE - case "neutral": - result = NEUTRAL_SECTIONEMPHASISTYPE - case "soft": - result = SOFT_SECTIONEMPHASISTYPE - case "strong": - result = STRONG_SECTIONEMPHASISTYPE - case "unknownFutureValue": - result = UNKNOWNFUTUREVALUE_SECTIONEMPHASISTYPE - default: - return 0, errors.New("Unknown SectionEmphasisType value: " + v) - } - return &result, nil + result := NONE_SECTIONEMPHASISTYPE + switch v { + case "none": + result = NONE_SECTIONEMPHASISTYPE + case "neutral": + result = NEUTRAL_SECTIONEMPHASISTYPE + case "soft": + result = SOFT_SECTIONEMPHASISTYPE + case "strong": + result = STRONG_SECTIONEMPHASISTYPE + case "unknownFutureValue": + result = UNKNOWNFUTUREVALUE_SECTIONEMPHASISTYPE + default: + return 0, errors.New("Unknown SectionEmphasisType value: " + v) + } + return &result, nil } func SerializeSectionEmphasisType(values []SectionEmphasisType) []string { - result := make([]string, len(values)) - for i, v := range values { - result[i] = v.String() - } - return result + result := make([]string, len(values)) + for i, v := range values { + result[i] = v.String() + } + return result } diff --git a/src/internal/connector/graph/betasdk/models/site_access_type.go 
b/src/internal/connector/graph/betasdk/models/site_access_type.go index 052a2efdb..2d4cedffe 100644 --- a/src/internal/connector/graph/betasdk/models/site_access_type.go +++ b/src/internal/connector/graph/betasdk/models/site_access_type.go @@ -1,37 +1,39 @@ package models + import ( - "errors" + "errors" ) + // Provides operations to call the remove method. type SiteAccessType int const ( - BLOCK_SITEACCESSTYPE SiteAccessType = iota - FULL_SITEACCESSTYPE - LIMITED_SITEACCESSTYPE + BLOCK_SITEACCESSTYPE SiteAccessType = iota + FULL_SITEACCESSTYPE + LIMITED_SITEACCESSTYPE ) func (i SiteAccessType) String() string { - return []string{"block", "full", "limited"}[i] + return []string{"block", "full", "limited"}[i] } func ParseSiteAccessType(v string) (interface{}, error) { - result := BLOCK_SITEACCESSTYPE - switch v { - case "block": - result = BLOCK_SITEACCESSTYPE - case "full": - result = FULL_SITEACCESSTYPE - case "limited": - result = LIMITED_SITEACCESSTYPE - default: - return 0, errors.New("Unknown SiteAccessType value: " + v) - } - return &result, nil + result := BLOCK_SITEACCESSTYPE + switch v { + case "block": + result = BLOCK_SITEACCESSTYPE + case "full": + result = FULL_SITEACCESSTYPE + case "limited": + result = LIMITED_SITEACCESSTYPE + default: + return 0, errors.New("Unknown SiteAccessType value: " + v) + } + return &result, nil } func SerializeSiteAccessType(values []SiteAccessType) []string { - result := make([]string, len(values)) - for i, v := range values { - result[i] = v.String() - } - return result + result := make([]string, len(values)) + for i, v := range values { + result[i] = v.String() + } + return result } diff --git a/src/internal/connector/graph/betasdk/models/site_page_collection_response.go b/src/internal/connector/graph/betasdk/models/site_page_collection_response.go index f66cdafdf..bbd79c3a4 100644 --- a/src/internal/connector/graph/betasdk/models/site_page_collection_response.go +++ 
b/src/internal/connector/graph/betasdk/models/site_page_collection_response.go @@ -2,7 +2,6 @@ package models import ( i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization" - msmodel "github.com/microsoftgraph/msgraph-sdk-go/models" ) diff --git a/src/internal/connector/graph/betasdk/models/site_security_level.go b/src/internal/connector/graph/betasdk/models/site_security_level.go index d2733ce47..0c75c164e 100644 --- a/src/internal/connector/graph/betasdk/models/site_security_level.go +++ b/src/internal/connector/graph/betasdk/models/site_security_level.go @@ -1,52 +1,54 @@ package models + import ( - "errors" + "errors" ) + // Provides operations to call the add method. type SiteSecurityLevel int const ( - // User Defined, default value, no intent. - USERDEFINED_SITESECURITYLEVEL SiteSecurityLevel = iota - // Low. - LOW_SITESECURITYLEVEL - // Medium-low. - MEDIUMLOW_SITESECURITYLEVEL - // Medium. - MEDIUM_SITESECURITYLEVEL - // Medium-high. - MEDIUMHIGH_SITESECURITYLEVEL - // High. - HIGH_SITESECURITYLEVEL + // User Defined, default value, no intent. + USERDEFINED_SITESECURITYLEVEL SiteSecurityLevel = iota + // Low. + LOW_SITESECURITYLEVEL + // Medium-low. + MEDIUMLOW_SITESECURITYLEVEL + // Medium. + MEDIUM_SITESECURITYLEVEL + // Medium-high. + MEDIUMHIGH_SITESECURITYLEVEL + // High. 
+ HIGH_SITESECURITYLEVEL ) func (i SiteSecurityLevel) String() string { - return []string{"userDefined", "low", "mediumLow", "medium", "mediumHigh", "high"}[i] + return []string{"userDefined", "low", "mediumLow", "medium", "mediumHigh", "high"}[i] } func ParseSiteSecurityLevel(v string) (interface{}, error) { - result := USERDEFINED_SITESECURITYLEVEL - switch v { - case "userDefined": - result = USERDEFINED_SITESECURITYLEVEL - case "low": - result = LOW_SITESECURITYLEVEL - case "mediumLow": - result = MEDIUMLOW_SITESECURITYLEVEL - case "medium": - result = MEDIUM_SITESECURITYLEVEL - case "mediumHigh": - result = MEDIUMHIGH_SITESECURITYLEVEL - case "high": - result = HIGH_SITESECURITYLEVEL - default: - return 0, errors.New("Unknown SiteSecurityLevel value: " + v) - } - return &result, nil + result := USERDEFINED_SITESECURITYLEVEL + switch v { + case "userDefined": + result = USERDEFINED_SITESECURITYLEVEL + case "low": + result = LOW_SITESECURITYLEVEL + case "mediumLow": + result = MEDIUMLOW_SITESECURITYLEVEL + case "medium": + result = MEDIUM_SITESECURITYLEVEL + case "mediumHigh": + result = MEDIUMHIGH_SITESECURITYLEVEL + case "high": + result = HIGH_SITESECURITYLEVEL + default: + return 0, errors.New("Unknown SiteSecurityLevel value: " + v) + } + return &result, nil } func SerializeSiteSecurityLevel(values []SiteSecurityLevel) []string { - result := make([]string, len(values)) - for i, v := range values { - result[i] = v.String() - } - return result + result := make([]string, len(values)) + for i, v := range values { + result[i] = v.String() + } + return result } diff --git a/src/internal/connector/graph/betasdk/models/site_settings.go b/src/internal/connector/graph/betasdk/models/site_settings.go index a2a36d94a..1f8930408 100644 --- a/src/internal/connector/graph/betasdk/models/site_settings.go +++ b/src/internal/connector/graph/betasdk/models/site_settings.go @@ -1,123 +1,134 @@ package models import ( - 
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization" + i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization" ) -// SiteSettings +// SiteSettings type SiteSettings struct { - // Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well. - additionalData map[string]interface{} - // The language tag for the language used on this site. - languageTag *string - // The OdataType property - odataType *string - // Indicates the time offset for the time zone of the site from Coordinated Universal Time (UTC). - timeZone *string + // Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well. + additionalData map[string]interface{} + // The language tag for the language used on this site. + languageTag *string + // The OdataType property + odataType *string + // Indicates the time offset for the time zone of the site from Coordinated Universal Time (UTC). + timeZone *string } + // NewSiteSettings instantiates a new siteSettings and sets the default values. 
-func NewSiteSettings()(*SiteSettings) { - m := &SiteSettings{ - } - m.SetAdditionalData(make(map[string]interface{})); - return m +func NewSiteSettings() *SiteSettings { + m := &SiteSettings{} + m.SetAdditionalData(make(map[string]interface{})) + return m } + // CreateSiteSettingsFromDiscriminatorValue creates a new instance of the appropriate class based on discriminator value -func CreateSiteSettingsFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) { - return NewSiteSettings(), nil +func CreateSiteSettingsFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) (i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) { + return NewSiteSettings(), nil } + // GetAdditionalData gets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well. 
-func (m *SiteSettings) GetAdditionalData()(map[string]interface{}) { - return m.additionalData +func (m *SiteSettings) GetAdditionalData() map[string]interface{} { + return m.additionalData } + // GetFieldDeserializers the deserialization information for the current model -func (m *SiteSettings) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) { - res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) - res["languageTag"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { - val, err := n.GetStringValue() - if err != nil { - return err - } - if val != nil { - m.SetLanguageTag(val) - } - return nil - } - res["@odata.type"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { - val, err := n.GetStringValue() - if err != nil { - return err - } - if val != nil { - m.SetOdataType(val) - } - return nil - } - res["timeZone"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { - val, err := n.GetStringValue() - if err != nil { - return err - } - if val != nil { - m.SetTimeZone(val) - } - return nil - } - return res +func (m *SiteSettings) GetFieldDeserializers() map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { + res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error) + res["languageTag"] = func(n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { + val, err := n.GetStringValue() + if err != nil { + return err + } + if val != nil { + m.SetLanguageTag(val) + } + return nil + } + res["@odata.type"] = func(n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { + val, err := n.GetStringValue() + if err != nil { + return err + } + if val != nil { + 
m.SetOdataType(val) + } + return nil + } + res["timeZone"] = func(n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { + val, err := n.GetStringValue() + if err != nil { + return err + } + if val != nil { + m.SetTimeZone(val) + } + return nil + } + return res } + // GetLanguageTag gets the languageTag property value. The language tag for the language used on this site. -func (m *SiteSettings) GetLanguageTag()(*string) { - return m.languageTag +func (m *SiteSettings) GetLanguageTag() *string { + return m.languageTag } + // GetOdataType gets the @odata.type property value. The OdataType property -func (m *SiteSettings) GetOdataType()(*string) { - return m.odataType +func (m *SiteSettings) GetOdataType() *string { + return m.odataType } + // GetTimeZone gets the timeZone property value. Indicates the time offset for the time zone of the site from Coordinated Universal Time (UTC). -func (m *SiteSettings) GetTimeZone()(*string) { - return m.timeZone +func (m *SiteSettings) GetTimeZone() *string { + return m.timeZone } + // Serialize serializes information the current object -func (m *SiteSettings) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) { - { - err := writer.WriteStringValue("languageTag", m.GetLanguageTag()) - if err != nil { - return err - } - } - { - err := writer.WriteStringValue("@odata.type", m.GetOdataType()) - if err != nil { - return err - } - } - { - err := writer.WriteStringValue("timeZone", m.GetTimeZone()) - if err != nil { - return err - } - } - { - err := writer.WriteAdditionalData(m.GetAdditionalData()) - if err != nil { - return err - } - } - return nil +func (m *SiteSettings) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter) error { + { + err := writer.WriteStringValue("languageTag", m.GetLanguageTag()) + if err != nil { + return err + } + } + { + err := writer.WriteStringValue("@odata.type", 
m.GetOdataType()) + if err != nil { + return err + } + } + { + err := writer.WriteStringValue("timeZone", m.GetTimeZone()) + if err != nil { + return err + } + } + { + err := writer.WriteAdditionalData(m.GetAdditionalData()) + if err != nil { + return err + } + } + return nil } + // SetAdditionalData sets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well. -func (m *SiteSettings) SetAdditionalData(value map[string]interface{})() { - m.additionalData = value +func (m *SiteSettings) SetAdditionalData(value map[string]interface{}) { + m.additionalData = value } + // SetLanguageTag sets the languageTag property value. The language tag for the language used on this site. -func (m *SiteSettings) SetLanguageTag(value *string)() { - m.languageTag = value +func (m *SiteSettings) SetLanguageTag(value *string) { + m.languageTag = value } + // SetOdataType sets the @odata.type property value. The OdataType property -func (m *SiteSettings) SetOdataType(value *string)() { - m.odataType = value +func (m *SiteSettings) SetOdataType(value *string) { + m.odataType = value } + // SetTimeZone sets the timeZone property value. Indicates the time offset for the time zone of the site from Coordinated Universal Time (UTC). 
-func (m *SiteSettings) SetTimeZone(value *string)() { - m.timeZone = value +func (m *SiteSettings) SetTimeZone(value *string) { + m.timeZone = value } diff --git a/src/internal/connector/graph/betasdk/models/site_settingsable.go b/src/internal/connector/graph/betasdk/models/site_settingsable.go index 0423550ea..1b3825e05 100644 --- a/src/internal/connector/graph/betasdk/models/site_settingsable.go +++ b/src/internal/connector/graph/betasdk/models/site_settingsable.go @@ -1,17 +1,17 @@ package models import ( - i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization" + i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization" ) -// SiteSettingsable +// SiteSettingsable type SiteSettingsable interface { - i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.AdditionalDataHolder - i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable - GetLanguageTag()(*string) - GetOdataType()(*string) - GetTimeZone()(*string) - SetLanguageTag(value *string)() - SetOdataType(value *string)() - SetTimeZone(value *string)() + i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.AdditionalDataHolder + i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable + GetLanguageTag() *string + GetOdataType() *string + GetTimeZone() *string + SetLanguageTag(value *string) + SetOdataType(value *string) + SetTimeZone(value *string) } diff --git a/src/internal/connector/graph/betasdk/models/standard_web_part.go b/src/internal/connector/graph/betasdk/models/standard_web_part.go index 0b7b4427a..4532e1d24 100644 --- a/src/internal/connector/graph/betasdk/models/standard_web_part.go +++ b/src/internal/connector/graph/betasdk/models/standard_web_part.go @@ -1,88 +1,96 @@ package models import ( - i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 
"github.com/microsoft/kiota-abstractions-go/serialization" + i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization" ) -// StandardWebPart +// StandardWebPart type StandardWebPart struct { - WebPart - // Data of the webPart. - data WebPartDataable - // A Guid which indicates the type of the webParts - webPartType *string + WebPart + // Data of the webPart. + data WebPartDataable + // A Guid which indicates the type of the webParts + webPartType *string } + // NewStandardWebPart instantiates a new StandardWebPart and sets the default values. -func NewStandardWebPart()(*StandardWebPart) { - m := &StandardWebPart{ - WebPart: *NewWebPart(), - } - odataTypeValue := "#microsoft.graph.standardWebPart"; - m.SetOdataType(&odataTypeValue); - return m +func NewStandardWebPart() *StandardWebPart { + m := &StandardWebPart{ + WebPart: *NewWebPart(), + } + odataTypeValue := "#microsoft.graph.standardWebPart" + m.SetOdataType(&odataTypeValue) + return m } + // CreateStandardWebPartFromDiscriminatorValue creates a new instance of the appropriate class based on discriminator value -func CreateStandardWebPartFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) { - return NewStandardWebPart(), nil +func CreateStandardWebPartFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) (i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) { + return NewStandardWebPart(), nil } + // GetData gets the data property value. Data of the webPart. 
-func (m *StandardWebPart) GetData()(WebPartDataable) { - return m.data +func (m *StandardWebPart) GetData() WebPartDataable { + return m.data } + // GetFieldDeserializers the deserialization information for the current model -func (m *StandardWebPart) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) { - res := m.WebPart.GetFieldDeserializers() - res["data"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { - val, err := n.GetObjectValue(CreateWebPartDataFromDiscriminatorValue) - if err != nil { - return err - } - if val != nil { - m.SetData(val.(WebPartDataable)) - } - return nil - } - res["webPartType"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { - val, err := n.GetStringValue() - if err != nil { - return err - } - if val != nil { - m.SetWebPartType(val) - } - return nil - } - return res +func (m *StandardWebPart) GetFieldDeserializers() map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { + res := m.WebPart.GetFieldDeserializers() + res["data"] = func(n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { + val, err := n.GetObjectValue(CreateWebPartDataFromDiscriminatorValue) + if err != nil { + return err + } + if val != nil { + m.SetData(val.(WebPartDataable)) + } + return nil + } + res["webPartType"] = func(n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { + val, err := n.GetStringValue() + if err != nil { + return err + } + if val != nil { + m.SetWebPartType(val) + } + return nil + } + return res } + // GetWebPartType gets the webPartType property value. 
A Guid which indicates the type of the webParts -func (m *StandardWebPart) GetWebPartType()(*string) { - return m.webPartType +func (m *StandardWebPart) GetWebPartType() *string { + return m.webPartType } + // Serialize serializes information the current object -func (m *StandardWebPart) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) { - err := m.WebPart.Serialize(writer) - if err != nil { - return err - } - { - err = writer.WriteObjectValue("data", m.GetData()) - if err != nil { - return err - } - } - { - err = writer.WriteStringValue("webPartType", m.GetWebPartType()) - if err != nil { - return err - } - } - return nil +func (m *StandardWebPart) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter) error { + err := m.WebPart.Serialize(writer) + if err != nil { + return err + } + { + err = writer.WriteObjectValue("data", m.GetData()) + if err != nil { + return err + } + } + { + err = writer.WriteStringValue("webPartType", m.GetWebPartType()) + if err != nil { + return err + } + } + return nil } + // SetData sets the data property value. Data of the webPart. -func (m *StandardWebPart) SetData(value WebPartDataable)() { - m.data = value +func (m *StandardWebPart) SetData(value WebPartDataable) { + m.data = value } + // SetWebPartType sets the webPartType property value. 
A Guid which indicates the type of the webParts -func (m *StandardWebPart) SetWebPartType(value *string)() { - m.webPartType = value +func (m *StandardWebPart) SetWebPartType(value *string) { + m.webPartType = value } diff --git a/src/internal/connector/graph/betasdk/models/standard_web_partable.go b/src/internal/connector/graph/betasdk/models/standard_web_partable.go index e09160b2b..b33c25f15 100644 --- a/src/internal/connector/graph/betasdk/models/standard_web_partable.go +++ b/src/internal/connector/graph/betasdk/models/standard_web_partable.go @@ -1,15 +1,15 @@ package models import ( - i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization" + i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization" ) -// StandardWebPartable +// StandardWebPartable type StandardWebPartable interface { - i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable - WebPartable - GetData()(WebPartDataable) - GetWebPartType()(*string) - SetData(value WebPartDataable)() - SetWebPartType(value *string)() + i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable + WebPartable + GetData() WebPartDataable + GetWebPartType() *string + SetData(value WebPartDataable) + SetWebPartType(value *string) } diff --git a/src/internal/connector/graph/betasdk/models/text_web_part.go b/src/internal/connector/graph/betasdk/models/text_web_part.go index f607ffa31..1ae554671 100644 --- a/src/internal/connector/graph/betasdk/models/text_web_part.go +++ b/src/internal/connector/graph/betasdk/models/text_web_part.go @@ -1,62 +1,68 @@ package models import ( - i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization" + i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization" ) -// TextWebPart +// 
TextWebPart type TextWebPart struct { - WebPart - // The HTML string in text web part. - innerHtml *string + WebPart + // The HTML string in text web part. + innerHtml *string } + // NewTextWebPart instantiates a new TextWebPart and sets the default values. -func NewTextWebPart()(*TextWebPart) { - m := &TextWebPart{ - WebPart: *NewWebPart(), - } - odataTypeValue := "#microsoft.graph.textWebPart"; - m.SetOdataType(&odataTypeValue); - return m +func NewTextWebPart() *TextWebPart { + m := &TextWebPart{ + WebPart: *NewWebPart(), + } + odataTypeValue := "#microsoft.graph.textWebPart" + m.SetOdataType(&odataTypeValue) + return m } + // CreateTextWebPartFromDiscriminatorValue creates a new instance of the appropriate class based on discriminator value -func CreateTextWebPartFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) { - return NewTextWebPart(), nil +func CreateTextWebPartFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) (i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) { + return NewTextWebPart(), nil } + // GetFieldDeserializers the deserialization information for the current model -func (m *TextWebPart) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) { - res := m.WebPart.GetFieldDeserializers() - res["innerHtml"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { - val, err := n.GetStringValue() - if err != nil { - return err - } - if val != nil { - m.SetInnerHtml(val) - } - return nil - } - return res +func (m *TextWebPart) GetFieldDeserializers() map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { + res := m.WebPart.GetFieldDeserializers() + res["innerHtml"] = 
func(n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { + val, err := n.GetStringValue() + if err != nil { + return err + } + if val != nil { + m.SetInnerHtml(val) + } + return nil + } + return res } + // GetInnerHtml gets the innerHtml property value. The HTML string in text web part. -func (m *TextWebPart) GetInnerHtml()(*string) { - return m.innerHtml +func (m *TextWebPart) GetInnerHtml() *string { + return m.innerHtml } + // Serialize serializes information the current object -func (m *TextWebPart) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) { - err := m.WebPart.Serialize(writer) - if err != nil { - return err - } - { - err = writer.WriteStringValue("innerHtml", m.GetInnerHtml()) - if err != nil { - return err - } - } - return nil +func (m *TextWebPart) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter) error { + err := m.WebPart.Serialize(writer) + if err != nil { + return err + } + { + err = writer.WriteStringValue("innerHtml", m.GetInnerHtml()) + if err != nil { + return err + } + } + return nil } + // SetInnerHtml sets the innerHtml property value. The HTML string in text web part. 
-func (m *TextWebPart) SetInnerHtml(value *string)() { - m.innerHtml = value +func (m *TextWebPart) SetInnerHtml(value *string) { + m.innerHtml = value } diff --git a/src/internal/connector/graph/betasdk/models/text_web_partable.go b/src/internal/connector/graph/betasdk/models/text_web_partable.go index 45e21d92b..f58b6a0c8 100644 --- a/src/internal/connector/graph/betasdk/models/text_web_partable.go +++ b/src/internal/connector/graph/betasdk/models/text_web_partable.go @@ -1,13 +1,13 @@ package models import ( - i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization" + i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization" ) -// TextWebPartable +// TextWebPartable type TextWebPartable interface { - i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable - WebPartable - GetInnerHtml()(*string) - SetInnerHtml(value *string)() + i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable + WebPartable + GetInnerHtml() *string + SetInnerHtml(value *string) } diff --git a/src/internal/connector/graph/betasdk/models/title_area_layout_type.go b/src/internal/connector/graph/betasdk/models/title_area_layout_type.go index 375b68874..3621288a4 100644 --- a/src/internal/connector/graph/betasdk/models/title_area_layout_type.go +++ b/src/internal/connector/graph/betasdk/models/title_area_layout_type.go @@ -1,43 +1,45 @@ package models + import ( - "errors" + "errors" ) + // Provides operations to call the remove method. 
type TitleAreaLayoutType int const ( - IMAGEANDTITLE_TITLEAREALAYOUTTYPE TitleAreaLayoutType = iota - PLAIN_TITLEAREALAYOUTTYPE - COLORBLOCK_TITLEAREALAYOUTTYPE - OVERLAP_TITLEAREALAYOUTTYPE - UNKNOWNFUTUREVALUE_TITLEAREALAYOUTTYPE + IMAGEANDTITLE_TITLEAREALAYOUTTYPE TitleAreaLayoutType = iota + PLAIN_TITLEAREALAYOUTTYPE + COLORBLOCK_TITLEAREALAYOUTTYPE + OVERLAP_TITLEAREALAYOUTTYPE + UNKNOWNFUTUREVALUE_TITLEAREALAYOUTTYPE ) func (i TitleAreaLayoutType) String() string { - return []string{"imageAndTitle", "plain", "colorBlock", "overlap", "unknownFutureValue"}[i] + return []string{"imageAndTitle", "plain", "colorBlock", "overlap", "unknownFutureValue"}[i] } func ParseTitleAreaLayoutType(v string) (interface{}, error) { - result := IMAGEANDTITLE_TITLEAREALAYOUTTYPE - switch v { - case "imageAndTitle": - result = IMAGEANDTITLE_TITLEAREALAYOUTTYPE - case "plain": - result = PLAIN_TITLEAREALAYOUTTYPE - case "colorBlock": - result = COLORBLOCK_TITLEAREALAYOUTTYPE - case "overlap": - result = OVERLAP_TITLEAREALAYOUTTYPE - case "unknownFutureValue": - result = UNKNOWNFUTUREVALUE_TITLEAREALAYOUTTYPE - default: - return 0, errors.New("Unknown TitleAreaLayoutType value: " + v) - } - return &result, nil + result := IMAGEANDTITLE_TITLEAREALAYOUTTYPE + switch v { + case "imageAndTitle": + result = IMAGEANDTITLE_TITLEAREALAYOUTTYPE + case "plain": + result = PLAIN_TITLEAREALAYOUTTYPE + case "colorBlock": + result = COLORBLOCK_TITLEAREALAYOUTTYPE + case "overlap": + result = OVERLAP_TITLEAREALAYOUTTYPE + case "unknownFutureValue": + result = UNKNOWNFUTUREVALUE_TITLEAREALAYOUTTYPE + default: + return 0, errors.New("Unknown TitleAreaLayoutType value: " + v) + } + return &result, nil } func SerializeTitleAreaLayoutType(values []TitleAreaLayoutType) []string { - result := make([]string, len(values)) - for i, v := range values { - result[i] = v.String() - } - return result + result := make([]string, len(values)) + for i, v := range values { + result[i] = v.String() + } + return result 
} diff --git a/src/internal/connector/graph/betasdk/models/title_area_text_alignment_type.go b/src/internal/connector/graph/betasdk/models/title_area_text_alignment_type.go index 27b1e1dba..a34f41dbe 100644 --- a/src/internal/connector/graph/betasdk/models/title_area_text_alignment_type.go +++ b/src/internal/connector/graph/betasdk/models/title_area_text_alignment_type.go @@ -1,37 +1,39 @@ package models + import ( - "errors" + "errors" ) + // Provides operations to call the remove method. type TitleAreaTextAlignmentType int const ( - LEFT_TITLEAREATEXTALIGNMENTTYPE TitleAreaTextAlignmentType = iota - CENTER_TITLEAREATEXTALIGNMENTTYPE - UNKNOWNFUTUREVALUE_TITLEAREATEXTALIGNMENTTYPE + LEFT_TITLEAREATEXTALIGNMENTTYPE TitleAreaTextAlignmentType = iota + CENTER_TITLEAREATEXTALIGNMENTTYPE + UNKNOWNFUTUREVALUE_TITLEAREATEXTALIGNMENTTYPE ) func (i TitleAreaTextAlignmentType) String() string { - return []string{"left", "center", "unknownFutureValue"}[i] + return []string{"left", "center", "unknownFutureValue"}[i] } func ParseTitleAreaTextAlignmentType(v string) (interface{}, error) { - result := LEFT_TITLEAREATEXTALIGNMENTTYPE - switch v { - case "left": - result = LEFT_TITLEAREATEXTALIGNMENTTYPE - case "center": - result = CENTER_TITLEAREATEXTALIGNMENTTYPE - case "unknownFutureValue": - result = UNKNOWNFUTUREVALUE_TITLEAREATEXTALIGNMENTTYPE - default: - return 0, errors.New("Unknown TitleAreaTextAlignmentType value: " + v) - } - return &result, nil + result := LEFT_TITLEAREATEXTALIGNMENTTYPE + switch v { + case "left": + result = LEFT_TITLEAREATEXTALIGNMENTTYPE + case "center": + result = CENTER_TITLEAREATEXTALIGNMENTTYPE + case "unknownFutureValue": + result = UNKNOWNFUTUREVALUE_TITLEAREATEXTALIGNMENTTYPE + default: + return 0, errors.New("Unknown TitleAreaTextAlignmentType value: " + v) + } + return &result, nil } func SerializeTitleAreaTextAlignmentType(values []TitleAreaTextAlignmentType) []string { - result := make([]string, len(values)) - for i, v := range values { 
- result[i] = v.String() - } - return result + result := make([]string, len(values)) + for i, v := range values { + result[i] = v.String() + } + return result } diff --git a/src/internal/connector/graph/betasdk/models/web_part_position.go b/src/internal/connector/graph/betasdk/models/web_part_position.go index f2f1c3c9e..f3be0e651 100644 --- a/src/internal/connector/graph/betasdk/models/web_part_position.go +++ b/src/internal/connector/graph/betasdk/models/web_part_position.go @@ -1,175 +1,190 @@ package models import ( - i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization" + i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization" ) -// WebPartPosition +// WebPartPosition type WebPartPosition struct { - // Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well. - additionalData map[string]interface{} - // Indicates the identifier of the column where the web part is located. - columnId *float64 - // Indicates the horizontal section where the web part is located. - horizontalSectionId *float64 - // Indicates whether the web part is located in the vertical section. - isInVerticalSection *bool - // The OdataType property - odataType *string - // Index of the current web part. Represents the order of the web part in this column or section. - webPartIndex *float64 + // Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well. + additionalData map[string]interface{} + // Indicates the identifier of the column where the web part is located. + columnId *float64 + // Indicates the horizontal section where the web part is located. + horizontalSectionId *float64 + // Indicates whether the web part is located in the vertical section. 
+ isInVerticalSection *bool + // The OdataType property + odataType *string + // Index of the current web part. Represents the order of the web part in this column or section. + webPartIndex *float64 } + // NewWebPartPosition instantiates a new webPartPosition and sets the default values. -func NewWebPartPosition()(*WebPartPosition) { - m := &WebPartPosition{ - } - m.SetAdditionalData(make(map[string]interface{})); - return m +func NewWebPartPosition() *WebPartPosition { + m := &WebPartPosition{} + m.SetAdditionalData(make(map[string]interface{})) + return m } + // CreateWebPartPositionFromDiscriminatorValue creates a new instance of the appropriate class based on discriminator value -func CreateWebPartPositionFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) { - return NewWebPartPosition(), nil +func CreateWebPartPositionFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) (i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) { + return NewWebPartPosition(), nil } + // GetAdditionalData gets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well. -func (m *WebPartPosition) GetAdditionalData()(map[string]interface{}) { - return m.additionalData +func (m *WebPartPosition) GetAdditionalData() map[string]interface{} { + return m.additionalData } + // GetColumnId gets the columnId property value. Indicates the identifier of the column where the web part is located. 
-func (m *WebPartPosition) GetColumnId()(*float64) { - return m.columnId +func (m *WebPartPosition) GetColumnId() *float64 { + return m.columnId } + // GetFieldDeserializers the deserialization information for the current model -func (m *WebPartPosition) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) { - res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) - res["columnId"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { - val, err := n.GetFloat64Value() - if err != nil { - return err - } - if val != nil { - m.SetColumnId(val) - } - return nil - } - res["horizontalSectionId"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { - val, err := n.GetFloat64Value() - if err != nil { - return err - } - if val != nil { - m.SetHorizontalSectionId(val) - } - return nil - } - res["isInVerticalSection"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { - val, err := n.GetBoolValue() - if err != nil { - return err - } - if val != nil { - m.SetIsInVerticalSection(val) - } - return nil - } - res["@odata.type"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { - val, err := n.GetStringValue() - if err != nil { - return err - } - if val != nil { - m.SetOdataType(val) - } - return nil - } - res["webPartIndex"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { - val, err := n.GetFloat64Value() - if err != nil { - return err - } - if val != nil { - m.SetWebPartIndex(val) - } - return nil - } - return res +func (m *WebPartPosition) GetFieldDeserializers() map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { + res := 
make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error) + res["columnId"] = func(n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { + val, err := n.GetFloat64Value() + if err != nil { + return err + } + if val != nil { + m.SetColumnId(val) + } + return nil + } + res["horizontalSectionId"] = func(n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { + val, err := n.GetFloat64Value() + if err != nil { + return err + } + if val != nil { + m.SetHorizontalSectionId(val) + } + return nil + } + res["isInVerticalSection"] = func(n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { + val, err := n.GetBoolValue() + if err != nil { + return err + } + if val != nil { + m.SetIsInVerticalSection(val) + } + return nil + } + res["@odata.type"] = func(n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { + val, err := n.GetStringValue() + if err != nil { + return err + } + if val != nil { + m.SetOdataType(val) + } + return nil + } + res["webPartIndex"] = func(n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { + val, err := n.GetFloat64Value() + if err != nil { + return err + } + if val != nil { + m.SetWebPartIndex(val) + } + return nil + } + return res } + // GetHorizontalSectionId gets the horizontalSectionId property value. Indicates the horizontal section where the web part is located. -func (m *WebPartPosition) GetHorizontalSectionId()(*float64) { - return m.horizontalSectionId +func (m *WebPartPosition) GetHorizontalSectionId() *float64 { + return m.horizontalSectionId } + // GetIsInVerticalSection gets the isInVerticalSection property value. Indicates whether the web part is located in the vertical section. 
-func (m *WebPartPosition) GetIsInVerticalSection()(*bool) { - return m.isInVerticalSection +func (m *WebPartPosition) GetIsInVerticalSection() *bool { + return m.isInVerticalSection } + // GetOdataType gets the @odata.type property value. The OdataType property -func (m *WebPartPosition) GetOdataType()(*string) { - return m.odataType +func (m *WebPartPosition) GetOdataType() *string { + return m.odataType } + // GetWebPartIndex gets the webPartIndex property value. Index of the current web part. Represents the order of the web part in this column or section. -func (m *WebPartPosition) GetWebPartIndex()(*float64) { - return m.webPartIndex +func (m *WebPartPosition) GetWebPartIndex() *float64 { + return m.webPartIndex } + // Serialize serializes information the current object -func (m *WebPartPosition) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) { - { - err := writer.WriteFloat64Value("columnId", m.GetColumnId()) - if err != nil { - return err - } - } - { - err := writer.WriteFloat64Value("horizontalSectionId", m.GetHorizontalSectionId()) - if err != nil { - return err - } - } - { - err := writer.WriteBoolValue("isInVerticalSection", m.GetIsInVerticalSection()) - if err != nil { - return err - } - } - { - err := writer.WriteStringValue("@odata.type", m.GetOdataType()) - if err != nil { - return err - } - } - { - err := writer.WriteFloat64Value("webPartIndex", m.GetWebPartIndex()) - if err != nil { - return err - } - } - { - err := writer.WriteAdditionalData(m.GetAdditionalData()) - if err != nil { - return err - } - } - return nil +func (m *WebPartPosition) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter) error { + { + err := writer.WriteFloat64Value("columnId", m.GetColumnId()) + if err != nil { + return err + } + } + { + err := writer.WriteFloat64Value("horizontalSectionId", m.GetHorizontalSectionId()) + if err != nil { + return err + } + } + { 
+ err := writer.WriteBoolValue("isInVerticalSection", m.GetIsInVerticalSection()) + if err != nil { + return err + } + } + { + err := writer.WriteStringValue("@odata.type", m.GetOdataType()) + if err != nil { + return err + } + } + { + err := writer.WriteFloat64Value("webPartIndex", m.GetWebPartIndex()) + if err != nil { + return err + } + } + { + err := writer.WriteAdditionalData(m.GetAdditionalData()) + if err != nil { + return err + } + } + return nil } + // SetAdditionalData sets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well. -func (m *WebPartPosition) SetAdditionalData(value map[string]interface{})() { - m.additionalData = value +func (m *WebPartPosition) SetAdditionalData(value map[string]interface{}) { + m.additionalData = value } + // SetColumnId sets the columnId property value. Indicates the identifier of the column where the web part is located. -func (m *WebPartPosition) SetColumnId(value *float64)() { - m.columnId = value +func (m *WebPartPosition) SetColumnId(value *float64) { + m.columnId = value } + // SetHorizontalSectionId sets the horizontalSectionId property value. Indicates the horizontal section where the web part is located. -func (m *WebPartPosition) SetHorizontalSectionId(value *float64)() { - m.horizontalSectionId = value +func (m *WebPartPosition) SetHorizontalSectionId(value *float64) { + m.horizontalSectionId = value } + // SetIsInVerticalSection sets the isInVerticalSection property value. Indicates whether the web part is located in the vertical section. -func (m *WebPartPosition) SetIsInVerticalSection(value *bool)() { - m.isInVerticalSection = value +func (m *WebPartPosition) SetIsInVerticalSection(value *bool) { + m.isInVerticalSection = value } + // SetOdataType sets the @odata.type property value. 
The OdataType property -func (m *WebPartPosition) SetOdataType(value *string)() { - m.odataType = value +func (m *WebPartPosition) SetOdataType(value *string) { + m.odataType = value } + // SetWebPartIndex sets the webPartIndex property value. Index of the current web part. Represents the order of the web part in this column or section. -func (m *WebPartPosition) SetWebPartIndex(value *float64)() { - m.webPartIndex = value +func (m *WebPartPosition) SetWebPartIndex(value *float64) { + m.webPartIndex = value } diff --git a/src/internal/connector/graph/betasdk/models/web_part_positionable.go b/src/internal/connector/graph/betasdk/models/web_part_positionable.go index f0939db2e..9655ac285 100644 --- a/src/internal/connector/graph/betasdk/models/web_part_positionable.go +++ b/src/internal/connector/graph/betasdk/models/web_part_positionable.go @@ -1,21 +1,21 @@ package models import ( - i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization" + i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization" ) -// WebPartPositionable +// WebPartPositionable type WebPartPositionable interface { - i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.AdditionalDataHolder - i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable - GetColumnId()(*float64) - GetHorizontalSectionId()(*float64) - GetIsInVerticalSection()(*bool) - GetOdataType()(*string) - GetWebPartIndex()(*float64) - SetColumnId(value *float64)() - SetHorizontalSectionId(value *float64)() - SetIsInVerticalSection(value *bool)() - SetOdataType(value *string)() - SetWebPartIndex(value *float64)() + i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.AdditionalDataHolder + i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable + GetColumnId() *float64 + GetHorizontalSectionId() *float64 + GetIsInVerticalSection() *bool + 
GetOdataType() *string + GetWebPartIndex() *float64 + SetColumnId(value *float64) + SetHorizontalSectionId(value *float64) + SetIsInVerticalSection(value *bool) + SetOdataType(value *string) + SetWebPartIndex(value *float64) } diff --git a/src/internal/connector/graph/betasdk/sites/item_pages_item_web_parts_item_get_position_of_web_part_request_builder.go b/src/internal/connector/graph/betasdk/sites/item_pages_item_web_parts_item_get_position_of_web_part_request_builder.go index 4bb325673..9db79ace5 100644 --- a/src/internal/connector/graph/betasdk/sites/item_pages_item_web_parts_item_get_position_of_web_part_request_builder.go +++ b/src/internal/connector/graph/betasdk/sites/item_pages_item_web_parts_item_get_position_of_web_part_request_builder.go @@ -3,9 +3,10 @@ package sites import ( "context" - ifda19816f54f079134d70c11e75d6b26799300cf72079e282f1d3bb9a6750354 "github.com/alcionai/corso/src/internal/connector/graph/betasdk/models" i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f "github.com/microsoft/kiota-abstractions-go" i7ad325c11fbf3db4d761c429267362d8b24daa1eda0081f914ebc3cdc85181a0 "github.com/microsoftgraph/msgraph-sdk-go/models/odataerrors" + + ifda19816f54f079134d70c11e75d6b26799300cf72079e282f1d3bb9a6750354 "github.com/alcionai/corso/src/internal/connector/graph/betasdk/models" ) // ItemPagesItemWebPartsItemGetPositionOfWebPartRequestBuilder provides operations to call the getPositionOfWebPart method. 
diff --git a/src/internal/connector/graph/betasdk/sites/item_pages_item_web_parts_request_builder.go b/src/internal/connector/graph/betasdk/sites/item_pages_item_web_parts_request_builder.go index 0e349df74..e2e32c640 100644 --- a/src/internal/connector/graph/betasdk/sites/item_pages_item_web_parts_request_builder.go +++ b/src/internal/connector/graph/betasdk/sites/item_pages_item_web_parts_request_builder.go @@ -3,9 +3,10 @@ package sites import ( "context" - ifda19816f54f079134d70c11e75d6b26799300cf72079e282f1d3bb9a6750354 "github.com/alcionai/corso/src/internal/connector/graph/betasdk/models" i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f "github.com/microsoft/kiota-abstractions-go" i7ad325c11fbf3db4d761c429267362d8b24daa1eda0081f914ebc3cdc85181a0 "github.com/microsoftgraph/msgraph-sdk-go/models/odataerrors" + + ifda19816f54f079134d70c11e75d6b26799300cf72079e282f1d3bb9a6750354 "github.com/alcionai/corso/src/internal/connector/graph/betasdk/models" ) // ItemPagesItemWebPartsRequestBuilder provides operations to manage the webParts property of the microsoft.graph.sitePage entity. 
diff --git a/src/internal/connector/graph/betasdk/sites/item_pages_item_web_parts_web_part_item_request_builder.go b/src/internal/connector/graph/betasdk/sites/item_pages_item_web_parts_web_part_item_request_builder.go index 25dba98cf..1c16fc8df 100644 --- a/src/internal/connector/graph/betasdk/sites/item_pages_item_web_parts_web_part_item_request_builder.go +++ b/src/internal/connector/graph/betasdk/sites/item_pages_item_web_parts_web_part_item_request_builder.go @@ -3,10 +3,10 @@ package sites import ( "context" - ifda19816f54f079134d70c11e75d6b26799300cf72079e282f1d3bb9a6750354 "github.com/alcionai/corso/src/internal/connector/graph/betasdk/models" i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f "github.com/microsoft/kiota-abstractions-go" - i7ad325c11fbf3db4d761c429267362d8b24daa1eda0081f914ebc3cdc85181a0 "github.com/microsoftgraph/msgraph-sdk-go/models/odataerrors" + + ifda19816f54f079134d70c11e75d6b26799300cf72079e282f1d3bb9a6750354 "github.com/alcionai/corso/src/internal/connector/graph/betasdk/models" ) // ItemPagesItemWebPartsWebPartItemRequestBuilder provides operations to manage the webParts property of the microsoft.graph.sitePage entity. 
diff --git a/src/internal/connector/graph/betasdk/sites/item_pages_request_builder.go b/src/internal/connector/graph/betasdk/sites/item_pages_request_builder.go index 43f503439..6c82f58df 100644 --- a/src/internal/connector/graph/betasdk/sites/item_pages_request_builder.go +++ b/src/internal/connector/graph/betasdk/sites/item_pages_request_builder.go @@ -3,9 +3,10 @@ package sites import ( "context" - ifda19816f54f079134d70c11e75d6b26799300cf72079e282f1d3bb9a6750354 "github.com/alcionai/corso/src/internal/connector/graph/betasdk/models" i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f "github.com/microsoft/kiota-abstractions-go" i7ad325c11fbf3db4d761c429267362d8b24daa1eda0081f914ebc3cdc85181a0 "github.com/microsoftgraph/msgraph-sdk-go/models/odataerrors" + + ifda19816f54f079134d70c11e75d6b26799300cf72079e282f1d3bb9a6750354 "github.com/alcionai/corso/src/internal/connector/graph/betasdk/models" ) // ItemPagesRequestBuilder provides operations to manage the pages property of the microsoft.graph.site entity. diff --git a/src/internal/connector/graph/errors.go b/src/internal/connector/graph/errors.go index c75e4a6cb..21116057d 100644 --- a/src/internal/connector/graph/errors.go +++ b/src/internal/connector/graph/errors.go @@ -17,6 +17,7 @@ import ( // --------------------------------------------------------------------------- const ( + errCodeActivityLimitReached = "activityLimitReached" errCodeItemNotFound = "ErrorItemNotFound" errCodeEmailFolderNotFound = "ErrorSyncFolderNotFound" errCodeResyncRequired = "ResyncRequired" @@ -31,8 +32,10 @@ var ( // normally the graph client will catch this for us, but in case we // run our own client Do(), we need to translate it to a timeout type // failure locally. 
- Err429TooManyRequests = errors.New("429 too many requests") - Err503ServiceUnavailable = errors.New("503 Service Unavailable") + Err429TooManyRequests = errors.New("429 too many requests") + Err503ServiceUnavailable = errors.New("503 Service Unavailable") + Err504GatewayTimeout = errors.New("504 Gateway Timeout") + Err500InternalServerError = errors.New("500 Internal Server Error") ) // The folder or item was deleted between the time we identified @@ -113,6 +116,10 @@ func IsErrThrottled(err error) bool { return true } + if hasErrorCode(err, errCodeActivityLimitReached) { + return true + } + e := ErrThrottled{} return errors.As(err, &e) @@ -135,21 +142,18 @@ func IsErrUnauthorized(err error) bool { return errors.As(err, &e) } -type ErrServiceUnavailable struct { +type ErrInternalServerError struct { common.Err } -func IsSericeUnavailable(err error) bool { - if errors.Is(err, Err503ServiceUnavailable) { +func IsInternalServerError(err error) bool { + if errors.Is(err, Err500InternalServerError) { return true } - e := ErrUnauthorized{} - if errors.As(err, &e) { - return true - } + e := ErrInternalServerError{} - return true + return errors.As(err, &e) } // --------------------------------------------------------------------------- diff --git a/src/internal/connector/graph/errors_test.go b/src/internal/connector/graph/errors_test.go new file mode 100644 index 000000000..c7f889b83 --- /dev/null +++ b/src/internal/connector/graph/errors_test.go @@ -0,0 +1,248 @@ +package graph + +import ( + "context" + "testing" + + "github.com/microsoftgraph/msgraph-sdk-go/models/odataerrors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" + + "github.com/alcionai/corso/src/internal/common" +) + +type GraphErrorsUnitSuite struct { + suite.Suite +} + +func TestGraphErrorsUnitSuite(t *testing.T) { + suite.Run(t, new(GraphErrorsUnitSuite)) +} + +func odErr(code string) *odataerrors.ODataError { + odErr := &odataerrors.ODataError{} + merr := 
odataerrors.MainError{} + merr.SetCode(&code) + odErr.SetError(&merr) + + return odErr +} + +func (suite *GraphErrorsUnitSuite) TestIsErrDeletedInFlight() { + table := []struct { + name string + err error + expect assert.BoolAssertionFunc + }{ + { + name: "nil", + err: nil, + expect: assert.False, + }, + { + name: "non-matching", + err: assert.AnError, + expect: assert.False, + }, + { + name: "as", + err: ErrDeletedInFlight{Err: *common.EncapsulateError(assert.AnError)}, + expect: assert.True, + }, + { + name: "non-matching oDataErr", + err: odErr("fnords"), + expect: assert.False, + }, + { + name: "not-found oDataErr", + err: odErr(errCodeItemNotFound), + expect: assert.True, + }, + { + name: "sync-not-found oDataErr", + err: odErr(errCodeSyncFolderNotFound), + expect: assert.True, + }, + } + for _, test := range table { + suite.T().Run(test.name, func(t *testing.T) { + test.expect(t, IsErrDeletedInFlight(test.err)) + }) + } +} + +func (suite *GraphErrorsUnitSuite) TestIsErrInvalidDelta() { + table := []struct { + name string + err error + expect assert.BoolAssertionFunc + }{ + { + name: "nil", + err: nil, + expect: assert.False, + }, + { + name: "non-matching", + err: assert.AnError, + expect: assert.False, + }, + { + name: "as", + err: ErrInvalidDelta{Err: *common.EncapsulateError(assert.AnError)}, + expect: assert.True, + }, + { + name: "non-matching oDataErr", + err: odErr("fnords"), + expect: assert.False, + }, + { + name: "resync-required oDataErr", + err: odErr(errCodeResyncRequired), + expect: assert.True, + }, + } + for _, test := range table { + suite.T().Run(test.name, func(t *testing.T) { + test.expect(t, IsErrInvalidDelta(test.err)) + }) + } +} + +func (suite *GraphErrorsUnitSuite) TestIsErrTimeout() { + table := []struct { + name string + err error + expect assert.BoolAssertionFunc + }{ + { + name: "nil", + err: nil, + expect: assert.False, + }, + { + name: "non-matching", + err: assert.AnError, + expect: assert.False, + }, + { + name: "as", + err: 
ErrTimeout{Err: *common.EncapsulateError(assert.AnError)}, + expect: assert.True, + }, + { + name: "context deadline", + err: context.DeadlineExceeded, + expect: assert.True, + }, + } + for _, test := range table { + suite.T().Run(test.name, func(t *testing.T) { + test.expect(t, IsErrTimeout(test.err)) + }) + } +} + +func (suite *GraphErrorsUnitSuite) TestIsErrThrottled() { + table := []struct { + name string + err error + expect assert.BoolAssertionFunc + }{ + { + name: "nil", + err: nil, + expect: assert.False, + }, + { + name: "non-matching", + err: assert.AnError, + expect: assert.False, + }, + { + name: "as", + err: ErrThrottled{Err: *common.EncapsulateError(assert.AnError)}, + expect: assert.True, + }, + { + name: "is429", + err: Err429TooManyRequests, + expect: assert.True, + }, + } + for _, test := range table { + suite.T().Run(test.name, func(t *testing.T) { + test.expect(t, IsErrThrottled(test.err)) + }) + } +} + +func (suite *GraphErrorsUnitSuite) TestIsErrUnauthorized() { + table := []struct { + name string + err error + expect assert.BoolAssertionFunc + }{ + { + name: "nil", + err: nil, + expect: assert.False, + }, + { + name: "non-matching", + err: assert.AnError, + expect: assert.False, + }, + { + name: "as", + err: ErrUnauthorized{Err: *common.EncapsulateError(assert.AnError)}, + expect: assert.True, + }, + { + name: "is429", + err: Err401Unauthorized, + expect: assert.True, + }, + } + for _, test := range table { + suite.T().Run(test.name, func(t *testing.T) { + test.expect(t, IsErrUnauthorized(test.err)) + }) + } +} + +func (suite *GraphErrorsUnitSuite) TestIsInternalServerError() { + table := []struct { + name string + err error + expect assert.BoolAssertionFunc + }{ + { + name: "nil", + err: nil, + expect: assert.False, + }, + { + name: "non-matching", + err: assert.AnError, + expect: assert.False, + }, + { + name: "as", + err: ErrInternalServerError{Err: *common.EncapsulateError(assert.AnError)}, + expect: assert.True, + }, + { + name: "is429", 
+ err: Err500InternalServerError, + expect: assert.True, + }, + } + for _, test := range table { + suite.T().Run(test.name, func(t *testing.T) { + test.expect(t, IsInternalServerError(test.err)) + }) + } +} diff --git a/src/internal/connector/graph/service.go b/src/internal/connector/graph/service.go index 6c0e6dbc1..fd6142028 100644 --- a/src/internal/connector/graph/service.go +++ b/src/internal/connector/graph/service.go @@ -8,6 +8,7 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/alcionai/corso/src/internal/connector/support" "github.com/microsoft/kiota-abstractions-go/serialization" ka "github.com/microsoft/kiota-authentication-azure-go" khttp "github.com/microsoft/kiota-http-go" @@ -22,6 +23,7 @@ import ( const ( logGraphRequestsEnvKey = "LOG_GRAPH_REQUESTS" + numberOfRetries = 3 ) // AllMetadataFileNames produces the standard set of filenames used to store graph @@ -149,7 +151,7 @@ func HTTPClient(opts ...option) *http.Client { middlewares := msgraphgocore.GetDefaultMiddlewaresWithOptions(&clientOptions) middlewares = append(middlewares, &LoggingMiddleware{}) httpClient := msgraphgocore.GetDefaultClient(&clientOptions, middlewares...) - httpClient.Timeout = time.Second * 90 + httpClient.Timeout = time.Minute * 3 (&clientConfig{}). populate(opts...). 
@@ -250,7 +252,6 @@ func (handler *LoggingMiddleware) Intercept( respDump, _ := httputil.DumpResponse(resp, false) metadata := []any{ - "idx", middlewareIndex, "method", req.Method, "status", resp.Status, "statusCode", resp.StatusCode, @@ -273,7 +274,6 @@ func (handler *LoggingMiddleware) Intercept( respDump, _ := httputil.DumpResponse(resp, true) metadata := []any{ - "idx", middlewareIndex, "method", req.Method, "status", resp.Status, "statusCode", resp.StatusCode, @@ -296,3 +296,26 @@ func (handler *LoggingMiddleware) Intercept( return resp, err } + +// Run a function with retries +func RunWithRetry(run func() error) error { + var err error + + for i := 0; i < numberOfRetries; i++ { + err = run() + if err == nil { + return nil + } + + // only retry on timeouts and 500-internal-errors. + if !(IsErrTimeout(err) || IsInternalServerError(err)) { + break + } + + if i < numberOfRetries { + time.Sleep(time.Duration(3*(i+2)) * time.Second) + } + } + + return support.ConnectorStackErrorTraceWrap(err, "maximum retries or unretryable") +} diff --git a/src/internal/connector/graph/service_test.go b/src/internal/connector/graph/service_test.go index 14bdc9c36..c2ef2d699 100644 --- a/src/internal/connector/graph/service_test.go +++ b/src/internal/connector/graph/service_test.go @@ -53,7 +53,7 @@ func (suite *GraphUnitSuite) TestHTTPClient() { name: "no options", opts: []option{}, check: func(t *testing.T, c *http.Client) { - assert.Equal(t, 90*time.Second, c.Timeout, "default timeout") + assert.Equal(t, 3*time.Minute, c.Timeout, "default timeout") }, }, { diff --git a/src/internal/connector/graph_connector.go b/src/internal/connector/graph_connector.go index 3dbc0e60c..def430f14 100644 --- a/src/internal/connector/graph_connector.go +++ b/src/internal/connector/graph_connector.go @@ -266,9 +266,11 @@ func (gc *GraphConnector) UnionSiteIDsAndWebURLs(ctx context.Context, ids, urls // SideEffect: gc.status is updated at the completion of operation func (gc *GraphConnector) 
RestoreDataCollections( ctx context.Context, + backupVersion int, acct account.Account, selector selectors.Selector, dest control.RestoreDestination, + opts control.Options, dcs []data.Collection, ) (*details.Details, error) { ctx, end := D.Span(ctx, "connector:restore") @@ -289,9 +291,9 @@ func (gc *GraphConnector) RestoreDataCollections( case selectors.ServiceExchange: status, err = exchange.RestoreExchangeDataCollections(ctx, creds, gc.Service, dest, dcs, deets) case selectors.ServiceOneDrive: - status, err = onedrive.RestoreCollections(ctx, gc.Service, dest, dcs, deets) + status, err = onedrive.RestoreCollections(ctx, backupVersion, gc.Service, dest, opts, dcs, deets) case selectors.ServiceSharePoint: - status, err = sharepoint.RestoreCollections(ctx, gc.Service, dest, dcs, deets) + status, err = sharepoint.RestoreCollections(ctx, backupVersion, gc.Service, dest, dcs, deets) default: err = errors.Errorf("restore data from service %s not supported", selector.Service.String()) } diff --git a/src/internal/connector/graph_connector_helper_test.go b/src/internal/connector/graph_connector_helper_test.go index 698ee8527..539cbf501 100644 --- a/src/internal/connector/graph_connector_helper_test.go +++ b/src/internal/connector/graph_connector_helper_test.go @@ -2,9 +2,11 @@ package connector import ( "context" + "encoding/json" "io" "net/http" "reflect" + "strings" "testing" "github.com/microsoftgraph/msgraph-sdk-go/models" @@ -14,6 +16,7 @@ import ( "golang.org/x/exp/maps" "github.com/alcionai/corso/src/internal/connector/mockconnector" + "github.com/alcionai/corso/src/internal/connector/onedrive" "github.com/alcionai/corso/src/internal/connector/support" "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/tester" @@ -169,6 +172,14 @@ type restoreBackupInfo struct { resource resource } +type restoreBackupInfoMultiVersion struct { + name string + service path.ServiceType + collectionsLatest []colInfo + collectionsPrevious []colInfo + 
resource resource +} + func attachmentEqual( expected models.Attachmentable, got models.Attachmentable, @@ -645,21 +656,52 @@ func compareOneDriveItem( t *testing.T, expected map[string][]byte, item data.Stream, + restorePermissions bool, ) { + name := item.UUID() + expectedData := expected[item.UUID()] - if !assert.NotNil(t, expectedData, "unexpected file with name %s", item.UUID) { + if !assert.NotNil(t, expectedData, "unexpected file with name %s", item.UUID()) { return } - // OneDrive items are just byte buffers of the data. Nothing special to - // interpret. May need to do chunked comparisons in the future if we test - // large item equality. buf, err := io.ReadAll(item.ToReader()) if !assert.NoError(t, err) { return } - assert.Equal(t, expectedData, buf) + if !strings.HasSuffix(name, onedrive.MetaFileSuffix) && !strings.HasSuffix(name, onedrive.DirMetaFileSuffix) { + // OneDrive data items are just byte buffers of the data. Nothing special to + // interpret. May need to do chunked comparisons in the future if we test + // large item equality. + assert.Equal(t, expectedData, buf) + return + } + + var ( + itemMeta onedrive.Metadata + expectedMeta onedrive.Metadata + ) + + err = json.Unmarshal(buf, &itemMeta) + assert.Nil(t, err) + + err = json.Unmarshal(expectedData, &expectedMeta) + assert.Nil(t, err) + + if !restorePermissions { + assert.Equal(t, 0, len(itemMeta.Permissions)) + return + } + + assert.Equal(t, len(expectedMeta.Permissions), len(itemMeta.Permissions), "number of permissions after restore") + + // FIXME(meain): The permissions before and after might not be in the same order. 
+ for i, p := range expectedMeta.Permissions { + assert.Equal(t, p.Email, itemMeta.Permissions[i].Email) + assert.Equal(t, p.Roles, itemMeta.Permissions[i].Roles) + assert.Equal(t, p.Expiration, itemMeta.Permissions[i].Expiration) + } } func compareItem( @@ -668,6 +710,7 @@ func compareItem( service path.ServiceType, category path.CategoryType, item data.Stream, + restorePermissions bool, ) { if mt, ok := item.(data.StreamModTime); ok { assert.NotZero(t, mt.ModTime()) @@ -687,7 +730,7 @@ func compareItem( } case path.OneDriveService: - compareOneDriveItem(t, expected, item) + compareOneDriveItem(t, expected, item, restorePermissions) default: assert.FailNowf(t, "unexpected service: %s", service.String()) @@ -720,6 +763,7 @@ func checkCollections( expectedItems int, expected map[string]map[string][]byte, got []data.Collection, + restorePermissions bool, ) int { collectionsWithItems := []data.Collection{} @@ -754,7 +798,7 @@ func checkCollections( continue } - compareItem(t, expectedColData, service, category, item) + compareItem(t, expectedColData, service, category, item, restorePermissions) } if gotItems != startingItems { @@ -906,10 +950,63 @@ func collectionsForInfo( tenant, user string, dest control.RestoreDestination, allInfo []colInfo, -) (int, []data.Collection, map[string]map[string][]byte) { +) (int, int, []data.Collection, map[string]map[string][]byte) { collections := make([]data.Collection, 0, len(allInfo)) expectedData := make(map[string]map[string][]byte, len(allInfo)) totalItems := 0 + kopiaEntries := 0 + + for _, info := range allInfo { + pth := mustToDataLayerPath( + t, + service, + tenant, + user, + info.category, + info.pathElements, + false, + ) + c := mockconnector.NewMockExchangeCollection(pth, len(info.items)) + baseDestPath := backupOutputPathFromRestore(t, dest, pth) + + baseExpected := expectedData[baseDestPath.String()] + if baseExpected == nil { + expectedData[baseDestPath.String()] = make(map[string][]byte, len(info.items)) + 
baseExpected = expectedData[baseDestPath.String()] + } + + for i := 0; i < len(info.items); i++ { + c.Names[i] = info.items[i].name + c.Data[i] = info.items[i].data + + baseExpected[info.items[i].lookupKey] = info.items[i].data + + // We do not count metadata files against item count + if service != path.OneDriveService || + (service == path.OneDriveService && + strings.HasSuffix(info.items[i].name, onedrive.DataFileSuffix)) { + totalItems++ + } + } + + collections = append(collections, c) + kopiaEntries += len(info.items) + } + + return totalItems, kopiaEntries, collections, expectedData +} + +func collectionsForInfoVersion0( + t *testing.T, + service path.ServiceType, + tenant, user string, + dest control.RestoreDestination, + allInfo []colInfo, +) (int, int, []data.Collection, map[string]map[string][]byte) { + collections := make([]data.Collection, 0, len(allInfo)) + expectedData := make(map[string]map[string][]byte, len(allInfo)) + totalItems := 0 + kopiaEntries := 0 for _, info := range allInfo { pth := mustToDataLayerPath( @@ -939,9 +1036,10 @@ func collectionsForInfo( collections = append(collections, c) totalItems += len(info.items) + kopiaEntries += len(info.items) } - return totalItems, collections, expectedData + return totalItems, kopiaEntries, collections, expectedData } //nolint:deadcode diff --git a/src/internal/connector/graph_connector_test.go b/src/internal/connector/graph_connector_test.go index be1439c35..b3b55a15e 100644 --- a/src/internal/connector/graph_connector_test.go +++ b/src/internal/connector/graph_connector_test.go @@ -2,6 +2,8 @@ package connector import ( "context" + "encoding/base64" + "encoding/json" "strings" "testing" "time" @@ -15,10 +17,12 @@ import ( "github.com/alcionai/corso/src/internal/connector/discovery/api" "github.com/alcionai/corso/src/internal/connector/graph" "github.com/alcionai/corso/src/internal/connector/mockconnector" + "github.com/alcionai/corso/src/internal/connector/onedrive" 
"github.com/alcionai/corso/src/internal/connector/support" "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/pkg/account" + "github.com/alcionai/corso/src/pkg/backup" "github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/selectors" @@ -135,9 +139,10 @@ func (suite *GraphConnectorUnitSuite) TestUnionSiteIDsAndWebURLs() { type GraphConnectorIntegrationSuite struct { suite.Suite - connector *GraphConnector - user string - acct account.Account + connector *GraphConnector + user string + secondaryUser string + acct account.Account } func TestGraphConnectorIntegrationSuite(t *testing.T) { @@ -158,6 +163,7 @@ func (suite *GraphConnectorIntegrationSuite) SetupSuite() { suite.connector = loadConnector(ctx, suite.T(), graph.HTTPClient(graph.NoTimeout()), Users) suite.user = tester.M365UserID(suite.T()) + suite.secondaryUser = tester.SecondaryM365UserID(suite.T()) suite.acct = tester.NewM365Account(suite.T()) tester.LogTimeOfTest(suite.T()) @@ -226,7 +232,18 @@ func (suite *GraphConnectorIntegrationSuite) TestRestoreFailsBadService() { } ) - deets, err := suite.connector.RestoreDataCollections(ctx, acct, sel, dest, nil) + deets, err := suite.connector.RestoreDataCollections( + ctx, + backup.Version, + acct, + sel, + dest, + control.Options{ + RestorePermissions: true, + ToggleFeatures: control.Toggles{EnablePermissionsBackup: true}, + }, + nil, + ) assert.Error(t, err) assert.NotNil(t, deets) @@ -294,10 +311,16 @@ func (suite *GraphConnectorIntegrationSuite) TestEmptyCollections() { deets, err := suite.connector.RestoreDataCollections( ctx, + backup.Version, suite.acct, test.sel, dest, - test.col) + control.Options{ + RestorePermissions: true, + ToggleFeatures: control.Toggles{EnablePermissionsBackup: true}, + }, + test.col, + ) require.NoError(t, err) assert.NotNil(t, deets) @@ -344,11 +367,13 @@ func runRestoreBackupTest( test 
restoreBackupInfo, tenant string, resourceOwners []string, + opts control.Options, ) { var ( - collections []data.Collection - expectedData = map[string]map[string][]byte{} - totalItems = 0 + collections []data.Collection + expectedData = map[string]map[string][]byte{} + totalItems = 0 + totalKopiaItems = 0 // Get a dest per test so they're independent. dest = tester.DefaultTestRestoreDestination() ) @@ -357,7 +382,7 @@ func runRestoreBackupTest( defer flush() for _, owner := range resourceOwners { - numItems, ownerCollections, userExpectedData := collectionsForInfo( + numItems, kopiaItems, ownerCollections, userExpectedData := collectionsForInfo( t, test.service, tenant, @@ -368,6 +393,7 @@ func runRestoreBackupTest( collections = append(collections, ownerCollections...) totalItems += numItems + totalKopiaItems += kopiaItems maps.Copy(expectedData, userExpectedData) } @@ -383,10 +409,13 @@ func runRestoreBackupTest( restoreSel := getSelectorWith(t, test.service, resourceOwners, true) deets, err := restoreGC.RestoreDataCollections( ctx, + backup.Version, acct, restoreSel, dest, - collections) + opts, + collections, + ) require.NoError(t, err) assert.NotNil(t, deets) @@ -425,7 +454,15 @@ func runRestoreBackupTest( t.Logf("Selective backup of %s\n", backupSel) start = time.Now() - dcs, excludes, err := backupGC.DataCollections(ctx, backupSel, nil, control.Options{}) + dcs, excludes, err := backupGC.DataCollections( + ctx, + backupSel, + nil, + control.Options{ + RestorePermissions: true, + ToggleFeatures: control.Toggles{EnablePermissionsBackup: true}, + }, + ) require.NoError(t, err) // No excludes yet because this isn't an incremental backup. assert.Empty(t, excludes) @@ -434,7 +471,7 @@ func runRestoreBackupTest( // Pull the data prior to waiting for the status as otherwise it will // deadlock. 
- skipped := checkCollections(t, totalItems, expectedData, dcs) + skipped := checkCollections(t, totalKopiaItems, expectedData, dcs, opts.RestorePermissions) status = backupGC.AwaitStatus() @@ -446,6 +483,143 @@ func runRestoreBackupTest( "backup status.Successful; wanted %d items + %d skipped", totalItems, skipped) } +// runRestoreBackupTestVersion0 restores with data from an older +// version of the backup and check the restored data against the +// something that would be in the form of a newer backup. +func runRestoreBackupTestVersion0( + t *testing.T, + acct account.Account, + test restoreBackupInfoMultiVersion, + tenant string, + resourceOwners []string, + opts control.Options, +) { + var ( + collections []data.Collection + expectedData = map[string]map[string][]byte{} + totalItems = 0 + totalKopiaItems = 0 + // Get a dest per test so they're independent. + dest = tester.DefaultTestRestoreDestination() + ) + + ctx, flush := tester.NewContext() + defer flush() + + for _, owner := range resourceOwners { + _, _, ownerCollections, _ := collectionsForInfoVersion0( + t, + test.service, + tenant, + owner, + dest, + test.collectionsPrevious, + ) + + collections = append(collections, ownerCollections...) + } + + t.Logf( + "Restoring collections to %s for resourceOwners(s) %v\n", + dest.ContainerName, + resourceOwners, + ) + + start := time.Now() + + restoreGC := loadConnector(ctx, t, graph.HTTPClient(graph.NoTimeout()), test.resource) + restoreSel := getSelectorWith(t, test.service, resourceOwners, true) + deets, err := restoreGC.RestoreDataCollections( + ctx, + 0, // The OG version ;) + acct, + restoreSel, + dest, + opts, + collections, + ) + require.NoError(t, err) + assert.NotNil(t, deets) + + assert.NotNil(t, restoreGC.AwaitStatus()) + + runTime := time.Since(start) + + t.Logf("Restore complete in %v\n", runTime) + + // Run a backup and compare its output with what we put in. 
+ for _, owner := range resourceOwners { + numItems, kopiaItems, _, userExpectedData := collectionsForInfo( + t, + test.service, + tenant, + owner, + dest, + test.collectionsLatest, + ) + + totalItems += numItems + totalKopiaItems += kopiaItems + + maps.Copy(expectedData, userExpectedData) + } + + cats := make(map[path.CategoryType]struct{}, len(test.collectionsLatest)) + for _, c := range test.collectionsLatest { + cats[c.category] = struct{}{} + } + + expectedDests := make([]destAndCats, 0, len(resourceOwners)) + for _, ro := range resourceOwners { + expectedDests = append(expectedDests, destAndCats{ + resourceOwner: ro, + dest: dest.ContainerName, + cats: cats, + }) + } + + backupGC := loadConnector(ctx, t, graph.HTTPClient(graph.NoTimeout()), test.resource) + backupSel := backupSelectorForExpected(t, test.service, expectedDests) + + start = time.Now() + dcs, excludes, err := backupGC.DataCollections( + ctx, + backupSel, + nil, + control.Options{ + RestorePermissions: true, + ToggleFeatures: control.Toggles{EnablePermissionsBackup: true}, + }, + ) + require.NoError(t, err) + // No excludes yet because this isn't an incremental backup. + assert.Empty(t, excludes) + + t.Logf("Backup enumeration complete in %v\n", time.Since(start)) + + // Pull the data prior to waiting for the status as otherwise it will + // deadlock. 
+ skipped := checkCollections(t, totalKopiaItems, expectedData, dcs, opts.RestorePermissions) + + status := backupGC.AwaitStatus() + assert.Equal(t, totalItems+skipped, status.ObjectCount, "status.ObjectCount") + assert.Equal(t, totalItems+skipped, status.Successful, "status.Successful") +} + +func getTestMetaJSON(t *testing.T, user string, roles []string) []byte { + id := base64.StdEncoding.EncodeToString([]byte(user + strings.Join(roles, "+"))) + testMeta := onedrive.Metadata{Permissions: []onedrive.UserPermission{ + {ID: id, Roles: roles, Email: user}, + }} + + testMetaJSON, err := json.Marshal(testMeta) + if err != nil { + t.Fatal("unable to marshall test permissions", err) + } + + return testMetaJSON +} + func (suite *GraphConnectorIntegrationSuite) TestRestoreAndBackup() { bodyText := "This email has some text. However, all the text is on the same line." subjectText := "Test message for restore" @@ -564,7 +738,7 @@ func (suite *GraphConnectorIntegrationSuite) TestRestoreAndBackup() { }, }, { - name: "MultipleContactsMutlipleFolders", + name: "MultipleContactsMultipleFolders", service: path.ExchangeService, resource: Users, collections: []colInfo{ @@ -682,6 +856,226 @@ func (suite *GraphConnectorIntegrationSuite) TestRestoreAndBackup() { service: path.OneDriveService, resource: Users, collections: []colInfo{ + { + pathElements: []string{ + "drives", + driveID, + "root:", + }, + category: path.FilesCategory, + items: []itemInfo{ + { + name: "test-file.txt" + onedrive.DataFileSuffix, + data: []byte(strings.Repeat("a", 33)), + lookupKey: "test-file.txt" + onedrive.DataFileSuffix, + }, + { + name: "test-file.txt" + onedrive.MetaFileSuffix, + data: []byte("{}"), + lookupKey: "test-file.txt" + onedrive.MetaFileSuffix, + }, + { + name: "folder-a" + onedrive.DirMetaFileSuffix, + data: []byte("{}"), + lookupKey: "folder-a" + onedrive.DirMetaFileSuffix, + }, + { + name: "b" + onedrive.DirMetaFileSuffix, + data: []byte("{}"), + lookupKey: "b" + 
onedrive.DirMetaFileSuffix, + }, + }, + }, + { + pathElements: []string{ + "drives", + driveID, + "root:", + "folder-a", + }, + category: path.FilesCategory, + items: []itemInfo{ + { + name: "test-file.txt" + onedrive.DataFileSuffix, + data: []byte(strings.Repeat("b", 65)), + lookupKey: "test-file.txt" + onedrive.DataFileSuffix, + }, + { + name: "test-file.txt" + onedrive.MetaFileSuffix, + data: []byte("{}"), + lookupKey: "test-file.txt" + onedrive.MetaFileSuffix, + }, + { + name: "b" + onedrive.DirMetaFileSuffix, + data: []byte("{}"), + lookupKey: "b" + onedrive.DirMetaFileSuffix, + }, + }, + }, + { + pathElements: []string{ + "drives", + driveID, + "root:", + "folder-a", + "b", + }, + category: path.FilesCategory, + items: []itemInfo{ + { + name: "test-file.txt" + onedrive.DataFileSuffix, + data: []byte(strings.Repeat("c", 129)), + lookupKey: "test-file.txt" + onedrive.DataFileSuffix, + }, + { + name: "test-file.txt" + onedrive.MetaFileSuffix, + data: []byte("{}"), + lookupKey: "test-file.txt" + onedrive.MetaFileSuffix, + }, + { + name: "folder-a" + onedrive.DirMetaFileSuffix, + data: []byte("{}"), + lookupKey: "folder-a" + onedrive.DirMetaFileSuffix, + }, + }, + }, + { + pathElements: []string{ + "drives", + driveID, + "root:", + "folder-a", + "b", + "folder-a", + }, + category: path.FilesCategory, + items: []itemInfo{ + { + name: "test-file.txt" + onedrive.DataFileSuffix, + data: []byte(strings.Repeat("d", 257)), + lookupKey: "test-file.txt" + onedrive.DataFileSuffix, + }, + { + name: "test-file.txt" + onedrive.MetaFileSuffix, + data: []byte("{}"), + lookupKey: "test-file.txt" + onedrive.MetaFileSuffix, + }, + }, + }, + { + pathElements: []string{ + "drives", + driveID, + "root:", + "b", + }, + category: path.FilesCategory, + items: []itemInfo{ + { + name: "test-file.txt" + onedrive.DataFileSuffix, + data: []byte(strings.Repeat("e", 257)), + lookupKey: "test-file.txt" + onedrive.DataFileSuffix, + }, + { + name: "test-file.txt" + onedrive.MetaFileSuffix, + data: 
[]byte("{}"), + lookupKey: "test-file.txt" + onedrive.MetaFileSuffix, + }, + }, + }, + }, + }, + { + name: "OneDriveFoldersAndFilesWithMetadata", + service: path.OneDriveService, + resource: Users, + collections: []colInfo{ + { + pathElements: []string{ + "drives", + driveID, + "root:", + }, + category: path.FilesCategory, + items: []itemInfo{ + { + name: "test-file.txt" + onedrive.DataFileSuffix, + data: []byte(strings.Repeat("a", 33)), + lookupKey: "test-file.txt" + onedrive.DataFileSuffix, + }, + { + name: "test-file.txt" + onedrive.MetaFileSuffix, + data: getTestMetaJSON(suite.T(), suite.secondaryUser, []string{"write"}), + lookupKey: "test-file.txt" + onedrive.MetaFileSuffix, + }, + { + name: "b" + onedrive.DirMetaFileSuffix, + data: getTestMetaJSON(suite.T(), suite.secondaryUser, []string{"read"}), + lookupKey: "b" + onedrive.DirMetaFileSuffix, + }, + }, + }, + { + pathElements: []string{ + "drives", + driveID, + "root:", + "b", + }, + category: path.FilesCategory, + items: []itemInfo{ + { + name: "test-file.txt" + onedrive.DataFileSuffix, + data: []byte(strings.Repeat("e", 66)), + lookupKey: "test-file.txt" + onedrive.DataFileSuffix, + }, + { + name: "test-file.txt" + onedrive.MetaFileSuffix, + data: getTestMetaJSON(suite.T(), suite.secondaryUser, []string{"read"}), + lookupKey: "test-file.txt" + onedrive.MetaFileSuffix, + }, + }, + }, + }, + }, + } + + for _, test := range table { + suite.T().Run(test.name, func(t *testing.T) { + runRestoreBackupTest( + t, + suite.acct, + test, + suite.connector.tenant, + []string{suite.user}, + control.Options{ + RestorePermissions: true, + ToggleFeatures: control.Toggles{EnablePermissionsBackup: true}, + }, + ) + }) + } +} + +func (suite *GraphConnectorIntegrationSuite) TestRestoreAndBackupVersion0() { + ctx, flush := tester.NewContext() + defer flush() + + // Get the default drive ID for the test user. 
+ driveID := mustGetDefaultDriveID( + suite.T(), + ctx, + suite.connector.Service, + suite.user, + ) + + table := []restoreBackupInfoMultiVersion{ + { + name: "OneDriveMultipleFoldersAndFiles", + service: path.OneDriveService, + resource: Users, + + collectionsPrevious: []colInfo{ { pathElements: []string{ "drives", @@ -765,12 +1159,152 @@ func (suite *GraphConnectorIntegrationSuite) TestRestoreAndBackup() { }, }, }, + + collectionsLatest: []colInfo{ + { + pathElements: []string{ + "drives", + driveID, + "root:", + }, + category: path.FilesCategory, + items: []itemInfo{ + { + name: "test-file.txt" + onedrive.DataFileSuffix, + data: []byte(strings.Repeat("a", 33)), + lookupKey: "test-file.txt" + onedrive.DataFileSuffix, + }, + { + name: "test-file.txt" + onedrive.MetaFileSuffix, + data: []byte("{}"), + lookupKey: "test-file.txt" + onedrive.MetaFileSuffix, + }, + { + name: "folder-a" + onedrive.DirMetaFileSuffix, + data: []byte("{}"), + lookupKey: "folder-a" + onedrive.DirMetaFileSuffix, + }, + { + name: "b" + onedrive.DirMetaFileSuffix, + data: []byte("{}"), + lookupKey: "b" + onedrive.DirMetaFileSuffix, + }, + }, + }, + { + pathElements: []string{ + "drives", + driveID, + "root:", + "folder-a", + }, + category: path.FilesCategory, + items: []itemInfo{ + { + name: "test-file.txt" + onedrive.DataFileSuffix, + data: []byte(strings.Repeat("b", 65)), + lookupKey: "test-file.txt" + onedrive.DataFileSuffix, + }, + { + name: "test-file.txt" + onedrive.MetaFileSuffix, + data: []byte("{}"), + lookupKey: "test-file.txt" + onedrive.MetaFileSuffix, + }, + { + name: "b" + onedrive.DirMetaFileSuffix, + data: []byte("{}"), + lookupKey: "b" + onedrive.DirMetaFileSuffix, + }, + }, + }, + { + pathElements: []string{ + "drives", + driveID, + "root:", + "folder-a", + "b", + }, + category: path.FilesCategory, + items: []itemInfo{ + { + name: "test-file.txt" + onedrive.DataFileSuffix, + data: []byte(strings.Repeat("c", 129)), + lookupKey: "test-file.txt" + onedrive.DataFileSuffix, + }, + 
{ + name: "test-file.txt" + onedrive.MetaFileSuffix, + data: []byte("{}"), + lookupKey: "test-file.txt" + onedrive.MetaFileSuffix, + }, + { + name: "folder-a" + onedrive.DirMetaFileSuffix, + data: []byte("{}"), + lookupKey: "folder-a" + onedrive.DirMetaFileSuffix, + }, + }, + }, + { + pathElements: []string{ + "drives", + driveID, + "root:", + "folder-a", + "b", + "folder-a", + }, + category: path.FilesCategory, + items: []itemInfo{ + { + name: "test-file.txt" + onedrive.DataFileSuffix, + data: []byte(strings.Repeat("d", 257)), + lookupKey: "test-file.txt" + onedrive.DataFileSuffix, + }, + { + name: "test-file.txt" + onedrive.MetaFileSuffix, + data: []byte("{}"), + lookupKey: "test-file.txt" + onedrive.MetaFileSuffix, + }, + }, + }, + { + pathElements: []string{ + "drives", + driveID, + "root:", + "b", + }, + category: path.FilesCategory, + items: []itemInfo{ + { + name: "test-file.txt" + onedrive.DataFileSuffix, + data: []byte(strings.Repeat("e", 257)), + lookupKey: "test-file.txt" + onedrive.DataFileSuffix, + }, + { + name: "test-file.txt" + onedrive.MetaFileSuffix, + data: []byte("{}"), + lookupKey: "test-file.txt" + onedrive.MetaFileSuffix, + }, + }, + }, + }, }, } for _, test := range table { suite.T().Run(test.name, func(t *testing.T) { - runRestoreBackupTest(t, suite.acct, test, suite.connector.tenant, []string{suite.user}) + runRestoreBackupTestVersion0( + t, + suite.acct, + test, + suite.connector.tenant, + []string{suite.user}, + control.Options{ + RestorePermissions: true, + ToggleFeatures: control.Toggles{EnablePermissionsBackup: true}, + }, + ) }) } } @@ -857,7 +1391,7 @@ func (suite *GraphConnectorIntegrationSuite) TestMultiFolderBackupDifferentNames }, }) - totalItems, collections, expectedData := collectionsForInfo( + totalItems, _, collections, expectedData := collectionsForInfo( t, test.service, suite.connector.tenant, @@ -879,7 +1413,18 @@ func (suite *GraphConnectorIntegrationSuite) TestMultiFolderBackupDifferentNames ) restoreGC := 
loadConnector(ctx, t, graph.HTTPClient(graph.NoTimeout()), test.resource) - deets, err := restoreGC.RestoreDataCollections(ctx, suite.acct, restoreSel, dest, collections) + deets, err := restoreGC.RestoreDataCollections( + ctx, + backup.Version, + suite.acct, + restoreSel, + dest, + control.Options{ + RestorePermissions: true, + ToggleFeatures: control.Toggles{EnablePermissionsBackup: true}, + }, + collections, + ) require.NoError(t, err) require.NotNil(t, deets) @@ -900,7 +1445,15 @@ func (suite *GraphConnectorIntegrationSuite) TestMultiFolderBackupDifferentNames backupSel := backupSelectorForExpected(t, test.service, expectedDests) t.Log("Selective backup of", backupSel) - dcs, excludes, err := backupGC.DataCollections(ctx, backupSel, nil, control.Options{}) + dcs, excludes, err := backupGC.DataCollections( + ctx, + backupSel, + nil, + control.Options{ + RestorePermissions: true, + ToggleFeatures: control.Toggles{EnablePermissionsBackup: true}, + }, + ) require.NoError(t, err) // No excludes yet because this isn't an incremental backup. assert.Empty(t, excludes) @@ -909,7 +1462,7 @@ func (suite *GraphConnectorIntegrationSuite) TestMultiFolderBackupDifferentNames // Pull the data prior to waiting for the status as otherwise it will // deadlock. - skipped := checkCollections(t, allItems, allExpectedData, dcs) + skipped := checkCollections(t, allItems, allExpectedData, dcs, true) status := backupGC.AwaitStatus() assert.Equal(t, allItems+skipped, status.ObjectCount, "status.ObjectCount") @@ -918,6 +1471,319 @@ func (suite *GraphConnectorIntegrationSuite) TestMultiFolderBackupDifferentNames } } +func (suite *GraphConnectorIntegrationSuite) TestPermissionsRestoreAndBackup() { + ctx, flush := tester.NewContext() + defer flush() + + // Get the default drive ID for the test user. 
+ driveID := mustGetDefaultDriveID( + suite.T(), + ctx, + suite.connector.Service, + suite.user, + ) + + table := []restoreBackupInfo{ + { + name: "FilePermissionsRestore", + service: path.OneDriveService, + resource: Users, + collections: []colInfo{ + { + pathElements: []string{ + "drives", + driveID, + "root:", + }, + category: path.FilesCategory, + items: []itemInfo{ + { + name: "test-file.txt" + onedrive.DataFileSuffix, + data: []byte(strings.Repeat("a", 33)), + lookupKey: "test-file.txt" + onedrive.DataFileSuffix, + }, + { + name: "test-file.txt" + onedrive.MetaFileSuffix, + data: getTestMetaJSON(suite.T(), suite.secondaryUser, []string{"write"}), + lookupKey: "test-file.txt" + onedrive.MetaFileSuffix, + }, + }, + }, + }, + }, + + { + name: "FileInsideFolderPermissionsRestore", + service: path.OneDriveService, + resource: Users, + collections: []colInfo{ + { + pathElements: []string{ + "drives", + driveID, + "root:", + }, + category: path.FilesCategory, + items: []itemInfo{ + { + name: "test-file.txt" + onedrive.DataFileSuffix, + data: []byte(strings.Repeat("a", 33)), + lookupKey: "test-file.txt" + onedrive.DataFileSuffix, + }, + { + name: "test-file.txt" + onedrive.MetaFileSuffix, + data: []byte("{}"), + lookupKey: "test-file.txt" + onedrive.MetaFileSuffix, + }, + { + name: "b" + onedrive.DirMetaFileSuffix, + data: []byte("{}"), + lookupKey: "b" + onedrive.DirMetaFileSuffix, + }, + }, + }, + { + pathElements: []string{ + "drives", + driveID, + "root:", + "b", + }, + category: path.FilesCategory, + items: []itemInfo{ + { + name: "test-file.txt" + onedrive.DataFileSuffix, + data: []byte(strings.Repeat("e", 66)), + lookupKey: "test-file.txt" + onedrive.DataFileSuffix, + }, + { + name: "test-file.txt" + onedrive.MetaFileSuffix, + data: getTestMetaJSON(suite.T(), suite.secondaryUser, []string{"read"}), + lookupKey: "test-file.txt" + onedrive.MetaFileSuffix, + }, + }, + }, + }, + }, + + { + name: "FileAndFolderPermissionsResote", + service: path.OneDriveService, + 
resource: Users, + collections: []colInfo{ + { + pathElements: []string{ + "drives", + driveID, + "root:", + }, + category: path.FilesCategory, + items: []itemInfo{ + { + name: "test-file.txt" + onedrive.DataFileSuffix, + data: []byte(strings.Repeat("a", 33)), + lookupKey: "test-file.txt" + onedrive.DataFileSuffix, + }, + { + name: "test-file.txt" + onedrive.MetaFileSuffix, + data: getTestMetaJSON(suite.T(), suite.secondaryUser, []string{"write"}), + lookupKey: "test-file.txt" + onedrive.MetaFileSuffix, + }, + { + name: "b" + onedrive.DirMetaFileSuffix, + data: getTestMetaJSON(suite.T(), suite.secondaryUser, []string{"read"}), + lookupKey: "b" + onedrive.DirMetaFileSuffix, + }, + }, + }, + { + pathElements: []string{ + "drives", + driveID, + "root:", + "b", + }, + category: path.FilesCategory, + items: []itemInfo{ + { + name: "test-file.txt" + onedrive.DataFileSuffix, + data: []byte(strings.Repeat("e", 66)), + lookupKey: "test-file.txt" + onedrive.DataFileSuffix, + }, + { + name: "test-file.txt" + onedrive.MetaFileSuffix, + data: getTestMetaJSON(suite.T(), suite.secondaryUser, []string{"read"}), + lookupKey: "test-file.txt" + onedrive.MetaFileSuffix, + }, + }, + }, + }, + }, + + { + name: "FileAndFolderSeparatePermissionsResote", + service: path.OneDriveService, + resource: Users, + collections: []colInfo{ + { + pathElements: []string{ + "drives", + driveID, + "root:", + }, + category: path.FilesCategory, + items: []itemInfo{ + { + name: "b" + onedrive.DirMetaFileSuffix, + data: getTestMetaJSON(suite.T(), suite.secondaryUser, []string{"read"}), + lookupKey: "b" + onedrive.DirMetaFileSuffix, + }, + }, + }, + { + pathElements: []string{ + "drives", + driveID, + "root:", + "b", + }, + category: path.FilesCategory, + items: []itemInfo{ + { + name: "test-file.txt" + onedrive.DataFileSuffix, + data: []byte(strings.Repeat("e", 66)), + lookupKey: "test-file.txt" + onedrive.DataFileSuffix, + }, + { + name: "test-file.txt" + onedrive.MetaFileSuffix, + data: 
getTestMetaJSON(suite.T(), suite.secondaryUser, []string{"write"}), + lookupKey: "test-file.txt" + onedrive.MetaFileSuffix, + }, + }, + }, + }, + }, + + { + name: "FolderAndNoChildPermissionsResote", + service: path.OneDriveService, + resource: Users, + collections: []colInfo{ + { + pathElements: []string{ + "drives", + driveID, + "root:", + }, + category: path.FilesCategory, + items: []itemInfo{ + { + name: "b" + onedrive.DirMetaFileSuffix, + data: getTestMetaJSON(suite.T(), suite.secondaryUser, []string{"read"}), + lookupKey: "b" + onedrive.DirMetaFileSuffix, + }, + }, + }, + { + pathElements: []string{ + "drives", + driveID, + "root:", + "b", + }, + category: path.FilesCategory, + items: []itemInfo{ + { + name: "test-file.txt" + onedrive.DataFileSuffix, + data: []byte(strings.Repeat("e", 66)), + lookupKey: "test-file.txt" + onedrive.DataFileSuffix, + }, + { + name: "test-file.txt" + onedrive.MetaFileSuffix, + data: []byte("{}"), + lookupKey: "test-file.txt" + onedrive.MetaFileSuffix, + }, + }, + }, + }, + }, + } + + for _, test := range table { + suite.T().Run(test.name, func(t *testing.T) { + runRestoreBackupTest(t, + suite.acct, + test, + suite.connector.tenant, + []string{suite.user}, + control.Options{ + RestorePermissions: true, + ToggleFeatures: control.Toggles{EnablePermissionsBackup: true}, + }, + ) + }) + } +} + +func (suite *GraphConnectorIntegrationSuite) TestPermissionsBackupAndNoRestore() { + ctx, flush := tester.NewContext() + defer flush() + + // Get the default drive ID for the test user. 
+ driveID := mustGetDefaultDriveID( + suite.T(), + ctx, + suite.connector.Service, + suite.user, + ) + + table := []restoreBackupInfo{ + { + name: "FilePermissionsRestore", + service: path.OneDriveService, + resource: Users, + collections: []colInfo{ + { + pathElements: []string{ + "drives", + driveID, + "root:", + }, + category: path.FilesCategory, + items: []itemInfo{ + { + name: "test-file.txt" + onedrive.DataFileSuffix, + data: []byte(strings.Repeat("a", 33)), + lookupKey: "test-file.txt" + onedrive.DataFileSuffix, + }, + { + name: "test-file.txt" + onedrive.MetaFileSuffix, + data: getTestMetaJSON(suite.T(), suite.secondaryUser, []string{"write"}), + lookupKey: "test-file.txt" + onedrive.MetaFileSuffix, + }, + }, + }, + }, + }, + } + + for _, test := range table { + suite.T().Run(test.name, func(t *testing.T) { + runRestoreBackupTest( + t, + suite.acct, + test, + suite.connector.tenant, + []string{suite.user}, + control.Options{ + RestorePermissions: true, + ToggleFeatures: control.Toggles{EnablePermissionsBackup: true}, + }, + ) + }) + } +} + // TODO: this should only be run during smoke tests, not part of the standard CI. // That's why it's set aside instead of being included in the other test set. 
func (suite *GraphConnectorIntegrationSuite) TestRestoreAndBackup_largeMailAttachment() { @@ -942,5 +1808,15 @@ func (suite *GraphConnectorIntegrationSuite) TestRestoreAndBackup_largeMailAttac }, } - runRestoreBackupTest(suite.T(), suite.acct, test, suite.connector.tenant, []string{suite.user}) + runRestoreBackupTest( + suite.T(), + suite.acct, + test, + suite.connector.tenant, + []string{suite.user}, + control.Options{ + RestorePermissions: true, + ToggleFeatures: control.Toggles{EnablePermissionsBackup: true}, + }, + ) } diff --git a/src/internal/connector/mockconnector/mock_data_collection_test.go b/src/internal/connector/mockconnector/mock_data_collection_test.go index 2af9236c4..f2ba4d08e 100644 --- a/src/internal/connector/mockconnector/mock_data_collection_test.go +++ b/src/internal/connector/mockconnector/mock_data_collection_test.go @@ -202,6 +202,15 @@ func (suite *MockExchangeDataSuite) TestMockByteHydration() { return err }, }, + { + name: "SharePoint: Page", + transformation: func(t *testing.T) error { + bytes := mockconnector.GetMockPage(subject) + _, err := support.CreatePageFromBytes(bytes) + + return err + }, + }, } for _, test := range tests { diff --git a/src/internal/connector/mockconnector/mock_data_message.go b/src/internal/connector/mockconnector/mock_data_message.go index 597447492..4c2e84235 100644 --- a/src/internal/connector/mockconnector/mock_data_message.go +++ b/src/internal/connector/mockconnector/mock_data_message.go @@ -336,3 +336,212 @@ func GetMockEventMessageRequest(subject string) []byte { return []byte(message) } + +func GetMockMessageWithItemAttachmentEvent(subject string) []byte { + //nolint:lll + message := "{\"id\":\"AAMkAGQ1NzViZTdhLTEwMTMtNGJjNi05YWI2LTg4NWRlZDA2Y2UxOABGAAAAAAAPvVwUramXT7jlSGpVU8_7BwB8wYc0thTTTYl3RpEYIUq_AAAAAAEMAAB8wYc0thTTTYl3RpEYIUq_AADFfThMAAA=\",\"@odata.type\":\"#microsoft.graph.message\"," + + 
"\"@odata.etag\":\"W/\\\"CQAAABYAAAB8wYc0thTTTYl3RpEYIUq+AADFK3BH\\\"\",\"@odata.context\":\"https://graph.microsoft.com/v1.0/$metadata#users('dustina%408qzvrj.onmicrosoft.com')/messages/$entity\",\"categories\":[]," + + "\"changeKey\":\"CQAAABYAAAB8wYc0thTTTYl3RpEYIUq+AADFK3BH\",\"createdDateTime\":\"2023-02-01T13:48:43Z\",\"lastModifiedDateTime\":\"2023-02-01T18:27:03Z\"," + + "\"attachments\":[{\"id\":\"AAMkAGQ1NzViZTdhLTEwMTMtNGJjNi05YWI2LTg4NWRlZDA2Y2UxOABGAAAAAAAPvVwUramXT7jlSGpVU8_7BwB8wYc0thTTTYl3RpEYIUq_AAAAAAEMAAB8wYc0thTTTYl3RpEYIUq_AADFfThMAAABEgAQAKHxTL6mNCZPo71dbwrfKYM=\"," + + "\"@odata.type\":\"#microsoft.graph.itemAttachment\",\"isInline\":false,\"lastModifiedDateTime\":\"2023-02-01T13:52:56Z\",\"name\":\"Holidayevent\",\"size\":2059,\"item\":{\"id\":\"\",\"@odata.type\":\"#microsoft.graph.event\"," + + "\"createdDateTime\":\"2023-02-01T13:52:56Z\",\"lastModifiedDateTime\":\"2023-02-01T13:52:56Z\",\"body\":{\"content\":\"\\r\\nLet'slookforfunding!\"," + + "\"contentType\":\"html\"},\"end\":{\"dateTime\":\"2016-12-02T19:00:00.0000000Z\",\"timeZone\":\"UTC\"}," + + "\"hasAttachments\":false,\"isAllDay\":false,\"isCancelled\":false,\"isDraft\":true,\"isOnlineMeeting\":false,\"isOrganizer\":true,\"isReminderOn\":false,\"organizer\":{\"emailAddress\":{\"address\":\"" + defaultMessageFrom + "\",\"name\":\"" + defaultAlias + "\"}}," + + "\"originalEndTimeZone\":\"tzone://Microsoft/Utc\",\"originalStartTimeZone\":\"tzone://Microsoft/Utc\",\"reminderMinutesBeforeStart\":0,\"responseRequested\":true,\"start\":{\"dateTime\":\"2016-12-02T18:00:00.0000000Z\",\"timeZone\":\"UTC\"}," + + "\"subject\":\"Discussgiftsforchildren\",\"type\":\"singleInstance\"}}],\"bccRecipients\":[],\"body\":{\"content\":\"\\r\\n\\r\\n\\r\\nLookingtodothis \",\"contentType\":\"html\"}," + + 
"\"bodyPreview\":\"Lookingtodothis\",\"ccRecipients\":[],\"conversationId\":\"AAQkAGQ1NzViZTdhLTEwMTMtNGJjNi05YWI2LTg4NWRlZDA2Y2UxOAAQADGvj5ACBMdGpESX4xSOxCo=\",\"conversationIndex\":\"AQHZNkPmMa+PkAIEx0akRJfjFI7EKg==\",\"flag\":{\"flagStatus\":\"notFlagged\"}," + + "\"from\":{\"emailAddress\":{\"address\":\"" + defaultMessageFrom + "\",\"name\":\"" + defaultAlias + "\"}},\"hasAttachments\":true,\"importance\":\"normal\",\"inferenceClassification\":\"focused\"," + + "\"internetMessageId\":\"\",\"isDeliveryReceiptRequested\":false,\"isDraft\":false,\"isRead\":true,\"isReadReceiptRequested\":false," + + "\"parentFolderId\":\"AQMkAGQ1NzViZTdhLTEwMTMtNGJjNi05YWI2LTg4ADVkZWQwNmNlMTgALgAAAw_9XBStqZdPuOVIalVTz7sBAHzBhzS2FNNNiXdGkRghSr4AAAIBDAAAAA==\",\"receivedDateTime\":\"2023-02-01T13:48:47Z\",\"replyTo\":[]," + + "\"sender\":{\"emailAddress\":{\"address\":\"" + defaultMessageSender + "\",\"name\":\"" + defaultAlias + "\"}},\"sentDateTime\":\"2023-02-01T13:48:46Z\"," + + "\"subject\":\"" + subject + "\",\"toRecipients\":[{\"emailAddress\":{\"address\":\"" + defaultMessageTo + "\",\"name\":\"" + defaultAlias + "\"}}]," + + "\"webLink\":\"https://outlook.office365.com/owa/?ItemID=AAMkAGQ1NzViZTdhLTEwMTMtNGJjNi05YWI2LTg4NWRlZDA2Y2UxOABGAAAAAAAPvVwUramXT7jlSGpVU8%2B7BwB8wYc0thTTTYl3RpEYIUq%2BAAAAAAEMAAB8wYc0thTTTYl3RpEYIUq%2BAADFfThMAAA%3D&exvsurl=1&viewmodel=ReadMessageItem\"}" + + return []byte(message) +} + +func GetMockMessageWithNestedItemAttachmentEvent(subject string) []byte { + //nolint:lll + // Order of fields: + // 1. subject + // 2. alias + // 3. sender address + // 4. from address + // 5. 
toRecipients email address + template := `{ + "@odata.context": "https://graph.microsoft.com/v1.0/$metadata#users('f435c656-f8b2-4d71-93c3-6e092f52a167')/messages(attachments())/$entity", + "@odata.etag": "W/\"CQAAABYAAAB8wYc0thTTTYl3RpEYIUq+AADFK782\"", + "id": "AAMkAGQ1NzViZTdhLTEwMTMtNGJjNi05YWI2LTg4NWRlZDA2Y2UxOABGAAAAAAAPvVwUramXT7jlSGpVU8_7BwB8wYc0thTTTYl3RpEYIUq_AAAAAAEMAAB8wYc0thTTTYl3RpEYIUq_AADFfThSAAA=", + "createdDateTime": "2023-02-02T21:38:27Z", + "lastModifiedDateTime": "2023-02-02T22:42:49Z", + "changeKey": "CQAAABYAAAB8wYc0thTTTYl3RpEYIUq+AADFK782", + "categories": [], + "receivedDateTime": "2023-02-02T21:38:27Z", + "sentDateTime": "2023-02-02T21:38:24Z", + "hasAttachments": true, + "internetMessageId": "", + "subject": "%[1]v", + "bodyPreview": "Dustin,\r\n\r\nI'm here to see if we are still able to discover our object.", + "importance": "normal", + "parentFolderId": "AQMkAGQ1NzViZTdhLTEwMTMtNGJjNi05YWI2LTg4ADVkZWQwNmNlMTgALgAAAw_9XBStqZdPuOVIalVTz7sBAHzBhzS2FNNNiXdGkRghSr4AAAIBDAAAAA==", + "conversationId": "AAQkAGQ1NzViZTdhLTEwMTMtNGJjNi05YWI2LTg4NWRlZDA2Y2UxOAAQAB13OyMdkNJJqEaIrGi3Yjc=", + "conversationIndex": "AQHZN06dHXc7Ix2Q0kmoRoisaLdiNw==", + "isDeliveryReceiptRequested": false, + "isReadReceiptRequested": false, + "isRead": false, + "isDraft": false, + "webLink": "https://outlook.office365.com/owa/?ItemID=AAMkAGQ1NzTruncated", + "inferenceClassification": "focused", + "body": { + "contentType": "html", + "content": "\r\n
Dustin,

I'm here to see if we are still able to discover our object. 
" + }, + "sender": { + "emailAddress": { + "name": "%[2]s", + "address": "%[3]s" + } + }, + "from": { + "emailAddress": { + "name": "%[2]s", + "address": "%[4]s" + } + }, + "toRecipients": [ + { + "emailAddress": { + "name": "%[2]s", + "address": "%[5]s" + } + } + ], + "ccRecipients": [], + "bccRecipients": [], + "replyTo": [], + "flag": { + "flagStatus": "notFlagged" + }, + "attachments": [ + { + "@odata.type": "#microsoft.graph.itemAttachment", + "id": "AAMkAGQ1NzViZTdhLTEwMTMtNGJjNi05YWI2LTg4NWRlZDA2Y2UxOABGAAAAAAAPvVwUramXT7jlSGpVU8_7BwB8wYc0thTTTYl3RpEYIUq_AAAAAAEMAAB8wYc0thTTTYl3RpEYIUq_AADFfThSAAABEgAQAIyAgT1ZccRCjKKyF7VZ3dA=", + "lastModifiedDateTime": "2023-02-02T21:38:27Z", + "name": "Mail Item Attachment", + "contentType": null, + "size": 5362, + "isInline": false, + "item@odata.associationLink": "https://graph.microsoft.com/v1.0/users('f435c656-f8b2-4d71-93c3-6e092f52a167')/messages('')/$ref", + "item@odata.navigationLink": "https://graph.microsoft.com/v1.0/users('f435c656-f8b2-4d71-93c3-6e092f52a167')/messages('')", + "item": { + "@odata.type": "#microsoft.graph.message", + "id": "", + "createdDateTime": "2023-02-02T21:38:27Z", + "lastModifiedDateTime": "2023-02-02T21:38:27Z", + "receivedDateTime": "2023-02-01T13:48:47Z", + "sentDateTime": "2023-02-01T13:48:46Z", + "hasAttachments": true, + "internetMessageId": "", + "subject": "Mail Item Attachment", + "bodyPreview": "Lookingtodothis", + "importance": "normal", + "conversationId": "AAQkAGQ1NzViZTdhLTEwMTMtNGJjNi05YWI2LTg4NWRlZDA2Y2UxOAAQAMNK0NU7Kx5GhAaHdzhfSRU=", + "conversationIndex": "AQHZN02pw0rQ1TsrHkaEBod3OF9JFQ==", + "isDeliveryReceiptRequested": false, + "isReadReceiptRequested": false, + "isRead": true, + "isDraft": false, + "webLink": "https://outlook.office365.com/owa/?AttachmentItemID=AAMkAGQ1NzViZTdhLTEwMTM", + "body": { + "contentType": "html", + "content": "\r\nLookingtodothis 
" + }, + "sender": { + "emailAddress": { + "name": "A Stranger", + "address": "foobar@8qzvrj.onmicrosoft.com" + } + }, + "from": { + "emailAddress": { + "name": "A Stranger", + "address": "foobar@8qzvrj.onmicrosoft.com" + } + }, + "toRecipients": [ + { + "emailAddress": { + "name": "Direct Report", + "address": "notAvailable@8qzvrj.onmicrosoft.com" + } + } + ], + "flag": { + "flagStatus": "notFlagged" + }, + "attachments": [ + { + "@odata.type": "#microsoft.graph.itemAttachment", + "id": "AAMkAGQ1NzViZTdhLTEwMTMtNGJjNi05YWI2LTg4NWRlZDA2Y2UxOABGAAAAAAAPvVwUramXT7jlSGpVU8_7BwB8wYc0thTTTYl3RpEYIUq_AAAAAAEMAAB8wYc0thTTTYl3RpEYIUq_AADFfThSAAACEgAQAIyAgT1ZccRCjKKyF7VZ3dASABAAuYCb3N2YZ02RpJrZPzCBFQ==", + "lastModifiedDateTime": "2023-02-02T21:38:27Z", + "name": "Holidayevent", + "contentType": null, + "size": 2331, + "isInline": false, + "item@odata.associationLink": "https://graph.microsoft.com/v1.0/users('f435c656-f8b2-4d71-93c3-6e092f52a167')/events('')/$ref", + "item@odata.navigationLink": "https://graph.microsoft.com/v1.0/users('f435c656-f8b2-4d71-93c3-6e092f52a167')/events('')", + "item": { + "@odata.type": "#microsoft.graph.event", + "id": "", + "createdDateTime": "2023-02-02T21:38:27Z", + "lastModifiedDateTime": "2023-02-02T21:38:27Z", + "originalStartTimeZone": "tzone://Microsoft/Utc", + "originalEndTimeZone": "tzone://Microsoft/Utc", + "reminderMinutesBeforeStart": 0, + "isReminderOn": false, + "hasAttachments": false, + "subject": "Discuss Gifts for Children", + "isAllDay": false, + "isCancelled": false, + "isOrganizer": true, + "responseRequested": true, + "type": "singleInstance", + "isOnlineMeeting": false, + "isDraft": true, + "body": { + "contentType": "html", + "content": "\r\nLet'slookforfunding! 
" + }, + "start": { + "dateTime": "2016-12-02T18:00:00.0000000Z", + "timeZone": "UTC" + }, + "end": { + "dateTime": "2016-12-02T19:00:00.0000000Z", + "timeZone": "UTC" + }, + "organizer": { + "emailAddress": { + "name": "Event Manager", + "address": "philonis@8qzvrj.onmicrosoft.com" + } + } + } + } + ] + } + } + ] + }` + + message := fmt.Sprintf( + template, + subject, + defaultAlias, + defaultMessageSender, + defaultMessageFrom, + defaultMessageTo, + ) + + return []byte(message) +} diff --git a/src/internal/connector/mockconnector/mock_data_page.go b/src/internal/connector/mockconnector/mock_data_page.go new file mode 100644 index 000000000..0b8425418 --- /dev/null +++ b/src/internal/connector/mockconnector/mock_data_page.go @@ -0,0 +1,25 @@ +package mockconnector + +// GetMockPage returns bytes for models.SitePageable object +// Title string changes of fields: name and title +func GetMockPage(title string) []byte { + fileName := title + ".aspx" + + // Create Test Page + //nolint:lll + byteArray := []byte("{\"name\":\"" + fileName + "\",\"title\":\"" + title + "\",\"pageLayout\":\"article\",\"showComments\":true," + + "\"showRecommendedPages\":false,\"titleArea\":{\"enableGradientEffect\":true,\"imageWebUrl\":\"/_LAYOUTS/IMAGES/VISUALTEMPLATETITLEIMAGE.JPG\"," + + "\"layout\":\"colorBlock\",\"showAuthor\":true,\"showPublishedDate\":false,\"showTextBlockAboveTitle\":false,\"textAboveTitle\":\"TEXTABOVETITLE\"," + + "\"textAlignment\":\"left\",\"imageSourceType\":2,\"title\":\"sample1\"}," + + "\"canvasLayout\":{\"horizontalSections\":[{\"layout\":\"oneThirdRightColumn\",\"id\":\"1\",\"emphasis\":\"none\",\"columns\":[{\"id\":\"1\",\"width\":8," + + "\"webparts\":[{\"id\":\"6f9230af-2a98-4952-b205-9ede4f9ef548\",\"innerHtml\":\"

Hello!

\"}]},{\"id\":\"2\",\"width\":4," + + "\"webparts\":[{\"id\":\"73d07dde-3474-4545-badb-f28ba239e0e1\",\"webPartType\":\"d1d91016-032f-456d-98a4-721247c305e8\",\"data\":{\"dataVersion\":\"1.9\"," + + "\"description\":\"Showanimageonyourpage\",\"title\":\"Image\",\"properties\":{\"imageSourceType\":2,\"altText\":\"\",\"overlayText\":\"\"," + + "\"siteid\":\"0264cabe-6b92-450a-b162-b0c3d54fe5e8\",\"webid\":\"f3989670-cd37-4514-8ccb-0f7c2cbe5314\",\"listid\":\"bdb41041-eb06-474e-ac29-87093386bb14\"," + + "\"uniqueid\":\"d9f94b40-78ba-48d0-a39f-3cb23c2fe7eb\",\"imgWidth\":4288,\"imgHeight\":2848,\"fixAspectRatio\":false,\"captionText\":\"\",\"alignment\":\"Center\"}," + + "\"serverProcessedContent\":{\"imageSources\":[{\"key\":\"imageSource\",\"value\":\"/_LAYOUTS/IMAGES/VISUALTEMPLATEIMAGE1.JPG\"}]," + + "\"customMetadata\":[{\"key\":\"imageSource\",\"value\":{\"siteid\":\"0264cabe-6b92-450a-b162-b0c3d54fe5e8\",\"webid\":\"f3989670-cd37-4514-8ccb-0f7c2cbe5314\"," + + "\"listid\":\"bdb41041-eb06-474e-ac29-87093386bb14\",\"uniqueid\":\"d9f94b40-78ba-48d0-a39f-3cb23c2fe7eb\",\"width\":\"4288\",\"height\":\"2848\"}}]}}}]}]}]}}") + + return byteArray +} diff --git a/src/internal/connector/onedrive/api/drive.go b/src/internal/connector/onedrive/api/drive.go index 6dd7d46a1..ce246da85 100644 --- a/src/internal/connector/onedrive/api/drive.go +++ b/src/internal/connector/onedrive/api/drive.go @@ -3,6 +3,7 @@ package api import ( "context" + msdrives "github.com/microsoftgraph/msgraph-sdk-go/drives" "github.com/microsoftgraph/msgraph-sdk-go/models" mssites "github.com/microsoftgraph/msgraph-sdk-go/sites" msusers "github.com/microsoftgraph/msgraph-sdk-go/users" @@ -12,6 +13,75 @@ import ( "github.com/alcionai/corso/src/internal/connector/graph/api" ) +func getValues[T any](l api.PageLinker) ([]T, error) { + page, ok := l.(interface{ GetValue() []T }) + if !ok { + return nil, errors.Errorf( + "response of type [%T] does not comply with GetValue() interface", + l, + ) + } + + 
return page.GetValue(), nil +} + +// max we can do is 999 +const pageSize = int32(999) + +type driveItemPager struct { + gs graph.Servicer + builder *msdrives.ItemRootDeltaRequestBuilder + options *msdrives.ItemRootDeltaRequestBuilderGetRequestConfiguration +} + +func NewItemPager( + gs graph.Servicer, + driveID, link string, + fields []string, +) *driveItemPager { + pageCount := pageSize + requestConfig := &msdrives.ItemRootDeltaRequestBuilderGetRequestConfiguration{ + QueryParameters: &msdrives.ItemRootDeltaRequestBuilderGetQueryParameters{ + Top: &pageCount, + Select: fields, + }, + } + + res := &driveItemPager{ + gs: gs, + options: requestConfig, + builder: gs.Client().DrivesById(driveID).Root().Delta(), + } + + if len(link) > 0 { + res.builder = msdrives.NewItemRootDeltaRequestBuilder(link, gs.Adapter()) + } + + return res +} + +func (p *driveItemPager) GetPage(ctx context.Context) (api.DeltaPageLinker, error) { + var ( + resp api.DeltaPageLinker + err error + ) + + err = graph.RunWithRetry(func() error { + resp, err = p.builder.Get(ctx, p.options) + return err + }) + + return resp, err +} + +func (p *driveItemPager) SetNext(link string) { + p.builder = msdrives.NewItemRootDeltaRequestBuilder(link, p.gs.Adapter()) +} + +func (p *driveItemPager) ValuesIn(l api.DeltaPageLinker) ([]models.DriveItemable, error) { + return getValues[models.DriveItemable](l) +} + type userDrivePager struct { gs graph.Servicer builder *msusers.ItemDrivesRequestBuilder @@ -39,7 +109,17 @@ func NewUserDrivePager( } func (p *userDrivePager) GetPage(ctx context.Context) (api.PageLinker, error) { - return p.builder.Get(ctx, p.options) + var ( + resp api.PageLinker + err error + ) + + err = graph.RunWithRetry(func() error { + resp, err = p.builder.Get(ctx, p.options) + return err + }) + + return resp, err } func (p *userDrivePager) SetNext(link string) { @@ -47,15 +127,7 @@ func (p *userDrivePager) SetNext(link string) { } func (p *userDrivePager) ValuesIn(l api.PageLinker) 
([]models.Driveable, error) { - page, ok := l.(interface{ GetValue() []models.Driveable }) - if !ok { - return nil, errors.Errorf( - "response of type [%T] does not comply with GetValue() interface", - l, - ) - } - - return page.GetValue(), nil + return getValues[models.Driveable](l) } type siteDrivePager struct { @@ -85,7 +157,17 @@ func NewSiteDrivePager( } func (p *siteDrivePager) GetPage(ctx context.Context) (api.PageLinker, error) { - return p.builder.Get(ctx, p.options) + var ( + resp api.PageLinker + err error + ) + + err = graph.RunWithRetry(func() error { + resp, err = p.builder.Get(ctx, p.options) + return err + }) + + return resp, err } func (p *siteDrivePager) SetNext(link string) { @@ -93,13 +175,5 @@ func (p *siteDrivePager) SetNext(link string) { } func (p *siteDrivePager) ValuesIn(l api.PageLinker) ([]models.Driveable, error) { - page, ok := l.(interface{ GetValue() []models.Driveable }) - if !ok { - return nil, errors.Errorf( - "response of type [%T] does not comply with GetValue() interface", - l, - ) - } - - return page.GetValue(), nil + return getValues[models.Driveable](l) } diff --git a/src/internal/connector/onedrive/collection.go b/src/internal/connector/onedrive/collection.go index a786de0ab..343a8911e 100644 --- a/src/internal/connector/onedrive/collection.go +++ b/src/internal/connector/onedrive/collection.go @@ -5,6 +5,7 @@ import ( "context" "io" "net/http" + "strings" "sync" "sync/atomic" "time" @@ -34,6 +35,10 @@ const ( // Max number of retries to get doc from M365 // Seems to timeout at times because of multiple requests maxRetries = 4 // 1 + 3 retries + + MetaFileSuffix = ".meta" + DirMetaFileSuffix = ".dirmeta" + DataFileSuffix = ".data" ) var ( @@ -56,12 +61,13 @@ type Collection struct { // M365 IDs of file items within this collection driveItems map[string]models.DriveItemable // M365 ID of the drive this collection was created from - driveID string - source driveSource - service graph.Servicer - statusUpdater 
support.StatusUpdater - itemReader itemReaderFunc - ctrl control.Options + driveID string + source driveSource + service graph.Servicer + statusUpdater support.StatusUpdater + itemReader itemReaderFunc + itemMetaReader itemMetaReaderFunc + ctrl control.Options // should only be true if the old delta token expired doNotMergeItems bool @@ -73,6 +79,15 @@ type itemReaderFunc func( item models.DriveItemable, ) (itemInfo details.ItemInfo, itemData io.ReadCloser, err error) +// itemMetaReaderFunc returns a reader for the metadata of the +// specified item +type itemMetaReaderFunc func( + ctx context.Context, + service graph.Servicer, + driveID string, + item models.DriveItemable, +) (io.ReadCloser, int, error) + // NewCollection creates a Collection func NewCollection( itemClient *http.Client, @@ -101,6 +116,7 @@ func NewCollection( c.itemReader = sharePointItemReader default: c.itemReader = oneDriveItemReader + c.itemMetaReader = oneDriveItemMetaReader } return c @@ -138,6 +154,21 @@ func (oc Collection) DoNotMergeItems() bool { return oc.doNotMergeItems } +// FilePermission is used to store permissions of a specific user to a +// OneDrive item. +type UserPermission struct { + ID string `json:"id,omitempty"` + Roles []string `json:"role,omitempty"` + Email string `json:"email,omitempty"` + Expiration *time.Time `json:"expiration,omitempty"` +} + +// ItemMeta contains metadata about the Item. 
It gets stored in a +// separate file in kopia +type Metadata struct { + Permissions []UserPermission `json:"permissions,omitempty"` +} + // Item represents a single item retrieved from OneDrive type Item struct { id string @@ -173,18 +204,21 @@ func (od *Item) ModTime() time.Time { // and uses the collection `itemReader` to read the item func (oc *Collection) populateItems(ctx context.Context) { var ( - errs error - byteCount int64 - itemsRead int64 - wg sync.WaitGroup - m sync.Mutex + errs error + byteCount int64 + itemsRead int64 + dirsRead int64 + itemsFound int64 + dirsFound int64 + wg sync.WaitGroup + m sync.Mutex ) // Retrieve the OneDrive folder path to set later in // `details.OneDriveInfo` parentPathString, err := path.GetDriveFolderPath(oc.folderPath) if err != nil { - oc.reportAsCompleted(ctx, 0, 0, err) + oc.reportAsCompleted(ctx, 0, 0, 0, err) return } @@ -205,16 +239,11 @@ func (oc *Collection) populateItems(ctx context.Context) { m.Unlock() } - for id, item := range oc.driveItems { + for _, item := range oc.driveItems { if oc.ctrl.FailFast && errs != nil { break } - if item == nil { - errUpdater(id, errors.New("nil item")) - continue - } - semaphoreCh <- struct{}{} wg.Add(1) @@ -223,13 +252,64 @@ func (oc *Collection) populateItems(ctx context.Context) { defer wg.Done() defer func() { <-semaphoreCh }() + // Read the item var ( - itemID = *item.GetId() - itemName = *item.GetName() - itemSize = *item.GetSize() - itemInfo details.ItemInfo + itemID = *item.GetId() + itemName = *item.GetName() + itemSize = *item.GetSize() + itemInfo details.ItemInfo + itemMeta io.ReadCloser + itemMetaSize int + metaSuffix string + err error ) + isFile := item.GetFile() != nil + + if isFile { + atomic.AddInt64(&itemsFound, 1) + + metaSuffix = MetaFileSuffix + } else { + atomic.AddInt64(&dirsFound, 1) + + metaSuffix = DirMetaFileSuffix + } + + if oc.source == OneDriveSource { + // Fetch metadata for the file + for i := 1; i <= maxRetries; i++ { + if 
!oc.ctrl.ToggleFeatures.EnablePermissionsBackup { + // We are still writing the metadata file but with + // empty permissions as we don't have a way to + // signify that the permissions was explicitly + // not added. + itemMeta = io.NopCloser(strings.NewReader("{}")) + itemMetaSize = 2 + + break + } + + itemMeta, itemMetaSize, err = oc.itemMetaReader(ctx, oc.service, oc.driveID, item) + + // retry on Timeout type errors, break otherwise. + if err == nil || + !graph.IsErrTimeout(err) || + !graph.IsInternalServerError(err) { + break + } + + if i < maxRetries { + time.Sleep(1 * time.Second) + } + } + + if err != nil { + errUpdater(*item.GetId(), errors.Wrap(err, "failed to get item permissions")) + return + } + } + switch oc.source { case SharePointSource: itemInfo.SharePoint = sharePointItemInfo(item, itemSize) @@ -239,101 +319,127 @@ func (oc *Collection) populateItems(ctx context.Context) { itemInfo.OneDrive.ParentPath = parentPathString } - // Construct a new lazy readCloser to feed to the collection consumer. - // This ensures that downloads won't be attempted unless that consumer - // attempts to read bytes. Assumption is that kopia will check things - // like file modtimes before attempting to read. - itemReader := lazy.NewLazyReadCloser(func() (io.ReadCloser, error) { - // Read the item - var ( - itemData io.ReadCloser - err error - ) + if isFile { + dataSuffix := "" + if oc.source == OneDriveSource { + dataSuffix = DataFileSuffix + } - for i := 1; i <= maxRetries; i++ { - _, itemData, err = oc.itemReader(oc.itemClient, item) - if err == nil { - break - } + // Construct a new lazy readCloser to feed to the collection consumer. + // This ensures that downloads won't be attempted unless that consumer + // attempts to read bytes. Assumption is that kopia will check things + // like file modtimes before attempting to read. 
+ itemReader := lazy.NewLazyReadCloser(func() (io.ReadCloser, error) { + // Read the item + var ( + itemData io.ReadCloser + err error + ) - if graph.IsErrUnauthorized(err) { - // assume unauthorized requests are a sign of an expired - // jwt token, and that we've overrun the available window - // to download the actual file. Re-downloading the item - // will refresh that download url. - di, diErr := getDriveItem(ctx, oc.service, oc.driveID, itemID) - if diErr != nil { - err = errors.Wrap(diErr, "retrieving expired item") + for i := 1; i <= maxRetries; i++ { + _, itemData, err = oc.itemReader(oc.itemClient, item) + if err == nil { break } - item = di + if graph.IsErrUnauthorized(err) { + // assume unauthorized requests are a sign of an expired + // jwt token, and that we've overrun the available window + // to download the actual file. Re-downloading the item + // will refresh that download url. + di, diErr := getDriveItem(ctx, oc.service, oc.driveID, itemID) + if diErr != nil { + err = errors.Wrap(diErr, "retrieving expired item") + break + } - continue + item = di - } else if !graph.IsErrTimeout(err) && !graph.IsErrThrottled(err) && !graph.IsSericeUnavailable(err) { - // TODO: graphAPI will provides headers that state the duration to wait - // in order to succeed again. The one second sleep won't cut it here. 
- // - // for all non-timeout, non-unauth, non-throttling errors, do not retry - break + continue + + } else if !graph.IsErrTimeout(err) && + !graph.IsInternalServerError(err) { + // Don't retry for non-timeout, on-unauth, as + // we are already retrying it in the default + // retry middleware + break + } + + if i < maxRetries { + time.Sleep(1 * time.Second) + } } - if i < maxRetries { - time.Sleep(1 * time.Second) + // check for errors following retries + if err != nil { + errUpdater(itemID, err) + return nil, err } + + // display/log the item download + progReader, closer := observe.ItemProgress( + ctx, + itemData, + observe.ItemBackupMsg, + observe.PII(itemName+dataSuffix), + itemSize, + ) + go closer() + + return progReader, nil + }) + + oc.data <- &Item{ + id: itemName + dataSuffix, + data: itemReader, + info: itemInfo, } + } - // check for errors following retries - if err != nil { - errUpdater(itemID, err) - return nil, err + if oc.source == OneDriveSource { + metaReader := lazy.NewLazyReadCloser(func() (io.ReadCloser, error) { + progReader, closer := observe.ItemProgress( + ctx, itemMeta, observe.ItemBackupMsg, + observe.PII(itemName+metaSuffix), int64(itemMetaSize)) + go closer() + return progReader, nil + }) + + oc.data <- &Item{ + id: itemName + metaSuffix, + data: metaReader, + info: itemInfo, } + } - // display/log the item download - progReader, closer := observe.ItemProgress(ctx, itemData, observe.ItemBackupMsg, observe.PII(itemName), itemSize) - go closer() - - return progReader, nil - }) - - // This can cause inaccurate counts. Right now it counts all the items - // we intend to read. Errors within the lazy readCloser will create a - // conflict: an item is both successful and erroneous. But the async - // control to fix that is more error-prone than helpful. - // - // TODO: transform this into a stats bus so that async control of stats - // aggregation is handled at the backup level, not at the item iteration - // level. 
- // // Item read successfully, add to collection - atomic.AddInt64(&itemsRead, 1) + if isFile { + atomic.AddInt64(&itemsRead, 1) + } else { + atomic.AddInt64(&dirsRead, 1) + } + // byteCount iteration atomic.AddInt64(&byteCount, itemSize) - oc.data <- &Item{ - id: itemName, - data: itemReader, - info: itemInfo, - } folderProgress <- struct{}{} }(item) } wg.Wait() - oc.reportAsCompleted(ctx, int(itemsRead), byteCount, errs) + oc.reportAsCompleted(ctx, int(itemsFound), int(itemsRead), byteCount, errs) } -func (oc *Collection) reportAsCompleted(ctx context.Context, itemsRead int, byteCount int64, errs error) { +func (oc *Collection) reportAsCompleted(ctx context.Context, itemsFound, itemsRead int, byteCount int64, errs error) { close(oc.data) status := support.CreateStatus(ctx, support.Backup, 1, // num folders (always 1) support.CollectionMetrics{ - Objects: len(oc.driveItems), // items to read, - Successes: itemsRead, // items read successfully, - TotalBytes: byteCount, // Number of bytes read in the operation, + Objects: itemsFound, // items to read, + Successes: itemsRead, // items read successfully, + TotalBytes: byteCount, // Number of bytes read in the operation, }, errs, oc.folderPath.Folder(), // Additional details diff --git a/src/internal/connector/onedrive/collection_test.go b/src/internal/connector/onedrive/collection_test.go index b608e9068..734009d72 100644 --- a/src/internal/connector/onedrive/collection_test.go +++ b/src/internal/connector/onedrive/collection_test.go @@ -2,8 +2,11 @@ package onedrive import ( "bytes" + "context" + "encoding/json" "io" "net/http" + "strings" "sync" "testing" "time" @@ -60,6 +63,14 @@ func (suite *CollectionUnitTestSuite) TestCollection() { testItemName = "itemName" testItemData = []byte("testdata") now = time.Now() + testItemMeta = Metadata{Permissions: []UserPermission{ + { + ID: "testMetaID", + Roles: []string{"read", "write"}, + Email: "email@provider.com", + Expiration: &now, + }, + }} ) type nst struct { @@ 
-157,13 +168,14 @@ func (suite *CollectionUnitTestSuite) TestCollection() { suite, suite.testStatusUpdater(&wg, &collStatus), test.source, - control.Options{}) + control.Options{ToggleFeatures: control.Toggles{EnablePermissionsBackup: true}}) require.NotNil(t, coll) assert.Equal(t, folderPath, coll.FullPath()) // Set a item reader, add an item and validate we get the item back mockItem := models.NewDriveItem() mockItem.SetId(&testItemID) + mockItem.SetFile(models.NewFile()) mockItem.SetName(&test.itemDeets.name) mockItem.SetSize(&test.itemDeets.size) mockItem.SetCreatedDateTime(&test.itemDeets.time) @@ -174,6 +186,18 @@ func (suite *CollectionUnitTestSuite) TestCollection() { } coll.itemReader = test.itemReader + coll.itemMetaReader = func(_ context.Context, + _ graph.Servicer, + _ string, + _ models.DriveItemable, + ) (io.ReadCloser, int, error) { + metaJSON, err := json.Marshal(testItemMeta) + if err != nil { + return nil, 0, err + } + + return io.NopCloser(bytes.NewReader(metaJSON)), len(metaJSON), nil + } // Read items from the collection wg.Add(1) @@ -184,28 +208,54 @@ func (suite *CollectionUnitTestSuite) TestCollection() { wg.Wait() + if test.source == OneDriveSource { + require.Len(t, readItems, 2) // .data and .meta + } else { + require.Len(t, readItems, 1) + } + + // Expect only 1 item + require.Equal(t, 1, collStatus.ObjectCount) + require.Equal(t, 1, collStatus.Successful) + // Validate item info and data readItem := readItems[0] readItemInfo := readItem.(data.StreamInfo) - readData, err := io.ReadAll(readItem.ToReader()) - require.NoError(t, err) - assert.Equal(t, testItemData, readData) - - // Expect only 1 item - require.Len(t, readItems, 1) - require.Equal(t, 1, collStatus.ObjectCount, "items iterated") - require.Equal(t, 1, collStatus.Successful, "items successful") - - assert.Equal(t, testItemName, readItem.UUID()) + if test.source == OneDriveSource { + assert.Equal(t, testItemName+DataFileSuffix, readItem.UUID()) + } else { + assert.Equal(t, 
testItemName, readItem.UUID()) + } require.Implements(t, (*data.StreamModTime)(nil), readItem) mt := readItem.(data.StreamModTime) assert.Equal(t, now, mt.ModTime()) + readData, err := io.ReadAll(readItem.ToReader()) + require.NoError(t, err) + name, parentPath := test.infoFrom(t, readItemInfo.Info()) + + assert.Equal(t, testItemData, readData) assert.Equal(t, testItemName, name) assert.Equal(t, driveFolderPath, parentPath) + + if test.source == OneDriveSource { + readItemMeta := readItems[1] + + assert.Equal(t, testItemName+MetaFileSuffix, readItemMeta.UUID()) + + readMetaData, err := io.ReadAll(readItemMeta.ToReader()) + require.NoError(t, err) + + tm, err := json.Marshal(testItemMeta) + if err != nil { + t.Fatal("unable to marshall test permissions", err) + } + + assert.Equal(t, tm, readMetaData) + } }) } } @@ -251,10 +301,11 @@ func (suite *CollectionUnitTestSuite) TestCollectionReadError() { suite, suite.testStatusUpdater(&wg, &collStatus), test.source, - control.Options{}) + control.Options{ToggleFeatures: control.Toggles{EnablePermissionsBackup: true}}) mockItem := models.NewDriveItem() mockItem.SetId(&testItemID) + mockItem.SetFile(models.NewFile()) mockItem.SetName(&name) mockItem.SetSize(&size) mockItem.SetCreatedDateTime(&now) @@ -265,6 +316,14 @@ func (suite *CollectionUnitTestSuite) TestCollectionReadError() { return details.ItemInfo{}, nil, assert.AnError } + coll.itemMetaReader = func(_ context.Context, + _ graph.Servicer, + _ string, + _ models.DriveItemable, + ) (io.ReadCloser, int, error) { + return io.NopCloser(strings.NewReader(`{}`)), 2, nil + } + collItem, ok := <-coll.Items() assert.True(t, ok) @@ -279,3 +338,87 @@ func (suite *CollectionUnitTestSuite) TestCollectionReadError() { }) } } + +func (suite *CollectionUnitTestSuite) TestCollectionDisablePermissionsBackup() { + table := []struct { + name string + source driveSource + }{ + { + name: "oneDrive", + source: OneDriveSource, + }, + } + for _, test := range table { + 
suite.T().Run(test.name, func(t *testing.T) { + var ( + testItemID = "fakeItemID" + testItemName = "Fake Item" + testItemSize = int64(10) + + collStatus = support.ConnectorOperationStatus{} + wg = sync.WaitGroup{} + ) + + wg.Add(1) + + folderPath, err := GetCanonicalPath("drive/driveID1/root:/folderPath", "a-tenant", "a-user", test.source) + require.NoError(t, err) + + coll := NewCollection( + graph.HTTPClient(graph.NoTimeout()), + folderPath, + "fakeDriveID", + suite, + suite.testStatusUpdater(&wg, &collStatus), + test.source, + control.Options{ToggleFeatures: control.Toggles{}}) + + now := time.Now() + mockItem := models.NewDriveItem() + mockItem.SetFile(models.NewFile()) + mockItem.SetId(&testItemID) + mockItem.SetName(&testItemName) + mockItem.SetSize(&testItemSize) + mockItem.SetCreatedDateTime(&now) + mockItem.SetLastModifiedDateTime(&now) + coll.Add(mockItem) + + coll.itemReader = func( + *http.Client, + models.DriveItemable, + ) (details.ItemInfo, io.ReadCloser, error) { + return details.ItemInfo{OneDrive: &details.OneDriveInfo{ItemName: "fakeName", Modified: time.Now()}}, + io.NopCloser(strings.NewReader("Fake Data!")), + nil + } + + coll.itemMetaReader = func(_ context.Context, + _ graph.Servicer, + _ string, + _ models.DriveItemable, + ) (io.ReadCloser, int, error) { + return io.NopCloser(strings.NewReader(`{"key": "value"}`)), 16, nil + } + + readItems := []data.Stream{} + for item := range coll.Items() { + readItems = append(readItems, item) + } + + wg.Wait() + + // Expect no items + require.Equal(t, 1, collStatus.ObjectCount) + require.Equal(t, 1, collStatus.Successful) + + for _, i := range readItems { + if strings.HasSuffix(i.UUID(), MetaFileSuffix) { + content, err := io.ReadAll(i.ToReader()) + require.NoError(t, err) + require.Equal(t, content, []byte("{}")) + } + } + }) + } +} diff --git a/src/internal/connector/onedrive/collections.go b/src/internal/connector/onedrive/collections.go index f83ce342a..50c5323d9 100644 --- 
a/src/internal/connector/onedrive/collections.go +++ b/src/internal/connector/onedrive/collections.go @@ -2,7 +2,9 @@ package onedrive import ( "context" + "encoding/json" "fmt" + "io" "net/http" "strings" @@ -63,6 +65,19 @@ type Collections struct { // for a OneDrive folder CollectionMap map[string]data.Collection + // Not the most ideal, but allows us to change the pager function for testing + // as needed. This will allow us to mock out some scenarios during testing. + drivePagerFunc func( + source driveSource, + servicer graph.Servicer, + resourceOwner string, + fields []string, + ) (drivePager, error) + itemPagerFunc func( + servicer graph.Servicer, + driveID, link string, + ) itemPager + // Track stats from drive enumeration. Represents the items backed up. NumItems int NumFiles int @@ -80,23 +95,169 @@ func NewCollections( ctrlOpts control.Options, ) *Collections { return &Collections{ - itemClient: itemClient, - tenant: tenant, - resourceOwner: resourceOwner, - source: source, - matcher: matcher, - CollectionMap: map[string]data.Collection{}, - service: service, - statusUpdater: statusUpdater, - ctrl: ctrlOpts, + itemClient: itemClient, + tenant: tenant, + resourceOwner: resourceOwner, + source: source, + matcher: matcher, + CollectionMap: map[string]data.Collection{}, + drivePagerFunc: PagerForSource, + itemPagerFunc: defaultItemPager, + service: service, + statusUpdater: statusUpdater, + ctrl: ctrlOpts, } } +func deserializeMetadata( + ctx context.Context, + cols []data.Collection, +) (map[string]string, map[string]map[string]string, error) { + logger.Ctx(ctx).Infow( + "deserialzing previous backup metadata", + "num_collections", + len(cols), + ) + + prevDeltas := map[string]string{} + prevFolders := map[string]map[string]string{} + + for _, col := range cols { + items := col.Items() + + for breakLoop := false; !breakLoop; { + select { + case <-ctx.Done(): + return nil, nil, errors.Wrap(ctx.Err(), "deserialzing previous backup metadata") + + case item, ok 
:= <-items: + if !ok { + // End of collection items. + breakLoop = true + break + } + + var err error + + switch item.UUID() { + case graph.PreviousPathFileName: + err = deserializeMap(item.ToReader(), prevFolders) + + case graph.DeltaURLsFileName: + err = deserializeMap(item.ToReader(), prevDeltas) + + default: + logger.Ctx(ctx).Infow( + "skipping unknown metadata file", + "file_name", + item.UUID(), + ) + + continue + } + + if err == nil { + // Successful decode. + continue + } + + // This is conservative, but report an error if any of the items for + // any of the deserialized maps have duplicate drive IDs. This will + // cause the entire backup to fail, but it's not clear if higher + // layers would have caught this. Worst case if we don't handle this + // we end up in a situation where we're sourcing items from the wrong + // base in kopia wrapper. + if errors.Is(err, errExistingMapping) { + return nil, nil, errors.Wrapf( + err, + "deserializing metadata file %s", + item.UUID(), + ) + } + + logger.Ctx(ctx).Errorw( + "deserializing base backup metadata. Falling back to full backup for selected drives", + "error", + err, + "file_name", + item.UUID(), + ) + } + } + + // Go through and remove partial results (i.e. path mapping but no delta URL + // or vice-versa). + for k, v := range prevDeltas { + // Remove entries with an empty delta token as it's not useful. + if len(v) == 0 { + delete(prevDeltas, k) + delete(prevFolders, k) + } + + // Remove entries without a folders map as we can't tell kopia the + // hierarchy changes. + if _, ok := prevFolders[k]; !ok { + delete(prevDeltas, k) + } + } + + for k := range prevFolders { + if _, ok := prevDeltas[k]; !ok { + delete(prevFolders, k) + } + } + } + + return prevDeltas, prevFolders, nil +} + +var errExistingMapping = errors.New("mapping already exists for same drive ID") + +// deserializeMap takes an reader and a map of already deserialized items and +// adds the newly deserialized items to alreadyFound. 
Items are only added to +// alreadyFound if none of the keys in the freshly deserialized map already +// exist in alreadyFound. reader is closed at the end of this function. +func deserializeMap[T any](reader io.ReadCloser, alreadyFound map[string]T) error { + defer reader.Close() + + tmp := map[string]T{} + + err := json.NewDecoder(reader).Decode(&tmp) + if err != nil { + return errors.Wrap(err, "deserializing file contents") + } + + var duplicate bool + + for k := range tmp { + if _, ok := alreadyFound[k]; ok { + duplicate = true + break + } + } + + if duplicate { + return errors.WithStack(errExistingMapping) + } + + maps.Copy(alreadyFound, tmp) + + return nil +} + // Retrieves drive data as set of `data.Collections` and a set of item names to // be excluded from the upcoming backup. -func (c *Collections) Get(ctx context.Context) ([]data.Collection, map[string]struct{}, error) { +func (c *Collections) Get( + ctx context.Context, + prevMetadata []data.Collection, +) ([]data.Collection, map[string]struct{}, error) { + _, _, err := deserializeMetadata(ctx, prevMetadata) + if err != nil { + return nil, nil, err + } + // Enumerate drives for the specified resourceOwner - pager, err := PagerForSource(c.source, c.service, c.resourceOwner, nil) + pager, err := c.drivePagerFunc(c.source, c.service, c.resourceOwner, nil) if err != nil { return nil, nil, err } @@ -128,7 +289,11 @@ func (c *Collections) Get(ctx context.Context) ([]data.Collection, map[string]st delta, paths, excluded, err := collectItems( ctx, - c.service, + c.itemPagerFunc( + c.service, + driveID, + "", + ), driveID, driveName, c.UpdateCollections, @@ -137,17 +302,21 @@ func (c *Collections) Get(ctx context.Context) ([]data.Collection, map[string]st return nil, nil, err } + // It's alright to have an empty folders map (i.e. no folders found) but not + // an empty delta token. This is because when deserializing the metadata we + // remove entries for which there is no corresponding delta token/folder. 
If + // we leave empty delta tokens then we may end up setting the State field + // for collections when not actually getting delta results. if len(delta) > 0 { deltaURLs[driveID] = delta } - if len(paths) > 0 { - folderPaths[driveID] = map[string]string{} - - for id, p := range paths { - folderPaths[driveID][id] = p - } - } + // Avoid the edge case where there's no paths but we do have a valid delta + // token. We can accomplish this by adding an empty paths map for this + // drive. If we don't have this then the next backup won't use the delta + // token because it thinks the folder paths weren't persisted. + folderPaths[driveID] = map[string]string{} + maps.Copy(folderPaths[driveID], paths) maps.Copy(excludedItems, excluded) } @@ -261,6 +430,12 @@ func (c *Collections) UpdateCollections( // already created and partially populated. updatePath(newPaths, *item.GetId(), folderPath.String()) + if c.source != OneDriveSource { + continue + } + + fallthrough + case item.GetFile() != nil: if item.GetDeleted() != nil { excluded[*item.GetId()] = struct{}{} @@ -276,6 +451,7 @@ func (c *Collections) UpdateCollections( // the exclude list. col, found := c.CollectionMap[collectionPath.String()] + if !found { // TODO(ashmrtn): Compare old and new path and set collection state // accordingly. @@ -290,13 +466,17 @@ func (c *Collections) UpdateCollections( c.CollectionMap[collectionPath.String()] = col c.NumContainers++ - c.NumItems++ } collection := col.(*Collection) collection.Add(item) - c.NumFiles++ + c.NumItems++ + if item.GetFile() != nil { + // This is necessary as we have a fallthrough for + // folders and packages + c.NumFiles++ + } default: return errors.Errorf("item type not supported. 
item name : %s", *item.GetName()) diff --git a/src/internal/connector/onedrive/collections_test.go b/src/internal/connector/onedrive/collections_test.go index b69253918..f784bad62 100644 --- a/src/internal/connector/onedrive/collections_test.go +++ b/src/internal/connector/onedrive/collections_test.go @@ -1,9 +1,11 @@ package onedrive import ( + "context" "strings" "testing" + "github.com/google/uuid" "github.com/microsoftgraph/msgraph-sdk-go/models" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -11,8 +13,12 @@ import ( "golang.org/x/exp/maps" "github.com/alcionai/corso/src/internal/connector/graph" + gapi "github.com/alcionai/corso/src/internal/connector/graph/api" + "github.com/alcionai/corso/src/internal/connector/support" + "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/pkg/control" + "github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/selectors" ) @@ -133,7 +139,7 @@ func (suite *OneDriveCollectionsSuite) TestUpdateCollections() { user, testBaseDrivePath, ), - expectedItemCount: 2, + expectedItemCount: 1, expectedFileCount: 1, expectedContainerCount: 1, // Root folder is skipped since it's always present. 
@@ -145,10 +151,15 @@ func (suite *OneDriveCollectionsSuite) TestUpdateCollections() { items: []models.DriveItemable{ driveItem("folder", "folder", testBaseDrivePath, false, true, false), }, - inputFolderMap: map[string]string{}, - scope: anyFolder, - expect: assert.NoError, - expectedCollectionPaths: []string{}, + inputFolderMap: map[string]string{}, + scope: anyFolder, + expect: assert.NoError, + expectedCollectionPaths: expectedPathAsSlice( + suite.T(), + tenant, + user, + testBaseDrivePath, + ), expectedMetadataPaths: map[string]string{ "folder": expectedPathAsSlice( suite.T(), @@ -157,17 +168,24 @@ func (suite *OneDriveCollectionsSuite) TestUpdateCollections() { testBaseDrivePath+"/folder", )[0], }, - expectedExcludes: map[string]struct{}{}, + expectedItemCount: 1, + expectedContainerCount: 1, + expectedExcludes: map[string]struct{}{}, }, { testCase: "Single Package", items: []models.DriveItemable{ driveItem("package", "package", testBaseDrivePath, false, false, true), }, - inputFolderMap: map[string]string{}, - scope: anyFolder, - expect: assert.NoError, - expectedCollectionPaths: []string{}, + inputFolderMap: map[string]string{}, + scope: anyFolder, + expect: assert.NoError, + expectedCollectionPaths: expectedPathAsSlice( + suite.T(), + tenant, + user, + testBaseDrivePath, + ), expectedMetadataPaths: map[string]string{ "package": expectedPathAsSlice( suite.T(), @@ -176,7 +194,9 @@ func (suite *OneDriveCollectionsSuite) TestUpdateCollections() { testBaseDrivePath+"/package", )[0], }, - expectedExcludes: map[string]struct{}{}, + expectedItemCount: 1, + expectedContainerCount: 1, + expectedExcludes: map[string]struct{}{}, }, { testCase: "1 root file, 1 folder, 1 package, 2 files, 3 collections", @@ -198,7 +218,7 @@ func (suite *OneDriveCollectionsSuite) TestUpdateCollections() { testBaseDrivePath+folder, testBaseDrivePath+pkg, ), - expectedItemCount: 6, + expectedItemCount: 5, expectedFileCount: 3, expectedContainerCount: 3, expectedMetadataPaths: 
map[string]string{ @@ -232,23 +252,17 @@ func (suite *OneDriveCollectionsSuite) TestUpdateCollections() { inputFolderMap: map[string]string{}, scope: (&selectors.OneDriveBackup{}).Folders([]string{"folder"})[0], expect: assert.NoError, - expectedCollectionPaths: append( - expectedPathAsSlice( - suite.T(), - tenant, - user, - testBaseDrivePath+"/folder", - ), - expectedPathAsSlice( - suite.T(), - tenant, - user, - testBaseDrivePath+folderSub+folder, - )..., + expectedCollectionPaths: expectedPathAsSlice( + suite.T(), + tenant, + user, + testBaseDrivePath+"/folder", + testBaseDrivePath+folderSub, + testBaseDrivePath+folderSub+folder, ), expectedItemCount: 4, expectedFileCount: 2, - expectedContainerCount: 2, + expectedContainerCount: 3, // just "folder" isn't added here because the include check is done on the // parent path since we only check later if something is a folder or not. expectedMetadataPaths: map[string]string{ @@ -287,11 +301,12 @@ func (suite *OneDriveCollectionsSuite) TestUpdateCollections() { suite.T(), tenant, user, + testBaseDrivePath+folderSub, testBaseDrivePath+folderSub+folder, ), expectedItemCount: 2, expectedFileCount: 1, - expectedContainerCount: 1, + expectedContainerCount: 2, expectedMetadataPaths: map[string]string{ "folder2": expectedPathAsSlice( suite.T(), @@ -322,7 +337,7 @@ func (suite *OneDriveCollectionsSuite) TestUpdateCollections() { user, testBaseDrivePath+folderSub, ), - expectedItemCount: 2, + expectedItemCount: 1, expectedFileCount: 1, expectedContainerCount: 1, // No child folders for subfolder so nothing here. 
@@ -348,12 +363,17 @@ func (suite *OneDriveCollectionsSuite) TestUpdateCollections() { testBaseDrivePath+"/folder/subfolder", )[0], }, - scope: anyFolder, - expect: assert.NoError, - expectedCollectionPaths: []string{}, - expectedItemCount: 0, - expectedFileCount: 0, - expectedContainerCount: 0, + scope: anyFolder, + expect: assert.NoError, + expectedCollectionPaths: expectedPathAsSlice( + suite.T(), + tenant, + user, + testBaseDrivePath, + ), + expectedItemCount: 1, + expectedFileCount: 0, + expectedContainerCount: 1, expectedMetadataPaths: map[string]string{ "folder": expectedPathAsSlice( suite.T(), @@ -389,12 +409,17 @@ func (suite *OneDriveCollectionsSuite) TestUpdateCollections() { testBaseDrivePath+"/a-folder/subfolder", )[0], }, - scope: anyFolder, - expect: assert.NoError, - expectedCollectionPaths: []string{}, - expectedItemCount: 0, - expectedFileCount: 0, - expectedContainerCount: 0, + scope: anyFolder, + expect: assert.NoError, + expectedCollectionPaths: expectedPathAsSlice( + suite.T(), + tenant, + user, + testBaseDrivePath, + ), + expectedItemCount: 1, + expectedFileCount: 0, + expectedContainerCount: 1, expectedMetadataPaths: map[string]string{ "folder": expectedPathAsSlice( suite.T(), @@ -431,12 +456,17 @@ func (suite *OneDriveCollectionsSuite) TestUpdateCollections() { testBaseDrivePath+"/a-folder/subfolder", )[0], }, - scope: anyFolder, - expect: assert.NoError, - expectedCollectionPaths: []string{}, - expectedItemCount: 0, - expectedFileCount: 0, - expectedContainerCount: 0, + scope: anyFolder, + expect: assert.NoError, + expectedCollectionPaths: expectedPathAsSlice( + suite.T(), + tenant, + user, + testBaseDrivePath, + ), + expectedItemCount: 2, + expectedFileCount: 0, + expectedContainerCount: 1, expectedMetadataPaths: map[string]string{ "folder": expectedPathAsSlice( suite.T(), @@ -473,12 +503,17 @@ func (suite *OneDriveCollectionsSuite) TestUpdateCollections() { testBaseDrivePath+"/a-folder/subfolder", )[0], }, - scope: anyFolder, - expect: 
assert.NoError, - expectedCollectionPaths: []string{}, - expectedItemCount: 0, - expectedFileCount: 0, - expectedContainerCount: 0, + scope: anyFolder, + expect: assert.NoError, + expectedCollectionPaths: expectedPathAsSlice( + suite.T(), + tenant, + user, + testBaseDrivePath, + ), + expectedItemCount: 2, + expectedFileCount: 0, + expectedContainerCount: 1, expectedMetadataPaths: map[string]string{ "folder": expectedPathAsSlice( suite.T(), @@ -544,12 +579,17 @@ func (suite *OneDriveCollectionsSuite) TestUpdateCollections() { testBaseDrivePath+"/folder/subfolder", )[0], }, - scope: anyFolder, - expect: assert.NoError, - expectedCollectionPaths: []string{}, - expectedItemCount: 0, - expectedFileCount: 0, - expectedContainerCount: 0, + scope: anyFolder, + expect: assert.NoError, + expectedCollectionPaths: expectedPathAsSlice( + suite.T(), + tenant, + user, + testBaseDrivePath, + ), + expectedItemCount: 1, + expectedFileCount: 0, + expectedContainerCount: 1, expectedMetadataPaths: map[string]string{ "subfolder": expectedPathAsSlice( suite.T(), @@ -595,7 +635,7 @@ func (suite *OneDriveCollectionsSuite) TestUpdateCollections() { testFolderMatcher{tt.scope}, &MockGraphService{}, nil, - control.Options{}) + control.Options{ToggleFeatures: control.Toggles{EnablePermissionsBackup: true}}) err := c.UpdateCollections( ctx, @@ -621,13 +661,787 @@ func (suite *OneDriveCollectionsSuite) TestUpdateCollections() { } } -func driveItem(id string, name string, path string, isFile, isFolder, isPackage bool) models.DriveItemable { +func (suite *OneDriveCollectionsSuite) TestDeserializeMetadata() { + tenant := "a-tenant" + user := "a-user" + driveID1 := "1" + driveID2 := "2" + deltaURL1 := "url/1" + deltaURL2 := "url/2" + + folderID1 := "folder1" + folderID2 := "folder2" + path1 := "folder1/path" + path2 := "folder2/path" + + table := []struct { + name string + // Each function returns the set of files for a single data.Collection. 
+ cols []func() []graph.MetadataCollectionEntry + expectedDeltas map[string]string + expectedPaths map[string]map[string]string + errCheck assert.ErrorAssertionFunc + }{ + { + name: "SuccessOneDriveAllOneCollection", + cols: []func() []graph.MetadataCollectionEntry{ + func() []graph.MetadataCollectionEntry { + return []graph.MetadataCollectionEntry{ + graph.NewMetadataEntry( + graph.DeltaURLsFileName, + map[string]string{driveID1: deltaURL1}, + ), + graph.NewMetadataEntry( + graph.PreviousPathFileName, + map[string]map[string]string{ + driveID1: { + folderID1: path1, + }, + }, + ), + } + }, + }, + expectedDeltas: map[string]string{ + driveID1: deltaURL1, + }, + expectedPaths: map[string]map[string]string{ + driveID1: { + folderID1: path1, + }, + }, + errCheck: assert.NoError, + }, + { + name: "MissingPaths", + cols: []func() []graph.MetadataCollectionEntry{ + func() []graph.MetadataCollectionEntry { + return []graph.MetadataCollectionEntry{ + graph.NewMetadataEntry( + graph.DeltaURLsFileName, + map[string]string{driveID1: deltaURL1}, + ), + } + }, + }, + expectedDeltas: map[string]string{}, + expectedPaths: map[string]map[string]string{}, + errCheck: assert.NoError, + }, + { + name: "MissingDeltas", + cols: []func() []graph.MetadataCollectionEntry{ + func() []graph.MetadataCollectionEntry { + return []graph.MetadataCollectionEntry{ + graph.NewMetadataEntry( + graph.PreviousPathFileName, + map[string]map[string]string{ + driveID1: { + folderID1: path1, + }, + }, + ), + } + }, + }, + expectedDeltas: map[string]string{}, + expectedPaths: map[string]map[string]string{}, + errCheck: assert.NoError, + }, + { + // An empty path map but valid delta results in metadata being returned + // since it's possible to have a drive with no folders other than the + // root. 
+ name: "EmptyPaths", + cols: []func() []graph.MetadataCollectionEntry{ + func() []graph.MetadataCollectionEntry { + return []graph.MetadataCollectionEntry{ + graph.NewMetadataEntry( + graph.DeltaURLsFileName, + map[string]string{driveID1: deltaURL1}, + ), + graph.NewMetadataEntry( + graph.PreviousPathFileName, + map[string]map[string]string{ + driveID1: {}, + }, + ), + } + }, + }, + expectedDeltas: map[string]string{driveID1: deltaURL1}, + expectedPaths: map[string]map[string]string{driveID1: {}}, + errCheck: assert.NoError, + }, + { + // An empty delta map but valid path results in no metadata for that drive + // being returned since the path map is only useful if we have a valid + // delta. + name: "EmptyDeltas", + cols: []func() []graph.MetadataCollectionEntry{ + func() []graph.MetadataCollectionEntry { + return []graph.MetadataCollectionEntry{ + graph.NewMetadataEntry( + graph.DeltaURLsFileName, + map[string]string{ + driveID1: "", + }, + ), + graph.NewMetadataEntry( + graph.PreviousPathFileName, + map[string]map[string]string{ + driveID1: { + folderID1: path1, + }, + }, + ), + } + }, + }, + expectedDeltas: map[string]string{}, + expectedPaths: map[string]map[string]string{}, + errCheck: assert.NoError, + }, + { + name: "SuccessTwoDrivesTwoCollections", + cols: []func() []graph.MetadataCollectionEntry{ + func() []graph.MetadataCollectionEntry { + return []graph.MetadataCollectionEntry{ + graph.NewMetadataEntry( + graph.DeltaURLsFileName, + map[string]string{driveID1: deltaURL1}, + ), + graph.NewMetadataEntry( + graph.PreviousPathFileName, + map[string]map[string]string{ + driveID1: { + folderID1: path1, + }, + }, + ), + } + }, + func() []graph.MetadataCollectionEntry { + return []graph.MetadataCollectionEntry{ + graph.NewMetadataEntry( + graph.DeltaURLsFileName, + map[string]string{driveID2: deltaURL2}, + ), + graph.NewMetadataEntry( + graph.PreviousPathFileName, + map[string]map[string]string{ + driveID2: { + folderID2: path2, + }, + }, + ), + } + }, + }, + 
expectedDeltas: map[string]string{ + driveID1: deltaURL1, + driveID2: deltaURL2, + }, + expectedPaths: map[string]map[string]string{ + driveID1: { + folderID1: path1, + }, + driveID2: { + folderID2: path2, + }, + }, + errCheck: assert.NoError, + }, + { + // Bad formats are logged but skip adding entries to the maps and don't + // return an error. + name: "BadFormat", + cols: []func() []graph.MetadataCollectionEntry{ + func() []graph.MetadataCollectionEntry { + return []graph.MetadataCollectionEntry{ + graph.NewMetadataEntry( + graph.PreviousPathFileName, + map[string]string{driveID1: deltaURL1}, + ), + } + }, + }, + expectedDeltas: map[string]string{}, + expectedPaths: map[string]map[string]string{}, + errCheck: assert.NoError, + }, + { + // Unexpected files are logged and skipped. They don't cause an error to + // be returned. + name: "BadFileName", + cols: []func() []graph.MetadataCollectionEntry{ + func() []graph.MetadataCollectionEntry { + return []graph.MetadataCollectionEntry{ + graph.NewMetadataEntry( + graph.DeltaURLsFileName, + map[string]string{driveID1: deltaURL1}, + ), + graph.NewMetadataEntry( + graph.PreviousPathFileName, + map[string]map[string]string{ + driveID1: { + folderID1: path1, + }, + }, + ), + graph.NewMetadataEntry( + "foo", + map[string]string{driveID1: deltaURL1}, + ), + } + }, + }, + expectedDeltas: map[string]string{ + driveID1: deltaURL1, + }, + expectedPaths: map[string]map[string]string{ + driveID1: { + folderID1: path1, + }, + }, + errCheck: assert.NoError, + }, + { + name: "DriveAlreadyFound_Paths", + cols: []func() []graph.MetadataCollectionEntry{ + func() []graph.MetadataCollectionEntry { + return []graph.MetadataCollectionEntry{ + graph.NewMetadataEntry( + graph.DeltaURLsFileName, + map[string]string{driveID1: deltaURL1}, + ), + graph.NewMetadataEntry( + graph.PreviousPathFileName, + map[string]map[string]string{ + driveID1: { + folderID1: path1, + }, + }, + ), + } + }, + func() []graph.MetadataCollectionEntry { + return 
[]graph.MetadataCollectionEntry{ + graph.NewMetadataEntry( + graph.PreviousPathFileName, + map[string]map[string]string{ + driveID1: { + folderID2: path2, + }, + }, + ), + } + }, + }, + expectedDeltas: nil, + expectedPaths: nil, + errCheck: assert.Error, + }, + { + name: "DriveAlreadyFound_Deltas", + cols: []func() []graph.MetadataCollectionEntry{ + func() []graph.MetadataCollectionEntry { + return []graph.MetadataCollectionEntry{ + graph.NewMetadataEntry( + graph.DeltaURLsFileName, + map[string]string{driveID1: deltaURL1}, + ), + graph.NewMetadataEntry( + graph.PreviousPathFileName, + map[string]map[string]string{ + driveID1: { + folderID1: path1, + }, + }, + ), + } + }, + func() []graph.MetadataCollectionEntry { + return []graph.MetadataCollectionEntry{ + graph.NewMetadataEntry( + graph.DeltaURLsFileName, + map[string]string{driveID1: deltaURL2}, + ), + } + }, + }, + expectedDeltas: nil, + expectedPaths: nil, + errCheck: assert.Error, + }, + } + + for _, test := range table { + suite.T().Run(test.name, func(t *testing.T) { + ctx, flush := tester.NewContext() + defer flush() + + cols := []data.Collection{} + + for _, c := range test.cols { + mc, err := graph.MakeMetadataCollection( + tenant, + user, + path.OneDriveService, + path.FilesCategory, + c(), + func(*support.ConnectorOperationStatus) {}, + ) + require.NoError(t, err) + + cols = append(cols, mc) + } + + deltas, paths, err := deserializeMetadata(ctx, cols) + test.errCheck(t, err) + + assert.Equal(t, test.expectedDeltas, deltas) + assert.Equal(t, test.expectedPaths, paths) + }) + } +} + +type mockDeltaPageLinker struct { + link *string + delta *string +} + +func (pl *mockDeltaPageLinker) GetOdataNextLink() *string { + return pl.link +} + +func (pl *mockDeltaPageLinker) GetOdataDeltaLink() *string { + return pl.delta +} + +type deltaPagerResult struct { + items []models.DriveItemable + nextLink *string + deltaLink *string + err error +} + +type mockItemPager struct { + // DriveID -> set of return values for 
queries for that drive. + toReturn []deltaPagerResult + getIdx int +} + +func (p *mockItemPager) GetPage(context.Context) (gapi.DeltaPageLinker, error) { + if len(p.toReturn) <= p.getIdx { + return nil, assert.AnError + } + + idx := p.getIdx + p.getIdx++ + + return &mockDeltaPageLinker{ + p.toReturn[idx].nextLink, + p.toReturn[idx].deltaLink, + }, p.toReturn[idx].err +} + +func (p *mockItemPager) SetNext(string) {} + +func (p *mockItemPager) ValuesIn(gapi.DeltaPageLinker) ([]models.DriveItemable, error) { + idx := p.getIdx + if idx > 0 { + // Return values lag by one since we increment in GetPage(). + idx-- + } + + if len(p.toReturn) <= idx { + return nil, assert.AnError + } + + return p.toReturn[idx].items, nil +} + +func (suite *OneDriveCollectionsSuite) TestGet() { + anyFolder := (&selectors.OneDriveBackup{}).Folders(selectors.Any())[0] + + tenant := "a-tenant" + user := "a-user" + + metadataPath, err := path.Builder{}.ToServiceCategoryMetadataPath( + tenant, + user, + path.OneDriveService, + path.FilesCategory, + false, + ) + require.NoError(suite.T(), err, "making metadata path") + + rootFolderPath := expectedPathAsSlice( + suite.T(), + tenant, + user, + testBaseDrivePath, + )[0] + folderPath := expectedPathAsSlice( + suite.T(), + tenant, + user, + testBaseDrivePath+"/folder", + )[0] + + empty := "" + next := "next" + delta := "delta1" + delta2 := "delta2" + + driveID1 := uuid.NewString() + drive1 := models.NewDrive() + drive1.SetId(&driveID1) + drive1.SetName(&driveID1) + + driveID2 := uuid.NewString() + drive2 := models.NewDrive() + drive2.SetId(&driveID2) + drive2.SetName(&driveID2) + + driveBasePath2 := "drive/driveID2/root:" + + rootFolderPath2 := expectedPathAsSlice( + suite.T(), + tenant, + user, + driveBasePath2, + )[0] + folderPath2 := expectedPathAsSlice( + suite.T(), + tenant, + user, + driveBasePath2+"/folder", + )[0] + + table := []struct { + name string + drives []models.Driveable + items map[string][]deltaPagerResult + errCheck 
assert.ErrorAssertionFunc + // Collection name -> set of item IDs. We can't check item data because + // that's not mocked out. Metadata is checked separately. + expectedCollections map[string][]string + expectedDeltaURLs map[string]string + expectedFolderPaths map[string]map[string]string + expectedDelList map[string]struct{} + }{ + { + name: "OneDrive_OneItemPage_DelFileOnly_NoFolders_NoErrors", + drives: []models.Driveable{drive1}, + items: map[string][]deltaPagerResult{ + driveID1: { + { + items: []models.DriveItemable{ + delItem("file", testBaseDrivePath, true, false, false), + }, + deltaLink: &delta, + }, + }, + }, + errCheck: assert.NoError, + expectedCollections: map[string][]string{}, + expectedDeltaURLs: map[string]string{ + driveID1: delta, + }, + expectedFolderPaths: map[string]map[string]string{ + // We need an empty map here so deserializing metadata knows the delta + // token for this drive is valid. + driveID1: {}, + }, + expectedDelList: map[string]struct{}{ + "file": {}, + }, + }, + { + name: "OneDrive_OneItemPage_NoFolders_NoErrors", + drives: []models.Driveable{drive1}, + items: map[string][]deltaPagerResult{ + driveID1: { + { + items: []models.DriveItemable{ + driveItem("file", "file", testBaseDrivePath, true, false, false), + }, + deltaLink: &delta, + }, + }, + }, + errCheck: assert.NoError, + expectedCollections: map[string][]string{ + expectedPathAsSlice( + suite.T(), + tenant, + user, + testBaseDrivePath, + )[0]: {"file"}, + }, + expectedDeltaURLs: map[string]string{ + driveID1: delta, + }, + expectedFolderPaths: map[string]map[string]string{ + // We need an empty map here so deserializing metadata knows the delta + // token for this drive is valid. 
+ driveID1: {}, + }, + expectedDelList: map[string]struct{}{}, + }, + { + name: "OneDrive_OneItemPage_NoErrors", + drives: []models.Driveable{drive1}, + items: map[string][]deltaPagerResult{ + driveID1: { + { + items: []models.DriveItemable{ + driveItem("folder", "folder", testBaseDrivePath, false, true, false), + driveItem("file", "file", testBaseDrivePath+"/folder", true, false, false), + }, + deltaLink: &delta, + }, + }, + }, + errCheck: assert.NoError, + expectedCollections: map[string][]string{ + folderPath: {"file"}, + rootFolderPath: {"folder"}, + }, + expectedDeltaURLs: map[string]string{ + driveID1: delta, + }, + expectedFolderPaths: map[string]map[string]string{ + driveID1: { + "folder": folderPath, + }, + }, + expectedDelList: map[string]struct{}{}, + }, + { + name: "OneDrive_OneItemPage_EmptyDelta_NoErrors", + drives: []models.Driveable{drive1}, + items: map[string][]deltaPagerResult{ + driveID1: { + { + items: []models.DriveItemable{ + driveItem("folder", "folder", testBaseDrivePath, false, true, false), + driveItem("file", "file", testBaseDrivePath+"/folder", true, false, false), + }, + deltaLink: &empty, + }, + }, + }, + errCheck: assert.NoError, + expectedCollections: map[string][]string{ + folderPath: {"file"}, + rootFolderPath: {"folder"}, + }, + expectedDeltaURLs: map[string]string{}, + expectedFolderPaths: map[string]map[string]string{}, + expectedDelList: map[string]struct{}{}, + }, + { + name: "OneDrive_TwoItemPages_NoErrors", + drives: []models.Driveable{drive1}, + items: map[string][]deltaPagerResult{ + driveID1: { + { + items: []models.DriveItemable{ + driveItem("folder", "folder", testBaseDrivePath, false, true, false), + driveItem("file", "file", testBaseDrivePath+"/folder", true, false, false), + }, + nextLink: &next, + }, + { + items: []models.DriveItemable{ + driveItem("folder", "folder", testBaseDrivePath, false, true, false), + driveItem("file2", "file2", testBaseDrivePath+"/folder", true, false, false), + }, + deltaLink: &delta, + 
}, + }, + }, + errCheck: assert.NoError, + expectedCollections: map[string][]string{ + folderPath: {"file", "file2"}, + rootFolderPath: {"folder"}, + }, + expectedDeltaURLs: map[string]string{ + driveID1: delta, + }, + expectedFolderPaths: map[string]map[string]string{ + driveID1: { + "folder": folderPath, + }, + }, + expectedDelList: map[string]struct{}{}, + }, + { + name: "TwoDrives_OneItemPageEach_NoErrors", + drives: []models.Driveable{ + drive1, + drive2, + }, + items: map[string][]deltaPagerResult{ + driveID1: { + { + items: []models.DriveItemable{ + driveItem("folder", "folder", testBaseDrivePath, false, true, false), + driveItem("file", "file", testBaseDrivePath+"/folder", true, false, false), + }, + deltaLink: &delta, + }, + }, + driveID2: { + { + items: []models.DriveItemable{ + driveItem("folder", "folder", driveBasePath2, false, true, false), + driveItem("file", "file", driveBasePath2+"/folder", true, false, false), + }, + deltaLink: &delta2, + }, + }, + }, + errCheck: assert.NoError, + expectedCollections: map[string][]string{ + folderPath: {"file"}, + folderPath2: {"file"}, + rootFolderPath: {"folder"}, + rootFolderPath2: {"folder"}, + }, + expectedDeltaURLs: map[string]string{ + driveID1: delta, + driveID2: delta2, + }, + expectedFolderPaths: map[string]map[string]string{ + driveID1: { + "folder": folderPath, + }, + driveID2: { + "folder": folderPath2, + }, + }, + expectedDelList: map[string]struct{}{}, + }, + { + name: "OneDrive_OneItemPage_Errors", + drives: []models.Driveable{drive1}, + items: map[string][]deltaPagerResult{ + driveID1: { + { + err: assert.AnError, + }, + }, + }, + errCheck: assert.Error, + expectedCollections: nil, + expectedDeltaURLs: nil, + expectedFolderPaths: nil, + expectedDelList: nil, + }, + } + for _, test := range table { + suite.T().Run(test.name, func(t *testing.T) { + ctx, flush := tester.NewContext() + defer flush() + + drivePagerFunc := func( + source driveSource, + servicer graph.Servicer, + resourceOwner string, + 
fields []string, + ) (drivePager, error) { + return &mockDrivePager{ + toReturn: []pagerResult{ + { + drives: test.drives, + }, + }, + }, nil + } + + itemPagerFunc := func( + servicer graph.Servicer, + driveID, link string, + ) itemPager { + return &mockItemPager{ + toReturn: test.items[driveID], + } + } + + c := NewCollections( + graph.HTTPClient(graph.NoTimeout()), + tenant, + user, + OneDriveSource, + testFolderMatcher{anyFolder}, + &MockGraphService{}, + func(*support.ConnectorOperationStatus) {}, + control.Options{ToggleFeatures: control.Toggles{EnablePermissionsBackup: true}}, + ) + c.drivePagerFunc = drivePagerFunc + c.itemPagerFunc = itemPagerFunc + + // TODO(ashmrtn): Allow passing previous metadata. + cols, _, err := c.Get(ctx, nil) + test.errCheck(t, err) + + if err != nil { + return + } + + for _, baseCol := range cols { + folderPath := baseCol.FullPath().String() + if folderPath == metadataPath.String() { + deltas, paths, err := deserializeMetadata(ctx, []data.Collection{baseCol}) + if !assert.NoError(t, err, "deserializing metadata") { + continue + } + + assert.Equal(t, test.expectedDeltaURLs, deltas) + assert.Equal(t, test.expectedFolderPaths, paths) + + continue + } + + // TODO(ashmrtn): We should really be getting items in the collection + // via the Items() channel, but we don't have a way to mock out the + // actual item fetch yet (mostly wiring issues). The lack of that makes + // this check a bit more bittle since internal details can change. + col, ok := baseCol.(*Collection) + require.True(t, ok, "getting onedrive.Collection handle") + + itemIDs := make([]string, 0, len(col.driveItems)) + + for id := range col.driveItems { + itemIDs = append(itemIDs, id) + } + + assert.ElementsMatch(t, test.expectedCollections[folderPath], itemIDs) + } + + // TODO(ashmrtn): Uncomment this when we begin return the set of items to + // remove from the upcoming backup. 
+ // assert.Equal(t, test.expectedDelList, delList) + }) + } +} + +func driveItem( + id string, + name string, + parentPath string, + isFile, isFolder, isPackage bool, +) models.DriveItemable { item := models.NewDriveItem() item.SetName(&name) item.SetId(&id) parentReference := models.NewItemReference() - parentReference.SetPath(&path) + parentReference.SetPath(&parentPath) item.SetParentReference(parentReference) switch { @@ -644,13 +1458,17 @@ func driveItem(id string, name string, path string, isFile, isFolder, isPackage // delItem creates a DriveItemable that is marked as deleted. path must be set // to the base drive path. -func delItem(id string, path string, isFile, isFolder, isPackage bool) models.DriveItemable { +func delItem( + id string, + parentPath string, + isFile, isFolder, isPackage bool, +) models.DriveItemable { item := models.NewDriveItem() item.SetId(&id) item.SetDeleted(models.NewDeleted()) parentReference := models.NewItemReference() - parentReference.SetPath(&path) + parentReference.SetPath(&parentPath) item.SetParentReference(parentReference) switch { diff --git a/src/internal/connector/onedrive/drive.go b/src/internal/connector/onedrive/drive.go index 6270ec08a..ebcbe8b6f 100644 --- a/src/internal/connector/onedrive/drive.go +++ b/src/internal/connector/onedrive/drive.go @@ -7,7 +7,6 @@ import ( "time" msdrive "github.com/microsoftgraph/msgraph-sdk-go/drive" - msdrives "github.com/microsoftgraph/msgraph-sdk-go/drives" "github.com/microsoftgraph/msgraph-sdk-go/models" "github.com/microsoftgraph/msgraph-sdk-go/models/odataerrors" "github.com/pkg/errors" @@ -81,7 +80,7 @@ func drives( page, err = pager.GetPage(ctx) if err != nil { // Various error handling. May return an error or perform a retry. 
- detailedError := support.ConnectorStackErrorTrace(err) + detailedError := err.Error() if strings.Contains(detailedError, userMysiteURLNotFound) || strings.Contains(detailedError, userMysiteNotFound) { logger.Ctx(ctx).Infof("resource owner does not have a drive") @@ -135,11 +134,42 @@ type itemCollector func( excluded map[string]struct{}, ) error +type itemPager interface { + GetPage(context.Context) (gapi.DeltaPageLinker, error) + SetNext(nextLink string) + ValuesIn(gapi.DeltaPageLinker) ([]models.DriveItemable, error) +} + +func defaultItemPager( + servicer graph.Servicer, + driveID, link string, +) itemPager { + return api.NewItemPager( + servicer, + driveID, + link, + []string{ + "content.downloadUrl", + "createdBy", + "createdDateTime", + "file", + "folder", + "id", + "lastModifiedDateTime", + "name", + "package", + "parentReference", + "root", + "size", + }, + ) +} + // collectItems will enumerate all items in the specified drive and hand them to the // provided `collector` method func collectItems( ctx context.Context, - service graph.Servicer, + pager itemPager, driveID, driveName string, collector itemCollector, ) (string, map[string]string, map[string]struct{}, error) { @@ -154,34 +184,8 @@ func collectItems( maps.Copy(newPaths, oldPaths) - // TODO: Specify a timestamp in the delta query - // https://docs.microsoft.com/en-us/graph/api/driveitem-delta? 
- // view=graph-rest-1.0&tabs=http#example-4-retrieving-delta-results-using-a-timestamp - builder := service.Client().DrivesById(driveID).Root().Delta() - pageCount := int32(999) // max we can do is 999 - requestFields := []string{ - "content.downloadUrl", - "createdBy", - "createdDateTime", - "file", - "folder", - "id", - "lastModifiedDateTime", - "name", - "package", - "parentReference", - "root", - "size", - } - requestConfig := &msdrives.ItemRootDeltaRequestBuilderGetRequestConfiguration{ - QueryParameters: &msdrives.ItemRootDeltaRequestBuilderGetQueryParameters{ - Top: &pageCount, - Select: requestFields, - }, - } - for { - r, err := builder.Get(ctx, requestConfig) + page, err := pager.GetPage(ctx) if err != nil { return "", nil, nil, errors.Wrapf( err, @@ -190,23 +194,29 @@ func collectItems( ) } - err = collector(ctx, driveID, driveName, r.GetValue(), oldPaths, newPaths, excluded) + vals, err := pager.ValuesIn(page) + if err != nil { + return "", nil, nil, errors.Wrap(err, "extracting items from response") + } + + err = collector(ctx, driveID, driveName, vals, oldPaths, newPaths, excluded) if err != nil { return "", nil, nil, err } - if r.GetOdataDeltaLink() != nil && len(*r.GetOdataDeltaLink()) > 0 { - newDeltaURL = *r.GetOdataDeltaLink() + nextLink, deltaLink := gapi.NextAndDeltaLink(page) + + if len(deltaLink) > 0 { + newDeltaURL = deltaLink } // Check if there are more items - nextLink := r.GetOdataNextLink() - if nextLink == nil { + if len(nextLink) == 0 { break } - logger.Ctx(ctx).Debugf("Found %s nextLink", *nextLink) - builder = msdrives.NewItemRootDeltaRequestBuilder(*nextLink, service.Adapter()) + logger.Ctx(ctx).Debugw("Found nextLink", "link", nextLink) + pager.SetNext(nextLink) } return newDeltaURL, newPaths, excluded, nil @@ -226,7 +236,16 @@ func getFolder( rawURL := fmt.Sprintf(itemByPathRawURLFmt, driveID, parentFolderID, folderName) builder := msdrive.NewItemsDriveItemItemRequestBuilder(rawURL, service.Adapter()) - foundItem, err := 
builder.Get(ctx, nil) + var ( + foundItem models.DriveItemable + err error + ) + + err = graph.RunWithRetry(func() error { + foundItem, err = builder.Get(ctx, nil) + return err + }) + if err != nil { var oDataError *odataerrors.ODataError if errors.As(err, &oDataError) && @@ -318,7 +337,11 @@ func GetAllFolders( for _, d := range drives { _, _, _, err = collectItems( ctx, - gs, + defaultItemPager( + gs, + *d.GetId(), + "", + ), *d.GetId(), *d.GetName(), func( diff --git a/src/internal/connector/onedrive/drive_test.go b/src/internal/connector/onedrive/drive_test.go index 36fef30ab..5eeda6aac 100644 --- a/src/internal/connector/onedrive/drive_test.go +++ b/src/internal/connector/onedrive/drive_test.go @@ -15,6 +15,7 @@ import ( "github.com/alcionai/corso/src/internal/common" "github.com/alcionai/corso/src/internal/connector/graph" "github.com/alcionai/corso/src/internal/connector/graph/api" + "github.com/alcionai/corso/src/internal/connector/support" "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/logger" @@ -76,6 +77,15 @@ func TestOneDriveUnitSuite(t *testing.T) { suite.Run(t, new(OneDriveUnitSuite)) } +func odErr(code string) *odataerrors.ODataError { + odErr := &odataerrors.ODataError{} + merr := odataerrors.MainError{} + merr.SetCode(&code) + odErr.SetError(&merr) + + return odErr +} + func (suite *OneDriveUnitSuite) TestDrives() { numDriveResults := 4 emptyLink := "" @@ -84,26 +94,18 @@ func (suite *OneDriveUnitSuite) TestDrives() { // These errors won't be the "correct" format when compared to what graph // returns, but they're close enough to have the same info when the inner // details are extracted via support package. 
- tmp := userMysiteURLNotFound - tmpMySiteURLNotFound := odataerrors.NewMainError() - tmpMySiteURLNotFound.SetMessage(&tmp) - - mySiteURLNotFound := odataerrors.NewODataError() - mySiteURLNotFound.SetError(tmpMySiteURLNotFound) - - tmp2 := userMysiteNotFound - tmpMySiteNotFound := odataerrors.NewMainError() - tmpMySiteNotFound.SetMessage(&tmp2) - - mySiteNotFound := odataerrors.NewODataError() - mySiteNotFound.SetError(tmpMySiteNotFound) - - tmp3 := contextDeadlineExceeded - tmpDeadlineExceeded := odataerrors.NewMainError() - tmpDeadlineExceeded.SetMessage(&tmp3) - - deadlineExceeded := odataerrors.NewODataError() - deadlineExceeded.SetError(tmpDeadlineExceeded) + mySiteURLNotFound := support.ConnectorStackErrorTraceWrap( + odErr(userMysiteURLNotFound), + "maximum retries or unretryable", + ) + mySiteNotFound := support.ConnectorStackErrorTraceWrap( + odErr(userMysiteNotFound), + "maximum retries or unretryable", + ) + deadlineExceeded := support.ConnectorStackErrorTraceWrap( + odErr(contextDeadlineExceeded), + "maximum retries or unretryable", + ) resultDrives := make([]models.Driveable, 0, numDriveResults) @@ -462,8 +464,8 @@ func (suite *OneDriveSuite) TestOneDriveNewCollections() { testFolderMatcher{scope}, service, service.updateStatus, - control.Options{}, - ).Get(ctx) + control.Options{ToggleFeatures: control.Toggles{EnablePermissionsBackup: true}}, + ).Get(ctx, nil) assert.NoError(t, err) // Don't expect excludes as this isn't an incremental backup. 
assert.Empty(t, excludes) diff --git a/src/internal/connector/onedrive/item.go b/src/internal/connector/onedrive/item.go index c4fd1b380..c527ce09b 100644 --- a/src/internal/connector/onedrive/item.go +++ b/src/internal/connector/onedrive/item.go @@ -1,7 +1,9 @@ package onedrive import ( + "bytes" "context" + "encoding/json" "fmt" "io" "net/http" @@ -37,6 +39,7 @@ func getDriveItem( // sharePointItemReader will return a io.ReadCloser for the specified item // It crafts this by querying M365 for a download URL for the item // and using a http client to initialize a reader +// TODO: Add metadata fetching to SharePoint func sharePointItemReader( hc *http.Client, item models.DriveItemable, @@ -53,6 +56,25 @@ func sharePointItemReader( return dii, resp.Body, nil } +func oneDriveItemMetaReader( + ctx context.Context, + service graph.Servicer, + driveID string, + item models.DriveItemable, +) (io.ReadCloser, int, error) { + meta, err := oneDriveItemMetaInfo(ctx, service, driveID, item) + if err != nil { + return nil, 0, err + } + + metaJSON, err := json.Marshal(meta) + if err != nil { + return nil, 0, err + } + + return io.NopCloser(bytes.NewReader(metaJSON)), len(metaJSON), nil +} + // oneDriveItemReader will return a io.ReadCloser for the specified item // It crafts this by querying M365 for a download URL for the item // and using a http client to initialize a reader @@ -60,16 +82,25 @@ func oneDriveItemReader( hc *http.Client, item models.DriveItemable, ) (details.ItemInfo, io.ReadCloser, error) { - resp, err := downloadItem(hc, item) - if err != nil { - return details.ItemInfo{}, nil, errors.Wrap(err, "downloading item") + var ( + rc io.ReadCloser + isFile = item.GetFile() != nil + ) + + if isFile { + resp, err := downloadItem(hc, item) + if err != nil { + return details.ItemInfo{}, nil, errors.Wrap(err, "downloading item") + } + + rc = resp.Body } dii := details.ItemInfo{ OneDrive: oneDriveItemInfo(item, *item.GetSize()), } - return dii, resp.Body, nil + return dii, 
rc, nil } func downloadItem(hc *http.Client, item models.DriveItemable) (*http.Response, error) { @@ -105,6 +136,10 @@ func downloadItem(hc *http.Client, item models.DriveItemable) (*http.Response, e return resp, graph.Err401Unauthorized } + if resp.StatusCode == http.StatusInternalServerError { + return resp, graph.Err500InternalServerError + } + if resp.StatusCode == http.StatusServiceUnavailable { return resp, graph.Err503ServiceUnavailable } @@ -145,6 +180,59 @@ func oneDriveItemInfo(di models.DriveItemable, itemSize int64) *details.OneDrive } } +// oneDriveItemMetaInfo will fetch the meta information for a drive +// item. As of now, it only adds the permissions applicable for a +// onedrive item. +func oneDriveItemMetaInfo( + ctx context.Context, service graph.Servicer, + driveID string, di models.DriveItemable, +) (Metadata, error) { + itemID := di.GetId() + + perm, err := service.Client().DrivesById(driveID).ItemsById(*itemID).Permissions().Get(ctx, nil) + if err != nil { + return Metadata{}, err + } + + uperms := filterUserPermissions(perm.GetValue()) + + return Metadata{Permissions: uperms}, nil +} + +func filterUserPermissions(perms []models.Permissionable) []UserPermission { + up := []UserPermission{} + + for _, p := range perms { + if p.GetGrantedToV2() == nil { + // For link shares, we get permissions without a user + // specified + continue + } + + roles := []string{} + + for _, r := range p.GetRoles() { + // Skip if the only role available in owner + if r != "owner" { + roles = append(roles, r) + } + } + + if len(roles) == 0 { + continue + } + + up = append(up, UserPermission{ + ID: *p.GetId(), + Roles: roles, + Email: *p.GetGrantedToV2().GetUser().GetAdditionalData()["email"].(*string), + Expiration: p.GetExpirationDateTime(), + }) + } + + return up +} + // sharePointItemInfo will populate a details.SharePointInfo struct // with properties from the drive item. 
ItemSize is specified // separately for restore processes because the local itemable diff --git a/src/internal/connector/onedrive/item_test.go b/src/internal/connector/onedrive/item_test.go index 938748ca2..a2e008ec5 100644 --- a/src/internal/connector/onedrive/item_test.go +++ b/src/internal/connector/onedrive/item_test.go @@ -8,6 +8,7 @@ import ( msgraphsdk "github.com/microsoftgraph/msgraph-sdk-go" "github.com/microsoftgraph/msgraph-sdk-go/models" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" @@ -115,7 +116,17 @@ func (suite *ItemIntegrationSuite) TestItemReader_oneDrive() { return nil } - _, _, _, err := collectItems(ctx, suite, suite.userDriveID, "General", itemCollector) + _, _, _, err := collectItems( + ctx, + defaultItemPager( + suite, + suite.userDriveID, + "", + ), + suite.userDriveID, + "General", + itemCollector, + ) require.NoError(suite.T(), err) // Test Requirement 2: Need a file @@ -128,8 +139,8 @@ func (suite *ItemIntegrationSuite) TestItemReader_oneDrive() { ) // Read data for the file - itemInfo, itemData, err := oneDriveItemReader(graph.HTTPClient(graph.NoTimeout()), driveItem) + require.NoError(suite.T(), err) require.NotNil(suite.T(), itemInfo.OneDrive) require.NotEmpty(suite.T(), itemInfo.OneDrive.ItemName) @@ -247,3 +258,72 @@ func (suite *ItemIntegrationSuite) TestDriveGetFolder() { }) } } + +func getPermsUperms(permID, userID string, scopes []string) (models.Permissionable, UserPermission) { + identity := models.NewIdentity() + identity.SetAdditionalData(map[string]any{"email": &userID}) + + sharepointIdentity := models.NewSharePointIdentitySet() + sharepointIdentity.SetUser(identity) + + perm := models.NewPermission() + perm.SetId(&permID) + perm.SetRoles([]string{"read"}) + perm.SetGrantedToV2(sharepointIdentity) + + uperm := UserPermission{ + ID: permID, + Roles: []string{"read"}, + Email: userID, + } + + return perm, uperm +} + +func TestOneDrivePermissionsFilter(t 
*testing.T) { + permID := "fakePermId" + userID := "fakeuser@provider.com" + userID2 := "fakeuser2@provider.com" + + readPerm, readUperm := getPermsUperms(permID, userID, []string{"read"}) + readWritePerm, readWriteUperm := getPermsUperms(permID, userID2, []string{"read", "write"}) + + noPerm, _ := getPermsUperms(permID, userID, []string{"read"}) + noPerm.SetGrantedToV2(nil) // eg: link shares + + cases := []struct { + name string + graphPermissions []models.Permissionable + parsedPermissions []UserPermission + }{ + { + name: "no perms", + graphPermissions: []models.Permissionable{}, + parsedPermissions: []UserPermission{}, + }, + { + name: "no user bound to perms", + graphPermissions: []models.Permissionable{noPerm}, + parsedPermissions: []UserPermission{}, + }, + { + name: "user with read permissions", + graphPermissions: []models.Permissionable{readPerm}, + parsedPermissions: []UserPermission{readUperm}, + }, + { + name: "user with read and write permissions", + graphPermissions: []models.Permissionable{readWritePerm}, + parsedPermissions: []UserPermission{readWriteUperm}, + }, + { + name: "multiple users with separate permissions", + graphPermissions: []models.Permissionable{readPerm, readWritePerm}, + parsedPermissions: []UserPermission{readUperm, readWriteUperm}, + }, + } + for _, tc := range cases { + actual := filterUserPermissions(tc.graphPermissions) + assert.ElementsMatch(t, tc.parsedPermissions, actual) + } +} diff --git a/src/internal/connector/onedrive/restore.go b/src/internal/connector/onedrive/restore.go index 00ed855b7..0014457c4 100644 --- a/src/internal/connector/onedrive/restore.go +++ b/src/internal/connector/onedrive/restore.go @@ -2,9 +2,15 @@ package onedrive import ( "context" + "encoding/json" + "fmt" "io" "runtime/trace" + "sort" + "strings" + msdrive "github.com/microsoftgraph/msgraph-sdk-go/drive" + "github.com/microsoftgraph/msgraph-sdk-go/models" "github.com/pkg/errors" "github.com/alcionai/corso/src/internal/connector/graph" @@ 
-23,30 +29,101 @@ const ( // Microsoft recommends 5-10MB buffers // https://docs.microsoft.com/en-us/graph/api/driveitem-createuploadsession?view=graph-rest-1.0#best-practices copyBufferSize = 5 * 1024 * 1024 + + // versionWithDataAndMetaFiles is the corso backup format version + // in which we split from storing just the data to storing both + // the data and metadata in two files. + versionWithDataAndMetaFiles = 1 ) +func getParentPermissions( + parentPath path.Path, + parentPermissions map[string][]UserPermission, +) ([]UserPermission, error) { + parentPerms, ok := parentPermissions[parentPath.String()] + if !ok { + onedrivePath, err := path.ToOneDrivePath(parentPath) + if err != nil { + return nil, errors.Wrap(err, "invalid restore path") + } + + if len(onedrivePath.Folders) != 0 { + return nil, errors.Wrap(err, "unable to compute item permissions") + } + + parentPerms = []UserPermission{} + } + + return parentPerms, nil +} + // RestoreCollections will restore the specified data collections into OneDrive func RestoreCollections( ctx context.Context, + backupVersion int, service graph.Servicer, dest control.RestoreDestination, + opts control.Options, dcs []data.Collection, deets *details.Builder, ) (*support.ConnectorOperationStatus, error) { var ( restoreMetrics support.CollectionMetrics restoreErrors error + metrics support.CollectionMetrics + folderPerms map[string][]UserPermission + canceled bool + + // permissionIDMappings is used to map between old and new id + // of permissions as we restore them + permissionIDMappings = map[string]string{} ) errUpdater := func(id string, err error) { restoreErrors = support.WrapAndAppend(id, err, restoreErrors) } + // Reorder collections so that the parents directories are created + // before the child directories + sort.Slice(dcs, func(i, j int) bool { + return dcs[i].FullPath().String() < dcs[j].FullPath().String() + }) + + parentPermissions := map[string][]UserPermission{} + // Iterate through the data collections and 
restore the contents of each for _, dc := range dcs { - temp, canceled := RestoreCollection(ctx, service, dc, OneDriveSource, dest.ContainerName, deets, errUpdater) + var ( + parentPerms []UserPermission + err error + ) - restoreMetrics.Combine(temp) + if opts.RestorePermissions { + parentPerms, err = getParentPermissions(dc.FullPath(), parentPermissions) + if err != nil { + errUpdater(dc.FullPath().String(), err) + } + } + + metrics, folderPerms, permissionIDMappings, canceled = RestoreCollection( + ctx, + backupVersion, + service, + dc, + parentPerms, + OneDriveSource, + dest.ContainerName, + deets, + errUpdater, + permissionIDMappings, + opts.RestorePermissions, + ) + + for k, v := range folderPerms { + parentPermissions[k] = v + } + + restoreMetrics.Combine(metrics) if canceled { break @@ -66,29 +143,37 @@ func RestoreCollections( // RestoreCollection handles restoration of an individual collection. // returns: // - the collection's item and byte count metrics -// - the context cancellation state (true if the context is cancelled) +// - the context cancellation state (true if the context is canceled) func RestoreCollection( ctx context.Context, + backupVersion int, service graph.Servicer, dc data.Collection, + parentPerms []UserPermission, source driveSource, restoreContainerName string, deets *details.Builder, errUpdater func(string, error), -) (support.CollectionMetrics, bool) { + permissionIDMappings map[string]string, + restorePerms bool, +) (support.CollectionMetrics, map[string][]UserPermission, map[string]string, bool) { ctx, end := D.Span(ctx, "gc:oneDrive:restoreCollection", D.Label("path", dc.FullPath())) defer end() var ( - metrics = support.CollectionMetrics{} - copyBuffer = make([]byte, copyBufferSize) - directory = dc.FullPath() + metrics = support.CollectionMetrics{} + copyBuffer = make([]byte, copyBufferSize) + directory = dc.FullPath() + restoredIDs = map[string]string{} + itemInfo details.ItemInfo + itemID string + folderPerms = 
map[string][]UserPermission{} ) drivePath, err := path.ToOneDrivePath(directory) if err != nil { errUpdater(directory.String(), err) - return metrics, false + return metrics, folderPerms, permissionIDMappings, false } // Assemble folder hierarchy we're going to restore into (we recreate the folder hierarchy @@ -108,7 +193,7 @@ func RestoreCollection( restoreFolderID, err := CreateRestoreFolders(ctx, service, drivePath.DriveID, restoreFolderElements) if err != nil { errUpdater(directory.String(), errors.Wrapf(err, "failed to create folders %v", restoreFolderElements)) - return metrics, false + return metrics, folderPerms, permissionIDMappings, false } // Restore items from the collection @@ -118,50 +203,175 @@ func RestoreCollection( select { case <-ctx.Done(): errUpdater("context canceled", ctx.Err()) - return metrics, true + return metrics, folderPerms, permissionIDMappings, true case itemData, ok := <-items: if !ok { - return metrics, false - } - metrics.Objects++ - - metrics.TotalBytes += int64(len(copyBuffer)) - - itemInfo, err := restoreItem(ctx, - service, - itemData, - drivePath.DriveID, - restoreFolderID, - copyBuffer, - source) - if err != nil { - errUpdater(itemData.UUID(), err) - continue + return metrics, folderPerms, permissionIDMappings, false } itemPath, err := dc.FullPath().Append(itemData.UUID(), true) if err != nil { logger.Ctx(ctx).DPanicw("transforming item to full path", "error", err) + errUpdater(itemData.UUID(), err) continue } - deets.Add( - itemPath.String(), - itemPath.ShortRef(), - "", - true, - itemInfo) + if source == OneDriveSource && backupVersion >= versionWithDataAndMetaFiles { + name := itemData.UUID() + if strings.HasSuffix(name, DataFileSuffix) { + metrics.Objects++ + metrics.TotalBytes += int64(len(copyBuffer)) + trimmedName := strings.TrimSuffix(name, DataFileSuffix) - metrics.Successes++ + itemID, itemInfo, err = restoreData(ctx, service, trimmedName, itemData, + drivePath.DriveID, restoreFolderID, copyBuffer, source) + if err 
!= nil { + errUpdater(itemData.UUID(), err) + continue + } + + restoredIDs[trimmedName] = itemID + + deets.Add(itemPath.String(), itemPath.ShortRef(), "", true, itemInfo) + + // Mark it as success without processing .meta + // file if we are not restoring permissions + if !restorePerms { + metrics.Successes++ + } + } else if strings.HasSuffix(name, MetaFileSuffix) { + if !restorePerms { + continue + } + + meta, err := getMetadata(itemData.ToReader()) + if err != nil { + errUpdater(itemData.UUID(), err) + continue + } + + trimmedName := strings.TrimSuffix(name, MetaFileSuffix) + restoreID, ok := restoredIDs[trimmedName] + if !ok { + errUpdater(itemData.UUID(), fmt.Errorf("item not available to restore permissions")) + continue + } + + permissionIDMappings, err = restorePermissions( + ctx, + service, + drivePath.DriveID, + restoreID, + parentPerms, + meta.Permissions, + permissionIDMappings, + ) + if err != nil { + errUpdater(itemData.UUID(), err) + continue + } + + // Objects count is incremented when we restore a + // data file and success count is incremented when + // we restore a meta file as every data file + // should have an associated meta file + metrics.Successes++ + } else if strings.HasSuffix(name, DirMetaFileSuffix) { + trimmedName := strings.TrimSuffix(name, DirMetaFileSuffix) + folderID, err := createRestoreFolder( + ctx, + service, + drivePath.DriveID, + trimmedName, + restoreFolderID, + ) + if err != nil { + errUpdater(itemData.UUID(), err) + continue + } + + if !restorePerms { + continue + } + + meta, err := getMetadata(itemData.ToReader()) + if err != nil { + errUpdater(itemData.UUID(), err) + continue + } + + permissionIDMappings, err = restorePermissions( + ctx, + service, + drivePath.DriveID, + folderID, + parentPerms, + meta.Permissions, + permissionIDMappings, + ) + if err != nil { + errUpdater(itemData.UUID(), err) + continue + } + + trimmedPath := strings.TrimSuffix(itemPath.String(), DirMetaFileSuffix) + folderPerms[trimmedPath] = 
meta.Permissions + } else { + if !ok { + errUpdater(itemData.UUID(), fmt.Errorf("invalid backup format, you might be using an old backup")) + continue + } + } + } else { + metrics.Objects++ + metrics.TotalBytes += int64(len(copyBuffer)) + + // No permissions stored at the moment for SharePoint + _, itemInfo, err = restoreData(ctx, + service, + itemData.UUID(), + itemData, + drivePath.DriveID, + restoreFolderID, + copyBuffer, + source) + if err != nil { + errUpdater(itemData.UUID(), err) + continue + } + + deets.Add(itemPath.String(), itemPath.ShortRef(), "", true, itemInfo) + metrics.Successes++ + } } } } -// createRestoreFolders creates the restore folder hieararchy in the specified drive and returns the folder ID -// of the last folder entry in the hiearchy +// Creates a folder with its permissions +func createRestoreFolder( + ctx context.Context, + service graph.Servicer, + driveID, folder, parentFolderID string, +) (string, error) { + folderItem, err := createItem(ctx, service, driveID, parentFolderID, newItem(folder, true)) + if err != nil { + return "", errors.Wrapf( + err, + "failed to create folder %s/%s. 
details: %s", parentFolderID, folder, + support.ConnectorStackErrorTrace(err), + ) + } + + logger.Ctx(ctx).Debugf("Resolved %s in %s to %s", folder, parentFolderID, *folderItem.GetId()) + + return *folderItem.GetId(), nil +} + +// createRestoreFolders creates the restore folder hierarchy in the specified drive and returns the folder ID +// of the last folder entry in the hierarchy func CreateRestoreFolders(ctx context.Context, service graph.Servicer, driveID string, restoreFolders []string, ) (string, error) { driveRoot, err := service.Client().DrivesById(driveID).Root().Get(ctx, nil) @@ -209,15 +419,16 @@ func CreateRestoreFolders(ctx context.Context, service graph.Servicer, driveID s return parentFolderID, nil } -// restoreItem will create a new item in the specified `parentFolderID` and upload the data.Stream -func restoreItem( +// restoreData will create a new item in the specified `parentFolderID` and upload the data.Stream +func restoreData( ctx context.Context, service graph.Servicer, + name string, itemData data.Stream, driveID, parentFolderID string, copyBuffer []byte, source driveSource, -) (details.ItemInfo, error) { +) (string, details.ItemInfo, error) { ctx, end := D.Span(ctx, "gc:oneDrive:restoreItem", D.Label("item_uuid", itemData.UUID())) defer end() @@ -227,19 +438,19 @@ func restoreItem( // Get the stream size (needed to create the upload session) ss, ok := itemData.(data.StreamSize) if !ok { - return details.ItemInfo{}, errors.Errorf("item %q does not implement DataStreamInfo", itemName) + return "", details.ItemInfo{}, errors.Errorf("item %q does not implement DataStreamInfo", itemName) } // Create Item - newItem, err := createItem(ctx, service, driveID, parentFolderID, newItem(itemData.UUID(), false)) + newItem, err := createItem(ctx, service, driveID, parentFolderID, newItem(name, false)) if err != nil { - return details.ItemInfo{}, errors.Wrapf(err, "failed to create item %s", itemName) + return "", details.ItemInfo{}, errors.Wrapf(err, 
"failed to create item %s", itemName) } // Get a drive item writer w, err := driveItemWriter(ctx, service, driveID, *newItem.GetId(), ss.Size()) if err != nil { - return details.ItemInfo{}, errors.Wrapf(err, "failed to create item upload session %s", itemName) + return "", details.ItemInfo{}, errors.Wrapf(err, "failed to create item upload session %s", itemName) } iReader := itemData.ToReader() @@ -250,7 +461,7 @@ func restoreItem( // Upload the stream data written, err := io.CopyBuffer(w, progReader, copyBuffer) if err != nil { - return details.ItemInfo{}, errors.Wrapf(err, "failed to upload data: item %s", itemName) + return "", details.ItemInfo{}, errors.Wrapf(err, "failed to upload data: item %s", itemName) } dii := details.ItemInfo{} @@ -262,5 +473,129 @@ func restoreItem( dii.OneDrive = oneDriveItemInfo(newItem, written) } - return dii, nil + return *newItem.GetId(), dii, nil +} + +// getMetadata read and parses the metadata info for an item +func getMetadata(metar io.ReadCloser) (Metadata, error) { + var meta Metadata + // `metar` will be nil for the top level container folder + if metar != nil { + metaraw, err := io.ReadAll(metar) + if err != nil { + return Metadata{}, err + } + + err = json.Unmarshal(metaraw, &meta) + if err != nil { + return Metadata{}, err + } + } + + return meta, nil +} + +// getChildPermissions is to filter out permissions present in the +// parent from the ones that are available for child. This is +// necessary as we store the nested permissions in the child. We +// cannot avoid storing the nested permissions as it is possible that +// a file in a folder can remove the nested permission that is present +// on itself. 
+func getChildPermissions(childPermissions, parentPermissions []UserPermission) ([]UserPermission, []UserPermission) { + addedPermissions := []UserPermission{} + removedPermissions := []UserPermission{} + + for _, cp := range childPermissions { + found := false + + for _, pp := range parentPermissions { + if cp.ID == pp.ID { + found = true + break + } + } + + if !found { + addedPermissions = append(addedPermissions, cp) + } + } + + for _, pp := range parentPermissions { + found := false + + for _, cp := range childPermissions { + if pp.ID == cp.ID { + found = true + break + } + } + + if !found { + removedPermissions = append(removedPermissions, pp) + } + } + + return addedPermissions, removedPermissions +} + +// restorePermissions takes in the permissions that were added and the +// removed(ones present in parent but not in child) and adds/removes +// the necessary permissions on onedrive objects. +func restorePermissions( + ctx context.Context, + service graph.Servicer, + driveID string, + itemID string, + parentPerms []UserPermission, + childPerms []UserPermission, + permissionIDMappings map[string]string, +) (map[string]string, error) { + permAdded, permRemoved := getChildPermissions(childPerms, parentPerms) + + for _, p := range permRemoved { + err := service.Client().DrivesById(driveID).ItemsById(itemID). + PermissionsById(permissionIDMappings[p.ID]).Delete(ctx, nil) + if err != nil { + return permissionIDMappings, errors.Wrapf( + err, + "failed to remove permission for item %s. 
details: %s", + itemID, + support.ConnectorStackErrorTrace(err), + ) + } + } + + for _, p := range permAdded { + pbody := msdrive.NewItemsItemInvitePostRequestBody() + pbody.SetRoles(p.Roles) + + if p.Expiration != nil { + expiry := p.Expiration.String() + pbody.SetExpirationDateTime(&expiry) + } + + si := false + pbody.SetSendInvitation(&si) + + rs := true + pbody.SetRequireSignIn(&rs) + + rec := models.NewDriveRecipient() + rec.SetEmail(&p.Email) + pbody.SetRecipients([]models.DriveRecipientable{rec}) + + np, err := service.Client().DrivesById(driveID).ItemsById(itemID).Invite().Post(ctx, pbody, nil) + if err != nil { + return permissionIDMappings, errors.Wrapf( + err, + "failed to set permission for item %s. details: %s", + itemID, + support.ConnectorStackErrorTrace(err), + ) + } + + permissionIDMappings[p.ID] = *np.GetValue()[0].GetId() + } + + return permissionIDMappings, nil } diff --git a/src/internal/connector/sharepoint/api/api.go b/src/internal/connector/sharepoint/api/api.go new file mode 100644 index 000000000..c05eaad6b --- /dev/null +++ b/src/internal/connector/sharepoint/api/api.go @@ -0,0 +1,6 @@ +package api + +type Tuple struct { + Name string + ID string +} diff --git a/src/internal/connector/sharepoint/api/helper_test.go b/src/internal/connector/sharepoint/api/helper_test.go new file mode 100644 index 000000000..631dd7b3b --- /dev/null +++ b/src/internal/connector/sharepoint/api/helper_test.go @@ -0,0 +1,21 @@ +package api + +import ( + "testing" + + "github.com/alcionai/corso/src/internal/connector/discovery/api" + "github.com/alcionai/corso/src/internal/connector/graph" + "github.com/alcionai/corso/src/pkg/account" + "github.com/stretchr/testify/require" +) + +func createTestBetaService(t *testing.T, credentials account.M365Config) *api.BetaService { + adapter, err := graph.CreateAdapter( + credentials.AzureTenantID, + credentials.AzureClientID, + credentials.AzureClientSecret, + ) + require.NoError(t, err) + + return 
api.NewBetaService(adapter) +} diff --git a/src/internal/connector/sharepoint/api/pages.go b/src/internal/connector/sharepoint/api/pages.go new file mode 100644 index 000000000..a2232140c --- /dev/null +++ b/src/internal/connector/sharepoint/api/pages.go @@ -0,0 +1,93 @@ +package api + +import ( + "context" + + "github.com/alcionai/corso/src/internal/connector/discovery/api" + "github.com/alcionai/corso/src/internal/connector/graph/betasdk/models" + "github.com/alcionai/corso/src/internal/connector/graph/betasdk/sites" + "github.com/alcionai/corso/src/internal/connector/support" +) + +// GetSitePages retrieves a collection of Pages related to the give Site. +// Returns error if error experienced during the call +func GetSitePage( + ctx context.Context, + serv *api.BetaService, + siteID string, + pages []string, +) ([]models.SitePageable, error) { + col := make([]models.SitePageable, 0) + opts := retrieveSitePageOptions() + + for _, entry := range pages { + page, err := serv.Client().SitesById(siteID).PagesById(entry).Get(ctx, opts) + if err != nil { + return nil, support.ConnectorStackErrorTraceWrap(err, "fetching page: "+entry) + } + + col = append(col, page) + } + + return col, nil +} + +// fetchPages utility function to return the tuple of item +func FetchPages(ctx context.Context, bs *api.BetaService, siteID string) ([]Tuple, error) { + var ( + builder = bs.Client().SitesById(siteID).Pages() + opts = fetchPageOptions() + pageTuples = make([]Tuple, 0) + ) + + for { + resp, err := builder.Get(ctx, opts) + if err != nil { + return nil, support.ConnectorStackErrorTraceWrap(err, "failed fetching site page") + } + + for _, entry := range resp.GetValue() { + pid := *entry.GetId() + temp := Tuple{pid, pid} + + if entry.GetName() != nil { + temp.Name = *entry.GetName() + } + + pageTuples = append(pageTuples, temp) + } + + if resp.GetOdataNextLink() == nil { + break + } + + builder = sites.NewItemPagesRequestBuilder(*resp.GetOdataNextLink(), bs.Client().Adapter()) + } + 
+ return pageTuples, nil +} + +// fetchPageOptions is used to return minimal information reltating to Site Pages +// Pages API: https://learn.microsoft.com/en-us/graph/api/resources/sitepage?view=graph-rest-beta +func fetchPageOptions() *sites.ItemPagesRequestBuilderGetRequestConfiguration { + fields := []string{"id", "name"} + options := &sites.ItemPagesRequestBuilderGetRequestConfiguration{ + QueryParameters: &sites.ItemPagesRequestBuilderGetQueryParameters{ + Select: fields, + }, + } + + return options +} + +// retrievePageOptions returns options to expand +func retrieveSitePageOptions() *sites.ItemPagesSitePageItemRequestBuilderGetRequestConfiguration { + fields := []string{"canvasLayout"} + options := &sites.ItemPagesSitePageItemRequestBuilderGetRequestConfiguration{ + QueryParameters: &sites.ItemPagesSitePageItemRequestBuilderGetQueryParameters{ + Expand: fields, + }, + } + + return options +} diff --git a/src/internal/connector/sharepoint/api/pages_test.go b/src/internal/connector/sharepoint/api/pages_test.go new file mode 100644 index 000000000..ecc2cf18d --- /dev/null +++ b/src/internal/connector/sharepoint/api/pages_test.go @@ -0,0 +1,71 @@ +package api + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + "github.com/alcionai/corso/src/internal/tester" + "github.com/alcionai/corso/src/pkg/account" +) + +type SharePointPageSuite struct { + suite.Suite + siteID string + creds account.M365Config +} + +func (suite *SharePointPageSuite) SetupSuite() { + t := suite.T() + tester.MustGetEnvSets(t, tester.M365AcctCredEnvs) + + suite.siteID = tester.M365SiteID(t) + a := tester.NewM365Account(t) + m365, err := a.M365Config() + require.NoError(t, err) + + suite.creds = m365 +} + +func TestSharePointPageSuite(t *testing.T) { + tester.RunOnAny( + t, + tester.CorsoCITests, + tester.CorsoGraphConnectorSharePointTests) + suite.Run(t, new(SharePointPageSuite)) +} + +func (suite 
*SharePointPageSuite) TestFetchPages() { + ctx, flush := tester.NewContext() + defer flush() + + t := suite.T() + service := createTestBetaService(t, suite.creds) + + pgs, err := FetchPages(ctx, service, suite.siteID) + assert.NoError(t, err) + require.NotNil(t, pgs) + assert.NotZero(t, len(pgs)) + + for _, entry := range pgs { + t.Logf("id: %s\t name: %s\n", entry.ID, entry.Name) + } +} + +func (suite *SharePointPageSuite) TestGetSitePage() { + ctx, flush := tester.NewContext() + defer flush() + + t := suite.T() + service := createTestBetaService(t, suite.creds) + tuples, err := FetchPages(ctx, service, suite.siteID) + require.NoError(t, err) + require.NotNil(t, tuples) + + jobs := []string{tuples[0].ID} + pages, err := GetSitePage(ctx, service, suite.siteID, jobs) + assert.NoError(t, err) + assert.NotEmpty(t, pages) +} diff --git a/src/internal/connector/sharepoint/collection.go b/src/internal/connector/sharepoint/collection.go index c34d2a2d1..c540af4e6 100644 --- a/src/internal/connector/sharepoint/collection.go +++ b/src/internal/connector/sharepoint/collection.go @@ -9,6 +9,7 @@ import ( kw "github.com/microsoft/kiota-serialization-json-go" "github.com/microsoftgraph/msgraph-sdk-go/models" + "github.com/alcionai/corso/src/internal/connector/discovery/api" "github.com/alcionai/corso/src/internal/connector/graph" "github.com/alcionai/corso/src/internal/connector/support" "github.com/alcionai/corso/src/internal/data" @@ -46,6 +47,7 @@ type Collection struct { jobs []string // M365 IDs of the items of this collection service graph.Servicer + betaService *api.BetaService statusUpdater support.StatusUpdater } diff --git a/src/internal/connector/sharepoint/collection_test.go b/src/internal/connector/sharepoint/collection_test.go index f049ab26f..c2b1ac830 100644 --- a/src/internal/connector/sharepoint/collection_test.go +++ b/src/internal/connector/sharepoint/collection_test.go @@ -17,11 +17,27 @@ import ( "github.com/alcionai/corso/src/internal/connector/support" 
"github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/tester" + "github.com/alcionai/corso/src/pkg/account" + "github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/path" ) type SharePointCollectionSuite struct { suite.Suite + siteID string + creds account.M365Config +} + +func (suite *SharePointCollectionSuite) SetupSuite() { + t := suite.T() + tester.MustGetEnvSets(t, tester.M365AcctCredEnvs) + + suite.siteID = tester.M365SiteID(t) + a := tester.NewM365Account(t) + m365, err := a.M365Config() + require.NoError(t, err) + + suite.creds = m365 } func TestSharePointCollectionSuite(t *testing.T) { @@ -95,20 +111,33 @@ func (suite *SharePointCollectionSuite) TestSharePointListCollection() { assert.Equal(t, testName, shareInfo.Info().SharePoint.ItemName) } +func (suite *SharePointCollectionSuite) TestCollectPages() { + ctx, flush := tester.NewContext() + defer flush() + + t := suite.T() + col, err := collectPages( + ctx, + suite.creds, + nil, + account.AzureTenantID, + suite.siteID, + nil, + &MockGraphService{}, + control.Defaults(), + ) + assert.NoError(t, err) + assert.NotEmpty(t, col) +} + // TestRestoreListCollection verifies Graph Restore API for the List Collection func (suite *SharePointCollectionSuite) TestRestoreListCollection() { ctx, flush := tester.NewContext() defer flush() t := suite.T() - siteID := tester.M365SiteID(t) - a := tester.NewM365Account(t) - account, err := a.M365Config() - require.NoError(t, err) - - service, err := createTestService(account) - require.NoError(t, err) + service := createTestService(t, suite.creds) listing := mockconnector.GetMockListDefault("Mock List") testName := "MockListing" listing.SetDisplayName(&testName) @@ -123,13 +152,13 @@ func (suite *SharePointCollectionSuite) TestRestoreListCollection() { destName := "Corso_Restore_" + common.FormatNow(common.SimpleTimeTesting) - deets, err := restoreListItem(ctx, service, listData, siteID, destName) + deets, err := 
restoreListItem(ctx, service, listData, suite.siteID, destName) assert.NoError(t, err) t.Logf("List created: %s\n", deets.SharePoint.ItemName) // Clean-Up var ( - builder = service.Client().SitesById(siteID).Lists() + builder = service.Client().SitesById(suite.siteID).Lists() isFound bool deleteID string ) @@ -156,7 +185,7 @@ func (suite *SharePointCollectionSuite) TestRestoreListCollection() { } if isFound { - err := DeleteList(ctx, service, siteID, deleteID) + err := DeleteList(ctx, service, suite.siteID, deleteID) assert.NoError(t, err) } } @@ -168,25 +197,18 @@ func (suite *SharePointCollectionSuite) TestRestoreLocation() { defer flush() t := suite.T() - a := tester.NewM365Account(t) - account, err := a.M365Config() - require.NoError(t, err) - - service, err := createTestService(account) - require.NoError(t, err) + service := createTestService(t, suite.creds) rootFolder := "General_" + common.FormatNow(common.SimpleTimeTesting) - siteID := tester.M365SiteID(t) - - folderID, err := createRestoreFolders(ctx, service, siteID, []string{rootFolder}) + folderID, err := createRestoreFolders(ctx, service, suite.siteID, []string{rootFolder}) assert.NoError(t, err) t.Log("FolderID: " + folderID) - _, err = createRestoreFolders(ctx, service, siteID, []string{rootFolder, "Tsao"}) + _, err = createRestoreFolders(ctx, service, suite.siteID, []string{rootFolder, "Tsao"}) assert.NoError(t, err) // CleanUp - siteDrive, err := service.Client().SitesById(siteID).Drive().Get(ctx, nil) + siteDrive, err := service.Client().SitesById(suite.siteID).Drive().Get(ctx, nil) require.NoError(t, err) driveID := *siteDrive.GetId() diff --git a/src/internal/connector/sharepoint/data_collections.go b/src/internal/connector/sharepoint/data_collections.go index 6011c32a0..88e16882c 100644 --- a/src/internal/connector/sharepoint/data_collections.go +++ b/src/internal/connector/sharepoint/data_collections.go @@ -6,11 +6,14 @@ import ( "github.com/pkg/errors" + 
"github.com/alcionai/corso/src/internal/connector/discovery/api" "github.com/alcionai/corso/src/internal/connector/graph" "github.com/alcionai/corso/src/internal/connector/onedrive" + sapi "github.com/alcionai/corso/src/internal/connector/sharepoint/api" "github.com/alcionai/corso/src/internal/connector/support" "github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/observe" + "github.com/alcionai/corso/src/pkg/account" "github.com/alcionai/corso/src/pkg/control" "github.com/alcionai/corso/src/pkg/logger" "github.com/alcionai/corso/src/pkg/path" @@ -152,7 +155,9 @@ func collectLibraries( updater.UpdateStatus, ctrlOpts) - odcs, excludes, err := colls.Get(ctx) + // TODO(ashmrtn): Pass previous backup metadata when SharePoint supports delta + // token-based incrementals. + odcs, excludes, err := colls.Get(ctx, nil) if err != nil { return nil, nil, support.WrapAndAppend(siteID, err, errs) } @@ -160,6 +165,55 @@ func collectLibraries( return append(collections, odcs...), excludes, errs } +// collectPages constructs a sharepoint Collections struct and Get()s the associated +// M365 IDs for the associated Pages +func collectPages( + ctx context.Context, + creds account.M365Config, + serv graph.Servicer, + tenantID, siteID string, + scope selectors.SharePointScope, + updater statusUpdater, + ctrlOpts control.Options, +) ([]data.Collection, error) { + logger.Ctx(ctx).With("site", siteID).Debug("Creating SharePoint Pages collections") + + spcs := make([]data.Collection, 0) + + // make the betaClient + adpt, err := graph.CreateAdapter(creds.AzureTenantID, creds.AzureClientID, creds.AzureClientSecret) + if err != nil { + return nil, errors.Wrap(err, "adapter for betaservice not created") + } + + betaService := api.NewBetaService(adpt) + + tuples, err := sapi.FetchPages(ctx, betaService, siteID) + if err != nil { + return nil, err + } + + for _, tuple := range tuples { + dir, err := path.Builder{}.Append(tuple.Name). 
+ ToDataLayerSharePointPath( + tenantID, + siteID, + path.PagesCategory, + false) + if err != nil { + return nil, errors.Wrapf(err, "failed to create collection path for site: %s", siteID) + } + + collection := NewCollection(dir, serv, updater.UpdateStatus) + collection.betaService = betaService + collection.AddJob(tuple.ID) + + spcs = append(spcs, collection) + } + + return spcs, nil +} + type folderMatcher struct { scope selectors.SharePointScope } diff --git a/src/internal/connector/sharepoint/data_collections_test.go b/src/internal/connector/sharepoint/data_collections_test.go index 87aaa5c84..11d05156c 100644 --- a/src/internal/connector/sharepoint/data_collections_test.go +++ b/src/internal/connector/sharepoint/data_collections_test.go @@ -77,7 +77,7 @@ func (suite *SharePointLibrariesSuite) TestUpdateCollections() { site, testBaseDrivePath, ), - expectedItemCount: 2, + expectedItemCount: 1, expectedFileCount: 1, expectedContainerCount: 1, }, diff --git a/src/internal/connector/sharepoint/helper_test.go b/src/internal/connector/sharepoint/helper_test.go index e716a5bae..30d589389 100644 --- a/src/internal/connector/sharepoint/helper_test.go +++ b/src/internal/connector/sharepoint/helper_test.go @@ -4,11 +4,11 @@ import ( "testing" msgraphsdk "github.com/microsoftgraph/msgraph-sdk-go" - "github.com/pkg/errors" "github.com/stretchr/testify/require" "github.com/alcionai/corso/src/internal/connector/graph" "github.com/alcionai/corso/src/internal/connector/onedrive" + "github.com/alcionai/corso/src/internal/connector/support" "github.com/alcionai/corso/src/pkg/account" ) @@ -29,21 +29,22 @@ func (ms *MockGraphService) Adapter() *msgraphsdk.GraphRequestAdapter { return nil } +func (ms *MockGraphService) UpdateStatus(*support.ConnectorOperationStatus) { +} + // --------------------------------------------------------------------------- // Helper Functions // --------------------------------------------------------------------------- -func 
createTestService(credentials account.M365Config) (*graph.Service, error) { +func createTestService(t *testing.T, credentials account.M365Config) *graph.Service { adapter, err := graph.CreateAdapter( credentials.AzureTenantID, credentials.AzureClientID, credentials.AzureClientSecret, ) - if err != nil { - return nil, errors.Wrap(err, "creating microsoft graph service for exchange") - } + require.NoError(t, err, "creating microsoft graph service for exchange") - return graph.NewService(adapter), nil + return graph.NewService(adapter) } func expectedPathAsSlice(t *testing.T, tenant, user string, rest ...string) []string { diff --git a/src/internal/connector/sharepoint/list_test.go b/src/internal/connector/sharepoint/list_test.go index c798be368..2571c9183 100644 --- a/src/internal/connector/sharepoint/list_test.go +++ b/src/internal/connector/sharepoint/list_test.go @@ -49,9 +49,7 @@ func (suite *SharePointSuite) TestLoadList() { defer flush() t := suite.T() - service, err := createTestService(suite.creds) - require.NoError(t, err) - + service := createTestService(t, suite.creds) tuples, err := preFetchLists(ctx, service, "root") require.NoError(t, err) diff --git a/src/internal/connector/sharepoint/restore.go b/src/internal/connector/sharepoint/restore.go index ef2b940bb..3cf35d287 100644 --- a/src/internal/connector/sharepoint/restore.go +++ b/src/internal/connector/sharepoint/restore.go @@ -36,6 +36,7 @@ import ( // RestoreCollections will restore the specified data collections into OneDrive func RestoreCollections( ctx context.Context, + backupVersion int, service graph.Servicer, dest control.RestoreDestination, dcs []data.Collection, @@ -59,14 +60,19 @@ func RestoreCollections( switch dc.FullPath().Category() { case path.LibrariesCategory: - metrics, canceled = onedrive.RestoreCollection( + metrics, _, _, canceled = onedrive.RestoreCollection( ctx, + backupVersion, service, dc, + []onedrive.UserPermission{}, // Currently permission data is not stored for 
sharepoint onedrive.OneDriveSource, dest.ContainerName, deets, - errUpdater) + errUpdater, + map[string]string{}, + false, + ) case path.ListsCategory: metrics, canceled = RestoreCollection( ctx, diff --git a/src/internal/connector/support/m365Support.go b/src/internal/connector/support/m365Support.go index d7e51e513..0780a2b0e 100644 --- a/src/internal/connector/support/m365Support.go +++ b/src/internal/connector/support/m365Support.go @@ -1,6 +1,9 @@ package support import ( + "strings" + + bmodels "github.com/alcionai/corso/src/internal/connector/graph/betasdk/models" absser "github.com/microsoft/kiota-abstractions-go/serialization" js "github.com/microsoft/kiota-serialization-json-go" "github.com/microsoftgraph/msgraph-sdk-go/models" @@ -12,7 +15,7 @@ import ( func CreateFromBytes(bytes []byte, createFunc absser.ParsableFactory) (absser.Parsable, error) { parseNode, err := js.NewJsonParseNodeFactory().GetRootParseNode("application/json", bytes) if err != nil { - return nil, errors.Wrap(err, "parsing byte array into m365 object") + return nil, errors.Wrap(err, "deserializing bytes into base m365 object") } anObject, err := parseNode.GetObjectValue(createFunc) @@ -27,7 +30,7 @@ func CreateFromBytes(bytes []byte, createFunc absser.ParsableFactory) (absser.Pa func CreateMessageFromBytes(bytes []byte) (models.Messageable, error) { aMessage, err := CreateFromBytes(bytes, models.CreateMessageFromDiscriminatorValue) if err != nil { - return nil, errors.Wrap(err, "creating m365 exchange.Mail object from provided bytes") + return nil, errors.Wrap(err, "deserializing bytes to exchange message") } message := aMessage.(models.Messageable) @@ -40,7 +43,7 @@ func CreateMessageFromBytes(bytes []byte) (models.Messageable, error) { func CreateContactFromBytes(bytes []byte) (models.Contactable, error) { parsable, err := CreateFromBytes(bytes, models.CreateContactFromDiscriminatorValue) if err != nil { - return nil, errors.Wrap(err, "creating m365 exchange.Contact object from 
provided bytes") + return nil, errors.Wrap(err, "deserializing bytes to exchange contact") } contact := parsable.(models.Contactable) @@ -52,7 +55,7 @@ func CreateContactFromBytes(bytes []byte) (models.Contactable, error) { func CreateEventFromBytes(bytes []byte) (models.Eventable, error) { parsable, err := CreateFromBytes(bytes, models.CreateEventFromDiscriminatorValue) if err != nil { - return nil, errors.Wrap(err, "creating m365 exchange.Event object from provided bytes") + return nil, errors.Wrap(err, "deserializing bytes to exchange event") } event := parsable.(models.Eventable) @@ -64,10 +67,33 @@ func CreateEventFromBytes(bytes []byte) (models.Eventable, error) { func CreateListFromBytes(bytes []byte) (models.Listable, error) { parsable, err := CreateFromBytes(bytes, models.CreateListFromDiscriminatorValue) if err != nil { - return nil, errors.Wrap(err, "creating m365 sharepoint.List object from provided bytes") + return nil, errors.Wrap(err, "deserializing bytes to sharepoint list") } list := parsable.(models.Listable) return list, nil } + +// CreatePageFromBytes transforms given bytes in models.SitePageable object +func CreatePageFromBytes(bytes []byte) (bmodels.SitePageable, error) { + parsable, err := CreateFromBytes(bytes, bmodels.CreateSitePageFromDiscriminatorValue) + if err != nil { + return nil, errors.Wrap(err, "deserializing bytes to sharepoint page") + } + + page := parsable.(bmodels.SitePageable) + + return page, nil +} + +func HasAttachments(body models.ItemBodyable) bool { + if body.GetContent() == nil || body.GetContentType() == nil || + *body.GetContentType() == models.TEXT_BODYTYPE || len(*body.GetContent()) == 0 { + return false + } + + content := *body.GetContent() + + return strings.Contains(content, "src=\"cid:") +} diff --git a/src/internal/connector/support/m365Support_test.go b/src/internal/connector/support/m365Support_test.go index c04c74604..946996431 100644 --- a/src/internal/connector/support/m365Support_test.go +++ 
b/src/internal/connector/support/m365Support_test.go @@ -3,10 +3,13 @@ package support import ( "testing" + kioser "github.com/microsoft/kiota-serialization-json-go" + "github.com/microsoftgraph/msgraph-sdk-go/models" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + bmodels "github.com/alcionai/corso/src/internal/connector/graph/betasdk/models" "github.com/alcionai/corso/src/internal/connector/mockconnector" ) @@ -18,6 +21,11 @@ func TestDataSupportSuite(t *testing.T) { suite.Run(t, new(DataSupportSuite)) } +var ( + empty = "Empty Bytes" + invalid = "Invalid Bytes" +) + // TestCreateMessageFromBytes verifies approved mockdata bytes can // be successfully transformed into M365 Message data. func (suite *DataSupportSuite) TestCreateMessageFromBytes() { @@ -59,13 +67,13 @@ func (suite *DataSupportSuite) TestCreateContactFromBytes() { isNil assert.ValueAssertionFunc }{ { - name: "Empty Bytes", + name: empty, byteArray: make([]byte, 0), checkError: assert.Error, isNil: assert.Nil, }, { - name: "Invalid Bytes", + name: invalid, byteArray: []byte("A random sentence doesn't make an object"), checkError: assert.Error, isNil: assert.Nil, @@ -94,13 +102,13 @@ func (suite *DataSupportSuite) TestCreateEventFromBytes() { isNil assert.ValueAssertionFunc }{ { - name: "Empty Byes", + name: empty, byteArray: make([]byte, 0), checkError: assert.Error, isNil: assert.Nil, }, { - name: "Invalid Bytes", + name: invalid, byteArray: []byte("Invalid byte stream \"subject:\" Not going to work"), checkError: assert.Error, isNil: assert.Nil, @@ -132,13 +140,13 @@ func (suite *DataSupportSuite) TestCreateListFromBytes() { isNil assert.ValueAssertionFunc }{ { - name: "Empty Byes", + name: empty, byteArray: make([]byte, 0), checkError: assert.Error, isNil: assert.Nil, }, { - name: "Invalid Bytes", + name: invalid, byteArray: []byte("Invalid byte stream \"subject:\" Not going to work"), checkError: assert.Error, isNil: assert.Nil, @@ 
-159,3 +167,111 @@ func (suite *DataSupportSuite) TestCreateListFromBytes() { }) } } + +func (suite *DataSupportSuite) TestCreatePageFromBytes() { + tests := []struct { + name string + checkError assert.ErrorAssertionFunc + isNil assert.ValueAssertionFunc + getBytes func(t *testing.T) []byte + }{ + { + empty, + assert.Error, + assert.Nil, + func(t *testing.T) []byte { + return make([]byte, 0) + }, + }, + { + invalid, + assert.Error, + assert.Nil, + func(t *testing.T) []byte { + return []byte("snarf") + }, + }, + { + "Valid Page", + assert.NoError, + assert.NotNil, + func(t *testing.T) []byte { + pg := bmodels.NewSitePage() + title := "Tested" + pg.SetTitle(&title) + pg.SetName(&title) + pg.SetWebUrl(&title) + + writer := kioser.NewJsonSerializationWriter() + err := pg.Serialize(writer) + require.NoError(t, err) + + byteArray, err := writer.GetSerializedContent() + require.NoError(t, err) + + return byteArray + }, + }, + } + + for _, test := range tests { + suite.T().Run(test.name, func(t *testing.T) { + result, err := CreatePageFromBytes(test.getBytes(t)) + test.checkError(t, err) + test.isNil(t, result) + }) + } +} + +func (suite *DataSupportSuite) TestHasAttachments() { + tests := []struct { + name string + hasAttachment assert.BoolAssertionFunc + getBodyable func(t *testing.T) models.ItemBodyable + }{ + { + name: "Mock w/out attachment", + hasAttachment: assert.False, + getBodyable: func(t *testing.T) models.ItemBodyable { + byteArray := mockconnector.GetMockMessageWithBodyBytes( + "Test", + "This is testing", + "This is testing", + ) + message, err := CreateMessageFromBytes(byteArray) + require.NoError(t, err) + return message.GetBody() + }, + }, + { + name: "Mock w/ inline attachment", + hasAttachment: assert.True, + getBodyable: func(t *testing.T) models.ItemBodyable { + byteArray := mockconnector.GetMessageWithOneDriveAttachment("Test legacy") + message, err := CreateMessageFromBytes(byteArray) + require.NoError(t, err) + return message.GetBody() + }, + }, + 
{ + name: "Edge Case", + hasAttachment: assert.True, + getBodyable: func(t *testing.T) models.ItemBodyable { + //nolint:lll + content := "\r\n
Happy New Year,

In accordance with TPS report guidelines, there have been questions about how to address our activities SharePoint Cover page. Do you believe this is the best picture? 



Let me know if this meets our culture requirements.

Warm Regards,

Dustin
" + body := models.NewItemBody() + body.SetContent(&content) + cat := models.HTML_BODYTYPE + body.SetContentType(&cat) + return body + }, + }, + } + + for _, test := range tests { + suite.T().Run(test.name, func(t *testing.T) { + found := HasAttachments(test.getBodyable(t)) + test.hasAttachment(t, found) + }) + } +} diff --git a/src/internal/connector/support/m365Transform.go b/src/internal/connector/support/m365Transform.go index 651689430..7fa207c9e 100644 --- a/src/internal/connector/support/m365Transform.go +++ b/src/internal/connector/support/m365Transform.go @@ -1,11 +1,14 @@ package support import ( + "fmt" "strings" "github.com/microsoftgraph/msgraph-sdk-go/models" ) +const itemAttachment = "#microsoft.graph.itemAttachment" + // CloneMessageableFields places data from original data into new message object. // SingleLegacyValueProperty is not populated during this operation func CloneMessageableFields(orig, message models.Messageable) models.Messageable { @@ -278,3 +281,90 @@ func cloneColumnDefinitionable(orig models.ColumnDefinitionable) models.ColumnDe return newColumn } + +// ToItemAttachment transforms internal item, OutlookItemables, into +// objects that are able to be uploaded into M365. 
+// Supported Internal Items: +// - Events +func ToItemAttachment(orig models.Attachmentable) (models.Attachmentable, error) { + transform, ok := orig.(models.ItemAttachmentable) + supported := "#microsoft.graph.event" + + if !ok { // Shouldn't ever happen + return nil, fmt.Errorf("transforming attachment to item attachment") + } + + item := transform.GetItem() + itemType := item.GetOdataType() + + switch *itemType { + case supported: + event := item.(models.Eventable) + + newEvent, err := sanitizeEvent(event) + if err != nil { + return nil, err + } + + transform.SetItem(newEvent) + + return transform, nil + default: + return nil, fmt.Errorf("exiting ToItemAttachment: %s not supported", *itemType) + } +} + +// sanitizeEvent transfers data into event object and +// removes unique IDs from the M365 object +func sanitizeEvent(orig models.Eventable) (models.Eventable, error) { + newEvent := models.NewEvent() + newEvent.SetAttendees(orig.GetAttendees()) + newEvent.SetBody(orig.GetBody()) + newEvent.SetBodyPreview(orig.GetBodyPreview()) + newEvent.SetCalendar(orig.GetCalendar()) + newEvent.SetCreatedDateTime(orig.GetCreatedDateTime()) + newEvent.SetEnd(orig.GetEnd()) + newEvent.SetHasAttachments(orig.GetHasAttachments()) + newEvent.SetHideAttendees(orig.GetHideAttendees()) + newEvent.SetImportance(orig.GetImportance()) + newEvent.SetIsAllDay(orig.GetIsAllDay()) + newEvent.SetIsOnlineMeeting(orig.GetIsOnlineMeeting()) + newEvent.SetLocation(orig.GetLocation()) + newEvent.SetLocations(orig.GetLocations()) + newEvent.SetSensitivity(orig.GetSensitivity()) + newEvent.SetReminderMinutesBeforeStart(orig.GetReminderMinutesBeforeStart()) + newEvent.SetStart(orig.GetStart()) + newEvent.SetSubject(orig.GetSubject()) + newEvent.SetType(orig.GetType()) + + // Sanitation + // isDraft and isOrganizer *bool ptr's have to be removed completely + // from JSON in order for POST method to succeed. 
+ // Current as of 2/2/2023 + + newEvent.SetIsOrganizer(nil) + newEvent.SetIsDraft(nil) + newEvent.SetAdditionalData(orig.GetAdditionalData()) + + attached := orig.GetAttachments() + attachments := make([]models.Attachmentable, len(attached)) + + for _, ax := range attached { + if *ax.GetOdataType() == itemAttachment { + newAttachment, err := ToItemAttachment(ax) + if err != nil { + return nil, err + } + + attachments = append(attachments, newAttachment) + + continue + } + + attachments = append(attachments, ax) + } + + newEvent.SetAttachments(attachments) + + return newEvent, nil +} diff --git a/src/internal/connector/support/status.go b/src/internal/connector/support/status.go index 3f2435263..dcf5f32c5 100644 --- a/src/internal/connector/support/status.go +++ b/src/internal/connector/support/status.go @@ -4,8 +4,8 @@ import ( "context" "fmt" + "github.com/dustin/go-humanize" multierror "github.com/hashicorp/go-multierror" - bytesize "github.com/inhies/go-bytesize" "github.com/alcionai/corso/src/pkg/logger" ) @@ -66,6 +66,7 @@ func CreateStatus( hasErrors := err != nil numErr := GetNumberOfErrors(err) + status := ConnectorOperationStatus{ lastOperation: op, ObjectCount: cm.Objects, @@ -142,7 +143,7 @@ func (cos *ConnectorOperationStatus) String() string { cos.lastOperation.String(), cos.Successful, cos.ObjectCount, - bytesize.New(float64(cos.bytes)), + humanize.Bytes(uint64(cos.bytes)), cos.FolderCount, ) diff --git a/src/internal/observe/observe.go b/src/internal/observe/observe.go index d8492109d..34da29331 100644 --- a/src/internal/observe/observe.go +++ b/src/internal/observe/observe.go @@ -177,7 +177,7 @@ func MessageWithCompletion( completionCh := make(chan struct{}, 1) if cfg.hidden() { - return completionCh, func() {} + return completionCh, func() { log.Info("done - " + clean) } } wg.Add(1) @@ -232,7 +232,7 @@ func ItemProgress( log.Debug(header) if cfg.hidden() || rc == nil || totalBytes == 0 { - return rc, func() {} + return rc, func() { log.Debug("done 
- " + header) } } wg.Add(1) @@ -286,7 +286,7 @@ func ProgressWithCount( } }(progressCh) - return progressCh, func() {} + return progressCh, func() { log.Info("done - " + lmsg) } } wg.Add(1) @@ -381,16 +381,24 @@ func CollectionProgress( if cfg.hidden() || len(user.String()) == 0 || len(dirName.String()) == 0 { ch := make(chan struct{}) + counted := 0 + go func(ci <-chan struct{}) { for { _, ok := <-ci if !ok { return } + counted++ + + // Log every 1000 items that are processed + if counted%1000 == 0 { + log.Infow("uploading", "count", counted) + } } }(ch) - return ch, func() {} + return ch, func() { log.Infow("done - "+message, "count", counted) } } wg.Add(1) @@ -432,6 +440,11 @@ func CollectionProgress( counted++ + // Log every 1000 items that are processed + if counted%1000 == 0 { + log.Infow("uploading", "count", counted) + } + bar.Increment() } } diff --git a/src/internal/operations/backup.go b/src/internal/operations/backup.go index b2a0c552c..ec47bae1c 100644 --- a/src/internal/operations/backup.go +++ b/src/internal/operations/backup.go @@ -2,6 +2,8 @@ package operations import ( "context" + "fmt" + "runtime/debug" "time" "github.com/alcionai/clues" @@ -40,6 +42,9 @@ type BackupOperation struct { Version string `json:"version"` account account.Account + + // when true, this allows for incremental backups instead of full data pulls + incremental bool } // BackupResults aggregate the details of the result of the operation. @@ -66,6 +71,7 @@ func NewBackupOperation( Selectors: selector, Version: "v0", account: acct, + incremental: useIncrementalBackup(selector, opts), } if err := op.validate(); err != nil { return BackupOperation{}, err @@ -103,29 +109,52 @@ type detailsWriter interface { // Run begins a synchronous backup operation. 
func (op *BackupOperation) Run(ctx context.Context) (err error) { + defer func() { + if r := recover(); r != nil { + var rerr error + if re, ok := r.(error); ok { + rerr = re + } else if re, ok := r.(string); ok { + rerr = clues.New(re) + } else { + rerr = clues.New(fmt.Sprintf("%v", r)) + } + + err = clues.Wrap(rerr, "panic recovery"). + WithClues(ctx). + With("stacktrace", string(debug.Stack())) + logger.Ctx(ctx). + With("err", err). + Errorw("backup panic", clues.InErr(err).Slice()...) + } + }() + ctx, end := D.Span(ctx, "operations:backup:run") - defer end() + defer func() { + end() + // wait for the progress display to clean up + observe.Complete() + }() + + // ----- + // Setup + // ----- var ( - opStats backupStats - backupDetails *details.Builder - toMerge map[string]path.Path - tenantID = op.account.ID() - startTime = time.Now() - detailsStore = streamstore.New(op.kopia, tenantID, op.Selectors.PathService()) - reasons = selectorToReasons(op.Selectors) - uib = useIncrementalBackup(op.Selectors, op.Options) + opStats backupStats + startTime = time.Now() + detailsStore = streamstore.New(op.kopia, op.account.ID(), op.Selectors.PathService()) ) op.Results.BackupID = model.StableID(uuid.NewString()) ctx = clues.AddAll( ctx, - "tenant_id", tenantID, // TODO: pii + "tenant_id", op.account.ID(), // TODO: pii "resource_owner", op.ResourceOwner, // TODO: pii "backup_id", op.Results.BackupID, "service", op.Selectors.Service, - "incremental", uib) + "incremental", op.incremental) op.bus.Event( ctx, @@ -134,101 +163,128 @@ func (op *BackupOperation) Run(ctx context.Context) (err error) { events.StartTime: startTime, events.Service: op.Selectors.Service.String(), events.BackupID: op.Results.BackupID, - }, - ) + }) - // persist operation results to the model store on exit - defer func() { - // wait for the progress display to clean up - observe.Complete() + // ----- + // Execution + // ----- - err = op.persistResults(startTime, &opStats) - if err != nil { - return - } + 
deets, err := op.do( + ctx, + &opStats, + detailsStore, + op.Results.BackupID) + if err != nil { + // No return here! We continue down to persistResults, even in case of failure. + logger.Ctx(ctx). + With("err", err). + Errorw("doing backup", clues.InErr(err).Slice()...) + op.Errors.Fail(errors.Wrap(err, "doing backup")) + opStats.readErr = op.Errors.Err() + } - err = op.createBackupModels( - ctx, - detailsStore, - opStats.k.SnapshotID, - backupDetails.Details()) - if err != nil { - opStats.writeErr = err - } - }() + // ----- + // Persistence + // ----- + + err = op.persistResults(startTime, &opStats) + if err != nil { + op.Errors.Fail(errors.Wrap(err, "persisting backup results")) + opStats.writeErr = op.Errors.Err() + + return op.Errors.Err() + } + + err = op.createBackupModels( + ctx, + detailsStore, + opStats.k.SnapshotID, + op.Results.BackupID, + deets.Details()) + if err != nil { + op.Errors.Fail(errors.Wrap(err, "persisting backup")) + opStats.writeErr = op.Errors.Err() + + return op.Errors.Err() + } + + logger.Ctx(ctx).Infow("completed backup", "results", op.Results) + + return nil +} + +// do is purely the action of running a backup. All pre/post behavior +// is found in Run(). +func (op *BackupOperation) do( + ctx context.Context, + opStats *backupStats, + detailsStore detailsReader, + backupID model.StableID, +) (*details.Builder, error) { + reasons := selectorToReasons(op.Selectors) + + // should always be 1, since backups are 1:1 with resourceOwners. 
+ opStats.resourceCount = 1 mans, mdColls, canUseMetaData, err := produceManifestsAndMetadata( ctx, op.kopia, op.store, reasons, - tenantID, - uib, - ) + op.account.ID(), + op.incremental, + op.Errors) if err != nil { - opStats.readErr = errors.Wrap(err, "connecting to M365") - return opStats.readErr + return nil, errors.Wrap(err, "producing manifests and metadata") } gc, err := connectToM365(ctx, op.Selectors, op.account) if err != nil { - opStats.readErr = errors.Wrap(err, "connecting to M365") - return opStats.readErr + return nil, errors.Wrap(err, "connectng to m365") } cs, err := produceBackupDataCollections(ctx, gc, op.Selectors, mdColls, op.Options) if err != nil { - opStats.readErr = errors.Wrap(err, "retrieving data to backup") - return opStats.readErr + return nil, errors.Wrap(err, "producing backup data collections") } - ctx = clues.Add(ctx, "collections", len(cs)) + ctx = clues.Add(ctx, "coll_count", len(cs)) - opStats.k, backupDetails, toMerge, err = consumeBackupDataCollections( + writeStats, deets, toMerge, err := consumeBackupDataCollections( ctx, op.kopia, - tenantID, + op.account.ID(), reasons, mans, cs, - op.Results.BackupID, - uib && canUseMetaData) + backupID, + op.incremental && canUseMetaData) if err != nil { - opStats.writeErr = errors.Wrap(err, "backing up service data") - return opStats.writeErr + return nil, errors.Wrap(err, "persisting collection backups") } - logger.Ctx(ctx).Debugf( - "Backed up %d directories and %d files", - opStats.k.TotalDirectoryCount, opStats.k.TotalFileCount, - ) + opStats.k = writeStats - if err = mergeDetails( + err = mergeDetails( ctx, op.store, detailsStore, mans, toMerge, - backupDetails, - ); err != nil { - opStats.writeErr = errors.Wrap(err, "merging backup details") - return opStats.writeErr + deets) + if err != nil { + return nil, errors.Wrap(err, "merging details") } opStats.gc = gc.AwaitStatus() - + // TODO(keepers): remove when fault.Errors handles all iterable error aggregation. 
if opStats.gc.ErrorCount > 0 { - merr := multierror.Append(opStats.readErr, errors.Wrap(opStats.gc.Err, "retrieving data")) - opStats.readErr = merr.ErrorOrNil() - - // Need to exit before we set started to true else we'll report no errors. - return opStats.readErr + return nil, opStats.gc.Err } - // should always be 1, since backups are 1:1 with resourceOwners. - opStats.resourceCount = 1 + logger.Ctx(ctx).Debug(gc.PrintableStatus()) - return err + return deets, nil } // checker to see if conditions are correct for incremental backup behavior such as @@ -307,7 +363,9 @@ func selectorToReasons(sel selectors.Selector) []kopia.Reason { return reasons } -func builderFromReason(tenant string, r kopia.Reason) (*path.Builder, error) { +func builderFromReason(ctx context.Context, tenant string, r kopia.Reason) (*path.Builder, error) { + ctx = clues.Add(ctx, "category", r.Category.String()) + // This is hacky, but we want the path package to format the path the right // way (e.x. proper order for service, category, etc), but we don't care about // the folders after the prefix. 
@@ -319,12 +377,7 @@ func builderFromReason(tenant string, r kopia.Reason) (*path.Builder, error) { false, ) if err != nil { - return nil, errors.Wrapf( - err, - "building path for service %s category %s", - r.Service.String(), - r.Category.String(), - ) + return nil, clues.Wrap(err, "building path").WithClues(ctx) } return p.ToBuilder().Dir(), nil @@ -367,7 +420,7 @@ func consumeBackupDataCollections( categories := map[string]struct{}{} for _, reason := range m.Reasons { - pb, err := builderFromReason(tenantID, reason) + pb, err := builderFromReason(ctx, tenantID, reason) if err != nil { return nil, nil, nil, errors.Wrap(err, "getting subtree paths for bases") } @@ -394,13 +447,9 @@ func consumeBackupDataCollections( logger.Ctx(ctx).Infow( "using base for backup", - "snapshot_id", - m.ID, - "services", - svcs, - "categories", - cats, - ) + "snapshot_id", m.ID, + "services", svcs, + "categories", cats) } kopiaStats, deets, itemsSourcedFromBase, err := bu.BackupCollections( @@ -409,24 +458,22 @@ func consumeBackupDataCollections( cs, nil, tags, - isIncremental, - ) + isIncremental) + if err != nil { + if kopiaStats == nil { + return nil, nil, nil, err + } + + return nil, nil, nil, errors.Wrapf( + err, + "kopia snapshot failed with %v catastrophic errors and %v ignored errors", + kopiaStats.ErrorCount, kopiaStats.IgnoredErrorCount) + } if kopiaStats.ErrorCount > 0 || kopiaStats.IgnoredErrorCount > 0 { - if err != nil { - err = errors.Wrapf( - err, - "kopia snapshot failed with %v catastrophic errors and %v ignored errors", - kopiaStats.ErrorCount, - kopiaStats.IgnoredErrorCount, - ) - } else { - err = errors.Errorf( - "kopia snapshot failed with %v catastrophic errors and %v ignored errors", - kopiaStats.ErrorCount, - kopiaStats.IgnoredErrorCount, - ) - } + err = errors.Errorf( + "kopia snapshot failed with %v catastrophic errors and %v ignored errors", + kopiaStats.ErrorCount, kopiaStats.IgnoredErrorCount) } return kopiaStats, deets, itemsSourcedFromBase, err @@ 
-461,6 +508,8 @@ func mergeDetails( var addedEntries int for _, man := range mans { + mctx := clues.Add(ctx, "manifest_id", man.ID) + // For now skip snapshots that aren't complete. We will need to revisit this // when we tackle restartability. if len(man.IncompleteReason) > 0 { @@ -469,28 +518,26 @@ func mergeDetails( bID, ok := man.GetTag(kopia.TagBackupID) if !ok { - return errors.Errorf("no backup ID in snapshot manifest with ID %s", man.ID) + return clues.New("no backup ID in snapshot manifest").WithClues(mctx) } + mctx = clues.Add(mctx, "manifest_backup_id", bID) + _, baseDeets, err := getBackupAndDetailsFromID( ctx, model.StableID(bID), ms, - detailsStore, - ) + detailsStore) if err != nil { - return errors.Wrapf(err, "backup fetching base details for backup %s", bID) + return clues.New("fetching base details for backup").WithClues(mctx) } for _, entry := range baseDeets.Items() { rr, err := path.FromDataLayerPath(entry.RepoRef, true) if err != nil { - return errors.Wrapf( - err, - "parsing base item info path %s in backup %s", - entry.RepoRef, - bID, - ) + return clues.New("parsing base item info path"). + WithClues(mctx). + With("repo_ref", entry.RepoRef) // todo: pii } // Although this base has an entry it may not be the most recent. Check @@ -513,11 +560,7 @@ func mergeDetails( // Fixup paths in the item. 
item := entry.ItemInfo if err := details.UpdateItem(&item, newPath); err != nil { - return errors.Wrapf( - err, - "updating item info for entry from backup %s", - bID, - ) + return clues.New("updating item details").WithClues(mctx) } // TODO(ashmrtn): This may need updated if we start using this merge @@ -529,8 +572,7 @@ func mergeDetails( newPath.ShortRef(), newPath.ToBuilder().Dir().ShortRef(), itemUpdated, - item, - ) + item) folders := details.FolderEntriesForPath(newPath.ToBuilder().Dir()) deets.AddFoldersForItem(folders, item, itemUpdated) @@ -542,11 +584,9 @@ func mergeDetails( } if addedEntries != len(shortRefsFromPrevBackup) { - return errors.Errorf( - "incomplete migration of backup details: found %v of %v expected items", - addedEntries, - len(shortRefsFromPrevBackup), - ) + return clues.New("incomplete migration of backup details"). + WithClues(ctx). + WithAll("item_count", addedEntries, "expected_item_count", len(shortRefsFromPrevBackup)) } return nil @@ -568,21 +608,28 @@ func (op *BackupOperation) persistResults( if opStats.readErr != nil || opStats.writeErr != nil { op.Status = Failed + // TODO(keepers): replace with fault.Errors handling. 
return multierror.Append( errors.New("errors prevented the operation from processing"), opStats.readErr, opStats.writeErr) } - if opStats.readErr == nil && opStats.writeErr == nil && opStats.gc.Successful == 0 { + op.Results.BytesRead = opStats.k.TotalHashedBytes + op.Results.BytesUploaded = opStats.k.TotalUploadedBytes + op.Results.ItemsWritten = opStats.k.TotalFileCount + op.Results.ResourceOwners = opStats.resourceCount + + if opStats.gc == nil { + op.Status = Failed + return errors.New("backup population never completed") + } + + if opStats.gc.Successful == 0 { op.Status = NoData } - op.Results.BytesRead = opStats.k.TotalHashedBytes - op.Results.BytesUploaded = opStats.k.TotalUploadedBytes op.Results.ItemsRead = opStats.gc.Successful - op.Results.ItemsWritten = opStats.k.TotalFileCount - op.Results.ResourceOwners = opStats.resourceCount return nil } @@ -592,29 +639,32 @@ func (op *BackupOperation) createBackupModels( ctx context.Context, detailsStore detailsWriter, snapID string, + backupID model.StableID, backupDetails *details.Details, ) error { + ctx = clues.Add(ctx, "snapshot_id", snapID) + if backupDetails == nil { - return errors.New("no backup details to record") + return clues.New("no backup details to record").WithClues(ctx) } detailsID, err := detailsStore.WriteBackupDetails(ctx, backupDetails) if err != nil { - return errors.Wrap(err, "creating backupdetails model") + return clues.Wrap(err, "creating backupDetails model").WithClues(ctx) } + ctx = clues.Add(ctx, "details_id", detailsID) b := backup.New( snapID, detailsID, op.Status.String(), - op.Results.BackupID, + backupID, op.Selectors, op.Results.ReadWrites, op.Results.StartAndEndTime, op.Errors, ) - err = op.store.Put(ctx, model.BackupSchema, b) - if err != nil { - return errors.Wrap(err, "creating backup model") + if err = op.store.Put(ctx, model.BackupSchema, b); err != nil { + return clues.Wrap(err, "creating backup model").WithClues(ctx) } dur := 
op.Results.CompletedAt.Sub(op.Results.StartedAt) diff --git a/src/internal/operations/backup_integration_test.go b/src/internal/operations/backup_integration_test.go index 21d4009e0..b3ea617d9 100644 --- a/src/internal/operations/backup_integration_test.go +++ b/src/internal/operations/backup_integration_test.go @@ -339,7 +339,15 @@ func generateContainerOfItems( dest, collections) - deets, err := gc.RestoreDataCollections(ctx, acct, sel, dest, dataColls) + deets, err := gc.RestoreDataCollections( + ctx, + backup.Version, + acct, + sel, + dest, + control.Options{RestorePermissions: true}, + dataColls, + ) require.NoError(t, err) return deets @@ -1073,7 +1081,7 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_oneDrive() { sel.Include(sel.AllData()) - bo, _, _, _, closer := prepNewTestBackupOp(t, ctx, mb, sel.Selector, control.Toggles{}) + bo, _, _, _, closer := prepNewTestBackupOp(t, ctx, mb, sel.Selector, control.Toggles{EnablePermissionsBackup: true}) defer closer() runAndCheckBackup(t, ctx, &bo, mb) diff --git a/src/internal/operations/backup_test.go b/src/internal/operations/backup_test.go index f867e2a11..149448e66 100644 --- a/src/internal/operations/backup_test.go +++ b/src/internal/operations/backup_test.go @@ -432,258 +432,6 @@ func (suite *BackupOpSuite) TestBackupOperation_PersistResults() { } } -func (suite *BackupOpSuite) TestBackupOperation_VerifyDistinctBases() { - const user = "a-user" - - table := []struct { - name string - input []*kopia.ManifestEntry - errCheck assert.ErrorAssertionFunc - }{ - { - name: "SingleManifestMultipleReasons", - input: []*kopia.ManifestEntry{ - { - Manifest: &snapshot.Manifest{ - ID: "id1", - }, - Reasons: []kopia.Reason{ - { - ResourceOwner: user, - Service: path.ExchangeService, - Category: path.EmailCategory, - }, - { - ResourceOwner: user, - Service: path.ExchangeService, - Category: path.EventsCategory, - }, - }, - }, - }, - errCheck: assert.NoError, - }, - { - name: "MultipleManifestsDistinctReason", - 
input: []*kopia.ManifestEntry{ - { - Manifest: &snapshot.Manifest{ - ID: "id1", - }, - Reasons: []kopia.Reason{ - { - ResourceOwner: user, - Service: path.ExchangeService, - Category: path.EmailCategory, - }, - }, - }, - { - Manifest: &snapshot.Manifest{ - ID: "id2", - }, - Reasons: []kopia.Reason{ - { - ResourceOwner: user, - Service: path.ExchangeService, - Category: path.EventsCategory, - }, - }, - }, - }, - errCheck: assert.NoError, - }, - { - name: "MultipleManifestsSameReason", - input: []*kopia.ManifestEntry{ - { - Manifest: &snapshot.Manifest{ - ID: "id1", - }, - Reasons: []kopia.Reason{ - { - ResourceOwner: user, - Service: path.ExchangeService, - Category: path.EmailCategory, - }, - }, - }, - { - Manifest: &snapshot.Manifest{ - ID: "id2", - }, - Reasons: []kopia.Reason{ - { - ResourceOwner: user, - Service: path.ExchangeService, - Category: path.EmailCategory, - }, - }, - }, - }, - errCheck: assert.Error, - }, - { - name: "MultipleManifestsSameReasonOneIncomplete", - input: []*kopia.ManifestEntry{ - { - Manifest: &snapshot.Manifest{ - ID: "id1", - }, - Reasons: []kopia.Reason{ - { - ResourceOwner: user, - Service: path.ExchangeService, - Category: path.EmailCategory, - }, - }, - }, - { - Manifest: &snapshot.Manifest{ - ID: "id2", - IncompleteReason: "checkpoint", - }, - Reasons: []kopia.Reason{ - { - ResourceOwner: user, - Service: path.ExchangeService, - Category: path.EmailCategory, - }, - }, - }, - }, - errCheck: assert.NoError, - }, - } - - for _, test := range table { - suite.T().Run(test.name, func(t *testing.T) { - test.errCheck(t, verifyDistinctBases(test.input)) - }) - } -} - -func (suite *BackupOpSuite) TestBackupOperation_CollectMetadata() { - var ( - tenant = "a-tenant" - resourceOwner = "a-user" - fileNames = []string{ - "delta", - "paths", - } - - emailDeltaPath = makeMetadataPath( - suite.T(), - tenant, - path.ExchangeService, - resourceOwner, - path.EmailCategory, - fileNames[0], - ) - emailPathsPath = makeMetadataPath( - suite.T(), - 
tenant, - path.ExchangeService, - resourceOwner, - path.EmailCategory, - fileNames[1], - ) - contactsDeltaPath = makeMetadataPath( - suite.T(), - tenant, - path.ExchangeService, - resourceOwner, - path.ContactsCategory, - fileNames[0], - ) - contactsPathsPath = makeMetadataPath( - suite.T(), - tenant, - path.ExchangeService, - resourceOwner, - path.ContactsCategory, - fileNames[1], - ) - ) - - table := []struct { - name string - inputMan *kopia.ManifestEntry - inputFiles []string - expected []path.Path - }{ - { - name: "SingleReasonSingleFile", - inputMan: &kopia.ManifestEntry{ - Manifest: &snapshot.Manifest{}, - Reasons: []kopia.Reason{ - { - ResourceOwner: resourceOwner, - Service: path.ExchangeService, - Category: path.EmailCategory, - }, - }, - }, - inputFiles: []string{fileNames[0]}, - expected: []path.Path{emailDeltaPath}, - }, - { - name: "SingleReasonMultipleFiles", - inputMan: &kopia.ManifestEntry{ - Manifest: &snapshot.Manifest{}, - Reasons: []kopia.Reason{ - { - ResourceOwner: resourceOwner, - Service: path.ExchangeService, - Category: path.EmailCategory, - }, - }, - }, - inputFiles: fileNames, - expected: []path.Path{emailDeltaPath, emailPathsPath}, - }, - { - name: "MultipleReasonsMultipleFiles", - inputMan: &kopia.ManifestEntry{ - Manifest: &snapshot.Manifest{}, - Reasons: []kopia.Reason{ - { - ResourceOwner: resourceOwner, - Service: path.ExchangeService, - Category: path.EmailCategory, - }, - { - ResourceOwner: resourceOwner, - Service: path.ExchangeService, - Category: path.ContactsCategory, - }, - }, - }, - inputFiles: fileNames, - expected: []path.Path{ - emailDeltaPath, - emailPathsPath, - contactsDeltaPath, - contactsPathsPath, - }, - }, - } - - for _, test := range table { - suite.T().Run(test.name, func(t *testing.T) { - ctx, flush := tester.NewContext() - defer flush() - - mr := &mockRestorer{} - - _, err := collectMetadata(ctx, mr, test.inputMan, test.inputFiles, tenant) - assert.NoError(t, err) - - checkPaths(t, test.expected, mr.gotPaths) 
- }) - } -} - func (suite *BackupOpSuite) TestBackupOperation_ConsumeBackupDataCollections_Paths() { var ( tenant = "a-tenant" diff --git a/src/internal/operations/manifests.go b/src/internal/operations/manifests.go index fe0e4d09d..c0ba35e43 100644 --- a/src/internal/operations/manifests.go +++ b/src/internal/operations/manifests.go @@ -3,7 +3,7 @@ package operations import ( "context" - multierror "github.com/hashicorp/go-multierror" + "github.com/alcionai/clues" "github.com/kopia/kopia/repo/manifest" "github.com/pkg/errors" @@ -12,6 +12,7 @@ import ( "github.com/alcionai/corso/src/internal/kopia" "github.com/alcionai/corso/src/internal/model" "github.com/alcionai/corso/src/pkg/backup" + "github.com/alcionai/corso/src/pkg/fault" "github.com/alcionai/corso/src/pkg/logger" "github.com/alcionai/corso/src/pkg/path" ) @@ -44,6 +45,7 @@ func produceManifestsAndMetadata( reasons []kopia.Reason, tenantID string, getMetadata bool, + errs fault.Adder, ) ([]*kopia.ManifestEntry, []data.Collection, bool, error) { var ( metadataFiles = graph.AllMetadataFileNames() @@ -68,12 +70,10 @@ func produceManifestsAndMetadata( // // TODO(ashmrtn): This may need updating if we start sourcing item backup // details from previous snapshots when using kopia-assisted incrementals. - if err := verifyDistinctBases(ms); err != nil { - logger.Ctx(ctx).Warnw( + if err := verifyDistinctBases(ctx, ms, errs); err != nil { + logger.Ctx(ctx).With("error", err).Infow( "base snapshot collision, falling back to full backup", - "error", - err, - ) + clues.In(ctx).Slice()...) 
return ms, nil, false, nil } @@ -83,40 +83,41 @@ func produceManifestsAndMetadata( continue } + mctx := clues.Add(ctx, "manifest_id", man.ID) + bID, ok := man.GetTag(kopia.TagBackupID) if !ok { - return nil, nil, false, errors.New("snapshot manifest missing backup ID") + err = clues.New("snapshot manifest missing backup ID").WithClues(ctx) + return nil, nil, false, err } - dID, _, err := gdi.GetDetailsIDFromBackupID(ctx, model.StableID(bID)) + mctx = clues.Add(mctx, "manifest_backup_id", man.ID) + + dID, _, err := gdi.GetDetailsIDFromBackupID(mctx, model.StableID(bID)) if err != nil { // if no backup exists for any of the complete manifests, we want // to fall back to a complete backup. if errors.Is(err, kopia.ErrNotFound) { - logger.Ctx(ctx).Infow( - "backup missing, falling back to full backup", - "backup_id", bID) - + logger.Ctx(ctx).Infow("backup missing, falling back to full backup", clues.In(mctx).Slice()...) return ms, nil, false, nil } return nil, nil, false, errors.Wrap(err, "retrieving prior backup data") } + mctx = clues.Add(mctx, "manifest_details_id", dID) + // if no detailsID exists for any of the complete manifests, we want // to fall back to a complete backup. This is a temporary prevention // mechanism to keep backups from falling into a perpetually bad state. // This makes an assumption that the ID points to a populated set of // details; we aren't doing the work to look them up. if len(dID) == 0 { - logger.Ctx(ctx).Infow( - "backup missing details ID, falling back to full backup", - "backup_id", bID) - + logger.Ctx(ctx).Infow("backup missing details ID, falling back to full backup", clues.In(mctx).Slice()...) return ms, nil, false, nil } - colls, err := collectMetadata(ctx, mr, man, metadataFiles, tenantID) + colls, err := collectMetadata(mctx, mr, man, metadataFiles, tenantID) if err != nil && !errors.Is(err, kopia.ErrNotFound) { // prior metadata isn't guaranteed to exist. 
// if it doesn't, we'll just have to do a @@ -134,9 +135,9 @@ func produceManifestsAndMetadata( // of manifests, that each manifest's Reason (owner, service, category) is only // included once. If a reason is duplicated by any two manifests, an error is // returned. -func verifyDistinctBases(mans []*kopia.ManifestEntry) error { +func verifyDistinctBases(ctx context.Context, mans []*kopia.ManifestEntry, errs fault.Adder) error { var ( - errs *multierror.Error + failed bool reasons = map[string]manifest.ID{} ) @@ -155,10 +156,11 @@ func verifyDistinctBases(mans []*kopia.ManifestEntry) error { reasonKey := reason.ResourceOwner + reason.Service.String() + reason.Category.String() if b, ok := reasons[reasonKey]; ok { - errs = multierror.Append(errs, errors.Errorf( - "multiple base snapshots source data for %s %s. IDs: %s, %s", - reason.Service, reason.Category, b, man.ID, - )) + failed = true + + errs.Add(clues.New("manifests have overlapping reasons"). + WithClues(ctx). + With("other_manifest_id", b)) continue } @@ -167,7 +169,11 @@ func verifyDistinctBases(mans []*kopia.ManifestEntry) error { } } - return errs.ErrorOrNil() + if failed { + return clues.New("multiple base snapshots qualify").WithClues(ctx) + } + + return nil } // collectMetadata retrieves all metadata files associated with the manifest. @@ -191,7 +197,9 @@ func collectMetadata( reason.Category, true) if err != nil { - return nil, errors.Wrapf(err, "building metadata path") + return nil, clues. + Wrap(err, "building metadata path"). 
+ WithAll("metadata_file", fn, "category", reason.Category) } paths = append(paths, p) diff --git a/src/internal/operations/manifests_test.go b/src/internal/operations/manifests_test.go index 7cfc9ac9a..93cdb982f 100644 --- a/src/internal/operations/manifests_test.go +++ b/src/internal/operations/manifests_test.go @@ -14,6 +14,7 @@ import ( "github.com/alcionai/corso/src/internal/model" "github.com/alcionai/corso/src/internal/tester" "github.com/alcionai/corso/src/pkg/backup" + "github.com/alcionai/corso/src/pkg/fault/mock" "github.com/alcionai/corso/src/pkg/path" ) @@ -400,7 +401,10 @@ func (suite *OperationsManifestsUnitSuite) TestVerifyDistinctBases() { } for _, test := range table { suite.T().Run(test.name, func(t *testing.T) { - err := verifyDistinctBases(test.mans) + ctx, flush := tester.NewContext() + defer flush() + + err := verifyDistinctBases(ctx, test.mans, mock.NewAdder()) test.expect(t, err) }) } @@ -646,6 +650,8 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() { ctx, flush := tester.NewContext() defer flush() + ma := mock.NewAdder() + mans, dcs, b, err := produceManifestsAndMetadata( ctx, &test.mr, @@ -653,7 +659,7 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() { test.reasons, tid, test.getMeta, - ) + ma) test.assertErr(t, err) test.assertB(t, b) @@ -683,3 +689,270 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() { }) } } + +// --------------------------------------------------------------------------- +// older tests +// --------------------------------------------------------------------------- + +type BackupManifestSuite struct { + suite.Suite +} + +func TestBackupManifestSuite(t *testing.T) { + suite.Run(t, new(BackupOpSuite)) +} + +func (suite *BackupManifestSuite) TestBackupOperation_VerifyDistinctBases() { + const user = "a-user" + + table := []struct { + name string + input []*kopia.ManifestEntry + errCheck assert.ErrorAssertionFunc + }{ + { + name: 
"SingleManifestMultipleReasons", + input: []*kopia.ManifestEntry{ + { + Manifest: &snapshot.Manifest{ + ID: "id1", + }, + Reasons: []kopia.Reason{ + { + ResourceOwner: user, + Service: path.ExchangeService, + Category: path.EmailCategory, + }, + { + ResourceOwner: user, + Service: path.ExchangeService, + Category: path.EventsCategory, + }, + }, + }, + }, + errCheck: assert.NoError, + }, + { + name: "MultipleManifestsDistinctReason", + input: []*kopia.ManifestEntry{ + { + Manifest: &snapshot.Manifest{ + ID: "id1", + }, + Reasons: []kopia.Reason{ + { + ResourceOwner: user, + Service: path.ExchangeService, + Category: path.EmailCategory, + }, + }, + }, + { + Manifest: &snapshot.Manifest{ + ID: "id2", + }, + Reasons: []kopia.Reason{ + { + ResourceOwner: user, + Service: path.ExchangeService, + Category: path.EventsCategory, + }, + }, + }, + }, + errCheck: assert.NoError, + }, + { + name: "MultipleManifestsSameReason", + input: []*kopia.ManifestEntry{ + { + Manifest: &snapshot.Manifest{ + ID: "id1", + }, + Reasons: []kopia.Reason{ + { + ResourceOwner: user, + Service: path.ExchangeService, + Category: path.EmailCategory, + }, + }, + }, + { + Manifest: &snapshot.Manifest{ + ID: "id2", + }, + Reasons: []kopia.Reason{ + { + ResourceOwner: user, + Service: path.ExchangeService, + Category: path.EmailCategory, + }, + }, + }, + }, + errCheck: assert.Error, + }, + { + name: "MultipleManifestsSameReasonOneIncomplete", + input: []*kopia.ManifestEntry{ + { + Manifest: &snapshot.Manifest{ + ID: "id1", + }, + Reasons: []kopia.Reason{ + { + ResourceOwner: user, + Service: path.ExchangeService, + Category: path.EmailCategory, + }, + }, + }, + { + Manifest: &snapshot.Manifest{ + ID: "id2", + IncompleteReason: "checkpoint", + }, + Reasons: []kopia.Reason{ + { + ResourceOwner: user, + Service: path.ExchangeService, + Category: path.EmailCategory, + }, + }, + }, + }, + errCheck: assert.NoError, + }, + } + + for _, test := range table { + suite.T().Run(test.name, func(t *testing.T) { + 
ctx, flush := tester.NewContext() + defer flush() + + test.errCheck(t, verifyDistinctBases(ctx, test.input, mock.NewAdder())) + }) + } +} + +func (suite *BackupManifestSuite) TestBackupOperation_CollectMetadata() { + var ( + tenant = "a-tenant" + resourceOwner = "a-user" + fileNames = []string{ + "delta", + "paths", + } + + emailDeltaPath = makeMetadataPath( + suite.T(), + tenant, + path.ExchangeService, + resourceOwner, + path.EmailCategory, + fileNames[0], + ) + emailPathsPath = makeMetadataPath( + suite.T(), + tenant, + path.ExchangeService, + resourceOwner, + path.EmailCategory, + fileNames[1], + ) + contactsDeltaPath = makeMetadataPath( + suite.T(), + tenant, + path.ExchangeService, + resourceOwner, + path.ContactsCategory, + fileNames[0], + ) + contactsPathsPath = makeMetadataPath( + suite.T(), + tenant, + path.ExchangeService, + resourceOwner, + path.ContactsCategory, + fileNames[1], + ) + ) + + table := []struct { + name string + inputMan *kopia.ManifestEntry + inputFiles []string + expected []path.Path + }{ + { + name: "SingleReasonSingleFile", + inputMan: &kopia.ManifestEntry{ + Manifest: &snapshot.Manifest{}, + Reasons: []kopia.Reason{ + { + ResourceOwner: resourceOwner, + Service: path.ExchangeService, + Category: path.EmailCategory, + }, + }, + }, + inputFiles: []string{fileNames[0]}, + expected: []path.Path{emailDeltaPath}, + }, + { + name: "SingleReasonMultipleFiles", + inputMan: &kopia.ManifestEntry{ + Manifest: &snapshot.Manifest{}, + Reasons: []kopia.Reason{ + { + ResourceOwner: resourceOwner, + Service: path.ExchangeService, + Category: path.EmailCategory, + }, + }, + }, + inputFiles: fileNames, + expected: []path.Path{emailDeltaPath, emailPathsPath}, + }, + { + name: "MultipleReasonsMultipleFiles", + inputMan: &kopia.ManifestEntry{ + Manifest: &snapshot.Manifest{}, + Reasons: []kopia.Reason{ + { + ResourceOwner: resourceOwner, + Service: path.ExchangeService, + Category: path.EmailCategory, + }, + { + ResourceOwner: resourceOwner, + Service: 
path.ExchangeService, + Category: path.ContactsCategory, + }, + }, + }, + inputFiles: fileNames, + expected: []path.Path{ + emailDeltaPath, + emailPathsPath, + contactsDeltaPath, + contactsPathsPath, + }, + }, + } + + for _, test := range table { + suite.T().Run(test.name, func(t *testing.T) { + ctx, flush := tester.NewContext() + defer flush() + + mr := &mockRestorer{} + + _, err := collectMetadata(ctx, mr, test.inputMan, test.inputFiles, tenant) + assert.NoError(t, err) + + checkPaths(t, test.expected, mr.gotPaths) + }) + } +} diff --git a/src/internal/operations/restore.go b/src/internal/operations/restore.go index db5ca9a93..dafb8670e 100644 --- a/src/internal/operations/restore.go +++ b/src/internal/operations/restore.go @@ -3,6 +3,8 @@ package operations import ( "context" "fmt" + "runtime/debug" + "sort" "time" "github.com/alcionai/clues" @@ -106,35 +108,89 @@ type restorer interface { // Run begins a synchronous restore operation. func (op *RestoreOperation) Run(ctx context.Context) (restoreDetails *details.Details, err error) { - ctx, end := D.Span(ctx, "operations:restore:run") - defer end() + defer func() { + if r := recover(); r != nil { + var rerr error + if re, ok := r.(error); ok { + rerr = re + } else if re, ok := r.(string); ok { + rerr = clues.New(re) + } else { + rerr = clues.New(fmt.Sprintf("%v", r)) + } + + err = clues.Wrap(rerr, "panic recovery"). + WithClues(ctx). + With("stacktrace", string(debug.Stack())) + logger.Ctx(ctx). + With("err", err). + Errorw("backup panic", clues.InErr(err).Slice()...) 
+ } + }() var ( opStats = restoreStats{ bytesRead: &stats.ByteCounter{}, restoreID: uuid.NewString(), } - startTime = time.Now() + start = time.Now() + detailsStore = streamstore.New(op.kopia, op.account.ID(), op.Selectors.PathService()) ) + // ----- + // Setup + // ----- + + ctx, end := D.Span(ctx, "operations:restore:run") defer func() { + end() // wait for the progress display to clean up observe.Complete() - - err = op.persistResults(ctx, startTime, &opStats) - if err != nil { - return - } }() - detailsStore := streamstore.New(op.kopia, op.account.ID(), op.Selectors.PathService()) - ctx = clues.AddAll( ctx, "tenant_id", op.account.ID(), // TODO: pii "backup_id", op.BackupID, "service", op.Selectors.Service) + // ----- + // Execution + // ----- + + deets, err := op.do(ctx, &opStats, detailsStore, start) + if err != nil { + // No return here! We continue down to persistResults, even in case of failure. + logger.Ctx(ctx). + With("err", err). + Errorw("doing restore", clues.InErr(err).Slice()...) 
+ op.Errors.Fail(errors.Wrap(err, "doing restore")) + opStats.readErr = op.Errors.Err() + } + + // ----- + // Persistence + // ----- + + err = op.persistResults(ctx, start, &opStats) + if err != nil { + op.Errors.Fail(errors.Wrap(err, "persisting restore results")) + opStats.writeErr = op.Errors.Err() + + return nil, op.Errors.Err() + } + + logger.Ctx(ctx).Infow("completed restore", "results", op.Results) + + return deets, nil +} + +func (op *RestoreOperation) do( + ctx context.Context, + opStats *restoreStats, + detailsStore detailsReader, + start time.Time, +) (*details.Details, error) { bup, deets, err := getBackupAndDetailsFromID( ctx, op.BackupID, @@ -142,30 +198,28 @@ func (op *RestoreOperation) Run(ctx context.Context) (restoreDetails *details.De detailsStore, ) if err != nil { - opStats.readErr = errors.Wrap(err, "restore") - return nil, opStats.readErr + return nil, errors.Wrap(err, "getting backup and details") } - ctx = clues.Add(ctx, "resource_owner", bup.Selector.DiscreteOwner) + paths, err := formatDetailsForRestoration(ctx, op.Selectors, deets) + if err != nil { + return nil, errors.Wrap(err, "formatting paths from details") + } + + ctx = clues.AddAll( + ctx, + "resource_owner", bup.Selector.DiscreteOwner, + "details_paths", len(paths)) op.bus.Event( ctx, events.RestoreStart, map[string]any{ - events.StartTime: startTime, + events.StartTime: start, events.BackupID: op.BackupID, events.BackupCreateTime: bup.CreationTime, events.RestoreID: opStats.restoreID, - }, - ) - - paths, err := formatDetailsForRestoration(ctx, op.Selectors, deets) - if err != nil { - opStats.readErr = err - return nil, err - } - - ctx = clues.Add(ctx, "details_paths", len(paths)) + }) observe.Message(ctx, observe.Safe(fmt.Sprintf("Discovered %d items in backup %s to restore", len(paths), op.BackupID))) @@ -175,39 +229,45 @@ func (op *RestoreOperation) Run(ctx context.Context) (restoreDetails *details.De dcs, err := op.kopia.RestoreMultipleItems(ctx, bup.SnapshotID, paths, 
opStats.bytesRead) if err != nil { - opStats.readErr = errors.Wrap(err, "retrieving service data") - return nil, opStats.readErr + return nil, errors.Wrap(err, "retrieving collections from repository") } + kopiaComplete <- struct{}{} - ctx = clues.Add(ctx, "collections", len(dcs)) + ctx = clues.Add(ctx, "coll_count", len(dcs)) + // should always be 1, since backups are 1:1 with resourceOwners. + opStats.resourceCount = 1 opStats.cs = dcs - opStats.resourceCount = len(data.ResourceOwnerSet(dcs)) gc, err := connectToM365(ctx, op.Selectors, op.account) if err != nil { - opStats.readErr = errors.Wrap(err, "connecting to M365") - return nil, opStats.readErr + return nil, errors.Wrap(err, "connecting to M365") } restoreComplete, closer := observe.MessageWithCompletion(ctx, observe.Safe("Restoring data")) defer closer() defer close(restoreComplete) - restoreDetails, err = gc.RestoreDataCollections( + restoreDetails, err := gc.RestoreDataCollections( ctx, + bup.Version, op.account, op.Selectors, op.Destination, + op.Options, dcs) if err != nil { - opStats.writeErr = errors.Wrap(err, "restoring service data") - return nil, opStats.writeErr + return nil, errors.Wrap(err, "restoring collections") } + restoreComplete <- struct{}{} opStats.gc = gc.AwaitStatus() + // TODO(keepers): remove when fault.Errors handles all iterable error aggregation. 
+ if opStats.gc.ErrorCount > 0 { + return nil, opStats.gc.Err + } logger.Ctx(ctx).Debug(gc.PrintableStatus()) @@ -236,14 +296,20 @@ func (op *RestoreOperation) persistResults( opStats.writeErr) } - if opStats.readErr == nil && opStats.writeErr == nil && opStats.gc.Successful == 0 { + op.Results.BytesRead = opStats.bytesRead.NumBytes + op.Results.ItemsRead = len(opStats.cs) // TODO: file count, not collection count + op.Results.ResourceOwners = opStats.resourceCount + + if opStats.gc == nil { + op.Status = Failed + return errors.New("restoration never completed") + } + + if opStats.gc.Successful == 0 { op.Status = NoData } - op.Results.BytesRead = opStats.bytesRead.NumBytes - op.Results.ItemsRead = len(opStats.cs) // TODO: file count, not collection count op.Results.ItemsWritten = opStats.gc.Successful - op.Results.ResourceOwners = opStats.resourceCount dur := op.Results.CompletedAt.Sub(op.Results.StartedAt) @@ -300,6 +366,17 @@ func formatDetailsForRestoration( paths[i] = p } + // TODO(meain): Move this to a OneDrive-specific component, but as + // of now the paths can technically be from multiple services + + // This sort is done primarily to order `.meta` files after `.data` + // files. This is only a necessity for OneDrive as we are storing + // metadata for files/folders in separate meta files and we need the + // data to be restored before we can restore the metadata. 
+ sort.Slice(paths, func(i, j int) bool { + return paths[i].String() < paths[j].String() + }) + if errs != nil { return nil, errs } diff --git a/src/pkg/backup/backup.go b/src/pkg/backup/backup.go index d0d9ddffd..4422b3a47 100644 --- a/src/pkg/backup/backup.go +++ b/src/pkg/backup/backup.go @@ -14,6 +14,8 @@ import ( "github.com/alcionai/corso/src/pkg/selectors" ) +const Version = 1 + // Backup represents the result of a backup operation type Backup struct { model.BaseModel @@ -32,6 +34,9 @@ type Backup struct { // Selector used in this operation Selector selectors.Selector `json:"selectors"` + // Version represents the version of the backup format + Version int `json:"version"` + // Errors contains all errors aggregated during a backup operation. Errors fault.ErrorsData `json:"errors"` @@ -67,6 +72,7 @@ func New( Errors: errs.Data(), ReadWrites: rw, StartAndEndTime: se, + Version: Version, } } diff --git a/src/pkg/control/options.go b/src/pkg/control/options.go index 9cc5a334a..7348e8fa6 100644 --- a/src/pkg/control/options.go +++ b/src/pkg/control/options.go @@ -6,10 +6,11 @@ import ( // Options holds the optional configurations for a process type Options struct { - Collision CollisionPolicy `json:"-"` - DisableMetrics bool `json:"disableMetrics"` - FailFast bool `json:"failFast"` - ToggleFeatures Toggles `json:"ToggleFeatures"` + Collision CollisionPolicy `json:"-"` + DisableMetrics bool `json:"disableMetrics"` + FailFast bool `json:"failFast"` + RestorePermissions bool `json:"restorePermissions"` + ToggleFeatures Toggles `json:"ToggleFeatures"` } // Defaults provides an Options with the default values set. @@ -74,4 +75,9 @@ type Toggles struct { // DisableIncrementals prevents backups from using incremental lookups, // forcing a new, complete backup of all data regardless of prior state. DisableIncrementals bool `json:"exchangeIncrementals,omitempty"` + + // EnablePermissionsBackup is used to enable backups of item + // permissions. 
Permission metadata increases graph api call count, + // so disabling their retrieval when not needed is advised. + EnablePermissionsBackup bool `json:"enablePermissionsBackup,omitempty"` } diff --git a/src/pkg/fault/fault.go b/src/pkg/fault/fault.go index f4171ce77..b8f57108b 100644 --- a/src/pkg/fault/fault.go +++ b/src/pkg/fault/fault.go @@ -87,16 +87,19 @@ func (e *Errors) Fail(err error) *Errors { // setErr handles setting errors.err. Sync locking gets // handled upstream of this call. func (e *Errors) setErr(err error) *Errors { - if e.err != nil { - return e.addErr(err) + if e.err == nil { + e.err = err + return e } - e.err = err + e.errs = append(e.errs, err) return e } -// TODO: introduce Adder interface +type Adder interface { + Add(err error) *Errors +} // Add appends the error to the slice of recoverable and // iterated errors (ie: errors.errs). If failFast is true, diff --git a/src/pkg/fault/fault_test.go b/src/pkg/fault/fault_test.go index 3f5ad127c..8fb2981d7 100644 --- a/src/pkg/fault/fault_test.go +++ b/src/pkg/fault/fault_test.go @@ -73,6 +73,8 @@ func (suite *FaultErrorsUnitSuite) TestErr() { suite.T().Run(test.name, func(t *testing.T) { n := fault.New(test.failFast) require.NotNil(t, n) + require.NoError(t, n.Err()) + require.Empty(t, n.Errs()) e := n.Fail(test.fail) require.NotNil(t, e) @@ -90,6 +92,8 @@ func (suite *FaultErrorsUnitSuite) TestFail() { n := fault.New(false) require.NotNil(t, n) + require.NoError(t, n.Err()) + require.Empty(t, n.Errs()) n.Fail(assert.AnError) assert.Error(t, n.Err()) diff --git a/src/pkg/fault/mock/mock.go b/src/pkg/fault/mock/mock.go new file mode 100644 index 000000000..ba560996d --- /dev/null +++ b/src/pkg/fault/mock/mock.go @@ -0,0 +1,17 @@ +package mock + +import "github.com/alcionai/corso/src/pkg/fault" + +// Adder mocks an adder interface for testing. 
+type Adder struct { + Errs []error +} + +func NewAdder() *Adder { + return &Adder{Errs: []error{}} +} + +func (ma *Adder) Add(err error) *fault.Errors { + ma.Errs = append(ma.Errs, err) + return fault.New(false) +} diff --git a/src/pkg/logger/logger.go b/src/pkg/logger/logger.go index 0d5ffe250..a6b5aa4dd 100644 --- a/src/pkg/logger/logger.go +++ b/src/pkg/logger/logger.go @@ -11,6 +11,8 @@ import ( "github.com/spf13/pflag" "go.uber.org/zap" "go.uber.org/zap/zapcore" + + "github.com/alcionai/corso/src/cli/print" ) // Default location for writing logs, initialized in platform specific files @@ -28,6 +30,8 @@ var ( DebugAPI bool readableOutput bool + + LogFile string ) type logLevel int @@ -117,7 +121,9 @@ func PreloadLoggingFlags() (string, string) { } if logfile != "stdout" && logfile != "stderr" { + LogFile = logfile logdir := filepath.Dir(logfile) + print.Info(context.Background(), "Logging to file: "+logfile) err := os.MkdirAll(logdir, 0o755) if err != nil { @@ -265,7 +271,7 @@ func Ctx(ctx context.Context) *zap.SugaredLogger { return singleton(levelOf(llFlag), defaultLogLocation()) } - return l.(*zap.SugaredLogger).With(clues.Slice(ctx)...) + return l.(*zap.SugaredLogger).With(clues.In(ctx).Slice()...) 
} // transforms the llevel flag value to a logLevel enum diff --git a/src/pkg/repository/repository.go b/src/pkg/repository/repository.go index f8c8d3d49..087b193bc 100644 --- a/src/pkg/repository/repository.go +++ b/src/pkg/repository/repository.go @@ -2,11 +2,12 @@ package repository import ( "context" - "errors" "time" + "github.com/alcionai/clues" "github.com/google/uuid" "github.com/hashicorp/go-multierror" + "github.com/pkg/errors" "github.com/alcionai/corso/src/internal/events" "github.com/alcionai/corso/src/internal/kopia" @@ -88,6 +89,8 @@ func Initialize( s storage.Storage, opts control.Options, ) (Repository, error) { + ctx = clues.AddAll(ctx, "acct_provider", acct.Provider, "storage_provider", s.Provider) + kopiaRef := kopia.NewConn(s) if err := kopiaRef.Initialize(ctx); err != nil { // replace common internal errors so that sdk users can check results with errors.Is() @@ -95,7 +98,7 @@ func Initialize( return nil, ErrorRepoAlreadyExists } - return nil, err + return nil, errors.Wrap(err, "initializing kopia") } // kopiaRef comes with a count of 1 and NewWrapper/NewModelStore bumps it again so safe // to close here. 
@@ -103,17 +106,17 @@ func Initialize( w, err := kopia.NewWrapper(kopiaRef) if err != nil { - return nil, err + return nil, clues.Stack(err).WithClues(ctx) } ms, err := kopia.NewModelStore(kopiaRef) if err != nil { - return nil, err + return nil, clues.Stack(err).WithClues(ctx) } bus, err := events.NewBus(ctx, s, acct.ID(), opts) if err != nil { - return nil, err + return nil, errors.Wrap(err, "constructing event bus") } repoID := newRepoID(s) @@ -131,7 +134,7 @@ func Initialize( } if err := newRepoModel(ctx, ms, r.ID); err != nil { - return nil, errors.New("setting up repository") + return nil, clues.New("setting up repository").WithClues(ctx) } r.Bus.Event(ctx, events.RepoInit, nil) @@ -150,6 +153,8 @@ func Connect( s storage.Storage, opts control.Options, ) (Repository, error) { + ctx = clues.AddAll(ctx, "acct_provider", acct.Provider, "storage_provider", s.Provider) + // Close/Reset the progress bar. This ensures callers don't have to worry about // their output getting clobbered (#1720) defer observe.Complete() @@ -160,7 +165,7 @@ func Connect( kopiaRef := kopia.NewConn(s) if err := kopiaRef.Connect(ctx); err != nil { - return nil, err + return nil, errors.Wrap(err, "connecting kopia client") } // kopiaRef comes with a count of 1 and NewWrapper/NewModelStore bumps it again so safe // to close here. 
@@ -168,17 +173,17 @@ func Connect( w, err := kopia.NewWrapper(kopiaRef) if err != nil { - return nil, err + return nil, clues.Stack(err).WithClues(ctx) } ms, err := kopia.NewModelStore(kopiaRef) if err != nil { - return nil, err + return nil, clues.Stack(err).WithClues(ctx) } bus, err := events.NewBus(ctx, s, acct.ID(), opts) if err != nil { - return nil, err + return nil, errors.Wrap(err, "constructing event bus") } rm, err := getRepoModel(ctx, ms) diff --git a/website/blog/2023-2-4-where-to-store-corso.md b/website/blog/2023-2-4-where-to-store-corso.md new file mode 100644 index 000000000..2d9da3ea7 --- /dev/null +++ b/website/blog/2023-2-4-where-to-store-corso.md @@ -0,0 +1,122 @@ +--- +slug: where-to-store-corso +title: "Where to store your Corso Repository" +description: "Storage Options for Corso" +authors: nica +tags: [corso, microsoft 365, backups, S3] +date: 2023-2-4 +image: ./images/boxes_web.jpeg +--- + +![image of a large number of packing boxes](./images/boxes_web.jpeg) + +We all know that Corso is a free and open-source tool for creating backups of your Microsoft 365 data. But where does +that data go? + +Corso creates a repository to store your backups, and the default in our documentation is to send that data to AWS S3. +It's possible however to back up to any object storage system that has an S3-compatible API. Let’s talk about some options. + + +## S3-Compatible Object Storage + +A number of other cloud providers aren’t the 500-pound gorilla of AWS but still offer an S3-compatible API. +Some of them include: + +- Google Cloud: One of the largest cloud providers in the world, Google offers +[an S3-compatible API](https://cloud.google.com/storage/docs/interoperability) on top of its Google Cloud Storage (GCS) offering. +- Backblaze: Known for its deep analysis of hard drive failure statistics, Backblaze offers an S3-compatible API for its +B2 Cloud Storage product. 
They also make the bold claim of costing [significantly less than AWS S3](https://www.backblaze.com/b2/cloud-storage-pricing.html) +(I haven’t evaluated these claims) but Glacier is still cheaper (see below for more details) +- HPE: HPE Greenlake offers S3 compatibility and claims superior performance over S3. If you want to get a sense of how +‘Enterprise’ HPE is, the best writeup I could find of their offerings is [available only as a PDF](https://www.hpe.com/us/en/collaterals/collateral.a50006216.Create-value-from-data-2C-at-scale-E2-80-93-HPE-GreenLake-for-Scality-solution-brief.html). +- Wasabi: Another popular offering, Wasabi has great integration with existing AWS components at a reduced +cost but watch out for the minimum monthly storage charge and the minimum storage duration policy! + +This is an incomplete list, but any S3-compliant storage with immediate retrieval is expected to work with Corso today. + +## Local S3 Testing + +In my own testing, I use [MinIO](https://min.io/) to create a local S3 server and bucket. This has some great advantages +including extremely low latency for testing. Unless you have a significant hardware and software investment to ensure +reliable storage and compute infrastructure, you probably don't want to rely on a MinIO setup as your primary +backup location, but it’s a great way to do a zero-cost test backup that you totally control. 
+ +While there are a number of in-depth tutorials on how to use +[MinIO to run a local S3 server](https://simonjcarr.medium.com/running-s3-object-storage-locally-with-minio-f50540ffc239), +here’s the single script that can run a non-production instance of MinIO within a Docker container (you’ll need Docker +and the AWS CLI as prerequisites) and get you started with Corso quickly: + +```bash +mkdir -p $HOME/minio/data + +docker run \ + -p 9000:9000 \ + -p 9090:9090 \ + --name minio \ + -v ~/minio/data:/data \ + -e "MINIO_ROOT_USER=ROOTNAME" \ + -e "MINIO_ROOT_PASSWORD=CHANGEME123" \ + quay.io/minio/minio server /data --console-address ":9090" +``` + +In a separate window, create a bucket (`corso-backup`) for use with Corso. + +```bash +export AWS_ACCESS_KEY_ID=ROOTNAME +export AWS_SECRET_ACCESS_KEY=CHANGEME123 + +aws s3api create-bucket --bucket corso-backup --endpoint=http://127.0.0.1:9000 +``` + +To connect Corso to a local MinIO server with [`corso repo init`](https://corsobackup.io/docs/cli/corso-repo-init-s3/) +you’ll want to pass the `--disable-tls` flag so that it will accept an `http` connection. The full command would look like: + +```bash +./corso repo init s3 --bucket corso-backup --disable-tls --endpoint 127.0.0.1:9000 +``` + +## Reducing Cost With S3 Storage Classes + +AWS S3 offers [storage classes](https://aws.amazon.com/s3/storage-classes/) for a variety of different use cases and +Corso can leverage a number of them, but not all, to reduce the cost of storing data in the cloud. + +By default, Corso works hard to reduce its data footprint. It will compress and deduplicate data at source to reduce the +amount of storage used as well as the amount of network traffic when writing to object storage. Corso also combines +different emails, attachments, etc. 
into larger objects to make it more cost-effective by reducing the number of API +calls and increasing network throughput as well as making Corso data eligible and cost-effective for some of the other +storage classes described below. + +Stepping away from the default S3 offering (S3 Standard), S3 offers a number of different Glacier (cheap and deep) +storage classes that can help to further reduce the cost for backup and archival workloads. Within the storage classes, +Corso today supports Glacier Instant Retrieval but, because of user responsiveness and metadata requirements, not the +other Glacier variants. + +Glacier Instant Retrieval should provide the best price performance for a backup workload as backup data blobs are +typically written once, with occasional re-compacting, and read infrequently in the case of restore. One should note +that recommendations such as these are always workload dependent and should be verified for your use case. For example, +we would not recommend Glacier Instant Retrieval if you are constantly testing large restores or have heavy +churn in your backups and +limited retention. However, for most typical backup workloads (write mostly, read rarely), Glacier Instant Retrieval +should work just fine and deliver the best price-performance ratio. + +You can configure your storage to use Glacier Instant Retrieval by adding a `.storageconfig` file to the root of your +bucket. If you have configured Corso to store the repository in a sub-folder within your bucket by adding a +`prefix = '[folder name]'` configuration, the `.storageconfig` should go within that folder in the bucket. + +Here’s an example: + +```json +{ + "blobOptions": [ + { "prefix": "p", "storageClass": "GLACIER_IR" }, + { "storageClass": "STANDARD" } + ] +} +``` + +The `"prefix": "p"` parameter is unrelated to the subfolder `prefix` setting mentioned above. It tells Corso to +use the selected storage class for data blobs (named with a `p` prefix). 
By default, all other objects including +metadata and indices will use the standard storage tier. + +We would love to hear from you if you’ve deployed Corso with a different storage class, an object storage provider not +listed above, or have questions about how to best cost-optimize your setup. Come [find us on Discord](https://discord.gg/63DTTSnuhT)! diff --git a/website/blog/images/boxes_web.jpeg b/website/blog/images/boxes_web.jpeg new file mode 100644 index 000000000..840e96526 Binary files /dev/null and b/website/blog/images/boxes_web.jpeg differ diff --git a/website/docs/setup/configuration.md b/website/docs/setup/configuration.md index 85c99c6bb..d9255f6b7 100644 --- a/website/docs/setup/configuration.md +++ b/website/docs/setup/configuration.md @@ -129,7 +129,9 @@ directory within the container. ## Log Files +Corso generates a unique log file named with its timestamp for every invocation. The default location of Corso's log file is shown below but the location can be overridden by using the `--log-file` flag. +The log file will be appended to if multiple Corso invocations are pointed to the same file. You can also use `stdout` or `stderr` as the `--log-file` location to redirect the logs to "stdout" and "stderr" respectively. 
diff --git a/website/package-lock.json b/website/package-lock.json index 11a0d15e1..4ef68bb92 100644 --- a/website/package-lock.json +++ b/website/package-lock.json @@ -8,9 +8,9 @@ "name": "docs", "version": "0.1.0", "dependencies": { - "@docusaurus/core": "2.3.0", - "@docusaurus/plugin-google-gtag": "^2.3.0", - "@docusaurus/preset-classic": "2.3.0", + "@docusaurus/core": "2.3.1", + "@docusaurus/plugin-google-gtag": "^2.3.1", + "@docusaurus/preset-classic": "2.3.1", "@loadable/component": "^5.15.3", "@mdx-js/react": "^1.6.22", "animate.css": "^4.1.1", @@ -24,12 +24,12 @@ "prism-react-renderer": "^1.3.5", "react": "^17.0.2", "react-dom": "^17.0.2", - "sass": "^1.57.1", + "sass": "^1.58.0", "tw-elements": "^1.0.0-alpha13", "wow.js": "^1.2.2" }, "devDependencies": { - "@docusaurus/module-type-aliases": "2.3.0", + "@docusaurus/module-type-aliases": "2.3.1", "@iconify/react": "^4.1.0", "autoprefixer": "^10.4.13", "postcss": "^8.4.21", @@ -1988,9 +1988,9 @@ } }, "node_modules/@docusaurus/core": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/@docusaurus/core/-/core-2.3.0.tgz", - "integrity": "sha512-2AU5HfKyExO+/mi41SBnx5uY0aGZFXr3D93wntBY4lN1gsDKUpi7EE4lPBAXm9CoH4Pw6N24yDHy9CPR3sh/uA==", + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/@docusaurus/core/-/core-2.3.1.tgz", + "integrity": "sha512-0Jd4jtizqnRAr7svWaBbbrCCN8mzBNd2xFLoT/IM7bGfFie5y58oz97KzXliwiLY3zWjqMXjQcuP1a5VgCv2JA==", "dependencies": { "@babel/core": "^7.18.6", "@babel/generator": "^7.18.7", @@ -2002,13 +2002,13 @@ "@babel/runtime": "^7.18.6", "@babel/runtime-corejs3": "^7.18.6", "@babel/traverse": "^7.18.8", - "@docusaurus/cssnano-preset": "2.3.0", - "@docusaurus/logger": "2.3.0", - "@docusaurus/mdx-loader": "2.3.0", + "@docusaurus/cssnano-preset": "2.3.1", + "@docusaurus/logger": "2.3.1", + "@docusaurus/mdx-loader": "2.3.1", "@docusaurus/react-loadable": "5.5.2", - "@docusaurus/utils": "2.3.0", - "@docusaurus/utils-common": "2.3.0", - "@docusaurus/utils-validation": 
"2.3.0", + "@docusaurus/utils": "2.3.1", + "@docusaurus/utils-common": "2.3.1", + "@docusaurus/utils-validation": "2.3.1", "@slorber/static-site-generator-webpack-plugin": "^4.0.7", "@svgr/webpack": "^6.2.1", "autoprefixer": "^10.4.7", @@ -2029,7 +2029,7 @@ "del": "^6.1.1", "detect-port": "^1.3.0", "escape-html": "^1.0.3", - "eta": "^1.12.3", + "eta": "^2.0.0", "file-loader": "^6.2.0", "fs-extra": "^10.1.0", "html-minifier-terser": "^6.1.0", @@ -2146,9 +2146,9 @@ } }, "node_modules/@docusaurus/cssnano-preset": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/@docusaurus/cssnano-preset/-/cssnano-preset-2.3.0.tgz", - "integrity": "sha512-igmsXc1Q95lMeq07A1xua0/5wOPygDQ/ENSV7VVbiGhnvMv4gzkba8ZvbAtc7PmqK+kpYRfPzNCOk0GnQCvibg==", + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/@docusaurus/cssnano-preset/-/cssnano-preset-2.3.1.tgz", + "integrity": "sha512-7mIhAROES6CY1GmCjR4CZkUfjTL6B3u6rKHK0ChQl2d1IevYXq/k/vFgvOrJfcKxiObpMnE9+X6R2Wt1KqxC6w==", "dependencies": { "cssnano-preset-advanced": "^5.3.8", "postcss": "^8.4.14", @@ -2160,9 +2160,9 @@ } }, "node_modules/@docusaurus/logger": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/@docusaurus/logger/-/logger-2.3.0.tgz", - "integrity": "sha512-GO8s+FJpNT0vwt6kr/BZ/B1iB8EgHH/CF590i55Epy3TP2baQHGEHcAnQWvz5067OXIEke7Sa8sUNi0V9FrcJw==", + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/@docusaurus/logger/-/logger-2.3.1.tgz", + "integrity": "sha512-2lAV/olKKVr9qJhfHFCaqBIl8FgYjbUFwgUnX76+cULwQYss+42ZQ3grHGFvI0ocN2X55WcYe64ellQXz7suqg==", "dependencies": { "chalk": "^4.1.2", "tslib": "^2.4.0" @@ -2236,14 +2236,14 @@ } }, "node_modules/@docusaurus/mdx-loader": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/@docusaurus/mdx-loader/-/mdx-loader-2.3.0.tgz", - "integrity": "sha512-uxownG7dlg/l19rTIfUP0KDsbI8lTCgziWsdubMcWpGvOgXgm1p4mKSmWPzAwkRENn+un4L8DBhl3j1toeJy1A==", + "version": "2.3.1", + "resolved": 
"https://registry.npmjs.org/@docusaurus/mdx-loader/-/mdx-loader-2.3.1.tgz", + "integrity": "sha512-Gzga7OsxQRpt3392K9lv/bW4jGppdLFJh3luKRknCKSAaZrmVkOQv2gvCn8LAOSZ3uRg5No7AgYs/vpL8K94lA==", "dependencies": { "@babel/parser": "^7.18.8", "@babel/traverse": "^7.18.8", - "@docusaurus/logger": "2.3.0", - "@docusaurus/utils": "2.3.0", + "@docusaurus/logger": "2.3.1", + "@docusaurus/utils": "2.3.1", "@mdx-js/mdx": "^1.6.22", "escape-html": "^1.0.3", "file-loader": "^6.2.0", @@ -2267,12 +2267,12 @@ } }, "node_modules/@docusaurus/module-type-aliases": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/@docusaurus/module-type-aliases/-/module-type-aliases-2.3.0.tgz", - "integrity": "sha512-DvJtVejgrgIgxSNZ0pRaVu4EndRVBgbtp1LKvIO4xBgKlrsq8o4qkj1HKwH6yok5NoMqGApu8/E0KPOdZBtDpQ==", + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/@docusaurus/module-type-aliases/-/module-type-aliases-2.3.1.tgz", + "integrity": "sha512-6KkxfAVOJqIUynTRb/tphYCl+co3cP0PlHiMDbi+SzmYxMdgIrwYqH9yAnGSDoN6Jk2ZE/JY/Azs/8LPgKP48A==", "dependencies": { "@docusaurus/react-loadable": "5.5.2", - "@docusaurus/types": "2.3.0", + "@docusaurus/types": "2.3.1", "@types/history": "^4.7.11", "@types/react": "*", "@types/react-router-config": "*", @@ -2286,17 +2286,17 @@ } }, "node_modules/@docusaurus/plugin-content-blog": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-blog/-/plugin-content-blog-2.3.0.tgz", - "integrity": "sha512-/v+nWEaqRxH1U4I6uJIMdj8Iilrh0XwIG5vsmsi4AXbpArgqqyfMjbf70lzPOmSdYfdWYgb7tWcA6OhJqyKj0w==", + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-blog/-/plugin-content-blog-2.3.1.tgz", + "integrity": "sha512-f5LjqX+9WkiLyGiQ41x/KGSJ/9bOjSD8lsVhPvYeUYHCtYpuiDKfhZE07O4EqpHkBx4NQdtQDbp+aptgHSTuiw==", "dependencies": { - "@docusaurus/core": "2.3.0", - "@docusaurus/logger": "2.3.0", - "@docusaurus/mdx-loader": "2.3.0", - "@docusaurus/types": "2.3.0", - "@docusaurus/utils": "2.3.0", - 
"@docusaurus/utils-common": "2.3.0", - "@docusaurus/utils-validation": "2.3.0", + "@docusaurus/core": "2.3.1", + "@docusaurus/logger": "2.3.1", + "@docusaurus/mdx-loader": "2.3.1", + "@docusaurus/types": "2.3.1", + "@docusaurus/utils": "2.3.1", + "@docusaurus/utils-common": "2.3.1", + "@docusaurus/utils-validation": "2.3.1", "cheerio": "^1.0.0-rc.12", "feed": "^4.2.2", "fs-extra": "^10.1.0", @@ -2316,17 +2316,17 @@ } }, "node_modules/@docusaurus/plugin-content-docs": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-docs/-/plugin-content-docs-2.3.0.tgz", - "integrity": "sha512-P53gYvtPY/VJTMdV5pFnKv8d7qMBOPyu/4NPREQU5PWsXJOYedCwNBqdAR7A5P69l55TrzyUEUYLjIcwuoSPGg==", + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-docs/-/plugin-content-docs-2.3.1.tgz", + "integrity": "sha512-DxztTOBEruv7qFxqUtbsqXeNcHqcVEIEe+NQoI1oi2DBmKBhW/o0MIal8lt+9gvmpx3oYtlwmLOOGepxZgJGkw==", "dependencies": { - "@docusaurus/core": "2.3.0", - "@docusaurus/logger": "2.3.0", - "@docusaurus/mdx-loader": "2.3.0", - "@docusaurus/module-type-aliases": "2.3.0", - "@docusaurus/types": "2.3.0", - "@docusaurus/utils": "2.3.0", - "@docusaurus/utils-validation": "2.3.0", + "@docusaurus/core": "2.3.1", + "@docusaurus/logger": "2.3.1", + "@docusaurus/mdx-loader": "2.3.1", + "@docusaurus/module-type-aliases": "2.3.1", + "@docusaurus/types": "2.3.1", + "@docusaurus/utils": "2.3.1", + "@docusaurus/utils-validation": "2.3.1", "@types/react-router-config": "^5.0.6", "combine-promises": "^1.1.0", "fs-extra": "^10.1.0", @@ -2346,15 +2346,15 @@ } }, "node_modules/@docusaurus/plugin-content-pages": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-pages/-/plugin-content-pages-2.3.0.tgz", - "integrity": "sha512-H21Ux3Ln+pXlcp0RGdD1fyes7H3tsyhFpeflkxnCoXfTQf/pQB9IMuddFnxuXzj+34rp6jAQmLSaPssuixJXRQ==", + "version": "2.3.1", + "resolved": 
"https://registry.npmjs.org/@docusaurus/plugin-content-pages/-/plugin-content-pages-2.3.1.tgz", + "integrity": "sha512-E80UL6hvKm5VVw8Ka8YaVDtO6kWWDVUK4fffGvkpQ/AJQDOg99LwOXKujPoICC22nUFTsZ2Hp70XvpezCsFQaA==", "dependencies": { - "@docusaurus/core": "2.3.0", - "@docusaurus/mdx-loader": "2.3.0", - "@docusaurus/types": "2.3.0", - "@docusaurus/utils": "2.3.0", - "@docusaurus/utils-validation": "2.3.0", + "@docusaurus/core": "2.3.1", + "@docusaurus/mdx-loader": "2.3.1", + "@docusaurus/types": "2.3.1", + "@docusaurus/utils": "2.3.1", + "@docusaurus/utils-validation": "2.3.1", "fs-extra": "^10.1.0", "tslib": "^2.4.0", "webpack": "^5.73.0" @@ -2368,13 +2368,13 @@ } }, "node_modules/@docusaurus/plugin-debug": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-debug/-/plugin-debug-2.3.0.tgz", - "integrity": "sha512-TyeH3DMA9/8sIXyX8+zpdLtSixBnLJjW9KSvncKj/iXs1t20tpUZ1WFL7D+G1gxGGbLCBUGDluh738VvsRHC6Q==", + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-debug/-/plugin-debug-2.3.1.tgz", + "integrity": "sha512-Ujpml1Ppg4geB/2hyu2diWnO49az9U2bxM9Shen7b6qVcyFisNJTkVG2ocvLC7wM1efTJcUhBO6zAku2vKJGMw==", "dependencies": { - "@docusaurus/core": "2.3.0", - "@docusaurus/types": "2.3.0", - "@docusaurus/utils": "2.3.0", + "@docusaurus/core": "2.3.1", + "@docusaurus/types": "2.3.1", + "@docusaurus/utils": "2.3.1", "fs-extra": "^10.1.0", "react-json-view": "^1.21.3", "tslib": "^2.4.0" @@ -2388,13 +2388,13 @@ } }, "node_modules/@docusaurus/plugin-google-analytics": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-analytics/-/plugin-google-analytics-2.3.0.tgz", - "integrity": "sha512-Z9FqTQzeOC1R6i/x07VgkrTKpQ4OtMe3WBOKZKzgldWXJr6CDUWPSR8pfDEjA+RRAj8ajUh0E+BliKBmFILQvQ==", + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-analytics/-/plugin-google-analytics-2.3.1.tgz", + "integrity": 
"sha512-OHip0GQxKOFU8n7gkt3TM4HOYTXPCFDjqKbMClDD3KaDnyTuMp/Zvd9HSr770lLEscgPWIvzhJByRAClqsUWiQ==", "dependencies": { - "@docusaurus/core": "2.3.0", - "@docusaurus/types": "2.3.0", - "@docusaurus/utils-validation": "2.3.0", + "@docusaurus/core": "2.3.1", + "@docusaurus/types": "2.3.1", + "@docusaurus/utils-validation": "2.3.1", "tslib": "^2.4.0" }, "engines": { @@ -2406,13 +2406,13 @@ } }, "node_modules/@docusaurus/plugin-google-gtag": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-gtag/-/plugin-google-gtag-2.3.0.tgz", - "integrity": "sha512-oZavqtfwQAGjz+Dyhsb45mVssTevCW1PJgLcmr3WKiID15GTolbBrrp/fueTrEh60DzOd81HbiCLs56JWBwDhQ==", + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-gtag/-/plugin-google-gtag-2.3.1.tgz", + "integrity": "sha512-uXtDhfu4+Hm+oqWUySr3DNI5cWC/rmP6XJyAk83Heor3dFjZqDwCbkX8yWPywkRiWev3Dk/rVF8lEn0vIGVocA==", "dependencies": { - "@docusaurus/core": "2.3.0", - "@docusaurus/types": "2.3.0", - "@docusaurus/utils-validation": "2.3.0", + "@docusaurus/core": "2.3.1", + "@docusaurus/types": "2.3.1", + "@docusaurus/utils-validation": "2.3.1", "tslib": "^2.4.0" }, "engines": { @@ -2424,13 +2424,13 @@ } }, "node_modules/@docusaurus/plugin-google-tag-manager": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-tag-manager/-/plugin-google-tag-manager-2.3.0.tgz", - "integrity": "sha512-toAhuMX1h+P2CfavwoDlz9s2/Zm7caiEznW/inxq3izywG2l9ujWI/o6u2g70O3ACQ19eHMGHDsyEUcRDPrxBw==", + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-tag-manager/-/plugin-google-tag-manager-2.3.1.tgz", + "integrity": "sha512-Ww2BPEYSqg8q8tJdLYPFFM3FMDBCVhEM4UUqKzJaiRMx3NEoly3qqDRAoRDGdIhlC//Rf0iJV9cWAoq2m6k3sw==", "dependencies": { - "@docusaurus/core": "2.3.0", - "@docusaurus/types": "2.3.0", - "@docusaurus/utils-validation": "2.3.0", + "@docusaurus/core": "2.3.1", + "@docusaurus/types": "2.3.1", + 
"@docusaurus/utils-validation": "2.3.1", "tslib": "^2.4.0" }, "engines": { @@ -2442,16 +2442,16 @@ } }, "node_modules/@docusaurus/plugin-sitemap": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-sitemap/-/plugin-sitemap-2.3.0.tgz", - "integrity": "sha512-kwIHLP6lyubWOnNO0ejwjqdxB9C6ySnATN61etd6iwxHri5+PBZCEOv1sVm5U1gfQiDR1sVsXnJq2zNwLwgEtQ==", + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-sitemap/-/plugin-sitemap-2.3.1.tgz", + "integrity": "sha512-8Yxile/v6QGYV9vgFiYL+8d2N4z4Er3pSHsrD08c5XI8bUXxTppMwjarDUTH/TRTfgAWotRbhJ6WZLyajLpozA==", "dependencies": { - "@docusaurus/core": "2.3.0", - "@docusaurus/logger": "2.3.0", - "@docusaurus/types": "2.3.0", - "@docusaurus/utils": "2.3.0", - "@docusaurus/utils-common": "2.3.0", - "@docusaurus/utils-validation": "2.3.0", + "@docusaurus/core": "2.3.1", + "@docusaurus/logger": "2.3.1", + "@docusaurus/types": "2.3.1", + "@docusaurus/utils": "2.3.1", + "@docusaurus/utils-common": "2.3.1", + "@docusaurus/utils-validation": "2.3.1", "fs-extra": "^10.1.0", "sitemap": "^7.1.1", "tslib": "^2.4.0" @@ -2465,23 +2465,23 @@ } }, "node_modules/@docusaurus/preset-classic": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/@docusaurus/preset-classic/-/preset-classic-2.3.0.tgz", - "integrity": "sha512-mI37ieJe7cs5dHuvWz415U7hO209Q19Fp4iSHeFFgtQoK1PiRg7HJHkVbEsLZII2MivdzGFB5Hxoq2wUPWdNEA==", + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/@docusaurus/preset-classic/-/preset-classic-2.3.1.tgz", + "integrity": "sha512-OQ5W0AHyfdUk0IldwJ3BlnZ1EqoJuu2L2BMhqLbqwNWdkmzmSUvlFLH1Pe7CZSQgB2YUUC/DnmjbPKk/qQD0lQ==", "dependencies": { - "@docusaurus/core": "2.3.0", - "@docusaurus/plugin-content-blog": "2.3.0", - "@docusaurus/plugin-content-docs": "2.3.0", - "@docusaurus/plugin-content-pages": "2.3.0", - "@docusaurus/plugin-debug": "2.3.0", - "@docusaurus/plugin-google-analytics": "2.3.0", - "@docusaurus/plugin-google-gtag": "2.3.0", - 
"@docusaurus/plugin-google-tag-manager": "2.3.0", - "@docusaurus/plugin-sitemap": "2.3.0", - "@docusaurus/theme-classic": "2.3.0", - "@docusaurus/theme-common": "2.3.0", - "@docusaurus/theme-search-algolia": "2.3.0", - "@docusaurus/types": "2.3.0" + "@docusaurus/core": "2.3.1", + "@docusaurus/plugin-content-blog": "2.3.1", + "@docusaurus/plugin-content-docs": "2.3.1", + "@docusaurus/plugin-content-pages": "2.3.1", + "@docusaurus/plugin-debug": "2.3.1", + "@docusaurus/plugin-google-analytics": "2.3.1", + "@docusaurus/plugin-google-gtag": "2.3.1", + "@docusaurus/plugin-google-tag-manager": "2.3.1", + "@docusaurus/plugin-sitemap": "2.3.1", + "@docusaurus/theme-classic": "2.3.1", + "@docusaurus/theme-common": "2.3.1", + "@docusaurus/theme-search-algolia": "2.3.1", + "@docusaurus/types": "2.3.1" }, "engines": { "node": ">=16.14" @@ -2505,22 +2505,22 @@ } }, "node_modules/@docusaurus/theme-classic": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-classic/-/theme-classic-2.3.0.tgz", - "integrity": "sha512-x2h9KZ4feo22b1aArsfqvK05aDCgTkLZGRgAPY/9TevFV5/Yy19cZtBOCbzaKa2dKq1ofBRK9Hm1DdLJdLB14A==", + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-classic/-/theme-classic-2.3.1.tgz", + "integrity": "sha512-SelSIDvyttb7ZYHj8vEUhqykhAqfOPKk+uP0z85jH72IMC58e7O8DIlcAeBv+CWsLbNIl9/Hcg71X0jazuxJug==", "dependencies": { - "@docusaurus/core": "2.3.0", - "@docusaurus/mdx-loader": "2.3.0", - "@docusaurus/module-type-aliases": "2.3.0", - "@docusaurus/plugin-content-blog": "2.3.0", - "@docusaurus/plugin-content-docs": "2.3.0", - "@docusaurus/plugin-content-pages": "2.3.0", - "@docusaurus/theme-common": "2.3.0", - "@docusaurus/theme-translations": "2.3.0", - "@docusaurus/types": "2.3.0", - "@docusaurus/utils": "2.3.0", - "@docusaurus/utils-common": "2.3.0", - "@docusaurus/utils-validation": "2.3.0", + "@docusaurus/core": "2.3.1", + "@docusaurus/mdx-loader": "2.3.1", + "@docusaurus/module-type-aliases": "2.3.1", + 
"@docusaurus/plugin-content-blog": "2.3.1", + "@docusaurus/plugin-content-docs": "2.3.1", + "@docusaurus/plugin-content-pages": "2.3.1", + "@docusaurus/theme-common": "2.3.1", + "@docusaurus/theme-translations": "2.3.1", + "@docusaurus/types": "2.3.1", + "@docusaurus/utils": "2.3.1", + "@docusaurus/utils-common": "2.3.1", + "@docusaurus/utils-validation": "2.3.1", "@mdx-js/react": "^1.6.22", "clsx": "^1.2.1", "copy-text-to-clipboard": "^3.0.1", @@ -2544,16 +2544,16 @@ } }, "node_modules/@docusaurus/theme-common": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-common/-/theme-common-2.3.0.tgz", - "integrity": "sha512-1eAvaULgu6ywHbjkdWOOHl1PdMylne/88i0kg25qimmkMgRHoIQ23JgRD/q5sFr+2YX7U7SggR1UNNsqu2zZPw==", + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-common/-/theme-common-2.3.1.tgz", + "integrity": "sha512-RYmYl2OR2biO+yhmW1aS5FyEvnrItPINa+0U2dMxcHpah8reSCjQ9eJGRmAgkZFchV1+aIQzXOI1K7LCW38O0g==", "dependencies": { - "@docusaurus/mdx-loader": "2.3.0", - "@docusaurus/module-type-aliases": "2.3.0", - "@docusaurus/plugin-content-blog": "2.3.0", - "@docusaurus/plugin-content-docs": "2.3.0", - "@docusaurus/plugin-content-pages": "2.3.0", - "@docusaurus/utils": "2.3.0", + "@docusaurus/mdx-loader": "2.3.1", + "@docusaurus/module-type-aliases": "2.3.1", + "@docusaurus/plugin-content-blog": "2.3.1", + "@docusaurus/plugin-content-docs": "2.3.1", + "@docusaurus/plugin-content-pages": "2.3.1", + "@docusaurus/utils": "2.3.1", "@types/history": "^4.7.11", "@types/react": "*", "@types/react-router-config": "*", @@ -2573,22 +2573,22 @@ } }, "node_modules/@docusaurus/theme-search-algolia": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-search-algolia/-/theme-search-algolia-2.3.0.tgz", - "integrity": "sha512-/i5k1NAlbYvgnw69vJQA174+ipwdtTCCUvxRp7bVZ+8KmviEybAC/kuKe7WmiUbIGVYbAbwYaEsPuVnsd65DrA==", + "version": "2.3.1", + "resolved": 
"https://registry.npmjs.org/@docusaurus/theme-search-algolia/-/theme-search-algolia-2.3.1.tgz", + "integrity": "sha512-JdHaRqRuH1X++g5fEMLnq7OtULSGQdrs9AbhcWRQ428ZB8/HOiaN6mj3hzHvcD3DFgu7koIVtWPQnvnN7iwzHA==", "dependencies": { "@docsearch/react": "^3.1.1", - "@docusaurus/core": "2.3.0", - "@docusaurus/logger": "2.3.0", - "@docusaurus/plugin-content-docs": "2.3.0", - "@docusaurus/theme-common": "2.3.0", - "@docusaurus/theme-translations": "2.3.0", - "@docusaurus/utils": "2.3.0", - "@docusaurus/utils-validation": "2.3.0", + "@docusaurus/core": "2.3.1", + "@docusaurus/logger": "2.3.1", + "@docusaurus/plugin-content-docs": "2.3.1", + "@docusaurus/theme-common": "2.3.1", + "@docusaurus/theme-translations": "2.3.1", + "@docusaurus/utils": "2.3.1", + "@docusaurus/utils-validation": "2.3.1", "algoliasearch": "^4.13.1", "algoliasearch-helper": "^3.10.0", "clsx": "^1.2.1", - "eta": "^1.12.3", + "eta": "^2.0.0", "fs-extra": "^10.1.0", "lodash": "^4.17.21", "tslib": "^2.4.0", @@ -2603,9 +2603,9 @@ } }, "node_modules/@docusaurus/theme-translations": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-translations/-/theme-translations-2.3.0.tgz", - "integrity": "sha512-YLVD6LrszBld1EvThTOa9PcblKAZs1jOmRjwtffdg1CGjQWFXEeWUL24n2M4ARByzuLry5D8ZRVmKyRt3LOwsw==", + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-translations/-/theme-translations-2.3.1.tgz", + "integrity": "sha512-BsBZzAewJabVhoGG1Ij2u4pMS3MPW6gZ6sS4pc+Y7czevRpzxoFNJXRtQDVGe7mOpv/MmRmqg4owDK+lcOTCVQ==", "dependencies": { "fs-extra": "^10.1.0", "tslib": "^2.4.0" @@ -2615,9 +2615,9 @@ } }, "node_modules/@docusaurus/types": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/@docusaurus/types/-/types-2.3.0.tgz", - "integrity": "sha512-c5C0nROxVFsgMAm4vWDB1LDv3v4K18Y8eVxazL3dEr7w+7kNLc5koWrW7fWmCnrbItnuTna4nLS2PcSZrkYidg==", + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/@docusaurus/types/-/types-2.3.1.tgz", + "integrity": 
"sha512-PREbIRhTaNNY042qmfSE372Jb7djZt+oVTZkoqHJ8eff8vOIc2zqqDqBVc5BhOfpZGPTrE078yy/torUEZy08A==", "dependencies": { "@types/history": "^4.7.11", "@types/react": "*", @@ -2634,11 +2634,11 @@ } }, "node_modules/@docusaurus/utils": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/@docusaurus/utils/-/utils-2.3.0.tgz", - "integrity": "sha512-6+GCurDsePHHbLM3ktcjv8N4zrjgrl1O7gOQNG4UMktcwHssFFVm+geVcB4M8siOmwUjV2VaNrp0hpGy8DOQHw==", + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/@docusaurus/utils/-/utils-2.3.1.tgz", + "integrity": "sha512-9WcQROCV0MmrpOQDXDGhtGMd52DHpSFbKLfkyaYumzbTstrbA5pPOtiGtxK1nqUHkiIv8UwexS54p0Vod2I1lg==", "dependencies": { - "@docusaurus/logger": "2.3.0", + "@docusaurus/logger": "2.3.1", "@svgr/webpack": "^6.2.1", "escape-string-regexp": "^4.0.0", "file-loader": "^6.2.0", @@ -2668,9 +2668,9 @@ } }, "node_modules/@docusaurus/utils-common": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/@docusaurus/utils-common/-/utils-common-2.3.0.tgz", - "integrity": "sha512-nu5An+26FS7SQTwvyFR4g9lw3NU1u2RLcxJPZF+NCOG8Ne96ciuQosa7+N1kllm/heEJqfTaAUD0sFxpTZrDtw==", + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/@docusaurus/utils-common/-/utils-common-2.3.1.tgz", + "integrity": "sha512-pVlRpXkdNcxmKNxAaB1ya2hfCEvVsLDp2joeM6K6uv55Oc5nVIqgyYSgSNKZyMdw66NnvMfsu0RBylcwZQKo9A==", "dependencies": { "tslib": "^2.4.0" }, @@ -2687,12 +2687,12 @@ } }, "node_modules/@docusaurus/utils-validation": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/@docusaurus/utils-validation/-/utils-validation-2.3.0.tgz", - "integrity": "sha512-TBJCLqwAoiQQJ6dbgBpuLvzsn/XiTgbZkd6eJFUIQYLb1d473Zv58QrHXVmVQDLWiCgmJpHW2LpMfumTpCDgJw==", + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/@docusaurus/utils-validation/-/utils-validation-2.3.1.tgz", + "integrity": "sha512-7n0208IG3k1HVTByMHlZoIDjjOFC8sbViHVXJx0r3Q+3Ezrx+VQ1RZ/zjNn6lT+QBCRCXlnlaoJ8ug4HIVgQ3w==", "dependencies": { - "@docusaurus/logger": 
"2.3.0", - "@docusaurus/utils": "2.3.0", + "@docusaurus/logger": "2.3.1", + "@docusaurus/utils": "2.3.1", "joi": "^17.6.0", "js-yaml": "^4.1.0", "tslib": "^2.4.0" @@ -6719,10 +6719,9 @@ } }, "node_modules/eta": { - "version": "1.12.3", - "resolved": "https://registry.npmjs.org/eta/-/eta-1.12.3.tgz", - "integrity": "sha512-qHixwbDLtekO/d51Yr4glcaUJCIjGVJyTzuqV4GPlgZo1YpgOKG+avQynErZIYrfM6JIJdtiG2Kox8tbb+DoGg==", - "license": "MIT", + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/eta/-/eta-2.0.0.tgz", + "integrity": "sha512-NqE7S2VmVwgMS8yBxsH4VgNQjNjLq1gfGU0u9I6Cjh468nPRMoDfGdK9n1p/3Dvsw3ebklDkZsFAnKJ9sefjBA==", "engines": { "node": ">=6.0.0" }, @@ -7999,10 +7998,9 @@ } }, "node_modules/http-cache-semantics": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.1.0.tgz", - "integrity": "sha512-carPklcUh7ROWRK7Cv27RPtdhYhUsela/ue5/jKzjegVvXDqM2ILE9Q2BGn9JZJh1g87cp56su/FgQSzcWS8cQ==", - "license": "BSD-2-Clause" + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.1.1.tgz", + "integrity": "sha512-er295DKPVsV82j5kw1Gjt+ADA/XYHsajl82cGNQG2eyoPkvgUhX+nDIyelzhIWbbsXP39EHcI6l5tYs2FYqYXQ==" }, "node_modules/http-deceiver": { "version": "1.2.7", @@ -11861,9 +11859,9 @@ "license": "MIT" }, "node_modules/sass": { - "version": "1.57.1", - "resolved": "https://registry.npmjs.org/sass/-/sass-1.57.1.tgz", - "integrity": "sha512-O2+LwLS79op7GI0xZ8fqzF7X2m/m8WFfI02dHOdsK5R2ECeS5F62zrwg/relM1rjSLy7Vd/DiMNIvPrQGsA0jw==", + "version": "1.58.0", + "resolved": "https://registry.npmjs.org/sass/-/sass-1.58.0.tgz", + "integrity": "sha512-PiMJcP33DdKtZ/1jSjjqVIKihoDc6yWmYr9K/4r3fVVIEDAluD0q7XZiRKrNJcPK3qkLRF/79DND1H5q1LBjgg==", "dependencies": { "chokidar": ">=3.0.0 <4.0.0", "immutable": "^4.0.0", @@ -15787,9 +15785,9 @@ } }, "@docusaurus/core": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/@docusaurus/core/-/core-2.3.0.tgz", - "integrity": 
"sha512-2AU5HfKyExO+/mi41SBnx5uY0aGZFXr3D93wntBY4lN1gsDKUpi7EE4lPBAXm9CoH4Pw6N24yDHy9CPR3sh/uA==", + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/@docusaurus/core/-/core-2.3.1.tgz", + "integrity": "sha512-0Jd4jtizqnRAr7svWaBbbrCCN8mzBNd2xFLoT/IM7bGfFie5y58oz97KzXliwiLY3zWjqMXjQcuP1a5VgCv2JA==", "requires": { "@babel/core": "^7.18.6", "@babel/generator": "^7.18.7", @@ -15801,13 +15799,13 @@ "@babel/runtime": "^7.18.6", "@babel/runtime-corejs3": "^7.18.6", "@babel/traverse": "^7.18.8", - "@docusaurus/cssnano-preset": "2.3.0", - "@docusaurus/logger": "2.3.0", - "@docusaurus/mdx-loader": "2.3.0", + "@docusaurus/cssnano-preset": "2.3.1", + "@docusaurus/logger": "2.3.1", + "@docusaurus/mdx-loader": "2.3.1", "@docusaurus/react-loadable": "5.5.2", - "@docusaurus/utils": "2.3.0", - "@docusaurus/utils-common": "2.3.0", - "@docusaurus/utils-validation": "2.3.0", + "@docusaurus/utils": "2.3.1", + "@docusaurus/utils-common": "2.3.1", + "@docusaurus/utils-validation": "2.3.1", "@slorber/static-site-generator-webpack-plugin": "^4.0.7", "@svgr/webpack": "^6.2.1", "autoprefixer": "^10.4.7", @@ -15828,7 +15826,7 @@ "del": "^6.1.1", "detect-port": "^1.3.0", "escape-html": "^1.0.3", - "eta": "^1.12.3", + "eta": "^2.0.0", "file-loader": "^6.2.0", "fs-extra": "^10.1.0", "html-minifier-terser": "^6.1.0", @@ -15910,9 +15908,9 @@ } }, "@docusaurus/cssnano-preset": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/@docusaurus/cssnano-preset/-/cssnano-preset-2.3.0.tgz", - "integrity": "sha512-igmsXc1Q95lMeq07A1xua0/5wOPygDQ/ENSV7VVbiGhnvMv4gzkba8ZvbAtc7PmqK+kpYRfPzNCOk0GnQCvibg==", + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/@docusaurus/cssnano-preset/-/cssnano-preset-2.3.1.tgz", + "integrity": "sha512-7mIhAROES6CY1GmCjR4CZkUfjTL6B3u6rKHK0ChQl2d1IevYXq/k/vFgvOrJfcKxiObpMnE9+X6R2Wt1KqxC6w==", "requires": { "cssnano-preset-advanced": "^5.3.8", "postcss": "^8.4.14", @@ -15921,9 +15919,9 @@ } }, "@docusaurus/logger": { - "version": "2.3.0", - 
"resolved": "https://registry.npmjs.org/@docusaurus/logger/-/logger-2.3.0.tgz", - "integrity": "sha512-GO8s+FJpNT0vwt6kr/BZ/B1iB8EgHH/CF590i55Epy3TP2baQHGEHcAnQWvz5067OXIEke7Sa8sUNi0V9FrcJw==", + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/@docusaurus/logger/-/logger-2.3.1.tgz", + "integrity": "sha512-2lAV/olKKVr9qJhfHFCaqBIl8FgYjbUFwgUnX76+cULwQYss+42ZQ3grHGFvI0ocN2X55WcYe64ellQXz7suqg==", "requires": { "chalk": "^4.1.2", "tslib": "^2.4.0" @@ -15975,14 +15973,14 @@ } }, "@docusaurus/mdx-loader": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/@docusaurus/mdx-loader/-/mdx-loader-2.3.0.tgz", - "integrity": "sha512-uxownG7dlg/l19rTIfUP0KDsbI8lTCgziWsdubMcWpGvOgXgm1p4mKSmWPzAwkRENn+un4L8DBhl3j1toeJy1A==", + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/@docusaurus/mdx-loader/-/mdx-loader-2.3.1.tgz", + "integrity": "sha512-Gzga7OsxQRpt3392K9lv/bW4jGppdLFJh3luKRknCKSAaZrmVkOQv2gvCn8LAOSZ3uRg5No7AgYs/vpL8K94lA==", "requires": { "@babel/parser": "^7.18.8", "@babel/traverse": "^7.18.8", - "@docusaurus/logger": "2.3.0", - "@docusaurus/utils": "2.3.0", + "@docusaurus/logger": "2.3.1", + "@docusaurus/utils": "2.3.1", "@mdx-js/mdx": "^1.6.22", "escape-html": "^1.0.3", "file-loader": "^6.2.0", @@ -15999,12 +15997,12 @@ } }, "@docusaurus/module-type-aliases": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/@docusaurus/module-type-aliases/-/module-type-aliases-2.3.0.tgz", - "integrity": "sha512-DvJtVejgrgIgxSNZ0pRaVu4EndRVBgbtp1LKvIO4xBgKlrsq8o4qkj1HKwH6yok5NoMqGApu8/E0KPOdZBtDpQ==", + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/@docusaurus/module-type-aliases/-/module-type-aliases-2.3.1.tgz", + "integrity": "sha512-6KkxfAVOJqIUynTRb/tphYCl+co3cP0PlHiMDbi+SzmYxMdgIrwYqH9yAnGSDoN6Jk2ZE/JY/Azs/8LPgKP48A==", "requires": { "@docusaurus/react-loadable": "5.5.2", - "@docusaurus/types": "2.3.0", + "@docusaurus/types": "2.3.1", "@types/history": "^4.7.11", "@types/react": "*", 
"@types/react-router-config": "*", @@ -16014,17 +16012,17 @@ } }, "@docusaurus/plugin-content-blog": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-blog/-/plugin-content-blog-2.3.0.tgz", - "integrity": "sha512-/v+nWEaqRxH1U4I6uJIMdj8Iilrh0XwIG5vsmsi4AXbpArgqqyfMjbf70lzPOmSdYfdWYgb7tWcA6OhJqyKj0w==", + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-blog/-/plugin-content-blog-2.3.1.tgz", + "integrity": "sha512-f5LjqX+9WkiLyGiQ41x/KGSJ/9bOjSD8lsVhPvYeUYHCtYpuiDKfhZE07O4EqpHkBx4NQdtQDbp+aptgHSTuiw==", "requires": { - "@docusaurus/core": "2.3.0", - "@docusaurus/logger": "2.3.0", - "@docusaurus/mdx-loader": "2.3.0", - "@docusaurus/types": "2.3.0", - "@docusaurus/utils": "2.3.0", - "@docusaurus/utils-common": "2.3.0", - "@docusaurus/utils-validation": "2.3.0", + "@docusaurus/core": "2.3.1", + "@docusaurus/logger": "2.3.1", + "@docusaurus/mdx-loader": "2.3.1", + "@docusaurus/types": "2.3.1", + "@docusaurus/utils": "2.3.1", + "@docusaurus/utils-common": "2.3.1", + "@docusaurus/utils-validation": "2.3.1", "cheerio": "^1.0.0-rc.12", "feed": "^4.2.2", "fs-extra": "^10.1.0", @@ -16037,17 +16035,17 @@ } }, "@docusaurus/plugin-content-docs": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-docs/-/plugin-content-docs-2.3.0.tgz", - "integrity": "sha512-P53gYvtPY/VJTMdV5pFnKv8d7qMBOPyu/4NPREQU5PWsXJOYedCwNBqdAR7A5P69l55TrzyUEUYLjIcwuoSPGg==", + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-docs/-/plugin-content-docs-2.3.1.tgz", + "integrity": "sha512-DxztTOBEruv7qFxqUtbsqXeNcHqcVEIEe+NQoI1oi2DBmKBhW/o0MIal8lt+9gvmpx3oYtlwmLOOGepxZgJGkw==", "requires": { - "@docusaurus/core": "2.3.0", - "@docusaurus/logger": "2.3.0", - "@docusaurus/mdx-loader": "2.3.0", - "@docusaurus/module-type-aliases": "2.3.0", - "@docusaurus/types": "2.3.0", - "@docusaurus/utils": "2.3.0", - "@docusaurus/utils-validation": "2.3.0", + 
"@docusaurus/core": "2.3.1", + "@docusaurus/logger": "2.3.1", + "@docusaurus/mdx-loader": "2.3.1", + "@docusaurus/module-type-aliases": "2.3.1", + "@docusaurus/types": "2.3.1", + "@docusaurus/utils": "2.3.1", + "@docusaurus/utils-validation": "2.3.1", "@types/react-router-config": "^5.0.6", "combine-promises": "^1.1.0", "fs-extra": "^10.1.0", @@ -16060,100 +16058,100 @@ } }, "@docusaurus/plugin-content-pages": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-pages/-/plugin-content-pages-2.3.0.tgz", - "integrity": "sha512-H21Ux3Ln+pXlcp0RGdD1fyes7H3tsyhFpeflkxnCoXfTQf/pQB9IMuddFnxuXzj+34rp6jAQmLSaPssuixJXRQ==", + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-pages/-/plugin-content-pages-2.3.1.tgz", + "integrity": "sha512-E80UL6hvKm5VVw8Ka8YaVDtO6kWWDVUK4fffGvkpQ/AJQDOg99LwOXKujPoICC22nUFTsZ2Hp70XvpezCsFQaA==", "requires": { - "@docusaurus/core": "2.3.0", - "@docusaurus/mdx-loader": "2.3.0", - "@docusaurus/types": "2.3.0", - "@docusaurus/utils": "2.3.0", - "@docusaurus/utils-validation": "2.3.0", + "@docusaurus/core": "2.3.1", + "@docusaurus/mdx-loader": "2.3.1", + "@docusaurus/types": "2.3.1", + "@docusaurus/utils": "2.3.1", + "@docusaurus/utils-validation": "2.3.1", "fs-extra": "^10.1.0", "tslib": "^2.4.0", "webpack": "^5.73.0" } }, "@docusaurus/plugin-debug": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-debug/-/plugin-debug-2.3.0.tgz", - "integrity": "sha512-TyeH3DMA9/8sIXyX8+zpdLtSixBnLJjW9KSvncKj/iXs1t20tpUZ1WFL7D+G1gxGGbLCBUGDluh738VvsRHC6Q==", + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-debug/-/plugin-debug-2.3.1.tgz", + "integrity": "sha512-Ujpml1Ppg4geB/2hyu2diWnO49az9U2bxM9Shen7b6qVcyFisNJTkVG2ocvLC7wM1efTJcUhBO6zAku2vKJGMw==", "requires": { - "@docusaurus/core": "2.3.0", - "@docusaurus/types": "2.3.0", - "@docusaurus/utils": "2.3.0", + "@docusaurus/core": "2.3.1", + "@docusaurus/types": 
"2.3.1", + "@docusaurus/utils": "2.3.1", "fs-extra": "^10.1.0", "react-json-view": "^1.21.3", "tslib": "^2.4.0" } }, "@docusaurus/plugin-google-analytics": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-analytics/-/plugin-google-analytics-2.3.0.tgz", - "integrity": "sha512-Z9FqTQzeOC1R6i/x07VgkrTKpQ4OtMe3WBOKZKzgldWXJr6CDUWPSR8pfDEjA+RRAj8ajUh0E+BliKBmFILQvQ==", + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-analytics/-/plugin-google-analytics-2.3.1.tgz", + "integrity": "sha512-OHip0GQxKOFU8n7gkt3TM4HOYTXPCFDjqKbMClDD3KaDnyTuMp/Zvd9HSr770lLEscgPWIvzhJByRAClqsUWiQ==", "requires": { - "@docusaurus/core": "2.3.0", - "@docusaurus/types": "2.3.0", - "@docusaurus/utils-validation": "2.3.0", + "@docusaurus/core": "2.3.1", + "@docusaurus/types": "2.3.1", + "@docusaurus/utils-validation": "2.3.1", "tslib": "^2.4.0" } }, "@docusaurus/plugin-google-gtag": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-gtag/-/plugin-google-gtag-2.3.0.tgz", - "integrity": "sha512-oZavqtfwQAGjz+Dyhsb45mVssTevCW1PJgLcmr3WKiID15GTolbBrrp/fueTrEh60DzOd81HbiCLs56JWBwDhQ==", + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-gtag/-/plugin-google-gtag-2.3.1.tgz", + "integrity": "sha512-uXtDhfu4+Hm+oqWUySr3DNI5cWC/rmP6XJyAk83Heor3dFjZqDwCbkX8yWPywkRiWev3Dk/rVF8lEn0vIGVocA==", "requires": { - "@docusaurus/core": "2.3.0", - "@docusaurus/types": "2.3.0", - "@docusaurus/utils-validation": "2.3.0", + "@docusaurus/core": "2.3.1", + "@docusaurus/types": "2.3.1", + "@docusaurus/utils-validation": "2.3.1", "tslib": "^2.4.0" } }, "@docusaurus/plugin-google-tag-manager": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-tag-manager/-/plugin-google-tag-manager-2.3.0.tgz", - "integrity": "sha512-toAhuMX1h+P2CfavwoDlz9s2/Zm7caiEznW/inxq3izywG2l9ujWI/o6u2g70O3ACQ19eHMGHDsyEUcRDPrxBw==", + "version": 
"2.3.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-tag-manager/-/plugin-google-tag-manager-2.3.1.tgz", + "integrity": "sha512-Ww2BPEYSqg8q8tJdLYPFFM3FMDBCVhEM4UUqKzJaiRMx3NEoly3qqDRAoRDGdIhlC//Rf0iJV9cWAoq2m6k3sw==", "requires": { - "@docusaurus/core": "2.3.0", - "@docusaurus/types": "2.3.0", - "@docusaurus/utils-validation": "2.3.0", + "@docusaurus/core": "2.3.1", + "@docusaurus/types": "2.3.1", + "@docusaurus/utils-validation": "2.3.1", "tslib": "^2.4.0" } }, "@docusaurus/plugin-sitemap": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-sitemap/-/plugin-sitemap-2.3.0.tgz", - "integrity": "sha512-kwIHLP6lyubWOnNO0ejwjqdxB9C6ySnATN61etd6iwxHri5+PBZCEOv1sVm5U1gfQiDR1sVsXnJq2zNwLwgEtQ==", + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-sitemap/-/plugin-sitemap-2.3.1.tgz", + "integrity": "sha512-8Yxile/v6QGYV9vgFiYL+8d2N4z4Er3pSHsrD08c5XI8bUXxTppMwjarDUTH/TRTfgAWotRbhJ6WZLyajLpozA==", "requires": { - "@docusaurus/core": "2.3.0", - "@docusaurus/logger": "2.3.0", - "@docusaurus/types": "2.3.0", - "@docusaurus/utils": "2.3.0", - "@docusaurus/utils-common": "2.3.0", - "@docusaurus/utils-validation": "2.3.0", + "@docusaurus/core": "2.3.1", + "@docusaurus/logger": "2.3.1", + "@docusaurus/types": "2.3.1", + "@docusaurus/utils": "2.3.1", + "@docusaurus/utils-common": "2.3.1", + "@docusaurus/utils-validation": "2.3.1", "fs-extra": "^10.1.0", "sitemap": "^7.1.1", "tslib": "^2.4.0" } }, "@docusaurus/preset-classic": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/@docusaurus/preset-classic/-/preset-classic-2.3.0.tgz", - "integrity": "sha512-mI37ieJe7cs5dHuvWz415U7hO209Q19Fp4iSHeFFgtQoK1PiRg7HJHkVbEsLZII2MivdzGFB5Hxoq2wUPWdNEA==", + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/@docusaurus/preset-classic/-/preset-classic-2.3.1.tgz", + "integrity": "sha512-OQ5W0AHyfdUk0IldwJ3BlnZ1EqoJuu2L2BMhqLbqwNWdkmzmSUvlFLH1Pe7CZSQgB2YUUC/DnmjbPKk/qQD0lQ==", 
"requires": { - "@docusaurus/core": "2.3.0", - "@docusaurus/plugin-content-blog": "2.3.0", - "@docusaurus/plugin-content-docs": "2.3.0", - "@docusaurus/plugin-content-pages": "2.3.0", - "@docusaurus/plugin-debug": "2.3.0", - "@docusaurus/plugin-google-analytics": "2.3.0", - "@docusaurus/plugin-google-gtag": "2.3.0", - "@docusaurus/plugin-google-tag-manager": "2.3.0", - "@docusaurus/plugin-sitemap": "2.3.0", - "@docusaurus/theme-classic": "2.3.0", - "@docusaurus/theme-common": "2.3.0", - "@docusaurus/theme-search-algolia": "2.3.0", - "@docusaurus/types": "2.3.0" + "@docusaurus/core": "2.3.1", + "@docusaurus/plugin-content-blog": "2.3.1", + "@docusaurus/plugin-content-docs": "2.3.1", + "@docusaurus/plugin-content-pages": "2.3.1", + "@docusaurus/plugin-debug": "2.3.1", + "@docusaurus/plugin-google-analytics": "2.3.1", + "@docusaurus/plugin-google-gtag": "2.3.1", + "@docusaurus/plugin-google-tag-manager": "2.3.1", + "@docusaurus/plugin-sitemap": "2.3.1", + "@docusaurus/theme-classic": "2.3.1", + "@docusaurus/theme-common": "2.3.1", + "@docusaurus/theme-search-algolia": "2.3.1", + "@docusaurus/types": "2.3.1" } }, "@docusaurus/react-loadable": { @@ -16166,22 +16164,22 @@ } }, "@docusaurus/theme-classic": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-classic/-/theme-classic-2.3.0.tgz", - "integrity": "sha512-x2h9KZ4feo22b1aArsfqvK05aDCgTkLZGRgAPY/9TevFV5/Yy19cZtBOCbzaKa2dKq1ofBRK9Hm1DdLJdLB14A==", + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-classic/-/theme-classic-2.3.1.tgz", + "integrity": "sha512-SelSIDvyttb7ZYHj8vEUhqykhAqfOPKk+uP0z85jH72IMC58e7O8DIlcAeBv+CWsLbNIl9/Hcg71X0jazuxJug==", "requires": { - "@docusaurus/core": "2.3.0", - "@docusaurus/mdx-loader": "2.3.0", - "@docusaurus/module-type-aliases": "2.3.0", - "@docusaurus/plugin-content-blog": "2.3.0", - "@docusaurus/plugin-content-docs": "2.3.0", - "@docusaurus/plugin-content-pages": "2.3.0", - "@docusaurus/theme-common": "2.3.0", - 
"@docusaurus/theme-translations": "2.3.0", - "@docusaurus/types": "2.3.0", - "@docusaurus/utils": "2.3.0", - "@docusaurus/utils-common": "2.3.0", - "@docusaurus/utils-validation": "2.3.0", + "@docusaurus/core": "2.3.1", + "@docusaurus/mdx-loader": "2.3.1", + "@docusaurus/module-type-aliases": "2.3.1", + "@docusaurus/plugin-content-blog": "2.3.1", + "@docusaurus/plugin-content-docs": "2.3.1", + "@docusaurus/plugin-content-pages": "2.3.1", + "@docusaurus/theme-common": "2.3.1", + "@docusaurus/theme-translations": "2.3.1", + "@docusaurus/types": "2.3.1", + "@docusaurus/utils": "2.3.1", + "@docusaurus/utils-common": "2.3.1", + "@docusaurus/utils-validation": "2.3.1", "@mdx-js/react": "^1.6.22", "clsx": "^1.2.1", "copy-text-to-clipboard": "^3.0.1", @@ -16198,16 +16196,16 @@ } }, "@docusaurus/theme-common": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-common/-/theme-common-2.3.0.tgz", - "integrity": "sha512-1eAvaULgu6ywHbjkdWOOHl1PdMylne/88i0kg25qimmkMgRHoIQ23JgRD/q5sFr+2YX7U7SggR1UNNsqu2zZPw==", + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-common/-/theme-common-2.3.1.tgz", + "integrity": "sha512-RYmYl2OR2biO+yhmW1aS5FyEvnrItPINa+0U2dMxcHpah8reSCjQ9eJGRmAgkZFchV1+aIQzXOI1K7LCW38O0g==", "requires": { - "@docusaurus/mdx-loader": "2.3.0", - "@docusaurus/module-type-aliases": "2.3.0", - "@docusaurus/plugin-content-blog": "2.3.0", - "@docusaurus/plugin-content-docs": "2.3.0", - "@docusaurus/plugin-content-pages": "2.3.0", - "@docusaurus/utils": "2.3.0", + "@docusaurus/mdx-loader": "2.3.1", + "@docusaurus/module-type-aliases": "2.3.1", + "@docusaurus/plugin-content-blog": "2.3.1", + "@docusaurus/plugin-content-docs": "2.3.1", + "@docusaurus/plugin-content-pages": "2.3.1", + "@docusaurus/utils": "2.3.1", "@types/history": "^4.7.11", "@types/react": "*", "@types/react-router-config": "*", @@ -16220,22 +16218,22 @@ } }, "@docusaurus/theme-search-algolia": { - "version": "2.3.0", - "resolved": 
"https://registry.npmjs.org/@docusaurus/theme-search-algolia/-/theme-search-algolia-2.3.0.tgz", - "integrity": "sha512-/i5k1NAlbYvgnw69vJQA174+ipwdtTCCUvxRp7bVZ+8KmviEybAC/kuKe7WmiUbIGVYbAbwYaEsPuVnsd65DrA==", + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-search-algolia/-/theme-search-algolia-2.3.1.tgz", + "integrity": "sha512-JdHaRqRuH1X++g5fEMLnq7OtULSGQdrs9AbhcWRQ428ZB8/HOiaN6mj3hzHvcD3DFgu7koIVtWPQnvnN7iwzHA==", "requires": { "@docsearch/react": "^3.1.1", - "@docusaurus/core": "2.3.0", - "@docusaurus/logger": "2.3.0", - "@docusaurus/plugin-content-docs": "2.3.0", - "@docusaurus/theme-common": "2.3.0", - "@docusaurus/theme-translations": "2.3.0", - "@docusaurus/utils": "2.3.0", - "@docusaurus/utils-validation": "2.3.0", + "@docusaurus/core": "2.3.1", + "@docusaurus/logger": "2.3.1", + "@docusaurus/plugin-content-docs": "2.3.1", + "@docusaurus/theme-common": "2.3.1", + "@docusaurus/theme-translations": "2.3.1", + "@docusaurus/utils": "2.3.1", + "@docusaurus/utils-validation": "2.3.1", "algoliasearch": "^4.13.1", "algoliasearch-helper": "^3.10.0", "clsx": "^1.2.1", - "eta": "^1.12.3", + "eta": "^2.0.0", "fs-extra": "^10.1.0", "lodash": "^4.17.21", "tslib": "^2.4.0", @@ -16243,18 +16241,18 @@ } }, "@docusaurus/theme-translations": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-translations/-/theme-translations-2.3.0.tgz", - "integrity": "sha512-YLVD6LrszBld1EvThTOa9PcblKAZs1jOmRjwtffdg1CGjQWFXEeWUL24n2M4ARByzuLry5D8ZRVmKyRt3LOwsw==", + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-translations/-/theme-translations-2.3.1.tgz", + "integrity": "sha512-BsBZzAewJabVhoGG1Ij2u4pMS3MPW6gZ6sS4pc+Y7czevRpzxoFNJXRtQDVGe7mOpv/MmRmqg4owDK+lcOTCVQ==", "requires": { "fs-extra": "^10.1.0", "tslib": "^2.4.0" } }, "@docusaurus/types": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/@docusaurus/types/-/types-2.3.0.tgz", - "integrity": 
"sha512-c5C0nROxVFsgMAm4vWDB1LDv3v4K18Y8eVxazL3dEr7w+7kNLc5koWrW7fWmCnrbItnuTna4nLS2PcSZrkYidg==", + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/@docusaurus/types/-/types-2.3.1.tgz", + "integrity": "sha512-PREbIRhTaNNY042qmfSE372Jb7djZt+oVTZkoqHJ8eff8vOIc2zqqDqBVc5BhOfpZGPTrE078yy/torUEZy08A==", "requires": { "@types/history": "^4.7.11", "@types/react": "*", @@ -16267,11 +16265,11 @@ } }, "@docusaurus/utils": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/@docusaurus/utils/-/utils-2.3.0.tgz", - "integrity": "sha512-6+GCurDsePHHbLM3ktcjv8N4zrjgrl1O7gOQNG4UMktcwHssFFVm+geVcB4M8siOmwUjV2VaNrp0hpGy8DOQHw==", + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/@docusaurus/utils/-/utils-2.3.1.tgz", + "integrity": "sha512-9WcQROCV0MmrpOQDXDGhtGMd52DHpSFbKLfkyaYumzbTstrbA5pPOtiGtxK1nqUHkiIv8UwexS54p0Vod2I1lg==", "requires": { - "@docusaurus/logger": "2.3.0", + "@docusaurus/logger": "2.3.1", "@svgr/webpack": "^6.2.1", "escape-string-regexp": "^4.0.0", "file-loader": "^6.2.0", @@ -16297,20 +16295,20 @@ } }, "@docusaurus/utils-common": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/@docusaurus/utils-common/-/utils-common-2.3.0.tgz", - "integrity": "sha512-nu5An+26FS7SQTwvyFR4g9lw3NU1u2RLcxJPZF+NCOG8Ne96ciuQosa7+N1kllm/heEJqfTaAUD0sFxpTZrDtw==", + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/@docusaurus/utils-common/-/utils-common-2.3.1.tgz", + "integrity": "sha512-pVlRpXkdNcxmKNxAaB1ya2hfCEvVsLDp2joeM6K6uv55Oc5nVIqgyYSgSNKZyMdw66NnvMfsu0RBylcwZQKo9A==", "requires": { "tslib": "^2.4.0" } }, "@docusaurus/utils-validation": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/@docusaurus/utils-validation/-/utils-validation-2.3.0.tgz", - "integrity": "sha512-TBJCLqwAoiQQJ6dbgBpuLvzsn/XiTgbZkd6eJFUIQYLb1d473Zv58QrHXVmVQDLWiCgmJpHW2LpMfumTpCDgJw==", + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/@docusaurus/utils-validation/-/utils-validation-2.3.1.tgz", + "integrity": 
"sha512-7n0208IG3k1HVTByMHlZoIDjjOFC8sbViHVXJx0r3Q+3Ezrx+VQ1RZ/zjNn6lT+QBCRCXlnlaoJ8ug4HIVgQ3w==", "requires": { - "@docusaurus/logger": "2.3.0", - "@docusaurus/utils": "2.3.0", + "@docusaurus/logger": "2.3.1", + "@docusaurus/utils": "2.3.1", "joi": "^17.6.0", "js-yaml": "^4.1.0", "tslib": "^2.4.0" @@ -19066,9 +19064,9 @@ "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==" }, "eta": { - "version": "1.12.3", - "resolved": "https://registry.npmjs.org/eta/-/eta-1.12.3.tgz", - "integrity": "sha512-qHixwbDLtekO/d51Yr4glcaUJCIjGVJyTzuqV4GPlgZo1YpgOKG+avQynErZIYrfM6JIJdtiG2Kox8tbb+DoGg==" + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/eta/-/eta-2.0.0.tgz", + "integrity": "sha512-NqE7S2VmVwgMS8yBxsH4VgNQjNjLq1gfGU0u9I6Cjh468nPRMoDfGdK9n1p/3Dvsw3ebklDkZsFAnKJ9sefjBA==" }, "etag": { "version": "1.8.1", @@ -19958,9 +19956,9 @@ } }, "http-cache-semantics": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.1.0.tgz", - "integrity": "sha512-carPklcUh7ROWRK7Cv27RPtdhYhUsela/ue5/jKzjegVvXDqM2ILE9Q2BGn9JZJh1g87cp56su/FgQSzcWS8cQ==" + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.1.1.tgz", + "integrity": "sha512-er295DKPVsV82j5kw1Gjt+ADA/XYHsajl82cGNQG2eyoPkvgUhX+nDIyelzhIWbbsXP39EHcI6l5tYs2FYqYXQ==" }, "http-deceiver": { "version": "1.2.7", @@ -22452,9 +22450,9 @@ "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==" }, "sass": { - "version": "1.57.1", - "resolved": "https://registry.npmjs.org/sass/-/sass-1.57.1.tgz", - "integrity": "sha512-O2+LwLS79op7GI0xZ8fqzF7X2m/m8WFfI02dHOdsK5R2ECeS5F62zrwg/relM1rjSLy7Vd/DiMNIvPrQGsA0jw==", + "version": "1.58.0", + "resolved": "https://registry.npmjs.org/sass/-/sass-1.58.0.tgz", + "integrity": "sha512-PiMJcP33DdKtZ/1jSjjqVIKihoDc6yWmYr9K/4r3fVVIEDAluD0q7XZiRKrNJcPK3qkLRF/79DND1H5q1LBjgg==", 
"requires": { "chokidar": ">=3.0.0 <4.0.0", "immutable": "^4.0.0", diff --git a/website/package.json b/website/package.json index 5d5670674..63f86b146 100644 --- a/website/package.json +++ b/website/package.json @@ -14,9 +14,9 @@ "write-heading-ids": "docusaurus write-heading-ids" }, "dependencies": { - "@docusaurus/core": "2.3.0", - "@docusaurus/plugin-google-gtag": "^2.3.0", - "@docusaurus/preset-classic": "2.3.0", + "@docusaurus/core": "2.3.1", + "@docusaurus/plugin-google-gtag": "^2.3.1", + "@docusaurus/preset-classic": "2.3.1", "@loadable/component": "^5.15.3", "@mdx-js/react": "^1.6.22", "animate.css": "^4.1.1", @@ -30,12 +30,12 @@ "prism-react-renderer": "^1.3.5", "react": "^17.0.2", "react-dom": "^17.0.2", - "sass": "^1.57.1", + "sass": "^1.58.0", "tw-elements": "^1.0.0-alpha13", "wow.js": "^1.2.2" }, "devDependencies": { - "@docusaurus/module-type-aliases": "2.3.0", + "@docusaurus/module-type-aliases": "2.3.1", "@iconify/react": "^4.1.0", "autoprefixer": "^10.4.13", "postcss": "^8.4.21", diff --git a/website/styles/Vocab/Base/accept.txt b/website/styles/Vocab/Base/accept.txt index 69565b8a5..8831b2ca6 100644 --- a/website/styles/Vocab/Base/accept.txt +++ b/website/styles/Vocab/Base/accept.txt @@ -37,4 +37,6 @@ SLAs runbooks stdout stderr -backoff \ No newline at end of file +backoff +Greenlake +subfolder \ No newline at end of file