Merge branch 'main' into sharepoint-restore-selectors

This commit is contained in:
Danny 2023-02-04 16:49:28 -05:00 committed by GitHub
commit aafc9c73f9
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
110 changed files with 6912 additions and 2241 deletions

3
.gitignore vendored
View File

@ -20,6 +20,9 @@
.corso_test.toml
.corso.toml
# Logging
.corso.log
# Build directories
/bin
/docker/bin

View File

@ -10,6 +10,23 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
### Added
- Document Corso's fault-tolerance and restartability features
- Add retries on timeouts and status code 500 for Exchange
- Increase page size preference for delta requests for Exchange to reduce number of roundtrips
- OneDrive file/folder permissions can now be backed up and restored
- Add `--restore-permissions` flag to toggle restoration of OneDrive permissions
- Add versions to backups so that we can understand/handle older backup formats
### Fixed
- Backing up a calendar that has the same name as the default calendar
- Added additional backoff-retry to all OneDrive queries.
- Users with `null` userType values are no longer excluded from user queries.
### Known Issues
- When the same user has permissions to a file and the containing
folder, we only restore folder-level permissions for the user and no
separate file-only permission is restored.
- Link shares are not restored
## [v0.2.0] (alpha) - 2023-1-29
@ -18,7 +35,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Check if the user specified for an exchange backup operation has a mailbox.
### Changed
- Item.Attachments are temporarily disabled from being restored until ([#2353](https://github.com/alcionai/corso/issues/2353)) is patched
- BetaClient introduced. Enables Corso to be able to interact with SharePoint Page objects. Package located `/internal/connector/graph/betasdk`
- Handle case where user's drive has not been initialized
- Inline attachments (e.g. copy/paste ) are discovered and backed up correctly ([#2163](https://github.com/alcionai/corso/issues/2163))

View File

@ -79,6 +79,7 @@ func addOneDriveCommands(cmd *cobra.Command) *cobra.Command {
switch cmd.Use {
case createCommand:
c, fs = utils.AddCommand(cmd, oneDriveCreateCmd())
options.AddFeatureToggle(cmd, options.EnablePermissionsBackup())
c.Use = c.Use + " " + oneDriveServiceCommandCreateUseSuffix
c.Example = oneDriveServiceCommandCreateExamples

View File

@ -72,7 +72,13 @@ func (suite *NoBackupOneDriveIntegrationSuite) SetupSuite() {
suite.m365UserID = tester.M365UserID(t)
// init the repo first
suite.repo, err = repository.Initialize(ctx, suite.acct, suite.st, control.Options{})
suite.repo, err = repository.Initialize(
ctx,
suite.acct,
suite.st,
control.Options{
ToggleFeatures: control.Toggles{EnablePermissionsBackup: true},
})
require.NoError(t, err)
}
@ -152,7 +158,13 @@ func (suite *BackupDeleteOneDriveIntegrationSuite) SetupSuite() {
defer flush()
// init the repo first
suite.repo, err = repository.Initialize(ctx, suite.acct, suite.st, control.Options{})
suite.repo, err = repository.Initialize(
ctx,
suite.acct,
suite.st,
control.Options{
ToggleFeatures: control.Toggles{EnablePermissionsBackup: true},
})
require.NoError(t, err)
m365UserID := tester.M365UserID(t)

View File

@ -6,7 +6,9 @@ import (
"regexp"
"strings"
"github.com/alcionai/clues"
"github.com/spf13/cobra"
"golang.org/x/exp/slices"
"github.com/alcionai/corso/src/cli/backup"
"github.com/alcionai/corso/src/cli/config"
@ -50,6 +52,13 @@ func preRun(cc *cobra.Command, args []string) error {
flagSl = append(flagSl, f)
}
avoidTheseCommands := []string{
"corso", "env", "help", "backup", "details", "list", "restore", "delete", "repo", "init", "connect",
}
if len(logger.LogFile) > 0 && !slices.Contains(avoidTheseCommands, cc.Use) {
print.Info(cc.Context(), "Logging to file: "+logger.LogFile)
}
log.Infow("cli command", "command", cc.CommandPath(), "flags", flagSl, "version", version.CurrentVersion())
return nil
@ -121,6 +130,9 @@ func Handle() {
}()
if err := corsoCmd.ExecuteContext(ctx); err != nil {
logger.Ctx(ctx).
With("err", err).
Errorw("cli execution", clues.InErr(err).Slice()...)
os.Exit(1)
}
}

View File

@ -11,17 +11,11 @@ import (
func Control() control.Options {
opt := control.Defaults()
if fastFail {
opt.FailFast = true
}
if noStats {
opt.DisableMetrics = true
}
if disableIncrementals {
opt.ToggleFeatures.DisableIncrementals = true
}
opt.FailFast = fastFail
opt.DisableMetrics = noStats
opt.RestorePermissions = restorePermissions
opt.ToggleFeatures.DisableIncrementals = disableIncrementals
opt.ToggleFeatures.EnablePermissionsBackup = enablePermissionsBackup
return opt
}
@ -31,8 +25,9 @@ func Control() control.Options {
// ---------------------------------------------------------------------------
var (
fastFail bool
noStats bool
fastFail bool
noStats bool
restorePermissions bool
)
// AddOperationFlags adds command-local operation flags
@ -49,11 +44,22 @@ func AddGlobalOperationFlags(cmd *cobra.Command) {
fs.BoolVar(&noStats, "no-stats", false, "disable anonymous usage statistics gathering")
}
// AddRestorePermissionsFlag adds OneDrive flag for restoring permissions
func AddRestorePermissionsFlag(cmd *cobra.Command) {
fs := cmd.Flags()
fs.BoolVar(&restorePermissions, "restore-permissions", false, "Restore permissions for files and folders")
// TODO: reveal this flag once backing up permissions becomes default
cobra.CheckErr(fs.MarkHidden("restore-permissions"))
}
// ---------------------------------------------------------------------------
// Feature Flags
// ---------------------------------------------------------------------------
var disableIncrementals bool
var (
disableIncrementals bool
enablePermissionsBackup bool
)
type exposeFeatureFlag func(*pflag.FlagSet)
@ -78,3 +84,16 @@ func DisableIncrementals() func(*pflag.FlagSet) {
cobra.CheckErr(fs.MarkHidden("disable-incrementals"))
}
}
// EnablePermissionsBackup adds the hidden '--enable-permissions-backup'
// cli flag which, when set, enables backing up item permissions.
func EnablePermissionsBackup() func(*pflag.FlagSet) {
return func(fs *pflag.FlagSet) {
fs.BoolVar(
&enablePermissionsBackup,
"enable-permissions-backup",
false,
"Enable backing up item permissions for OneDrive")
cobra.CheckErr(fs.MarkHidden("enable-permissions-backup"))
}
}

View File

@ -63,6 +63,9 @@ func addOneDriveCommands(cmd *cobra.Command) *cobra.Command {
utils.FileFN, nil,
"Restore items by file name or ID")
// permissions restore flag
options.AddRestorePermissionsFlag(c)
// onedrive info flags
fs.StringVar(
@ -97,6 +100,9 @@ const (
oneDriveServiceCommandRestoreExamples = `# Restore file with ID 98765abcdef
corso restore onedrive --backup 1234abcd-12ab-cd34-56de-1234abcd --file 98765abcdef
# Restore file with ID 98765abcdef along with its associated permissions
corso restore onedrive --backup 1234abcd-12ab-cd34-56de-1234abcd --file 98765abcdef --restore-permissions
# Restore Alice's file named "FY2021 Planning.xlsx in "Documents/Finance Reports" from a specific backup
corso restore onedrive --backup 1234abcd-12ab-cd34-56de-1234abcd \
--user alice@example.com --file "FY2021 Planning.xlsx" --folder "Documents/Finance Reports"

View File

@ -16,6 +16,7 @@ import (
"github.com/alcionai/corso/src/internal/connector/mockconnector"
"github.com/alcionai/corso/src/internal/data"
"github.com/alcionai/corso/src/pkg/account"
"github.com/alcionai/corso/src/pkg/backup"
"github.com/alcionai/corso/src/pkg/backup/details"
"github.com/alcionai/corso/src/pkg/control"
"github.com/alcionai/corso/src/pkg/credentials"
@ -50,6 +51,7 @@ func generateAndRestoreItems(
tenantID, userID, destFldr string,
howMany int,
dbf dataBuilderFunc,
opts control.Options,
) (*details.Details, error) {
items := make([]item, 0, howMany)
@ -74,7 +76,7 @@ func generateAndRestoreItems(
items: items,
}}
// TODO: fit the desination to the containers
// TODO: fit the destination to the containers
dest := control.DefaultRestoreDestination(common.SimpleTimeTesting)
dest.ContainerName = destFldr
@ -90,7 +92,7 @@ func generateAndRestoreItems(
Infof(ctx, "Generating %d %s items in %s\n", howMany, cat, Destination)
return gc.RestoreDataCollections(ctx, acct, sel, dest, dataColls)
return gc.RestoreDataCollections(ctx, backup.Version, acct, sel, dest, opts, dataColls)
}
// ------------------------------------------------------------------------------------------

View File

@ -6,6 +6,7 @@ import (
. "github.com/alcionai/corso/src/cli/print"
"github.com/alcionai/corso/src/cli/utils"
"github.com/alcionai/corso/src/internal/connector/mockconnector"
"github.com/alcionai/corso/src/pkg/control"
"github.com/alcionai/corso/src/pkg/path"
"github.com/alcionai/corso/src/pkg/selectors"
)
@ -67,6 +68,7 @@ func handleExchangeEmailFactory(cmd *cobra.Command, args []string) error {
subject, body, body,
now, now, now, now)
},
control.Options{},
)
if err != nil {
return Only(ctx, err)
@ -107,6 +109,7 @@ func handleExchangeCalendarEventFactory(cmd *cobra.Command, args []string) error
User, subject, body, body,
now, now, false)
},
control.Options{},
)
if err != nil {
return Only(ctx, err)
@ -152,6 +155,7 @@ func handleExchangeContactFactory(cmd *cobra.Command, args []string) error {
"123-456-7890",
)
},
control.Options{},
)
if err != nil {
return Only(ctx, err)

View File

@ -4,8 +4,8 @@ go 1.19
require (
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0
github.com/alcionai/clues v0.0.0-20230120231953-1cf61dbafc40
github.com/aws/aws-sdk-go v1.44.190
github.com/alcionai/clues v0.0.0-20230131232239-cee86233b005
github.com/aws/aws-sdk-go v1.44.192
github.com/aws/aws-xray-sdk-go v1.8.0
github.com/google/uuid v1.3.0
github.com/hashicorp/go-multierror v1.1.1
@ -71,7 +71,6 @@ require (
github.com/hashicorp/errwrap v1.0.0 // indirect
github.com/hashicorp/golang-lru v0.5.4 // indirect
github.com/inconshreveable/mousetrap v1.0.1 // indirect
github.com/inhies/go-bytesize v0.0.0-20220417184213-4913239db9cf
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.15.12 // indirect

View File

@ -52,8 +52,8 @@ github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1o
github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8=
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo=
github.com/alcionai/clues v0.0.0-20230120231953-1cf61dbafc40 h1:bvAwz0dcJeIyRjudVyzmmawOvc4SqlSerKd0B4dh0yw=
github.com/alcionai/clues v0.0.0-20230120231953-1cf61dbafc40/go.mod h1:UlAs8jkWIpsOMakiC8NxPgQQVQRdvyf1hYMszlYYLb4=
github.com/alcionai/clues v0.0.0-20230131232239-cee86233b005 h1:eTgICcmcydEWG8J+hgnidf0pzujV3Gd2XqmknykZkzA=
github.com/alcionai/clues v0.0.0-20230131232239-cee86233b005/go.mod h1:UlAs8jkWIpsOMakiC8NxPgQQVQRdvyf1hYMszlYYLb4=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
@ -62,8 +62,8 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5
github.com/alessio/shellescape v1.4.1 h1:V7yhSDDn8LP4lc4jS8pFkt0zCnzVJlG5JXy9BVKJUX0=
github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY3JY=
github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
github.com/aws/aws-sdk-go v1.44.190 h1:QC+Pf/Ooj7Waf2obOPZbIQOqr00hy4h54j3ZK9mvHcc=
github.com/aws/aws-sdk-go v1.44.190/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/aws/aws-sdk-go v1.44.192 h1:KL54vCxRd5v5XBGjnF3FelzXXwl+aWHDmDTihFmRNgM=
github.com/aws/aws-sdk-go v1.44.192/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/aws/aws-xray-sdk-go v1.8.0 h1:0xncHZ588wB/geLjbM/esoW3FOEThWy2TJyb4VXfLFY=
github.com/aws/aws-xray-sdk-go v1.8.0/go.mod h1:7LKe47H+j3evfvS1+q0wzpoaGXGrF3mUsfM+thqVO+A=
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
@ -209,8 +209,6 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc=
github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/inhies/go-bytesize v0.0.0-20220417184213-4913239db9cf h1:FtEj8sfIcaaBfAKrE1Cwb61YDtYq9JxChK1c7AKce7s=
github.com/inhies/go-bytesize v0.0.0-20220417184213-4913239db9cf/go.mod h1:yrqSXGoD/4EKfF26AOGzscPOgTTJcyAwM2rpixWT+t4=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=

View File

@ -83,7 +83,7 @@ func (gc *GraphConnector) DataCollections(
return colls, excludes, nil
case selectors.ServiceOneDrive:
return gc.OneDriveDataCollections(ctx, sels, ctrlOpts)
return gc.OneDriveDataCollections(ctx, sels, metadata, ctrlOpts)
case selectors.ServiceSharePoint:
colls, excludes, err := sharepoint.DataCollections(
@ -182,6 +182,7 @@ func (fm odFolderMatcher) Matches(dir string) bool {
func (gc *GraphConnector) OneDriveDataCollections(
ctx context.Context,
selector selectors.Selector,
metadata []data.Collection,
ctrlOpts control.Options,
) ([]data.Collection, map[string]struct{}, error) {
odb, err := selector.ToOneDriveBackup()
@ -209,7 +210,7 @@ func (gc *GraphConnector) OneDriveDataCollections(
gc.Service,
gc.UpdateStatus,
ctrlOpts,
).Get(ctx)
).Get(ctx, metadata)
if err != nil {
return nil, nil, support.WrapAndAppend(user, err, errs)
}

View File

@ -1,32 +1,33 @@
package api
import (
"github.com/alcionai/corso/src/internal/connector/graph/betasdk"
absser "github.com/microsoft/kiota-abstractions-go/serialization"
msgraphsdk "github.com/microsoftgraph/msgraph-sdk-go"
"github.com/pkg/errors"
"github.com/alcionai/corso/src/internal/connector/graph/betasdk"
)
// BetaService wraps BetaClient's functionality.
// Abstraction created to comply loosely with graph.Servicer
// methods for ease of switching between v1.0 and beta connectors
type Service struct {
type BetaService struct {
client *betasdk.BetaClient
}
func (s Service) Client() *betasdk.BetaClient {
func (s BetaService) Client() *betasdk.BetaClient {
return s.client
}
func NewBetaService(adpt *msgraphsdk.GraphRequestAdapter) *Service {
return &Service{
func NewBetaService(adpt *msgraphsdk.GraphRequestAdapter) *BetaService {
return &BetaService{
client: betasdk.NewBetaClient(adpt),
}
}
// Serialize writes an M365-parsable object into a byte array using the built-in
// application/json writer within the adapter.
func (s Service) Serialize(object absser.Parsable) ([]byte, error) {
func (s BetaService) Serialize(object absser.Parsable) ([]byte, error) {
writer, err := s.client.Adapter().
GetSerializationWriterFactory().
GetSerializationWriter("application/json")

View File

@ -3,6 +3,7 @@ package api
import (
"context"
absser "github.com/microsoft/kiota-abstractions-go"
msgraphgocore "github.com/microsoftgraph/msgraph-sdk-go-core"
"github.com/microsoftgraph/msgraph-sdk-go/models"
"github.com/microsoftgraph/msgraph-sdk-go/users"
@ -58,14 +59,27 @@ const (
// require more fine-tuned controls in the future.
// https://stackoverflow.com/questions/64044266/error-message-unsupported-or-invalid-query-filter-clause-specified-for-property
//
// ne 'Guest' ensures we don't filter out users where userType = null, which can happen
// for user accounts created prior to 2014. In order to use the `ne` comparator, we
// MUST include $count=true and the ConsistencyLevel: eventual header.
// https://stackoverflow.com/questions/49340485/how-to-filter-users-by-usertype-null
//
//nolint:lll
var userFilterNoGuests = "onPremisesSyncEnabled eq true OR userType eq 'Member'"
var userFilterNoGuests = "onPremisesSyncEnabled eq true OR userType ne 'Guest'"
// t is declared at package scope so we can take its address below; the
// Count query parameter requires a *bool, and Go does not allow &true.
var t = true
func userOptions(fs *string) *users.UsersRequestBuilderGetRequestConfiguration {
headers := absser.NewRequestHeaders()
headers.Add("ConsistencyLevel", "eventual")
return &users.UsersRequestBuilderGetRequestConfiguration{
Headers: headers,
QueryParameters: &users.UsersRequestBuilderGetQueryParameters{
Select: []string{userSelectID, userSelectPrincipalName, userSelectDisplayName},
Filter: fs,
Count: &t,
},
}
}
@ -77,7 +91,13 @@ func (c Users) GetAll(ctx context.Context) ([]models.Userable, error) {
return nil, err
}
resp, err := service.Client().Users().Get(ctx, userOptions(&userFilterNoGuests))
var resp models.UserCollectionResponseable
err = graph.RunWithRetry(func() error {
resp, err = service.Client().Users().Get(ctx, userOptions(&userFilterNoGuests))
return err
})
if err != nil {
return nil, support.ConnectorStackErrorTraceWrap(err, "getting all users")
}
@ -114,22 +134,37 @@ func (c Users) GetAll(ctx context.Context) ([]models.Userable, error) {
}
func (c Users) GetByID(ctx context.Context, userID string) (models.Userable, error) {
user, err := c.stable.Client().UsersById(userID).Get(ctx, nil)
var (
resp models.Userable
err error
)
err = graph.RunWithRetry(func() error {
resp, err = c.stable.Client().UsersById(userID).Get(ctx, nil)
return err
})
if err != nil {
return nil, support.ConnectorStackErrorTraceWrap(err, "getting user by id")
}
return user, nil
return resp, err
}
func (c Users) GetInfo(ctx context.Context, userID string) (*UserInfo, error) {
// Assume all services are enabled
// then filter down to only services the user has enabled
userInfo := newUserInfo()
var (
err error
userInfo = newUserInfo()
)
// TODO: OneDrive
err = graph.RunWithRetry(func() error {
_, err = c.stable.Client().UsersById(userID).MailFolders().Get(ctx, nil)
return err
})
_, err := c.stable.Client().UsersById(userID).MailFolders().Get(ctx, nil)
if err != nil {
if !graph.IsErrExchangeMailFolderNotFound(err) {
return nil, support.ConnectorStackErrorTraceWrap(err, "getting user's exchange mailfolders")

View File

@ -5,6 +5,7 @@ import (
"fmt"
"time"
"github.com/alcionai/clues"
"github.com/hashicorp/go-multierror"
"github.com/microsoft/kiota-abstractions-go/serialization"
kioser "github.com/microsoft/kiota-serialization-json-go"
@ -16,6 +17,7 @@ import (
"github.com/alcionai/corso/src/internal/connector/graph/api"
"github.com/alcionai/corso/src/internal/connector/support"
"github.com/alcionai/corso/src/pkg/backup/details"
"github.com/alcionai/corso/src/pkg/selectors"
)
// ---------------------------------------------------------------------------
@ -61,7 +63,16 @@ func (c Contacts) GetItem(
ctx context.Context,
user, itemID string,
) (serialization.Parsable, *details.ExchangeInfo, error) {
cont, err := c.stable.Client().UsersById(user).ContactsById(itemID).Get(ctx, nil)
var (
cont models.Contactable
err error
)
err = graph.RunWithRetry(func() error {
cont, err = c.stable.Client().UsersById(user).ContactsById(itemID).Get(ctx, nil)
return err
})
if err != nil {
return nil, nil, err
}
@ -81,7 +92,14 @@ func (c Contacts) GetAllContactFolderNamesForUser(
return nil, err
}
return c.stable.Client().UsersById(user).ContactFolders().Get(ctx, options)
var resp models.ContactFolderCollectionResponseable
err = graph.RunWithRetry(func() error {
resp, err = c.stable.Client().UsersById(user).ContactFolders().Get(ctx, options)
return err
})
return resp, err
}
func (c Contacts) GetContainerByID(
@ -93,10 +111,14 @@ func (c Contacts) GetContainerByID(
return nil, errors.Wrap(err, "options for contact folder")
}
return c.stable.Client().
UsersById(userID).
ContactFoldersById(dirID).
Get(ctx, ofcf)
var resp models.ContactFolderable
err = graph.RunWithRetry(func() error {
resp, err = c.stable.Client().UsersById(userID).ContactFoldersById(dirID).Get(ctx, ofcf)
return err
})
return resp, err
}
// EnumerateContainers iterates through all of the users current
@ -117,6 +139,7 @@ func (c Contacts) EnumerateContainers(
var (
errs *multierror.Error
resp models.ContactFolderCollectionResponseable
fields = []string{"displayName", "parentFolderId"}
)
@ -131,7 +154,11 @@ func (c Contacts) EnumerateContainers(
ChildFolders()
for {
resp, err := builder.Get(ctx, ofcf)
err = graph.RunWithRetry(func() error {
resp, err = builder.Get(ctx, ofcf)
return err
})
if err != nil {
return errors.Wrap(err, support.ConnectorStackErrorTrace(err))
}
@ -174,7 +201,17 @@ type contactPager struct {
}
func (p *contactPager) getPage(ctx context.Context) (api.DeltaPageLinker, error) {
return p.builder.Get(ctx, p.options)
var (
resp api.DeltaPageLinker
err error
)
err = graph.RunWithRetry(func() error {
resp, err = p.builder.Get(ctx, p.options)
return err
})
return resp, err
}
func (p *contactPager) setNext(nextLink string) {
@ -199,6 +236,11 @@ func (c Contacts) GetAddedAndRemovedItemIDs(
resetDelta bool
)
ctx = clues.AddAll(
ctx,
"category", selectors.ExchangeContact,
"folder_id", directoryID)
options, err := optionsForContactFoldersItemDelta([]string{"parentFolderId"})
if err != nil {
return nil, nil, DeltaUpdate{}, errors.Wrap(err, "getting query options")

View File

@ -5,6 +5,7 @@ import (
"fmt"
"time"
"github.com/alcionai/clues"
"github.com/hashicorp/go-multierror"
"github.com/microsoft/kiota-abstractions-go/serialization"
kioser "github.com/microsoft/kiota-serialization-json-go"
@ -19,6 +20,7 @@ import (
"github.com/alcionai/corso/src/pkg/backup/details"
"github.com/alcionai/corso/src/pkg/logger"
"github.com/alcionai/corso/src/pkg/path"
"github.com/alcionai/corso/src/pkg/selectors"
)
// ---------------------------------------------------------------------------
@ -73,7 +75,13 @@ func (c Events) GetContainerByID(
return nil, errors.Wrap(err, "options for event calendar")
}
cal, err := service.Client().UsersById(userID).CalendarsById(containerID).Get(ctx, ofc)
var cal models.Calendarable
err = graph.RunWithRetry(func() error {
cal, err = service.Client().UsersById(userID).CalendarsById(containerID).Get(ctx, ofc)
return err
})
if err != nil {
return nil, err
}
@ -86,12 +94,28 @@ func (c Events) GetItem(
ctx context.Context,
user, itemID string,
) (serialization.Parsable, *details.ExchangeInfo, error) {
event, err := c.stable.Client().UsersById(user).EventsById(itemID).Get(ctx, nil)
var (
event models.Eventable
err error
)
err = graph.RunWithRetry(func() error {
event, err = c.stable.Client().UsersById(user).EventsById(itemID).Get(ctx, nil)
return err
})
if err != nil {
return nil, nil, err
}
var errs *multierror.Error
var (
errs *multierror.Error
options = &users.ItemEventsItemAttachmentsRequestBuilderGetRequestConfiguration{
QueryParameters: &users.ItemEventsItemAttachmentsRequestBuilderGetQueryParameters{
Expand: []string{"microsoft.graph.itemattachment/item"},
},
}
)
if *event.GetHasAttachments() || HasAttachments(event.GetBody()) {
for count := 0; count < numberOfRetries; count++ {
@ -100,7 +124,7 @@ func (c Events) GetItem(
UsersById(user).
EventsById(itemID).
Attachments().
Get(ctx, nil)
Get(ctx, options)
if err == nil {
event.SetAttachments(attached.GetValue())
break
@ -128,7 +152,14 @@ func (c Client) GetAllCalendarNamesForUser(
return nil, err
}
return c.stable.Client().UsersById(user).Calendars().Get(ctx, options)
var resp models.CalendarCollectionResponseable
err = graph.RunWithRetry(func() error {
resp, err = c.stable.Client().UsersById(user).Calendars().Get(ctx, options)
return err
})
return resp, err
}
// EnumerateContainers iterates through all of the users current
@ -147,7 +178,10 @@ func (c Events) EnumerateContainers(
return err
}
var errs *multierror.Error
var (
resp models.CalendarCollectionResponseable
errs *multierror.Error
)
ofc, err := optionsForCalendars([]string{"name"})
if err != nil {
@ -157,7 +191,13 @@ func (c Events) EnumerateContainers(
builder := service.Client().UsersById(userID).Calendars()
for {
resp, err := builder.Get(ctx, ofc)
var err error
err = graph.RunWithRetry(func() error {
resp, err = builder.Get(ctx, ofc)
return err
})
if err != nil {
return errors.Wrap(err, support.ConnectorStackErrorTrace(err))
}
@ -205,7 +245,16 @@ type eventPager struct {
}
func (p *eventPager) getPage(ctx context.Context) (api.DeltaPageLinker, error) {
resp, err := p.builder.Get(ctx, p.options)
var (
resp api.DeltaPageLinker
err error
)
err = graph.RunWithRetry(func() error {
resp, err = p.builder.Get(ctx, p.options)
return err
})
return resp, err
}
@ -231,6 +280,11 @@ func (c Events) GetAddedAndRemovedItemIDs(
errs *multierror.Error
)
ctx = clues.AddAll(
ctx,
"category", selectors.ExchangeEvent,
"calendar_id", calendarID)
if len(oldDelta) > 0 {
builder := users.NewItemCalendarsItemEventsDeltaRequestBuilder(oldDelta, service.Adapter())
pgr := &eventPager{service, builder, nil}

View File

@ -5,6 +5,7 @@ import (
"fmt"
"time"
"github.com/alcionai/clues"
"github.com/hashicorp/go-multierror"
"github.com/microsoft/kiota-abstractions-go/serialization"
kioser "github.com/microsoft/kiota-serialization-json-go"
@ -17,6 +18,7 @@ import (
"github.com/alcionai/corso/src/internal/connector/support"
"github.com/alcionai/corso/src/pkg/backup/details"
"github.com/alcionai/corso/src/pkg/logger"
"github.com/alcionai/corso/src/pkg/selectors"
)
// ---------------------------------------------------------------------------
@ -95,7 +97,14 @@ func (c Mail) GetContainerByID(
return nil, errors.Wrap(err, "options for mail folder")
}
return service.Client().UsersById(userID).MailFoldersById(dirID).Get(ctx, ofmf)
var resp graph.Container
err = graph.RunWithRetry(func() error {
resp, err = service.Client().UsersById(userID).MailFoldersById(dirID).Get(ctx, ofmf)
return err
})
return resp, err
}
// GetItem retrieves a Messageable item. If the item contains an attachment, that
@ -104,7 +113,16 @@ func (c Mail) GetItem(
ctx context.Context,
user, itemID string,
) (serialization.Parsable, *details.ExchangeInfo, error) {
mail, err := c.stable.Client().UsersById(user).MessagesById(itemID).Get(ctx, nil)
var (
mail models.Messageable
err error
)
err = graph.RunWithRetry(func() error {
mail, err = c.stable.Client().UsersById(user).MessagesById(itemID).Get(ctx, nil)
return err
})
if err != nil {
return nil, nil, err
}
@ -112,13 +130,18 @@ func (c Mail) GetItem(
var errs *multierror.Error
if *mail.GetHasAttachments() || HasAttachments(mail.GetBody()) {
options := &users.ItemMessagesItemAttachmentsRequestBuilderGetRequestConfiguration{
QueryParameters: &users.ItemMessagesItemAttachmentsRequestBuilderGetQueryParameters{
Expand: []string{"microsoft.graph.itemattachment/item"},
},
}
for count := 0; count < numberOfRetries; count++ {
attached, err := c.largeItem.
Client().
UsersById(user).
MessagesById(itemID).
Attachments().
Get(ctx, nil)
Get(ctx, options)
if err == nil {
mail.SetAttachments(attached.GetValue())
break
@ -154,6 +177,7 @@ func (c Mail) EnumerateContainers(
}
var (
resp users.ItemMailFoldersDeltaResponseable
errs *multierror.Error
builder = service.Client().
UsersById(userID).
@ -162,7 +186,13 @@ func (c Mail) EnumerateContainers(
)
for {
resp, err := builder.Get(ctx, nil)
var err error
err = graph.RunWithRetry(func() error {
resp, err = builder.Get(ctx, nil)
return err
})
if err != nil {
return errors.Wrap(err, support.ConnectorStackErrorTrace(err))
}
@ -200,7 +230,17 @@ type mailPager struct {
}
func (p *mailPager) getPage(ctx context.Context) (api.DeltaPageLinker, error) {
return p.builder.Get(ctx, p.options)
var (
page api.DeltaPageLinker
err error
)
err = graph.RunWithRetry(func() error {
page, err = p.builder.Get(ctx, p.options)
return err
})
return page, err
}
func (p *mailPager) setNext(nextLink string) {
@ -226,6 +266,11 @@ func (c Mail) GetAddedAndRemovedItemIDs(
resetDelta bool
)
ctx = clues.AddAll(
ctx,
"category", selectors.ExchangeMail,
"folder_id", directoryID)
options, err := optionsForFolderMessagesDelta([]string{"isRead"})
if err != nil {
return nil, nil, DeltaUpdate{}, errors.Wrap(err, "getting query options")

View File

@ -3,6 +3,7 @@ package api
import (
"fmt"
abstractions "github.com/microsoft/kiota-abstractions-go"
"github.com/microsoftgraph/msgraph-sdk-go/users"
)
@ -53,6 +54,16 @@ var (
}
)
const (
// headerKeyPrefer is used to set query preferences
headerKeyPrefer = "Prefer"
// maxPageSizeHeaderFmt is used to indicate max page size
// preferences
maxPageSizeHeaderFmt = "odata.maxpagesize=%d"
// deltaMaxPageSize is the max page size to use for delta queries
deltaMaxPageSize = 200
)
// -----------------------------------------------------------------------
// exchange.Query Option Section
// These functions can be used to filter a response on M365
@ -71,8 +82,10 @@ func optionsForFolderMessagesDelta(
requestParameters := &users.ItemMailFoldersItemMessagesDeltaRequestBuilderGetQueryParameters{
Select: selecting,
}
options := &users.ItemMailFoldersItemMessagesDeltaRequestBuilderGetRequestConfiguration{
QueryParameters: requestParameters,
Headers: buildDeltaRequestHeaders(),
}
return options, nil
@ -218,6 +231,7 @@ func optionsForContactFoldersItemDelta(
options := &users.ItemContactFoldersItemContactsDeltaRequestBuilderGetRequestConfiguration{
QueryParameters: requestParameters,
Headers: buildDeltaRequestHeaders(),
}
return options, nil
@ -275,3 +289,11 @@ func buildOptions(fields []string, allowed map[string]struct{}) ([]string, error
return append(returnedOptions, fields...), nil
}
// buildDeltaRequestHeaders returns the headers we add to delta page requests
func buildDeltaRequestHeaders() *abstractions.RequestHeaders {
headers := abstractions.NewRequestHeaders()
headers.Add(headerKeyPrefer, fmt.Sprintf(maxPageSizeHeaderFmt, deltaMaxPageSize))
return headers
}

View File

@ -8,6 +8,7 @@ import (
"github.com/alcionai/corso/src/internal/connector/graph"
"github.com/alcionai/corso/src/internal/connector/graph/api"
"github.com/alcionai/corso/src/internal/connector/support"
"github.com/alcionai/corso/src/pkg/logger"
)
// ---------------------------------------------------------------------------
@ -64,6 +65,9 @@ func getItemsAddedAndRemovedFromContainer(
deltaURL string
)
itemCount := 0
page := 0
for {
// get the next page of data, check for standard errors
resp, err := pager.getPage(ctx)
@ -82,6 +86,14 @@ func getItemsAddedAndRemovedFromContainer(
return nil, nil, "", err
}
itemCount += len(items)
page++
// Log every ~1000 items (the page size we use is 200)
if page%5 == 0 {
logger.Ctx(ctx).Infow("queried items", "count", itemCount)
}
// iterate through the items in the page
for _, item := range items {
// if the additional data conains a `@removed` key, the value will either
@ -114,5 +126,7 @@ func getItemsAddedAndRemovedFromContainer(
pager.setNext(nextLink)
}
logger.Ctx(ctx).Infow("completed enumeration", "count", itemCount)
return addedIDs, removedIDs, deltaURL, nil
}

View File

@ -8,6 +8,7 @@ import (
"github.com/microsoftgraph/msgraph-sdk-go/models"
"github.com/pkg/errors"
"github.com/alcionai/corso/src/internal/connector/support"
"github.com/alcionai/corso/src/internal/connector/uploadsession"
"github.com/alcionai/corso/src/pkg/logger"
)
@ -44,8 +45,11 @@ func uploadAttachment(
attachment models.Attachmentable,
) error {
logger.Ctx(ctx).Debugf("uploading attachment with size %d", *attachment.GetSize())
attachmentType := attachmentType(attachment)
var (
attachmentType = attachmentType(attachment)
err error
)
// Reference attachments that are inline() do not need to be recreated. The contents are part of the body.
if attachmentType == models.REFERENCE_ATTACHMENTTYPE &&
attachment.GetIsInline() != nil && *attachment.GetIsInline() {
@ -53,6 +57,30 @@ func uploadAttachment(
return nil
}
// item Attachments to be skipped until the completion of Issue #2353
if attachmentType == models.ITEM_ATTACHMENTTYPE {
prev := attachment
attachment, err = support.ToItemAttachment(attachment)
if err != nil {
name := ""
if prev.GetName() != nil {
name = *prev.GetName()
}
// TODO: Update to support PII protection
logger.Ctx(ctx).Infow("item attachment uploads are not supported ",
"err", err,
"attachment_name", name,
"attachment_type", attachmentType,
"internal_item_type", getItemAttachmentItemType(prev),
"attachment_id", *prev.GetId(),
)
return nil
}
}
// For Item/Reference attachments *or* file attachments < 3MB, use the attachments endpoint
if attachmentType != models.FILE_ATTACHMENTTYPE || *attachment.GetSize() < largeAttachmentSize {
err := uploader.uploadSmallAttachment(ctx, attachment)
@ -90,3 +118,19 @@ func uploadLargeAttachment(ctx context.Context, uploader attachmentUploadable,
return nil
}
// getItemAttachmentItemType returns the @odata.type of the item nested
// inside an item attachment (for log enrichment), or "" when the
// attachment is not an ItemAttachmentable, has no nested item, or the
// nested item carries no @odata.type value.
func getItemAttachmentItemType(query models.Attachmentable) string {
	attachment, ok := query.(models.ItemAttachmentable)
	if !ok {
		return ""
	}

	item := attachment.GetItem()
	// Guard the nil-item case explicitly: the original only checked
	// GetOdataType() for nil, so a nil nested item would panic here.
	if item == nil || item.GetOdataType() == nil {
		return ""
	}

	return *item.GetOdataType()
}

View File

@ -501,10 +501,11 @@ func (suite *FolderCacheIntegrationSuite) TestCreateContainerDestination() {
directoryCaches = make(map[path.CategoryType]graph.ContainerResolver)
folderName = tester.DefaultTestRestoreDestination().ContainerName
tests = []struct {
name string
pathFunc1 func(t *testing.T) path.Path
pathFunc2 func(t *testing.T) path.Path
category path.CategoryType
name string
pathFunc1 func(t *testing.T) path.Path
pathFunc2 func(t *testing.T) path.Path
category path.CategoryType
folderPrefix string
}{
{
name: "Mail Cache Test",
@ -587,6 +588,7 @@ func (suite *FolderCacheIntegrationSuite) TestCreateContainerDestination() {
require.NoError(t, err)
return aPath
},
folderPrefix: calendarOthersFolder,
},
}
)
@ -617,8 +619,9 @@ func (suite *FolderCacheIntegrationSuite) TestCreateContainerDestination() {
_, err = resolver.IDToPath(ctx, secondID)
require.NoError(t, err)
_, ok := resolver.PathInCache(folderName)
require.True(t, ok)
p := stdpath.Join(test.folderPrefix, folderName)
_, ok := resolver.PathInCache(p)
require.True(t, ok, "looking for path in cache: %s", p)
})
}
}

View File

@ -537,9 +537,9 @@ func (suite *DataCollectionsIntegrationSuite) TestEventsSerializationRegression(
},
{
name: "Birthday Calendar",
expected: "Birthdays",
expected: calendarOthersFolder + "/Birthdays",
scope: selectors.NewExchangeBackup(users).EventCalendars(
[]string{"Birthdays"},
[]string{calendarOthersFolder + "/Birthdays"},
selectors.PrefixMatch(),
)[0],
},

View File

@ -64,7 +64,15 @@ func (ecc *eventCalendarCache) Populate(
return errors.Wrap(err, "initializing")
}
err := ecc.enumer.EnumerateContainers(ctx, ecc.userID, "", ecc.addFolder)
err := ecc.enumer.EnumerateContainers(
ctx,
ecc.userID,
"",
func(cf graph.CacheFolder) error {
cf.SetPath(path.Builder{}.Append(calendarOthersFolder, *cf.GetDisplayName()))
return ecc.addFolder(cf)
},
)
if err != nil {
return errors.Wrap(err, "enumerating containers")
}
@ -83,7 +91,7 @@ func (ecc *eventCalendarCache) AddToCache(ctx context.Context, f graph.Container
return errors.Wrap(err, "validating container")
}
temp := graph.NewCacheFolder(f, path.Builder{}.Append(*f.GetDisplayName()))
temp := graph.NewCacheFolder(f, path.Builder{}.Append(calendarOthersFolder, *f.GetDisplayName()))
if err := ecc.addFolder(temp); err != nil {
return errors.Wrap(err, "adding container")

View File

@ -38,4 +38,5 @@ const (
rootFolderAlias = "msgfolderroot"
DefaultContactFolder = "Contacts"
DefaultCalendar = "Calendar"
calendarOthersFolder = "Other Calendars"
)

View File

@ -175,6 +175,30 @@ func (suite *ExchangeRestoreSuite) TestRestoreExchangeObject() {
return *folder.GetId()
},
},
{
name: "Test Mail: Item Attachment_Event",
bytes: mockconnector.GetMockMessageWithItemAttachmentEvent("Event Item Attachment"),
category: path.EmailCategory,
destination: func(t *testing.T, ctx context.Context) string {
folderName := "TestRestoreEventItemAttachment: " + common.FormatSimpleDateTime(now)
folder, err := suite.ac.Mail().CreateMailFolder(ctx, userID, folderName)
require.NoError(t, err)
return *folder.GetId()
},
},
{ // Restore will upload the Message without uploading the attachment
name: "Test Mail: Item Attachment_NestedEvent",
bytes: mockconnector.GetMockMessageWithNestedItemAttachmentEvent("Nested Item Attachment"),
category: path.EmailCategory,
destination: func(t *testing.T, ctx context.Context) string {
folderName := "TestRestoreNestedEventItemAttachment: " + common.FormatSimpleDateTime(now)
folder, err := suite.ac.Mail().CreateMailFolder(ctx, userID, folderName)
require.NoError(t, err)
return *folder.GetId()
},
},
{
name: "Test Mail: One Large Attachment",
bytes: mockconnector.GetMockMessageWithLargeAttachment("Restore Large Attachment"),
@ -266,7 +290,7 @@ func (suite *ExchangeRestoreSuite) TestRestoreExchangeObject() {
userID,
)
assert.NoError(t, err, support.ConnectorStackErrorTrace(err))
assert.NotNil(t, info, "item info is populated")
assert.NotNil(t, info, "item info was not populated")
assert.NoError(t, deleters[test.category].DeleteContainer(ctx, userID, destination))
})
}

View File

@ -189,23 +189,32 @@ func RestoreMailMessage(
// 1st: No transmission
// 2nd: Send Date
// 3rd: Recv Date
svlep := make([]models.SingleValueLegacyExtendedPropertyable, 0)
sv1 := models.NewSingleValueLegacyExtendedProperty()
sv1.SetId(&valueID)
sv1.SetValue(&enableValue)
svlep = append(svlep, sv1)
sv2 := models.NewSingleValueLegacyExtendedProperty()
sendPropertyValue := common.FormatLegacyTime(*clone.GetSentDateTime())
sendPropertyTag := MailSendDateTimeOverrideProperty
sv2.SetId(&sendPropertyTag)
sv2.SetValue(&sendPropertyValue)
if clone.GetSentDateTime() != nil {
sv2 := models.NewSingleValueLegacyExtendedProperty()
sendPropertyValue := common.FormatLegacyTime(*clone.GetSentDateTime())
sendPropertyTag := MailSendDateTimeOverrideProperty
sv2.SetId(&sendPropertyTag)
sv2.SetValue(&sendPropertyValue)
sv3 := models.NewSingleValueLegacyExtendedProperty()
recvPropertyValue := common.FormatLegacyTime(*clone.GetReceivedDateTime())
recvPropertyTag := MailReceiveDateTimeOverriveProperty
sv3.SetId(&recvPropertyTag)
sv3.SetValue(&recvPropertyValue)
svlep = append(svlep, sv2)
}
if clone.GetReceivedDateTime() != nil {
sv3 := models.NewSingleValueLegacyExtendedProperty()
recvPropertyValue := common.FormatLegacyTime(*clone.GetReceivedDateTime())
recvPropertyTag := MailReceiveDateTimeOverriveProperty
sv3.SetId(&recvPropertyTag)
sv3.SetValue(&recvPropertyValue)
svlep = append(svlep, sv3)
}
svlep := []models.SingleValueLegacyExtendedPropertyable{sv1, sv2, sv3}
clone.SetSingleValueExtendedProperties(svlep)
// Switch workflow based on collision policy
@ -248,10 +257,9 @@ func SendMailToBackStore(
errs error
)
if *message.GetHasAttachments() {
attached = message.GetAttachments()
message.SetAttachments([]models.Attachmentable{})
}
// Item.Attachments --> HasAttachments doesn't always have a value populated when deserialized
attached = message.GetAttachments()
message.SetAttachments([]models.Attachmentable{})
sentMessage, err := service.Client().UsersById(user).MailFoldersById(destination).Messages().Post(ctx, message, nil)
if err != nil {
@ -637,7 +645,11 @@ func establishEventsRestoreLocation(
user string,
isNewCache bool,
) (string, error) {
cached, ok := ecc.PathInCache(folders[0])
// Need to prefix with the "Other Calendars" folder so lookup happens properly.
cached, ok := ecc.PathInCache(path.Builder{}.Append(
calendarOthersFolder,
folders[0],
).String())
if ok {
return cached, nil
}

View File

@ -1,13 +1,14 @@
package betasdk
import (
i1a3c1a5501c5e41b7fd169f2d4c768dce9b096ac28fb5431bf02afcc57295411 "github.com/alcionai/corso/src/internal/connector/graph/betasdk/sites"
absser "github.com/microsoft/kiota-abstractions-go"
kioser "github.com/microsoft/kiota-abstractions-go/serialization"
kform "github.com/microsoft/kiota-serialization-form-go"
kw "github.com/microsoft/kiota-serialization-json-go"
ktext "github.com/microsoft/kiota-serialization-text-go"
msgraphsdk "github.com/microsoftgraph/msgraph-sdk-go"
i1a3c1a5501c5e41b7fd169f2d4c768dce9b096ac28fb5431bf02afcc57295411 "github.com/alcionai/corso/src/internal/connector/graph/betasdk/sites"
)
// BetaClient the main entry point of the SDK, exposes the configuration and the fluent API.

View File

@ -1,52 +1,54 @@
package models
import (
"errors"
"errors"
)
// Provides operations to call the remove method.
type HorizontalSectionLayoutType int
const (
NONE_HORIZONTALSECTIONLAYOUTTYPE HorizontalSectionLayoutType = iota
ONECOLUMN_HORIZONTALSECTIONLAYOUTTYPE
TWOCOLUMNS_HORIZONTALSECTIONLAYOUTTYPE
THREECOLUMNS_HORIZONTALSECTIONLAYOUTTYPE
ONETHIRDLEFTCOLUMN_HORIZONTALSECTIONLAYOUTTYPE
ONETHIRDRIGHTCOLUMN_HORIZONTALSECTIONLAYOUTTYPE
FULLWIDTH_HORIZONTALSECTIONLAYOUTTYPE
UNKNOWNFUTUREVALUE_HORIZONTALSECTIONLAYOUTTYPE
NONE_HORIZONTALSECTIONLAYOUTTYPE HorizontalSectionLayoutType = iota
ONECOLUMN_HORIZONTALSECTIONLAYOUTTYPE
TWOCOLUMNS_HORIZONTALSECTIONLAYOUTTYPE
THREECOLUMNS_HORIZONTALSECTIONLAYOUTTYPE
ONETHIRDLEFTCOLUMN_HORIZONTALSECTIONLAYOUTTYPE
ONETHIRDRIGHTCOLUMN_HORIZONTALSECTIONLAYOUTTYPE
FULLWIDTH_HORIZONTALSECTIONLAYOUTTYPE
UNKNOWNFUTUREVALUE_HORIZONTALSECTIONLAYOUTTYPE
)
func (i HorizontalSectionLayoutType) String() string {
return []string{"none", "oneColumn", "twoColumns", "threeColumns", "oneThirdLeftColumn", "oneThirdRightColumn", "fullWidth", "unknownFutureValue"}[i]
return []string{"none", "oneColumn", "twoColumns", "threeColumns", "oneThirdLeftColumn", "oneThirdRightColumn", "fullWidth", "unknownFutureValue"}[i]
}
func ParseHorizontalSectionLayoutType(v string) (interface{}, error) {
result := NONE_HORIZONTALSECTIONLAYOUTTYPE
switch v {
case "none":
result = NONE_HORIZONTALSECTIONLAYOUTTYPE
case "oneColumn":
result = ONECOLUMN_HORIZONTALSECTIONLAYOUTTYPE
case "twoColumns":
result = TWOCOLUMNS_HORIZONTALSECTIONLAYOUTTYPE
case "threeColumns":
result = THREECOLUMNS_HORIZONTALSECTIONLAYOUTTYPE
case "oneThirdLeftColumn":
result = ONETHIRDLEFTCOLUMN_HORIZONTALSECTIONLAYOUTTYPE
case "oneThirdRightColumn":
result = ONETHIRDRIGHTCOLUMN_HORIZONTALSECTIONLAYOUTTYPE
case "fullWidth":
result = FULLWIDTH_HORIZONTALSECTIONLAYOUTTYPE
case "unknownFutureValue":
result = UNKNOWNFUTUREVALUE_HORIZONTALSECTIONLAYOUTTYPE
default:
return 0, errors.New("Unknown HorizontalSectionLayoutType value: " + v)
}
return &result, nil
result := NONE_HORIZONTALSECTIONLAYOUTTYPE
switch v {
case "none":
result = NONE_HORIZONTALSECTIONLAYOUTTYPE
case "oneColumn":
result = ONECOLUMN_HORIZONTALSECTIONLAYOUTTYPE
case "twoColumns":
result = TWOCOLUMNS_HORIZONTALSECTIONLAYOUTTYPE
case "threeColumns":
result = THREECOLUMNS_HORIZONTALSECTIONLAYOUTTYPE
case "oneThirdLeftColumn":
result = ONETHIRDLEFTCOLUMN_HORIZONTALSECTIONLAYOUTTYPE
case "oneThirdRightColumn":
result = ONETHIRDRIGHTCOLUMN_HORIZONTALSECTIONLAYOUTTYPE
case "fullWidth":
result = FULLWIDTH_HORIZONTALSECTIONLAYOUTTYPE
case "unknownFutureValue":
result = UNKNOWNFUTUREVALUE_HORIZONTALSECTIONLAYOUTTYPE
default:
return 0, errors.New("Unknown HorizontalSectionLayoutType value: " + v)
}
return &result, nil
}
func SerializeHorizontalSectionLayoutType(values []HorizontalSectionLayoutType) []string {
result := make([]string, len(values))
for i, v := range values {
result[i] = v.String()
}
return result
result := make([]string, len(values))
for i, v := range values {
result[i] = v.String()
}
return result
}

View File

@ -1,123 +1,134 @@
package models
import (
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
)
// MetaDataKeyStringPair
type MetaDataKeyStringPair struct {
// Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
additionalData map[string]interface{}
// Key of the meta data.
key *string
// The OdataType property
odataType *string
// Value of the meta data.
value *string
// Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
additionalData map[string]interface{}
// Key of the meta data.
key *string
// The OdataType property
odataType *string
// Value of the meta data.
value *string
}
// NewMetaDataKeyStringPair instantiates a new metaDataKeyStringPair and sets the default values.
func NewMetaDataKeyStringPair()(*MetaDataKeyStringPair) {
m := &MetaDataKeyStringPair{
}
m.SetAdditionalData(make(map[string]interface{}));
return m
func NewMetaDataKeyStringPair() *MetaDataKeyStringPair {
m := &MetaDataKeyStringPair{}
m.SetAdditionalData(make(map[string]interface{}))
return m
}
// CreateMetaDataKeyStringPairFromDiscriminatorValue creates a new instance of the appropriate class based on discriminator value
func CreateMetaDataKeyStringPairFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) {
return NewMetaDataKeyStringPair(), nil
func CreateMetaDataKeyStringPairFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) (i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) {
return NewMetaDataKeyStringPair(), nil
}
// GetAdditionalData gets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
func (m *MetaDataKeyStringPair) GetAdditionalData()(map[string]interface{}) {
return m.additionalData
func (m *MetaDataKeyStringPair) GetAdditionalData() map[string]interface{} {
return m.additionalData
}
// GetFieldDeserializers the deserialization information for the current model
func (m *MetaDataKeyStringPair) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {
res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error))
res["key"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetStringValue()
if err != nil {
return err
}
if val != nil {
m.SetKey(val)
}
return nil
}
res["@odata.type"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetStringValue()
if err != nil {
return err
}
if val != nil {
m.SetOdataType(val)
}
return nil
}
res["value"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetStringValue()
if err != nil {
return err
}
if val != nil {
m.SetValue(val)
}
return nil
}
return res
func (m *MetaDataKeyStringPair) GetFieldDeserializers() map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error)
res["key"] = func(n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetStringValue()
if err != nil {
return err
}
if val != nil {
m.SetKey(val)
}
return nil
}
res["@odata.type"] = func(n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetStringValue()
if err != nil {
return err
}
if val != nil {
m.SetOdataType(val)
}
return nil
}
res["value"] = func(n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetStringValue()
if err != nil {
return err
}
if val != nil {
m.SetValue(val)
}
return nil
}
return res
}
// GetKey gets the key property value. Key of the meta data.
func (m *MetaDataKeyStringPair) GetKey()(*string) {
return m.key
func (m *MetaDataKeyStringPair) GetKey() *string {
return m.key
}
// GetOdataType gets the @odata.type property value. The OdataType property
func (m *MetaDataKeyStringPair) GetOdataType()(*string) {
return m.odataType
func (m *MetaDataKeyStringPair) GetOdataType() *string {
return m.odataType
}
// GetValue gets the value property value. Value of the meta data.
func (m *MetaDataKeyStringPair) GetValue()(*string) {
return m.value
func (m *MetaDataKeyStringPair) GetValue() *string {
return m.value
}
// Serialize serializes information the current object
func (m *MetaDataKeyStringPair) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) {
{
err := writer.WriteStringValue("key", m.GetKey())
if err != nil {
return err
}
}
{
err := writer.WriteStringValue("@odata.type", m.GetOdataType())
if err != nil {
return err
}
}
{
err := writer.WriteStringValue("value", m.GetValue())
if err != nil {
return err
}
}
{
err := writer.WriteAdditionalData(m.GetAdditionalData())
if err != nil {
return err
}
}
return nil
func (m *MetaDataKeyStringPair) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter) error {
{
err := writer.WriteStringValue("key", m.GetKey())
if err != nil {
return err
}
}
{
err := writer.WriteStringValue("@odata.type", m.GetOdataType())
if err != nil {
return err
}
}
{
err := writer.WriteStringValue("value", m.GetValue())
if err != nil {
return err
}
}
{
err := writer.WriteAdditionalData(m.GetAdditionalData())
if err != nil {
return err
}
}
return nil
}
// SetAdditionalData sets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
func (m *MetaDataKeyStringPair) SetAdditionalData(value map[string]interface{})() {
m.additionalData = value
func (m *MetaDataKeyStringPair) SetAdditionalData(value map[string]interface{}) {
m.additionalData = value
}
// SetKey sets the key property value. Key of the meta data.
func (m *MetaDataKeyStringPair) SetKey(value *string)() {
m.key = value
func (m *MetaDataKeyStringPair) SetKey(value *string) {
m.key = value
}
// SetOdataType sets the @odata.type property value. The OdataType property
func (m *MetaDataKeyStringPair) SetOdataType(value *string)() {
m.odataType = value
func (m *MetaDataKeyStringPair) SetOdataType(value *string) {
m.odataType = value
}
// SetValue sets the value property value. Value of the meta data.
func (m *MetaDataKeyStringPair) SetValue(value *string)() {
m.value = value
func (m *MetaDataKeyStringPair) SetValue(value *string) {
m.value = value
}

View File

@ -1,17 +1,17 @@
package models
import (
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
)
// MetaDataKeyStringPairable
type MetaDataKeyStringPairable interface {
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.AdditionalDataHolder
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable
GetKey()(*string)
GetOdataType()(*string)
GetValue()(*string)
SetKey(value *string)()
SetOdataType(value *string)()
SetValue(value *string)()
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.AdditionalDataHolder
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable
GetKey() *string
GetOdataType() *string
GetValue() *string
SetKey(value *string)
SetOdataType(value *string)
SetValue(value *string)
}

View File

@ -1,40 +1,42 @@
package models
import (
"errors"
"errors"
)
// Provides operations to call the remove method.
type PageLayoutType int
const (
MICROSOFTRESERVED_PAGELAYOUTTYPE PageLayoutType = iota
ARTICLE_PAGELAYOUTTYPE
HOME_PAGELAYOUTTYPE
UNKNOWNFUTUREVALUE_PAGELAYOUTTYPE
MICROSOFTRESERVED_PAGELAYOUTTYPE PageLayoutType = iota
ARTICLE_PAGELAYOUTTYPE
HOME_PAGELAYOUTTYPE
UNKNOWNFUTUREVALUE_PAGELAYOUTTYPE
)
func (i PageLayoutType) String() string {
return []string{"microsoftReserved", "article", "home", "unknownFutureValue"}[i]
return []string{"microsoftReserved", "article", "home", "unknownFutureValue"}[i]
}
func ParsePageLayoutType(v string) (interface{}, error) {
result := MICROSOFTRESERVED_PAGELAYOUTTYPE
switch v {
case "microsoftReserved":
result = MICROSOFTRESERVED_PAGELAYOUTTYPE
case "article":
result = ARTICLE_PAGELAYOUTTYPE
case "home":
result = HOME_PAGELAYOUTTYPE
case "unknownFutureValue":
result = UNKNOWNFUTUREVALUE_PAGELAYOUTTYPE
default:
return 0, errors.New("Unknown PageLayoutType value: " + v)
}
return &result, nil
result := MICROSOFTRESERVED_PAGELAYOUTTYPE
switch v {
case "microsoftReserved":
result = MICROSOFTRESERVED_PAGELAYOUTTYPE
case "article":
result = ARTICLE_PAGELAYOUTTYPE
case "home":
result = HOME_PAGELAYOUTTYPE
case "unknownFutureValue":
result = UNKNOWNFUTUREVALUE_PAGELAYOUTTYPE
default:
return 0, errors.New("Unknown PageLayoutType value: " + v)
}
return &result, nil
}
func SerializePageLayoutType(values []PageLayoutType) []string {
result := make([]string, len(values))
for i, v := range values {
result[i] = v.String()
}
return result
result := make([]string, len(values))
for i, v := range values {
result[i] = v.String()
}
return result
}

View File

@ -1,40 +1,42 @@
package models
import (
"errors"
"errors"
)
// Provides operations to call the remove method.
type PagePromotionType int
const (
MICROSOFTRESERVED_PAGEPROMOTIONTYPE PagePromotionType = iota
PAGE_PAGEPROMOTIONTYPE
NEWSPOST_PAGEPROMOTIONTYPE
UNKNOWNFUTUREVALUE_PAGEPROMOTIONTYPE
MICROSOFTRESERVED_PAGEPROMOTIONTYPE PagePromotionType = iota
PAGE_PAGEPROMOTIONTYPE
NEWSPOST_PAGEPROMOTIONTYPE
UNKNOWNFUTUREVALUE_PAGEPROMOTIONTYPE
)
func (i PagePromotionType) String() string {
return []string{"microsoftReserved", "page", "newsPost", "unknownFutureValue"}[i]
return []string{"microsoftReserved", "page", "newsPost", "unknownFutureValue"}[i]
}
func ParsePagePromotionType(v string) (interface{}, error) {
result := MICROSOFTRESERVED_PAGEPROMOTIONTYPE
switch v {
case "microsoftReserved":
result = MICROSOFTRESERVED_PAGEPROMOTIONTYPE
case "page":
result = PAGE_PAGEPROMOTIONTYPE
case "newsPost":
result = NEWSPOST_PAGEPROMOTIONTYPE
case "unknownFutureValue":
result = UNKNOWNFUTUREVALUE_PAGEPROMOTIONTYPE
default:
return 0, errors.New("Unknown PagePromotionType value: " + v)
}
return &result, nil
result := MICROSOFTRESERVED_PAGEPROMOTIONTYPE
switch v {
case "microsoftReserved":
result = MICROSOFTRESERVED_PAGEPROMOTIONTYPE
case "page":
result = PAGE_PAGEPROMOTIONTYPE
case "newsPost":
result = NEWSPOST_PAGEPROMOTIONTYPE
case "unknownFutureValue":
result = UNKNOWNFUTUREVALUE_PAGEPROMOTIONTYPE
default:
return 0, errors.New("Unknown PagePromotionType value: " + v)
}
return &result, nil
}
func SerializePagePromotionType(values []PagePromotionType) []string {
result := make([]string, len(values))
for i, v := range values {
result[i] = v.String()
}
return result
result := make([]string, len(values))
for i, v := range values {
result[i] = v.String()
}
return result
}

View File

@ -1,123 +1,134 @@
package models
import (
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
)
// PublicationFacet
type PublicationFacet struct {
// Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
additionalData map[string]interface{}
// The state of publication for this document. Either published or checkout. Read-only.
level *string
// The OdataType property
odataType *string
// The unique identifier for the version that is visible to the current caller. Read-only.
versionId *string
// Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
additionalData map[string]interface{}
// The state of publication for this document. Either published or checkout. Read-only.
level *string
// The OdataType property
odataType *string
// The unique identifier for the version that is visible to the current caller. Read-only.
versionId *string
}
// NewPublicationFacet instantiates a new publicationFacet and sets the default values.
func NewPublicationFacet()(*PublicationFacet) {
m := &PublicationFacet{
}
m.SetAdditionalData(make(map[string]interface{}));
return m
func NewPublicationFacet() *PublicationFacet {
m := &PublicationFacet{}
m.SetAdditionalData(make(map[string]interface{}))
return m
}
// CreatePublicationFacetFromDiscriminatorValue creates a new instance of the appropriate class based on discriminator value
func CreatePublicationFacetFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) {
return NewPublicationFacet(), nil
func CreatePublicationFacetFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) (i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) {
return NewPublicationFacet(), nil
}
// GetAdditionalData gets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
func (m *PublicationFacet) GetAdditionalData()(map[string]interface{}) {
return m.additionalData
func (m *PublicationFacet) GetAdditionalData() map[string]interface{} {
return m.additionalData
}
// GetFieldDeserializers the deserialization information for the current model
func (m *PublicationFacet) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {
res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error))
res["level"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetStringValue()
if err != nil {
return err
}
if val != nil {
m.SetLevel(val)
}
return nil
}
res["@odata.type"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetStringValue()
if err != nil {
return err
}
if val != nil {
m.SetOdataType(val)
}
return nil
}
res["versionId"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetStringValue()
if err != nil {
return err
}
if val != nil {
m.SetVersionId(val)
}
return nil
}
return res
func (m *PublicationFacet) GetFieldDeserializers() map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error)
res["level"] = func(n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetStringValue()
if err != nil {
return err
}
if val != nil {
m.SetLevel(val)
}
return nil
}
res["@odata.type"] = func(n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetStringValue()
if err != nil {
return err
}
if val != nil {
m.SetOdataType(val)
}
return nil
}
res["versionId"] = func(n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetStringValue()
if err != nil {
return err
}
if val != nil {
m.SetVersionId(val)
}
return nil
}
return res
}
// GetLevel gets the level property value. The state of publication for this document. Either published or checkout. Read-only.
func (m *PublicationFacet) GetLevel()(*string) {
return m.level
func (m *PublicationFacet) GetLevel() *string {
return m.level
}
// GetOdataType gets the @odata.type property value. The OdataType property
func (m *PublicationFacet) GetOdataType()(*string) {
return m.odataType
func (m *PublicationFacet) GetOdataType() *string {
return m.odataType
}
// GetVersionId gets the versionId property value. The unique identifier for the version that is visible to the current caller. Read-only.
func (m *PublicationFacet) GetVersionId()(*string) {
return m.versionId
func (m *PublicationFacet) GetVersionId() *string {
return m.versionId
}
// Serialize serializes information the current object
func (m *PublicationFacet) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) {
{
err := writer.WriteStringValue("level", m.GetLevel())
if err != nil {
return err
}
}
{
err := writer.WriteStringValue("@odata.type", m.GetOdataType())
if err != nil {
return err
}
}
{
err := writer.WriteStringValue("versionId", m.GetVersionId())
if err != nil {
return err
}
}
{
err := writer.WriteAdditionalData(m.GetAdditionalData())
if err != nil {
return err
}
}
return nil
func (m *PublicationFacet) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter) error {
{
err := writer.WriteStringValue("level", m.GetLevel())
if err != nil {
return err
}
}
{
err := writer.WriteStringValue("@odata.type", m.GetOdataType())
if err != nil {
return err
}
}
{
err := writer.WriteStringValue("versionId", m.GetVersionId())
if err != nil {
return err
}
}
{
err := writer.WriteAdditionalData(m.GetAdditionalData())
if err != nil {
return err
}
}
return nil
}
// SetAdditionalData sets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
func (m *PublicationFacet) SetAdditionalData(value map[string]interface{})() {
m.additionalData = value
func (m *PublicationFacet) SetAdditionalData(value map[string]interface{}) {
m.additionalData = value
}
// SetLevel sets the level property value. The state of publication for this document. Either published or checkout. Read-only.
func (m *PublicationFacet) SetLevel(value *string)() {
m.level = value
func (m *PublicationFacet) SetLevel(value *string) {
m.level = value
}
// SetOdataType sets the @odata.type property value. The OdataType property
func (m *PublicationFacet) SetOdataType(value *string)() {
m.odataType = value
func (m *PublicationFacet) SetOdataType(value *string) {
m.odataType = value
}
// SetVersionId sets the versionId property value. The unique identifier for the version that is visible to the current caller. Read-only.
func (m *PublicationFacet) SetVersionId(value *string)() {
m.versionId = value
func (m *PublicationFacet) SetVersionId(value *string) {
m.versionId = value
}

View File

@ -1,17 +1,17 @@
package models
import (
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
)
// PublicationFacetable
type PublicationFacetable interface {
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.AdditionalDataHolder
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable
GetLevel()(*string)
GetOdataType()(*string)
GetVersionId()(*string)
SetLevel(value *string)()
SetOdataType(value *string)()
SetVersionId(value *string)()
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.AdditionalDataHolder
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable
GetLevel() *string
GetOdataType() *string
GetVersionId() *string
SetLevel(value *string)
SetOdataType(value *string)
SetVersionId(value *string)
}

View File

@ -1,149 +1,162 @@
package models
import (
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
)
// ReactionsFacet
type ReactionsFacet struct {
// Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
additionalData map[string]interface{}
// Count of comments.
commentCount *int32
// Count of likes.
likeCount *int32
// The OdataType property
odataType *string
// Count of shares.
shareCount *int32
// Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
additionalData map[string]interface{}
// Count of comments.
commentCount *int32
// Count of likes.
likeCount *int32
// The OdataType property
odataType *string
// Count of shares.
shareCount *int32
}
// NewReactionsFacet instantiates a new reactionsFacet and sets the default values.
func NewReactionsFacet()(*ReactionsFacet) {
m := &ReactionsFacet{
}
m.SetAdditionalData(make(map[string]interface{}));
return m
func NewReactionsFacet() *ReactionsFacet {
m := &ReactionsFacet{}
m.SetAdditionalData(make(map[string]interface{}))
return m
}
// CreateReactionsFacetFromDiscriminatorValue creates a new instance of the appropriate class based on discriminator value
func CreateReactionsFacetFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) {
return NewReactionsFacet(), nil
func CreateReactionsFacetFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) (i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) {
return NewReactionsFacet(), nil
}
// GetAdditionalData gets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
func (m *ReactionsFacet) GetAdditionalData()(map[string]interface{}) {
return m.additionalData
func (m *ReactionsFacet) GetAdditionalData() map[string]interface{} {
return m.additionalData
}
// GetCommentCount gets the commentCount property value. Count of comments.
func (m *ReactionsFacet) GetCommentCount()(*int32) {
return m.commentCount
func (m *ReactionsFacet) GetCommentCount() *int32 {
return m.commentCount
}
// GetFieldDeserializers the deserialization information for the current model
func (m *ReactionsFacet) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {
res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error))
res["commentCount"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetInt32Value()
if err != nil {
return err
}
if val != nil {
m.SetCommentCount(val)
}
return nil
}
res["likeCount"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetInt32Value()
if err != nil {
return err
}
if val != nil {
m.SetLikeCount(val)
}
return nil
}
res["@odata.type"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetStringValue()
if err != nil {
return err
}
if val != nil {
m.SetOdataType(val)
}
return nil
}
res["shareCount"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetInt32Value()
if err != nil {
return err
}
if val != nil {
m.SetShareCount(val)
}
return nil
}
return res
func (m *ReactionsFacet) GetFieldDeserializers() map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error)
res["commentCount"] = func(n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetInt32Value()
if err != nil {
return err
}
if val != nil {
m.SetCommentCount(val)
}
return nil
}
res["likeCount"] = func(n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetInt32Value()
if err != nil {
return err
}
if val != nil {
m.SetLikeCount(val)
}
return nil
}
res["@odata.type"] = func(n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetStringValue()
if err != nil {
return err
}
if val != nil {
m.SetOdataType(val)
}
return nil
}
res["shareCount"] = func(n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetInt32Value()
if err != nil {
return err
}
if val != nil {
m.SetShareCount(val)
}
return nil
}
return res
}
// GetLikeCount gets the likeCount property value. Count of likes.
func (m *ReactionsFacet) GetLikeCount()(*int32) {
return m.likeCount
func (m *ReactionsFacet) GetLikeCount() *int32 {
return m.likeCount
}
// GetOdataType gets the @odata.type property value. The OdataType property
func (m *ReactionsFacet) GetOdataType()(*string) {
return m.odataType
func (m *ReactionsFacet) GetOdataType() *string {
return m.odataType
}
// GetShareCount gets the shareCount property value. Count of shares.
func (m *ReactionsFacet) GetShareCount()(*int32) {
return m.shareCount
func (m *ReactionsFacet) GetShareCount() *int32 {
return m.shareCount
}
// Serialize serializes information the current object
func (m *ReactionsFacet) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) {
{
err := writer.WriteInt32Value("commentCount", m.GetCommentCount())
if err != nil {
return err
}
}
{
err := writer.WriteInt32Value("likeCount", m.GetLikeCount())
if err != nil {
return err
}
}
{
err := writer.WriteStringValue("@odata.type", m.GetOdataType())
if err != nil {
return err
}
}
{
err := writer.WriteInt32Value("shareCount", m.GetShareCount())
if err != nil {
return err
}
}
{
err := writer.WriteAdditionalData(m.GetAdditionalData())
if err != nil {
return err
}
}
return nil
func (m *ReactionsFacet) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter) error {
{
err := writer.WriteInt32Value("commentCount", m.GetCommentCount())
if err != nil {
return err
}
}
{
err := writer.WriteInt32Value("likeCount", m.GetLikeCount())
if err != nil {
return err
}
}
{
err := writer.WriteStringValue("@odata.type", m.GetOdataType())
if err != nil {
return err
}
}
{
err := writer.WriteInt32Value("shareCount", m.GetShareCount())
if err != nil {
return err
}
}
{
err := writer.WriteAdditionalData(m.GetAdditionalData())
if err != nil {
return err
}
}
return nil
}
// SetAdditionalData sets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
func (m *ReactionsFacet) SetAdditionalData(value map[string]interface{})() {
m.additionalData = value
func (m *ReactionsFacet) SetAdditionalData(value map[string]interface{}) {
m.additionalData = value
}
// SetCommentCount sets the commentCount property value. Count of comments.
func (m *ReactionsFacet) SetCommentCount(value *int32)() {
m.commentCount = value
func (m *ReactionsFacet) SetCommentCount(value *int32) {
m.commentCount = value
}
// SetLikeCount sets the likeCount property value. Count of likes.
func (m *ReactionsFacet) SetLikeCount(value *int32)() {
m.likeCount = value
func (m *ReactionsFacet) SetLikeCount(value *int32) {
m.likeCount = value
}
// SetOdataType sets the @odata.type property value. The OdataType property
func (m *ReactionsFacet) SetOdataType(value *string)() {
m.odataType = value
func (m *ReactionsFacet) SetOdataType(value *string) {
m.odataType = value
}
// SetShareCount sets the shareCount property value. Count of shares.
func (m *ReactionsFacet) SetShareCount(value *int32)() {
m.shareCount = value
func (m *ReactionsFacet) SetShareCount(value *int32) {
m.shareCount = value
}

View File

@ -1,19 +1,19 @@
package models
import (
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
)
// ReactionsFacetable
type ReactionsFacetable interface {
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.AdditionalDataHolder
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable
GetCommentCount()(*int32)
GetLikeCount()(*int32)
GetOdataType()(*string)
GetShareCount()(*int32)
SetCommentCount(value *int32)()
SetLikeCount(value *int32)()
SetOdataType(value *string)()
SetShareCount(value *int32)()
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.AdditionalDataHolder
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable
GetCommentCount() *int32
GetLikeCount() *int32
GetOdataType() *string
GetShareCount() *int32
SetCommentCount(value *int32)
SetLikeCount(value *int32)
SetOdataType(value *string)
SetShareCount(value *int32)
}

View File

@ -1,43 +1,45 @@
package models
import (
"errors"
"errors"
)
// Provides operations to call the remove method.
type SectionEmphasisType int
const (
NONE_SECTIONEMPHASISTYPE SectionEmphasisType = iota
NEUTRAL_SECTIONEMPHASISTYPE
SOFT_SECTIONEMPHASISTYPE
STRONG_SECTIONEMPHASISTYPE
UNKNOWNFUTUREVALUE_SECTIONEMPHASISTYPE
NONE_SECTIONEMPHASISTYPE SectionEmphasisType = iota
NEUTRAL_SECTIONEMPHASISTYPE
SOFT_SECTIONEMPHASISTYPE
STRONG_SECTIONEMPHASISTYPE
UNKNOWNFUTUREVALUE_SECTIONEMPHASISTYPE
)
func (i SectionEmphasisType) String() string {
return []string{"none", "neutral", "soft", "strong", "unknownFutureValue"}[i]
return []string{"none", "neutral", "soft", "strong", "unknownFutureValue"}[i]
}
func ParseSectionEmphasisType(v string) (interface{}, error) {
result := NONE_SECTIONEMPHASISTYPE
switch v {
case "none":
result = NONE_SECTIONEMPHASISTYPE
case "neutral":
result = NEUTRAL_SECTIONEMPHASISTYPE
case "soft":
result = SOFT_SECTIONEMPHASISTYPE
case "strong":
result = STRONG_SECTIONEMPHASISTYPE
case "unknownFutureValue":
result = UNKNOWNFUTUREVALUE_SECTIONEMPHASISTYPE
default:
return 0, errors.New("Unknown SectionEmphasisType value: " + v)
}
return &result, nil
result := NONE_SECTIONEMPHASISTYPE
switch v {
case "none":
result = NONE_SECTIONEMPHASISTYPE
case "neutral":
result = NEUTRAL_SECTIONEMPHASISTYPE
case "soft":
result = SOFT_SECTIONEMPHASISTYPE
case "strong":
result = STRONG_SECTIONEMPHASISTYPE
case "unknownFutureValue":
result = UNKNOWNFUTUREVALUE_SECTIONEMPHASISTYPE
default:
return 0, errors.New("Unknown SectionEmphasisType value: " + v)
}
return &result, nil
}
func SerializeSectionEmphasisType(values []SectionEmphasisType) []string {
result := make([]string, len(values))
for i, v := range values {
result[i] = v.String()
}
return result
result := make([]string, len(values))
for i, v := range values {
result[i] = v.String()
}
return result
}

View File

@ -1,37 +1,39 @@
package models
import (
"errors"
"errors"
)
// Provides operations to call the remove method.
type SiteAccessType int
const (
BLOCK_SITEACCESSTYPE SiteAccessType = iota
FULL_SITEACCESSTYPE
LIMITED_SITEACCESSTYPE
BLOCK_SITEACCESSTYPE SiteAccessType = iota
FULL_SITEACCESSTYPE
LIMITED_SITEACCESSTYPE
)
func (i SiteAccessType) String() string {
return []string{"block", "full", "limited"}[i]
return []string{"block", "full", "limited"}[i]
}
func ParseSiteAccessType(v string) (interface{}, error) {
result := BLOCK_SITEACCESSTYPE
switch v {
case "block":
result = BLOCK_SITEACCESSTYPE
case "full":
result = FULL_SITEACCESSTYPE
case "limited":
result = LIMITED_SITEACCESSTYPE
default:
return 0, errors.New("Unknown SiteAccessType value: " + v)
}
return &result, nil
result := BLOCK_SITEACCESSTYPE
switch v {
case "block":
result = BLOCK_SITEACCESSTYPE
case "full":
result = FULL_SITEACCESSTYPE
case "limited":
result = LIMITED_SITEACCESSTYPE
default:
return 0, errors.New("Unknown SiteAccessType value: " + v)
}
return &result, nil
}
func SerializeSiteAccessType(values []SiteAccessType) []string {
result := make([]string, len(values))
for i, v := range values {
result[i] = v.String()
}
return result
result := make([]string, len(values))
for i, v := range values {
result[i] = v.String()
}
return result
}

View File

@ -2,7 +2,6 @@ package models
import (
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
msmodel "github.com/microsoftgraph/msgraph-sdk-go/models"
)

View File

@ -1,52 +1,54 @@
package models
import (
"errors"
"errors"
)
// Provides operations to call the add method.
type SiteSecurityLevel int
const (
// User Defined, default value, no intent.
USERDEFINED_SITESECURITYLEVEL SiteSecurityLevel = iota
// Low.
LOW_SITESECURITYLEVEL
// Medium-low.
MEDIUMLOW_SITESECURITYLEVEL
// Medium.
MEDIUM_SITESECURITYLEVEL
// Medium-high.
MEDIUMHIGH_SITESECURITYLEVEL
// High.
HIGH_SITESECURITYLEVEL
// User Defined, default value, no intent.
USERDEFINED_SITESECURITYLEVEL SiteSecurityLevel = iota
// Low.
LOW_SITESECURITYLEVEL
// Medium-low.
MEDIUMLOW_SITESECURITYLEVEL
// Medium.
MEDIUM_SITESECURITYLEVEL
// Medium-high.
MEDIUMHIGH_SITESECURITYLEVEL
// High.
HIGH_SITESECURITYLEVEL
)
func (i SiteSecurityLevel) String() string {
return []string{"userDefined", "low", "mediumLow", "medium", "mediumHigh", "high"}[i]
return []string{"userDefined", "low", "mediumLow", "medium", "mediumHigh", "high"}[i]
}
func ParseSiteSecurityLevel(v string) (interface{}, error) {
result := USERDEFINED_SITESECURITYLEVEL
switch v {
case "userDefined":
result = USERDEFINED_SITESECURITYLEVEL
case "low":
result = LOW_SITESECURITYLEVEL
case "mediumLow":
result = MEDIUMLOW_SITESECURITYLEVEL
case "medium":
result = MEDIUM_SITESECURITYLEVEL
case "mediumHigh":
result = MEDIUMHIGH_SITESECURITYLEVEL
case "high":
result = HIGH_SITESECURITYLEVEL
default:
return 0, errors.New("Unknown SiteSecurityLevel value: " + v)
}
return &result, nil
result := USERDEFINED_SITESECURITYLEVEL
switch v {
case "userDefined":
result = USERDEFINED_SITESECURITYLEVEL
case "low":
result = LOW_SITESECURITYLEVEL
case "mediumLow":
result = MEDIUMLOW_SITESECURITYLEVEL
case "medium":
result = MEDIUM_SITESECURITYLEVEL
case "mediumHigh":
result = MEDIUMHIGH_SITESECURITYLEVEL
case "high":
result = HIGH_SITESECURITYLEVEL
default:
return 0, errors.New("Unknown SiteSecurityLevel value: " + v)
}
return &result, nil
}
func SerializeSiteSecurityLevel(values []SiteSecurityLevel) []string {
result := make([]string, len(values))
for i, v := range values {
result[i] = v.String()
}
return result
result := make([]string, len(values))
for i, v := range values {
result[i] = v.String()
}
return result
}

View File

@ -1,123 +1,134 @@
package models
import (
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
)
// SiteSettings
type SiteSettings struct {
// Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
additionalData map[string]interface{}
// The language tag for the language used on this site.
languageTag *string
// The OdataType property
odataType *string
// Indicates the time offset for the time zone of the site from Coordinated Universal Time (UTC).
timeZone *string
// Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
additionalData map[string]interface{}
// The language tag for the language used on this site.
languageTag *string
// The OdataType property
odataType *string
// Indicates the time offset for the time zone of the site from Coordinated Universal Time (UTC).
timeZone *string
}
// NewSiteSettings instantiates a new siteSettings and sets the default values.
func NewSiteSettings()(*SiteSettings) {
m := &SiteSettings{
}
m.SetAdditionalData(make(map[string]interface{}));
return m
func NewSiteSettings() *SiteSettings {
m := &SiteSettings{}
m.SetAdditionalData(make(map[string]interface{}))
return m
}
// CreateSiteSettingsFromDiscriminatorValue creates a new instance of the appropriate class based on discriminator value
func CreateSiteSettingsFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) {
return NewSiteSettings(), nil
func CreateSiteSettingsFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) (i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) {
return NewSiteSettings(), nil
}
// GetAdditionalData gets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
func (m *SiteSettings) GetAdditionalData()(map[string]interface{}) {
return m.additionalData
func (m *SiteSettings) GetAdditionalData() map[string]interface{} {
return m.additionalData
}
// GetFieldDeserializers the deserialization information for the current model
func (m *SiteSettings) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {
res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error))
res["languageTag"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetStringValue()
if err != nil {
return err
}
if val != nil {
m.SetLanguageTag(val)
}
return nil
}
res["@odata.type"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetStringValue()
if err != nil {
return err
}
if val != nil {
m.SetOdataType(val)
}
return nil
}
res["timeZone"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetStringValue()
if err != nil {
return err
}
if val != nil {
m.SetTimeZone(val)
}
return nil
}
return res
func (m *SiteSettings) GetFieldDeserializers() map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error)
res["languageTag"] = func(n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetStringValue()
if err != nil {
return err
}
if val != nil {
m.SetLanguageTag(val)
}
return nil
}
res["@odata.type"] = func(n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetStringValue()
if err != nil {
return err
}
if val != nil {
m.SetOdataType(val)
}
return nil
}
res["timeZone"] = func(n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetStringValue()
if err != nil {
return err
}
if val != nil {
m.SetTimeZone(val)
}
return nil
}
return res
}
// GetLanguageTag gets the languageTag property value. The language tag for the language used on this site.
func (m *SiteSettings) GetLanguageTag()(*string) {
return m.languageTag
func (m *SiteSettings) GetLanguageTag() *string {
return m.languageTag
}
// GetOdataType gets the @odata.type property value. The OdataType property
func (m *SiteSettings) GetOdataType()(*string) {
return m.odataType
func (m *SiteSettings) GetOdataType() *string {
return m.odataType
}
// GetTimeZone gets the timeZone property value. Indicates the time offset for the time zone of the site from Coordinated Universal Time (UTC).
func (m *SiteSettings) GetTimeZone()(*string) {
return m.timeZone
func (m *SiteSettings) GetTimeZone() *string {
return m.timeZone
}
// Serialize serializes information the current object
func (m *SiteSettings) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) {
{
err := writer.WriteStringValue("languageTag", m.GetLanguageTag())
if err != nil {
return err
}
}
{
err := writer.WriteStringValue("@odata.type", m.GetOdataType())
if err != nil {
return err
}
}
{
err := writer.WriteStringValue("timeZone", m.GetTimeZone())
if err != nil {
return err
}
}
{
err := writer.WriteAdditionalData(m.GetAdditionalData())
if err != nil {
return err
}
}
return nil
func (m *SiteSettings) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter) error {
{
err := writer.WriteStringValue("languageTag", m.GetLanguageTag())
if err != nil {
return err
}
}
{
err := writer.WriteStringValue("@odata.type", m.GetOdataType())
if err != nil {
return err
}
}
{
err := writer.WriteStringValue("timeZone", m.GetTimeZone())
if err != nil {
return err
}
}
{
err := writer.WriteAdditionalData(m.GetAdditionalData())
if err != nil {
return err
}
}
return nil
}
// SetAdditionalData sets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
func (m *SiteSettings) SetAdditionalData(value map[string]interface{})() {
m.additionalData = value
func (m *SiteSettings) SetAdditionalData(value map[string]interface{}) {
m.additionalData = value
}
// SetLanguageTag sets the languageTag property value. The language tag for the language used on this site.
func (m *SiteSettings) SetLanguageTag(value *string)() {
m.languageTag = value
func (m *SiteSettings) SetLanguageTag(value *string) {
m.languageTag = value
}
// SetOdataType sets the @odata.type property value. The OdataType property
func (m *SiteSettings) SetOdataType(value *string)() {
m.odataType = value
func (m *SiteSettings) SetOdataType(value *string) {
m.odataType = value
}
// SetTimeZone sets the timeZone property value. Indicates the time offset for the time zone of the site from Coordinated Universal Time (UTC).
func (m *SiteSettings) SetTimeZone(value *string)() {
m.timeZone = value
func (m *SiteSettings) SetTimeZone(value *string) {
m.timeZone = value
}

View File

@ -1,17 +1,17 @@
package models
import (
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
)
// SiteSettingsable
type SiteSettingsable interface {
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.AdditionalDataHolder
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable
GetLanguageTag()(*string)
GetOdataType()(*string)
GetTimeZone()(*string)
SetLanguageTag(value *string)()
SetOdataType(value *string)()
SetTimeZone(value *string)()
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.AdditionalDataHolder
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable
GetLanguageTag() *string
GetOdataType() *string
GetTimeZone() *string
SetLanguageTag(value *string)
SetOdataType(value *string)
SetTimeZone(value *string)
}

View File

@ -1,88 +1,96 @@
package models
import (
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
)
// StandardWebPart
type StandardWebPart struct {
WebPart
// Data of the webPart.
data WebPartDataable
// A Guid which indicates the type of the webParts
webPartType *string
WebPart
// Data of the webPart.
data WebPartDataable
// A Guid which indicates the type of the webParts
webPartType *string
}
// NewStandardWebPart instantiates a new StandardWebPart and sets the default values.
func NewStandardWebPart()(*StandardWebPart) {
m := &StandardWebPart{
WebPart: *NewWebPart(),
}
odataTypeValue := "#microsoft.graph.standardWebPart";
m.SetOdataType(&odataTypeValue);
return m
func NewStandardWebPart() *StandardWebPart {
m := &StandardWebPart{
WebPart: *NewWebPart(),
}
odataTypeValue := "#microsoft.graph.standardWebPart"
m.SetOdataType(&odataTypeValue)
return m
}
// CreateStandardWebPartFromDiscriminatorValue creates a new instance of the appropriate class based on discriminator value
func CreateStandardWebPartFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) {
return NewStandardWebPart(), nil
func CreateStandardWebPartFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) (i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) {
return NewStandardWebPart(), nil
}
// GetData gets the data property value. Data of the webPart.
func (m *StandardWebPart) GetData()(WebPartDataable) {
return m.data
func (m *StandardWebPart) GetData() WebPartDataable {
return m.data
}
// GetFieldDeserializers the deserialization information for the current model
func (m *StandardWebPart) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {
res := m.WebPart.GetFieldDeserializers()
res["data"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetObjectValue(CreateWebPartDataFromDiscriminatorValue)
if err != nil {
return err
}
if val != nil {
m.SetData(val.(WebPartDataable))
}
return nil
}
res["webPartType"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetStringValue()
if err != nil {
return err
}
if val != nil {
m.SetWebPartType(val)
}
return nil
}
return res
func (m *StandardWebPart) GetFieldDeserializers() map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
res := m.WebPart.GetFieldDeserializers()
res["data"] = func(n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetObjectValue(CreateWebPartDataFromDiscriminatorValue)
if err != nil {
return err
}
if val != nil {
m.SetData(val.(WebPartDataable))
}
return nil
}
res["webPartType"] = func(n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetStringValue()
if err != nil {
return err
}
if val != nil {
m.SetWebPartType(val)
}
return nil
}
return res
}
// GetWebPartType gets the webPartType property value. A Guid which indicates the type of the webParts
func (m *StandardWebPart) GetWebPartType()(*string) {
return m.webPartType
func (m *StandardWebPart) GetWebPartType() *string {
return m.webPartType
}
// Serialize serializes information the current object
func (m *StandardWebPart) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) {
err := m.WebPart.Serialize(writer)
if err != nil {
return err
}
{
err = writer.WriteObjectValue("data", m.GetData())
if err != nil {
return err
}
}
{
err = writer.WriteStringValue("webPartType", m.GetWebPartType())
if err != nil {
return err
}
}
return nil
func (m *StandardWebPart) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter) error {
err := m.WebPart.Serialize(writer)
if err != nil {
return err
}
{
err = writer.WriteObjectValue("data", m.GetData())
if err != nil {
return err
}
}
{
err = writer.WriteStringValue("webPartType", m.GetWebPartType())
if err != nil {
return err
}
}
return nil
}
// SetData sets the data property value. Data of the webPart.
func (m *StandardWebPart) SetData(value WebPartDataable)() {
m.data = value
func (m *StandardWebPart) SetData(value WebPartDataable) {
m.data = value
}
// SetWebPartType sets the webPartType property value. A Guid which indicates the type of the webParts
func (m *StandardWebPart) SetWebPartType(value *string)() {
m.webPartType = value
func (m *StandardWebPart) SetWebPartType(value *string) {
m.webPartType = value
}

View File

@ -1,15 +1,15 @@
package models
import (
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
)
// StandardWebPartable
type StandardWebPartable interface {
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable
WebPartable
GetData()(WebPartDataable)
GetWebPartType()(*string)
SetData(value WebPartDataable)()
SetWebPartType(value *string)()
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable
WebPartable
GetData() WebPartDataable
GetWebPartType() *string
SetData(value WebPartDataable)
SetWebPartType(value *string)
}

View File

@ -1,62 +1,68 @@
package models
import (
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
)
// TextWebPart
type TextWebPart struct {
WebPart
// The HTML string in text web part.
innerHtml *string
WebPart
// The HTML string in text web part.
innerHtml *string
}
// NewTextWebPart instantiates a new TextWebPart and sets the default values.
func NewTextWebPart()(*TextWebPart) {
m := &TextWebPart{
WebPart: *NewWebPart(),
}
odataTypeValue := "#microsoft.graph.textWebPart";
m.SetOdataType(&odataTypeValue);
return m
func NewTextWebPart() *TextWebPart {
m := &TextWebPart{
WebPart: *NewWebPart(),
}
odataTypeValue := "#microsoft.graph.textWebPart"
m.SetOdataType(&odataTypeValue)
return m
}
// CreateTextWebPartFromDiscriminatorValue creates a new instance of the appropriate class based on discriminator value
func CreateTextWebPartFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) {
return NewTextWebPart(), nil
func CreateTextWebPartFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) (i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) {
return NewTextWebPart(), nil
}
// GetFieldDeserializers the deserialization information for the current model
func (m *TextWebPart) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {
res := m.WebPart.GetFieldDeserializers()
res["innerHtml"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetStringValue()
if err != nil {
return err
}
if val != nil {
m.SetInnerHtml(val)
}
return nil
}
return res
func (m *TextWebPart) GetFieldDeserializers() map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
res := m.WebPart.GetFieldDeserializers()
res["innerHtml"] = func(n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetStringValue()
if err != nil {
return err
}
if val != nil {
m.SetInnerHtml(val)
}
return nil
}
return res
}
// GetInnerHtml gets the innerHtml property value. The HTML string in text web part.
func (m *TextWebPart) GetInnerHtml()(*string) {
return m.innerHtml
func (m *TextWebPart) GetInnerHtml() *string {
return m.innerHtml
}
// Serialize serializes information the current object
func (m *TextWebPart) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) {
err := m.WebPart.Serialize(writer)
if err != nil {
return err
}
{
err = writer.WriteStringValue("innerHtml", m.GetInnerHtml())
if err != nil {
return err
}
}
return nil
func (m *TextWebPart) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter) error {
err := m.WebPart.Serialize(writer)
if err != nil {
return err
}
{
err = writer.WriteStringValue("innerHtml", m.GetInnerHtml())
if err != nil {
return err
}
}
return nil
}
// SetInnerHtml sets the innerHtml property value. The HTML string in text web part.
func (m *TextWebPart) SetInnerHtml(value *string)() {
m.innerHtml = value
func (m *TextWebPart) SetInnerHtml(value *string) {
m.innerHtml = value
}

View File

@ -1,13 +1,13 @@
package models
import (
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
)
// TextWebPartable
type TextWebPartable interface {
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable
WebPartable
GetInnerHtml()(*string)
SetInnerHtml(value *string)()
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable
WebPartable
GetInnerHtml() *string
SetInnerHtml(value *string)
}

View File

@ -1,43 +1,45 @@
package models
import (
"errors"
"errors"
)
// Provides operations to call the remove method.
type TitleAreaLayoutType int
const (
IMAGEANDTITLE_TITLEAREALAYOUTTYPE TitleAreaLayoutType = iota
PLAIN_TITLEAREALAYOUTTYPE
COLORBLOCK_TITLEAREALAYOUTTYPE
OVERLAP_TITLEAREALAYOUTTYPE
UNKNOWNFUTUREVALUE_TITLEAREALAYOUTTYPE
IMAGEANDTITLE_TITLEAREALAYOUTTYPE TitleAreaLayoutType = iota
PLAIN_TITLEAREALAYOUTTYPE
COLORBLOCK_TITLEAREALAYOUTTYPE
OVERLAP_TITLEAREALAYOUTTYPE
UNKNOWNFUTUREVALUE_TITLEAREALAYOUTTYPE
)
func (i TitleAreaLayoutType) String() string {
return []string{"imageAndTitle", "plain", "colorBlock", "overlap", "unknownFutureValue"}[i]
return []string{"imageAndTitle", "plain", "colorBlock", "overlap", "unknownFutureValue"}[i]
}
func ParseTitleAreaLayoutType(v string) (interface{}, error) {
result := IMAGEANDTITLE_TITLEAREALAYOUTTYPE
switch v {
case "imageAndTitle":
result = IMAGEANDTITLE_TITLEAREALAYOUTTYPE
case "plain":
result = PLAIN_TITLEAREALAYOUTTYPE
case "colorBlock":
result = COLORBLOCK_TITLEAREALAYOUTTYPE
case "overlap":
result = OVERLAP_TITLEAREALAYOUTTYPE
case "unknownFutureValue":
result = UNKNOWNFUTUREVALUE_TITLEAREALAYOUTTYPE
default:
return 0, errors.New("Unknown TitleAreaLayoutType value: " + v)
}
return &result, nil
result := IMAGEANDTITLE_TITLEAREALAYOUTTYPE
switch v {
case "imageAndTitle":
result = IMAGEANDTITLE_TITLEAREALAYOUTTYPE
case "plain":
result = PLAIN_TITLEAREALAYOUTTYPE
case "colorBlock":
result = COLORBLOCK_TITLEAREALAYOUTTYPE
case "overlap":
result = OVERLAP_TITLEAREALAYOUTTYPE
case "unknownFutureValue":
result = UNKNOWNFUTUREVALUE_TITLEAREALAYOUTTYPE
default:
return 0, errors.New("Unknown TitleAreaLayoutType value: " + v)
}
return &result, nil
}
func SerializeTitleAreaLayoutType(values []TitleAreaLayoutType) []string {
result := make([]string, len(values))
for i, v := range values {
result[i] = v.String()
}
return result
result := make([]string, len(values))
for i, v := range values {
result[i] = v.String()
}
return result
}

View File

@ -1,37 +1,39 @@
package models
import (
"errors"
"errors"
)
// Provides operations to call the remove method.
type TitleAreaTextAlignmentType int
const (
LEFT_TITLEAREATEXTALIGNMENTTYPE TitleAreaTextAlignmentType = iota
CENTER_TITLEAREATEXTALIGNMENTTYPE
UNKNOWNFUTUREVALUE_TITLEAREATEXTALIGNMENTTYPE
LEFT_TITLEAREATEXTALIGNMENTTYPE TitleAreaTextAlignmentType = iota
CENTER_TITLEAREATEXTALIGNMENTTYPE
UNKNOWNFUTUREVALUE_TITLEAREATEXTALIGNMENTTYPE
)
func (i TitleAreaTextAlignmentType) String() string {
return []string{"left", "center", "unknownFutureValue"}[i]
return []string{"left", "center", "unknownFutureValue"}[i]
}
func ParseTitleAreaTextAlignmentType(v string) (interface{}, error) {
result := LEFT_TITLEAREATEXTALIGNMENTTYPE
switch v {
case "left":
result = LEFT_TITLEAREATEXTALIGNMENTTYPE
case "center":
result = CENTER_TITLEAREATEXTALIGNMENTTYPE
case "unknownFutureValue":
result = UNKNOWNFUTUREVALUE_TITLEAREATEXTALIGNMENTTYPE
default:
return 0, errors.New("Unknown TitleAreaTextAlignmentType value: " + v)
}
return &result, nil
result := LEFT_TITLEAREATEXTALIGNMENTTYPE
switch v {
case "left":
result = LEFT_TITLEAREATEXTALIGNMENTTYPE
case "center":
result = CENTER_TITLEAREATEXTALIGNMENTTYPE
case "unknownFutureValue":
result = UNKNOWNFUTUREVALUE_TITLEAREATEXTALIGNMENTTYPE
default:
return 0, errors.New("Unknown TitleAreaTextAlignmentType value: " + v)
}
return &result, nil
}
func SerializeTitleAreaTextAlignmentType(values []TitleAreaTextAlignmentType) []string {
result := make([]string, len(values))
for i, v := range values {
result[i] = v.String()
}
return result
result := make([]string, len(values))
for i, v := range values {
result[i] = v.String()
}
return result
}

View File

@ -1,175 +1,190 @@
package models
import (
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
)
// WebPartPosition
type WebPartPosition struct {
// Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
additionalData map[string]interface{}
// Indicates the identifier of the column where the web part is located.
columnId *float64
// Indicates the horizontal section where the web part is located.
horizontalSectionId *float64
// Indicates whether the web part is located in the vertical section.
isInVerticalSection *bool
// The OdataType property
odataType *string
// Index of the current web part. Represents the order of the web part in this column or section.
webPartIndex *float64
// Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
additionalData map[string]interface{}
// Indicates the identifier of the column where the web part is located.
columnId *float64
// Indicates the horizontal section where the web part is located.
horizontalSectionId *float64
// Indicates whether the web part is located in the vertical section.
isInVerticalSection *bool
// The OdataType property
odataType *string
// Index of the current web part. Represents the order of the web part in this column or section.
webPartIndex *float64
}
// NewWebPartPosition instantiates a new webPartPosition and sets the default values.
func NewWebPartPosition()(*WebPartPosition) {
m := &WebPartPosition{
}
m.SetAdditionalData(make(map[string]interface{}));
return m
func NewWebPartPosition() *WebPartPosition {
m := &WebPartPosition{}
m.SetAdditionalData(make(map[string]interface{}))
return m
}
// CreateWebPartPositionFromDiscriminatorValue creates a new instance of the appropriate class based on discriminator value
func CreateWebPartPositionFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) {
return NewWebPartPosition(), nil
func CreateWebPartPositionFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) (i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) {
return NewWebPartPosition(), nil
}
// GetAdditionalData gets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
func (m *WebPartPosition) GetAdditionalData()(map[string]interface{}) {
return m.additionalData
func (m *WebPartPosition) GetAdditionalData() map[string]interface{} {
return m.additionalData
}
// GetColumnId gets the columnId property value. Indicates the identifier of the column where the web part is located.
func (m *WebPartPosition) GetColumnId()(*float64) {
return m.columnId
func (m *WebPartPosition) GetColumnId() *float64 {
return m.columnId
}
// GetFieldDeserializers the deserialization information for the current model
func (m *WebPartPosition) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {
res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error))
res["columnId"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetFloat64Value()
if err != nil {
return err
}
if val != nil {
m.SetColumnId(val)
}
return nil
}
res["horizontalSectionId"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetFloat64Value()
if err != nil {
return err
}
if val != nil {
m.SetHorizontalSectionId(val)
}
return nil
}
res["isInVerticalSection"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetBoolValue()
if err != nil {
return err
}
if val != nil {
m.SetIsInVerticalSection(val)
}
return nil
}
res["@odata.type"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetStringValue()
if err != nil {
return err
}
if val != nil {
m.SetOdataType(val)
}
return nil
}
res["webPartIndex"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetFloat64Value()
if err != nil {
return err
}
if val != nil {
m.SetWebPartIndex(val)
}
return nil
}
return res
func (m *WebPartPosition) GetFieldDeserializers() map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error)
res["columnId"] = func(n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetFloat64Value()
if err != nil {
return err
}
if val != nil {
m.SetColumnId(val)
}
return nil
}
res["horizontalSectionId"] = func(n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetFloat64Value()
if err != nil {
return err
}
if val != nil {
m.SetHorizontalSectionId(val)
}
return nil
}
res["isInVerticalSection"] = func(n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetBoolValue()
if err != nil {
return err
}
if val != nil {
m.SetIsInVerticalSection(val)
}
return nil
}
res["@odata.type"] = func(n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetStringValue()
if err != nil {
return err
}
if val != nil {
m.SetOdataType(val)
}
return nil
}
res["webPartIndex"] = func(n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetFloat64Value()
if err != nil {
return err
}
if val != nil {
m.SetWebPartIndex(val)
}
return nil
}
return res
}
// GetHorizontalSectionId gets the horizontalSectionId property value. Indicates the horizontal section where the web part is located.
func (m *WebPartPosition) GetHorizontalSectionId()(*float64) {
return m.horizontalSectionId
func (m *WebPartPosition) GetHorizontalSectionId() *float64 {
return m.horizontalSectionId
}
// GetIsInVerticalSection gets the isInVerticalSection property value. Indicates whether the web part is located in the vertical section.
func (m *WebPartPosition) GetIsInVerticalSection()(*bool) {
return m.isInVerticalSection
func (m *WebPartPosition) GetIsInVerticalSection() *bool {
return m.isInVerticalSection
}
// GetOdataType gets the @odata.type property value. The OdataType property
func (m *WebPartPosition) GetOdataType()(*string) {
return m.odataType
func (m *WebPartPosition) GetOdataType() *string {
return m.odataType
}
// GetWebPartIndex gets the webPartIndex property value. Index of the current web part. Represents the order of the web part in this column or section.
func (m *WebPartPosition) GetWebPartIndex()(*float64) {
return m.webPartIndex
func (m *WebPartPosition) GetWebPartIndex() *float64 {
return m.webPartIndex
}
// Serialize serializes information the current object
func (m *WebPartPosition) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) {
{
err := writer.WriteFloat64Value("columnId", m.GetColumnId())
if err != nil {
return err
}
}
{
err := writer.WriteFloat64Value("horizontalSectionId", m.GetHorizontalSectionId())
if err != nil {
return err
}
}
{
err := writer.WriteBoolValue("isInVerticalSection", m.GetIsInVerticalSection())
if err != nil {
return err
}
}
{
err := writer.WriteStringValue("@odata.type", m.GetOdataType())
if err != nil {
return err
}
}
{
err := writer.WriteFloat64Value("webPartIndex", m.GetWebPartIndex())
if err != nil {
return err
}
}
{
err := writer.WriteAdditionalData(m.GetAdditionalData())
if err != nil {
return err
}
}
return nil
func (m *WebPartPosition) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter) error {
{
err := writer.WriteFloat64Value("columnId", m.GetColumnId())
if err != nil {
return err
}
}
{
err := writer.WriteFloat64Value("horizontalSectionId", m.GetHorizontalSectionId())
if err != nil {
return err
}
}
{
err := writer.WriteBoolValue("isInVerticalSection", m.GetIsInVerticalSection())
if err != nil {
return err
}
}
{
err := writer.WriteStringValue("@odata.type", m.GetOdataType())
if err != nil {
return err
}
}
{
err := writer.WriteFloat64Value("webPartIndex", m.GetWebPartIndex())
if err != nil {
return err
}
}
{
err := writer.WriteAdditionalData(m.GetAdditionalData())
if err != nil {
return err
}
}
return nil
}
// SetAdditionalData sets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
func (m *WebPartPosition) SetAdditionalData(value map[string]interface{})() {
m.additionalData = value
func (m *WebPartPosition) SetAdditionalData(value map[string]interface{}) {
m.additionalData = value
}
// SetColumnId sets the columnId property value. Indicates the identifier of the column where the web part is located.
func (m *WebPartPosition) SetColumnId(value *float64)() {
m.columnId = value
func (m *WebPartPosition) SetColumnId(value *float64) {
m.columnId = value
}
// SetHorizontalSectionId sets the horizontalSectionId property value. Indicates the horizontal section where the web part is located.
func (m *WebPartPosition) SetHorizontalSectionId(value *float64)() {
m.horizontalSectionId = value
func (m *WebPartPosition) SetHorizontalSectionId(value *float64) {
m.horizontalSectionId = value
}
// SetIsInVerticalSection sets the isInVerticalSection property value. Indicates whether the web part is located in the vertical section.
func (m *WebPartPosition) SetIsInVerticalSection(value *bool)() {
m.isInVerticalSection = value
func (m *WebPartPosition) SetIsInVerticalSection(value *bool) {
m.isInVerticalSection = value
}
// SetOdataType sets the @odata.type property value. The OdataType property
func (m *WebPartPosition) SetOdataType(value *string)() {
m.odataType = value
func (m *WebPartPosition) SetOdataType(value *string) {
m.odataType = value
}
// SetWebPartIndex sets the webPartIndex property value. Index of the current web part. Represents the order of the web part in this column or section.
func (m *WebPartPosition) SetWebPartIndex(value *float64)() {
m.webPartIndex = value
func (m *WebPartPosition) SetWebPartIndex(value *float64) {
m.webPartIndex = value
}

View File

@ -1,21 +1,21 @@
package models
import (
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
)
// WebPartPositionable
type WebPartPositionable interface {
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.AdditionalDataHolder
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable
GetColumnId()(*float64)
GetHorizontalSectionId()(*float64)
GetIsInVerticalSection()(*bool)
GetOdataType()(*string)
GetWebPartIndex()(*float64)
SetColumnId(value *float64)()
SetHorizontalSectionId(value *float64)()
SetIsInVerticalSection(value *bool)()
SetOdataType(value *string)()
SetWebPartIndex(value *float64)()
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.AdditionalDataHolder
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable
GetColumnId() *float64
GetHorizontalSectionId() *float64
GetIsInVerticalSection() *bool
GetOdataType() *string
GetWebPartIndex() *float64
SetColumnId(value *float64)
SetHorizontalSectionId(value *float64)
SetIsInVerticalSection(value *bool)
SetOdataType(value *string)
SetWebPartIndex(value *float64)
}

View File

@ -3,9 +3,10 @@ package sites
import (
"context"
ifda19816f54f079134d70c11e75d6b26799300cf72079e282f1d3bb9a6750354 "github.com/alcionai/corso/src/internal/connector/graph/betasdk/models"
i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f "github.com/microsoft/kiota-abstractions-go"
i7ad325c11fbf3db4d761c429267362d8b24daa1eda0081f914ebc3cdc85181a0 "github.com/microsoftgraph/msgraph-sdk-go/models/odataerrors"
ifda19816f54f079134d70c11e75d6b26799300cf72079e282f1d3bb9a6750354 "github.com/alcionai/corso/src/internal/connector/graph/betasdk/models"
)
// ItemPagesItemWebPartsItemGetPositionOfWebPartRequestBuilder provides operations to call the getPositionOfWebPart method.

View File

@ -3,9 +3,10 @@ package sites
import (
"context"
ifda19816f54f079134d70c11e75d6b26799300cf72079e282f1d3bb9a6750354 "github.com/alcionai/corso/src/internal/connector/graph/betasdk/models"
i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f "github.com/microsoft/kiota-abstractions-go"
i7ad325c11fbf3db4d761c429267362d8b24daa1eda0081f914ebc3cdc85181a0 "github.com/microsoftgraph/msgraph-sdk-go/models/odataerrors"
ifda19816f54f079134d70c11e75d6b26799300cf72079e282f1d3bb9a6750354 "github.com/alcionai/corso/src/internal/connector/graph/betasdk/models"
)
// ItemPagesItemWebPartsRequestBuilder provides operations to manage the webParts property of the microsoft.graph.sitePage entity.

View File

@ -3,10 +3,10 @@ package sites
import (
"context"
ifda19816f54f079134d70c11e75d6b26799300cf72079e282f1d3bb9a6750354 "github.com/alcionai/corso/src/internal/connector/graph/betasdk/models"
i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f "github.com/microsoft/kiota-abstractions-go"
i7ad325c11fbf3db4d761c429267362d8b24daa1eda0081f914ebc3cdc85181a0 "github.com/microsoftgraph/msgraph-sdk-go/models/odataerrors"
ifda19816f54f079134d70c11e75d6b26799300cf72079e282f1d3bb9a6750354 "github.com/alcionai/corso/src/internal/connector/graph/betasdk/models"
)
// ItemPagesItemWebPartsWebPartItemRequestBuilder provides operations to manage the webParts property of the microsoft.graph.sitePage entity.

View File

@ -3,9 +3,10 @@ package sites
import (
"context"
ifda19816f54f079134d70c11e75d6b26799300cf72079e282f1d3bb9a6750354 "github.com/alcionai/corso/src/internal/connector/graph/betasdk/models"
i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f "github.com/microsoft/kiota-abstractions-go"
i7ad325c11fbf3db4d761c429267362d8b24daa1eda0081f914ebc3cdc85181a0 "github.com/microsoftgraph/msgraph-sdk-go/models/odataerrors"
ifda19816f54f079134d70c11e75d6b26799300cf72079e282f1d3bb9a6750354 "github.com/alcionai/corso/src/internal/connector/graph/betasdk/models"
)
// ItemPagesRequestBuilder provides operations to manage the pages property of the microsoft.graph.site entity.

View File

@ -17,6 +17,7 @@ import (
// ---------------------------------------------------------------------------
const (
errCodeActivityLimitReached = "activityLimitReached"
errCodeItemNotFound = "ErrorItemNotFound"
errCodeEmailFolderNotFound = "ErrorSyncFolderNotFound"
errCodeResyncRequired = "ResyncRequired"
@ -31,8 +32,10 @@ var (
// normally the graph client will catch this for us, but in case we
// run our own client Do(), we need to translate it to a timeout type
// failure locally.
Err429TooManyRequests = errors.New("429 too many requests")
Err503ServiceUnavailable = errors.New("503 Service Unavailable")
Err429TooManyRequests = errors.New("429 too many requests")
Err503ServiceUnavailable = errors.New("503 Service Unavailable")
Err504GatewayTimeout = errors.New("504 Gateway Timeout")
Err500InternalServerError = errors.New("500 Internal Server Error")
)
// The folder or item was deleted between the time we identified
@ -113,6 +116,10 @@ func IsErrThrottled(err error) bool {
return true
}
if hasErrorCode(err, errCodeActivityLimitReached) {
return true
}
e := ErrThrottled{}
return errors.As(err, &e)
@ -135,21 +142,18 @@ func IsErrUnauthorized(err error) bool {
return errors.As(err, &e)
}
type ErrServiceUnavailable struct {
type ErrInternalServerError struct {
common.Err
}
func IsSericeUnavailable(err error) bool {
if errors.Is(err, Err503ServiceUnavailable) {
func IsInternalServerError(err error) bool {
if errors.Is(err, Err500InternalServerError) {
return true
}
e := ErrUnauthorized{}
if errors.As(err, &e) {
return true
}
e := ErrInternalServerError{}
return true
return errors.As(err, &e)
}
// ---------------------------------------------------------------------------

View File

@ -0,0 +1,248 @@
package graph
import (
"context"
"testing"
"github.com/microsoftgraph/msgraph-sdk-go/models/odataerrors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/suite"
"github.com/alcionai/corso/src/internal/common"
)
type GraphErrorsUnitSuite struct {
suite.Suite
}
func TestGraphErrorsUnitSuite(t *testing.T) {
suite.Run(t, new(GraphErrorsUnitSuite))
}
func odErr(code string) *odataerrors.ODataError {
odErr := &odataerrors.ODataError{}
merr := odataerrors.MainError{}
merr.SetCode(&code)
odErr.SetError(&merr)
return odErr
}
func (suite *GraphErrorsUnitSuite) TestIsErrDeletedInFlight() {
table := []struct {
name string
err error
expect assert.BoolAssertionFunc
}{
{
name: "nil",
err: nil,
expect: assert.False,
},
{
name: "non-matching",
err: assert.AnError,
expect: assert.False,
},
{
name: "as",
err: ErrDeletedInFlight{Err: *common.EncapsulateError(assert.AnError)},
expect: assert.True,
},
{
name: "non-matching oDataErr",
err: odErr("fnords"),
expect: assert.False,
},
{
name: "not-found oDataErr",
err: odErr(errCodeItemNotFound),
expect: assert.True,
},
{
name: "sync-not-found oDataErr",
err: odErr(errCodeSyncFolderNotFound),
expect: assert.True,
},
}
for _, test := range table {
suite.T().Run(test.name, func(t *testing.T) {
test.expect(t, IsErrDeletedInFlight(test.err))
})
}
}
func (suite *GraphErrorsUnitSuite) TestIsErrInvalidDelta() {
table := []struct {
name string
err error
expect assert.BoolAssertionFunc
}{
{
name: "nil",
err: nil,
expect: assert.False,
},
{
name: "non-matching",
err: assert.AnError,
expect: assert.False,
},
{
name: "as",
err: ErrInvalidDelta{Err: *common.EncapsulateError(assert.AnError)},
expect: assert.True,
},
{
name: "non-matching oDataErr",
err: odErr("fnords"),
expect: assert.False,
},
{
name: "resync-required oDataErr",
err: odErr(errCodeResyncRequired),
expect: assert.True,
},
}
for _, test := range table {
suite.T().Run(test.name, func(t *testing.T) {
test.expect(t, IsErrInvalidDelta(test.err))
})
}
}
func (suite *GraphErrorsUnitSuite) TestIsErrTimeout() {
table := []struct {
name string
err error
expect assert.BoolAssertionFunc
}{
{
name: "nil",
err: nil,
expect: assert.False,
},
{
name: "non-matching",
err: assert.AnError,
expect: assert.False,
},
{
name: "as",
err: ErrTimeout{Err: *common.EncapsulateError(assert.AnError)},
expect: assert.True,
},
{
name: "context deadline",
err: context.DeadlineExceeded,
expect: assert.True,
},
}
for _, test := range table {
suite.T().Run(test.name, func(t *testing.T) {
test.expect(t, IsErrTimeout(test.err))
})
}
}
func (suite *GraphErrorsUnitSuite) TestIsErrThrottled() {
table := []struct {
name string
err error
expect assert.BoolAssertionFunc
}{
{
name: "nil",
err: nil,
expect: assert.False,
},
{
name: "non-matching",
err: assert.AnError,
expect: assert.False,
},
{
name: "as",
err: ErrThrottled{Err: *common.EncapsulateError(assert.AnError)},
expect: assert.True,
},
{
name: "is429",
err: Err429TooManyRequests,
expect: assert.True,
},
}
for _, test := range table {
suite.T().Run(test.name, func(t *testing.T) {
test.expect(t, IsErrThrottled(test.err))
})
}
}
func (suite *GraphErrorsUnitSuite) TestIsErrUnauthorized() {
table := []struct {
name string
err error
expect assert.BoolAssertionFunc
}{
{
name: "nil",
err: nil,
expect: assert.False,
},
{
name: "non-matching",
err: assert.AnError,
expect: assert.False,
},
{
name: "as",
err: ErrUnauthorized{Err: *common.EncapsulateError(assert.AnError)},
expect: assert.True,
},
{
name: "is429",
err: Err401Unauthorized,
expect: assert.True,
},
}
for _, test := range table {
suite.T().Run(test.name, func(t *testing.T) {
test.expect(t, IsErrUnauthorized(test.err))
})
}
}
func (suite *GraphErrorsUnitSuite) TestIsInternalServerError() {
table := []struct {
name string
err error
expect assert.BoolAssertionFunc
}{
{
name: "nil",
err: nil,
expect: assert.False,
},
{
name: "non-matching",
err: assert.AnError,
expect: assert.False,
},
{
name: "as",
err: ErrInternalServerError{Err: *common.EncapsulateError(assert.AnError)},
expect: assert.True,
},
{
name: "is429",
err: Err500InternalServerError,
expect: assert.True,
},
}
for _, test := range table {
suite.T().Run(test.name, func(t *testing.T) {
test.expect(t, IsInternalServerError(test.err))
})
}
}

View File

@ -8,6 +8,7 @@ import (
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
"github.com/alcionai/corso/src/internal/connector/support"
"github.com/microsoft/kiota-abstractions-go/serialization"
ka "github.com/microsoft/kiota-authentication-azure-go"
khttp "github.com/microsoft/kiota-http-go"
@ -22,6 +23,7 @@ import (
const (
logGraphRequestsEnvKey = "LOG_GRAPH_REQUESTS"
numberOfRetries = 3
)
// AllMetadataFileNames produces the standard set of filenames used to store graph
@ -149,7 +151,7 @@ func HTTPClient(opts ...option) *http.Client {
middlewares := msgraphgocore.GetDefaultMiddlewaresWithOptions(&clientOptions)
middlewares = append(middlewares, &LoggingMiddleware{})
httpClient := msgraphgocore.GetDefaultClient(&clientOptions, middlewares...)
httpClient.Timeout = time.Second * 90
httpClient.Timeout = time.Minute * 3
(&clientConfig{}).
populate(opts...).
@ -250,7 +252,6 @@ func (handler *LoggingMiddleware) Intercept(
respDump, _ := httputil.DumpResponse(resp, false)
metadata := []any{
"idx", middlewareIndex,
"method", req.Method,
"status", resp.Status,
"statusCode", resp.StatusCode,
@ -273,7 +274,6 @@ func (handler *LoggingMiddleware) Intercept(
respDump, _ := httputil.DumpResponse(resp, true)
metadata := []any{
"idx", middlewareIndex,
"method", req.Method,
"status", resp.Status,
"statusCode", resp.StatusCode,
@ -296,3 +296,26 @@ func (handler *LoggingMiddleware) Intercept(
return resp, err
}
// Run a function with retries
func RunWithRetry(run func() error) error {
var err error
for i := 0; i < numberOfRetries; i++ {
err = run()
if err == nil {
return nil
}
// only retry on timeouts and 500-internal-errors.
if !(IsErrTimeout(err) || IsInternalServerError(err)) {
break
}
if i < numberOfRetries {
time.Sleep(time.Duration(3*(i+2)) * time.Second)
}
}
return support.ConnectorStackErrorTraceWrap(err, "maximum retries or unretryable")
}

View File

@ -53,7 +53,7 @@ func (suite *GraphUnitSuite) TestHTTPClient() {
name: "no options",
opts: []option{},
check: func(t *testing.T, c *http.Client) {
assert.Equal(t, 90*time.Second, c.Timeout, "default timeout")
assert.Equal(t, 3*time.Minute, c.Timeout, "default timeout")
},
},
{

View File

@ -266,9 +266,11 @@ func (gc *GraphConnector) UnionSiteIDsAndWebURLs(ctx context.Context, ids, urls
// SideEffect: gc.status is updated at the completion of operation
func (gc *GraphConnector) RestoreDataCollections(
ctx context.Context,
backupVersion int,
acct account.Account,
selector selectors.Selector,
dest control.RestoreDestination,
opts control.Options,
dcs []data.Collection,
) (*details.Details, error) {
ctx, end := D.Span(ctx, "connector:restore")
@ -289,9 +291,9 @@ func (gc *GraphConnector) RestoreDataCollections(
case selectors.ServiceExchange:
status, err = exchange.RestoreExchangeDataCollections(ctx, creds, gc.Service, dest, dcs, deets)
case selectors.ServiceOneDrive:
status, err = onedrive.RestoreCollections(ctx, gc.Service, dest, dcs, deets)
status, err = onedrive.RestoreCollections(ctx, backupVersion, gc.Service, dest, opts, dcs, deets)
case selectors.ServiceSharePoint:
status, err = sharepoint.RestoreCollections(ctx, gc.Service, dest, dcs, deets)
status, err = sharepoint.RestoreCollections(ctx, backupVersion, gc.Service, dest, dcs, deets)
default:
err = errors.Errorf("restore data from service %s not supported", selector.Service.String())
}

View File

@ -2,9 +2,11 @@ package connector
import (
"context"
"encoding/json"
"io"
"net/http"
"reflect"
"strings"
"testing"
"github.com/microsoftgraph/msgraph-sdk-go/models"
@ -14,6 +16,7 @@ import (
"golang.org/x/exp/maps"
"github.com/alcionai/corso/src/internal/connector/mockconnector"
"github.com/alcionai/corso/src/internal/connector/onedrive"
"github.com/alcionai/corso/src/internal/connector/support"
"github.com/alcionai/corso/src/internal/data"
"github.com/alcionai/corso/src/internal/tester"
@ -169,6 +172,14 @@ type restoreBackupInfo struct {
resource resource
}
type restoreBackupInfoMultiVersion struct {
name string
service path.ServiceType
collectionsLatest []colInfo
collectionsPrevious []colInfo
resource resource
}
func attachmentEqual(
expected models.Attachmentable,
got models.Attachmentable,
@ -645,21 +656,52 @@ func compareOneDriveItem(
t *testing.T,
expected map[string][]byte,
item data.Stream,
restorePermissions bool,
) {
name := item.UUID()
expectedData := expected[item.UUID()]
if !assert.NotNil(t, expectedData, "unexpected file with name %s", item.UUID) {
if !assert.NotNil(t, expectedData, "unexpected file with name %s", item.UUID()) {
return
}
// OneDrive items are just byte buffers of the data. Nothing special to
// interpret. May need to do chunked comparisons in the future if we test
// large item equality.
buf, err := io.ReadAll(item.ToReader())
if !assert.NoError(t, err) {
return
}
assert.Equal(t, expectedData, buf)
if !strings.HasSuffix(name, onedrive.MetaFileSuffix) && !strings.HasSuffix(name, onedrive.DirMetaFileSuffix) {
// OneDrive data items are just byte buffers of the data. Nothing special to
// interpret. May need to do chunked comparisons in the future if we test
// large item equality.
assert.Equal(t, expectedData, buf)
return
}
var (
itemMeta onedrive.Metadata
expectedMeta onedrive.Metadata
)
err = json.Unmarshal(buf, &itemMeta)
assert.Nil(t, err)
err = json.Unmarshal(expectedData, &expectedMeta)
assert.Nil(t, err)
if !restorePermissions {
assert.Equal(t, 0, len(itemMeta.Permissions))
return
}
assert.Equal(t, len(expectedMeta.Permissions), len(itemMeta.Permissions), "number of permissions after restore")
// FIXME(meain): The permissions before and after might not be in the same order.
for i, p := range expectedMeta.Permissions {
assert.Equal(t, p.Email, itemMeta.Permissions[i].Email)
assert.Equal(t, p.Roles, itemMeta.Permissions[i].Roles)
assert.Equal(t, p.Expiration, itemMeta.Permissions[i].Expiration)
}
}
func compareItem(
@ -668,6 +710,7 @@ func compareItem(
service path.ServiceType,
category path.CategoryType,
item data.Stream,
restorePermissions bool,
) {
if mt, ok := item.(data.StreamModTime); ok {
assert.NotZero(t, mt.ModTime())
@ -687,7 +730,7 @@ func compareItem(
}
case path.OneDriveService:
compareOneDriveItem(t, expected, item)
compareOneDriveItem(t, expected, item, restorePermissions)
default:
assert.FailNowf(t, "unexpected service: %s", service.String())
@ -720,6 +763,7 @@ func checkCollections(
expectedItems int,
expected map[string]map[string][]byte,
got []data.Collection,
restorePermissions bool,
) int {
collectionsWithItems := []data.Collection{}
@ -754,7 +798,7 @@ func checkCollections(
continue
}
compareItem(t, expectedColData, service, category, item)
compareItem(t, expectedColData, service, category, item, restorePermissions)
}
if gotItems != startingItems {
@ -906,10 +950,63 @@ func collectionsForInfo(
tenant, user string,
dest control.RestoreDestination,
allInfo []colInfo,
) (int, []data.Collection, map[string]map[string][]byte) {
) (int, int, []data.Collection, map[string]map[string][]byte) {
collections := make([]data.Collection, 0, len(allInfo))
expectedData := make(map[string]map[string][]byte, len(allInfo))
totalItems := 0
kopiaEntries := 0
for _, info := range allInfo {
pth := mustToDataLayerPath(
t,
service,
tenant,
user,
info.category,
info.pathElements,
false,
)
c := mockconnector.NewMockExchangeCollection(pth, len(info.items))
baseDestPath := backupOutputPathFromRestore(t, dest, pth)
baseExpected := expectedData[baseDestPath.String()]
if baseExpected == nil {
expectedData[baseDestPath.String()] = make(map[string][]byte, len(info.items))
baseExpected = expectedData[baseDestPath.String()]
}
for i := 0; i < len(info.items); i++ {
c.Names[i] = info.items[i].name
c.Data[i] = info.items[i].data
baseExpected[info.items[i].lookupKey] = info.items[i].data
// We do not count metadata files against item count
if service != path.OneDriveService ||
(service == path.OneDriveService &&
strings.HasSuffix(info.items[i].name, onedrive.DataFileSuffix)) {
totalItems++
}
}
collections = append(collections, c)
kopiaEntries += len(info.items)
}
return totalItems, kopiaEntries, collections, expectedData
}
func collectionsForInfoVersion0(
t *testing.T,
service path.ServiceType,
tenant, user string,
dest control.RestoreDestination,
allInfo []colInfo,
) (int, int, []data.Collection, map[string]map[string][]byte) {
collections := make([]data.Collection, 0, len(allInfo))
expectedData := make(map[string]map[string][]byte, len(allInfo))
totalItems := 0
kopiaEntries := 0
for _, info := range allInfo {
pth := mustToDataLayerPath(
@ -939,9 +1036,10 @@ func collectionsForInfo(
collections = append(collections, c)
totalItems += len(info.items)
kopiaEntries += len(info.items)
}
return totalItems, collections, expectedData
return totalItems, kopiaEntries, collections, expectedData
}
//nolint:deadcode

File diff suppressed because it is too large Load Diff

View File

@ -202,6 +202,15 @@ func (suite *MockExchangeDataSuite) TestMockByteHydration() {
return err
},
},
{
name: "SharePoint: Page",
transformation: func(t *testing.T) error {
bytes := mockconnector.GetMockPage(subject)
_, err := support.CreatePageFromBytes(bytes)
return err
},
},
}
for _, test := range tests {

View File

@ -336,3 +336,212 @@ func GetMockEventMessageRequest(subject string) []byte {
return []byte(message)
}
// GetMockMessageWithItemAttachmentEvent returns the serialized bytes of an
// Exchange message that carries a single event itemAttachment ("Holidayevent").
// Only the message subject varies with the argument; every other field
// (IDs, addresses, dates) is canned data from a test tenant.
// NOTE(review): the payload embeds defaultMessageFrom/defaultMessageSender/
// defaultMessageTo/defaultAlias package constants — confirm those stay in
// sync with the other mock builders in this package.
func GetMockMessageWithItemAttachmentEvent(subject string) []byte {
	//nolint:lll
	message := "{\"id\":\"AAMkAGQ1NzViZTdhLTEwMTMtNGJjNi05YWI2LTg4NWRlZDA2Y2UxOABGAAAAAAAPvVwUramXT7jlSGpVU8_7BwB8wYc0thTTTYl3RpEYIUq_AAAAAAEMAAB8wYc0thTTTYl3RpEYIUq_AADFfThMAAA=\",\"@odata.type\":\"#microsoft.graph.message\"," +
		"\"@odata.etag\":\"W/\\\"CQAAABYAAAB8wYc0thTTTYl3RpEYIUq+AADFK3BH\\\"\",\"@odata.context\":\"https://graph.microsoft.com/v1.0/$metadata#users('dustina%408qzvrj.onmicrosoft.com')/messages/$entity\",\"categories\":[]," +
		"\"changeKey\":\"CQAAABYAAAB8wYc0thTTTYl3RpEYIUq+AADFK3BH\",\"createdDateTime\":\"2023-02-01T13:48:43Z\",\"lastModifiedDateTime\":\"2023-02-01T18:27:03Z\"," +
		// The itemAttachment: an embedded #microsoft.graph.event item.
		"\"attachments\":[{\"id\":\"AAMkAGQ1NzViZTdhLTEwMTMtNGJjNi05YWI2LTg4NWRlZDA2Y2UxOABGAAAAAAAPvVwUramXT7jlSGpVU8_7BwB8wYc0thTTTYl3RpEYIUq_AAAAAAEMAAB8wYc0thTTTYl3RpEYIUq_AADFfThMAAABEgAQAKHxTL6mNCZPo71dbwrfKYM=\"," +
		"\"@odata.type\":\"#microsoft.graph.itemAttachment\",\"isInline\":false,\"lastModifiedDateTime\":\"2023-02-01T13:52:56Z\",\"name\":\"Holidayevent\",\"size\":2059,\"item\":{\"id\":\"\",\"@odata.type\":\"#microsoft.graph.event\"," +
		"\"createdDateTime\":\"2023-02-01T13:52:56Z\",\"lastModifiedDateTime\":\"2023-02-01T13:52:56Z\",\"body\":{\"content\":\"<html><head>\\r\\n<metahttp-equiv=\\\"Content-Type\\\"content=\\\"text/html;charset=utf-8\\\"></head><body>Let'slookforfunding!</body></html>\"," +
		"\"contentType\":\"html\"},\"end\":{\"dateTime\":\"2016-12-02T19:00:00.0000000Z\",\"timeZone\":\"UTC\"}," +
		"\"hasAttachments\":false,\"isAllDay\":false,\"isCancelled\":false,\"isDraft\":true,\"isOnlineMeeting\":false,\"isOrganizer\":true,\"isReminderOn\":false,\"organizer\":{\"emailAddress\":{\"address\":\"" + defaultMessageFrom + "\",\"name\":\"" + defaultAlias + "\"}}," +
		"\"originalEndTimeZone\":\"tzone://Microsoft/Utc\",\"originalStartTimeZone\":\"tzone://Microsoft/Utc\",\"reminderMinutesBeforeStart\":0,\"responseRequested\":true,\"start\":{\"dateTime\":\"2016-12-02T18:00:00.0000000Z\",\"timeZone\":\"UTC\"}," +
		"\"subject\":\"Discussgiftsforchildren\",\"type\":\"singleInstance\"}}],\"bccRecipients\":[],\"body\":{\"content\":\"<html><head>\\r\\n<metahttp-equiv=\\\"Content-Type\\\"content=\\\"text/html;charset=utf-8\\\"><styletype=\\\"text/css\\\"style=\\\"display:none\\\">\\r\\n<!--\\r\\np\\r\\n\\t{margin-top:0;\\r\\n\\tmargin-bottom:0}\\r\\n-->\\r\\n</style></head><bodydir=\\\"ltr\\\"><divclass=\\\"elementToProof\\\"style=\\\"font-family:Calibri,Arial,Helvetica,sans-serif;font-size:12pt;color:rgb(0,0,0);background-color:rgb(255,255,255)\\\">Lookingtodothis </div></body></html>\",\"contentType\":\"html\"}," +
		"\"bodyPreview\":\"Lookingtodothis\",\"ccRecipients\":[],\"conversationId\":\"AAQkAGQ1NzViZTdhLTEwMTMtNGJjNi05YWI2LTg4NWRlZDA2Y2UxOAAQADGvj5ACBMdGpESX4xSOxCo=\",\"conversationIndex\":\"AQHZNkPmMa+PkAIEx0akRJfjFI7EKg==\",\"flag\":{\"flagStatus\":\"notFlagged\"}," +
		"\"from\":{\"emailAddress\":{\"address\":\"" + defaultMessageFrom + "\",\"name\":\"" + defaultAlias + "\"}},\"hasAttachments\":true,\"importance\":\"normal\",\"inferenceClassification\":\"focused\"," +
		"\"internetMessageId\":\"<SJ0PR17MB56220B4F6A443386A11D5154C3D19@SJ0PR17MB5622.namprd17.prod.outlook.com>\",\"isDeliveryReceiptRequested\":false,\"isDraft\":false,\"isRead\":true,\"isReadReceiptRequested\":false," +
		"\"parentFolderId\":\"AQMkAGQ1NzViZTdhLTEwMTMtNGJjNi05YWI2LTg4ADVkZWQwNmNlMTgALgAAAw_9XBStqZdPuOVIalVTz7sBAHzBhzS2FNNNiXdGkRghSr4AAAIBDAAAAA==\",\"receivedDateTime\":\"2023-02-01T13:48:47Z\",\"replyTo\":[]," +
		"\"sender\":{\"emailAddress\":{\"address\":\"" + defaultMessageSender + "\",\"name\":\"" + defaultAlias + "\"}},\"sentDateTime\":\"2023-02-01T13:48:46Z\"," +
		"\"subject\":\"" + subject + "\",\"toRecipients\":[{\"emailAddress\":{\"address\":\"" + defaultMessageTo + "\",\"name\":\"" + defaultAlias + "\"}}]," +
		"\"webLink\":\"https://outlook.office365.com/owa/?ItemID=AAMkAGQ1NzViZTdhLTEwMTMtNGJjNi05YWI2LTg4NWRlZDA2Y2UxOABGAAAAAAAPvVwUramXT7jlSGpVU8%2B7BwB8wYc0thTTTYl3RpEYIUq%2BAAAAAAEMAAB8wYc0thTTTYl3RpEYIUq%2BAADFfThMAAA%3D&exvsurl=1&viewmodel=ReadMessageItem\"}"

	return []byte(message)
}
// GetMockMessageWithNestedItemAttachmentEvent returns the serialized bytes of
// an Exchange message whose itemAttachment is itself a message that carries a
// nested event itemAttachment (message -> message -> event). Only the outer
// subject varies with the argument; all other values are canned test data.
func GetMockMessageWithNestedItemAttachmentEvent(subject string) []byte {
	//nolint:lll
	// Order of fields (fmt.Sprintf positional args below):
	// 1. subject
	// 2. alias
	// 3. sender address
	// 4. from address
	// 5. toRecipients email address
	template := `{
    "@odata.context": "https://graph.microsoft.com/v1.0/$metadata#users('f435c656-f8b2-4d71-93c3-6e092f52a167')/messages(attachments())/$entity",
    "@odata.etag": "W/\"CQAAABYAAAB8wYc0thTTTYl3RpEYIUq+AADFK782\"",
    "id": "AAMkAGQ1NzViZTdhLTEwMTMtNGJjNi05YWI2LTg4NWRlZDA2Y2UxOABGAAAAAAAPvVwUramXT7jlSGpVU8_7BwB8wYc0thTTTYl3RpEYIUq_AAAAAAEMAAB8wYc0thTTTYl3RpEYIUq_AADFfThSAAA=",
    "createdDateTime": "2023-02-02T21:38:27Z",
    "lastModifiedDateTime": "2023-02-02T22:42:49Z",
    "changeKey": "CQAAABYAAAB8wYc0thTTTYl3RpEYIUq+AADFK782",
    "categories": [],
    "receivedDateTime": "2023-02-02T21:38:27Z",
    "sentDateTime": "2023-02-02T21:38:24Z",
    "hasAttachments": true,
    "internetMessageId": "<SJ0PR17MB562287BE29A86751D6E77FE5C3D69@SJ0PR17MB5622.namprd17.prod.outlook.com>",
    "subject": "%[1]v",
    "bodyPreview": "Dustin,\r\n\r\nI'm here to see if we are still able to discover our object.",
    "importance": "normal",
    "parentFolderId": "AQMkAGQ1NzViZTdhLTEwMTMtNGJjNi05YWI2LTg4ADVkZWQwNmNlMTgALgAAAw_9XBStqZdPuOVIalVTz7sBAHzBhzS2FNNNiXdGkRghSr4AAAIBDAAAAA==",
    "conversationId": "AAQkAGQ1NzViZTdhLTEwMTMtNGJjNi05YWI2LTg4NWRlZDA2Y2UxOAAQAB13OyMdkNJJqEaIrGi3Yjc=",
    "conversationIndex": "AQHZN06dHXc7Ix2Q0kmoRoisaLdiNw==",
    "isDeliveryReceiptRequested": false,
    "isReadReceiptRequested": false,
    "isRead": false,
    "isDraft": false,
    "webLink": "https://outlook.office365.com/owa/?ItemID=AAMkAGQ1NzTruncated",
    "inferenceClassification": "focused",
    "body": {
        "contentType": "html",
        "content": "<html><head>\r\n<meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\"><style type=\"text/css\" style=\"display:none\">\r\n<!--\r\np\r\n\t{margin-top:0;\r\n\tmargin-bottom:0}\r\n-->\r\n</style></head><body dir=\"ltr\"><div class=\"elementToProof\" style=\"font-family:Calibri,Arial,Helvetica,sans-serif; font-size:12pt; color:rgb(0,0,0); background-color:rgb(255,255,255)\">Dustin,</div><div class=\"elementToProof\" style=\"font-family:Calibri,Arial,Helvetica,sans-serif; font-size:12pt; color:rgb(0,0,0); background-color:rgb(255,255,255)\"><br></div><div class=\"elementToProof\" style=\"font-family:Calibri,Arial,Helvetica,sans-serif; font-size:12pt; color:rgb(0,0,0); background-color:rgb(255,255,255)\">I'm here to see if we are still able to discover our object.&nbsp;</div></body></html>"
    },
    "sender": {
        "emailAddress": {
            "name": "%[2]s",
            "address": "%[3]s"
        }
    },
    "from": {
        "emailAddress": {
            "name": "%[2]s",
            "address": "%[4]s"
        }
    },
    "toRecipients": [
        {
            "emailAddress": {
                "name": "%[2]s",
                "address": "%[5]s"
            }
        }
    ],
    "ccRecipients": [],
    "bccRecipients": [],
    "replyTo": [],
    "flag": {
        "flagStatus": "notFlagged"
    },
    "attachments": [
        {
            "@odata.type": "#microsoft.graph.itemAttachment",
            "id": "AAMkAGQ1NzViZTdhLTEwMTMtNGJjNi05YWI2LTg4NWRlZDA2Y2UxOABGAAAAAAAPvVwUramXT7jlSGpVU8_7BwB8wYc0thTTTYl3RpEYIUq_AAAAAAEMAAB8wYc0thTTTYl3RpEYIUq_AADFfThSAAABEgAQAIyAgT1ZccRCjKKyF7VZ3dA=",
            "lastModifiedDateTime": "2023-02-02T21:38:27Z",
            "name": "Mail Item Attachment",
            "contentType": null,
            "size": 5362,
            "isInline": false,
            "item@odata.associationLink": "https://graph.microsoft.com/v1.0/users('f435c656-f8b2-4d71-93c3-6e092f52a167')/messages('')/$ref",
            "item@odata.navigationLink": "https://graph.microsoft.com/v1.0/users('f435c656-f8b2-4d71-93c3-6e092f52a167')/messages('')",
            "item": {
                "@odata.type": "#microsoft.graph.message",
                "id": "",
                "createdDateTime": "2023-02-02T21:38:27Z",
                "lastModifiedDateTime": "2023-02-02T21:38:27Z",
                "receivedDateTime": "2023-02-01T13:48:47Z",
                "sentDateTime": "2023-02-01T13:48:46Z",
                "hasAttachments": true,
                "internetMessageId": "<SJ0PR17MB56220B4F6A443386A11D5154C3D19@SJ0PR17MB5622.namprd17.prod.outlook.com>",
                "subject": "Mail Item Attachment",
                "bodyPreview": "Lookingtodothis",
                "importance": "normal",
                "conversationId": "AAQkAGQ1NzViZTdhLTEwMTMtNGJjNi05YWI2LTg4NWRlZDA2Y2UxOAAQAMNK0NU7Kx5GhAaHdzhfSRU=",
                "conversationIndex": "AQHZN02pw0rQ1TsrHkaEBod3OF9JFQ==",
                "isDeliveryReceiptRequested": false,
                "isReadReceiptRequested": false,
                "isRead": true,
                "isDraft": false,
                "webLink": "https://outlook.office365.com/owa/?AttachmentItemID=AAMkAGQ1NzViZTdhLTEwMTM",
                "body": {
                    "contentType": "html",
                    "content": "<html><head>\r\n<meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\"><metahttp-equiv=\"Content-Type\"content=\"text html;charset=\"utf-8&quot;\"><styletype=\"text css?style=\"display:none\"><!--\r\np\r\n\t{margin-top:0;\r\n\tmargin-bottom:0}\r\n--><bodydir=\"ltr\"><divclass=\"elementToProof\"style=\"font-family:Calibri,Arial,Helvetica,sans-serif;font-size:12pt;color:rgb(0,0,0);background-color:rgb(255,255,255)\"></head><body>Lookingtodothis&nbsp; <div></div></body></html>"
                },
                "sender": {
                    "emailAddress": {
                        "name": "A Stranger",
                        "address": "foobar@8qzvrj.onmicrosoft.com"
                    }
                },
                "from": {
                    "emailAddress": {
                        "name": "A Stranger",
                        "address": "foobar@8qzvrj.onmicrosoft.com"
                    }
                },
                "toRecipients": [
                    {
                        "emailAddress": {
                            "name": "Direct Report",
                            "address": "notAvailable@8qzvrj.onmicrosoft.com"
                        }
                    }
                ],
                "flag": {
                    "flagStatus": "notFlagged"
                },
                "attachments": [
                    {
                        "@odata.type": "#microsoft.graph.itemAttachment",
                        "id": "AAMkAGQ1NzViZTdhLTEwMTMtNGJjNi05YWI2LTg4NWRlZDA2Y2UxOABGAAAAAAAPvVwUramXT7jlSGpVU8_7BwB8wYc0thTTTYl3RpEYIUq_AAAAAAEMAAB8wYc0thTTTYl3RpEYIUq_AADFfThSAAACEgAQAIyAgT1ZccRCjKKyF7VZ3dASABAAuYCb3N2YZ02RpJrZPzCBFQ==",
                        "lastModifiedDateTime": "2023-02-02T21:38:27Z",
                        "name": "Holidayevent",
                        "contentType": null,
                        "size": 2331,
                        "isInline": false,
                        "item@odata.associationLink": "https://graph.microsoft.com/v1.0/users('f435c656-f8b2-4d71-93c3-6e092f52a167')/events('')/$ref",
                        "item@odata.navigationLink": "https://graph.microsoft.com/v1.0/users('f435c656-f8b2-4d71-93c3-6e092f52a167')/events('')",
                        "item": {
                            "@odata.type": "#microsoft.graph.event",
                            "id": "",
                            "createdDateTime": "2023-02-02T21:38:27Z",
                            "lastModifiedDateTime": "2023-02-02T21:38:27Z",
                            "originalStartTimeZone": "tzone://Microsoft/Utc",
                            "originalEndTimeZone": "tzone://Microsoft/Utc",
                            "reminderMinutesBeforeStart": 0,
                            "isReminderOn": false,
                            "hasAttachments": false,
                            "subject": "Discuss Gifts for Children",
                            "isAllDay": false,
                            "isCancelled": false,
                            "isOrganizer": true,
                            "responseRequested": true,
                            "type": "singleInstance",
                            "isOnlineMeeting": false,
                            "isDraft": true,
                            "body": {
                                "contentType": "html",
                                "content": "<html><head>\r\n<meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\"><metahttp-equiv=\"Content-Type\"content=\"text html;charset=\"utf-8&quot;\"></head><body>Let'slookforfunding! </body></html>"
                            },
                            "start": {
                                "dateTime": "2016-12-02T18:00:00.0000000Z",
                                "timeZone": "UTC"
                            },
                            "end": {
                                "dateTime": "2016-12-02T19:00:00.0000000Z",
                                "timeZone": "UTC"
                            },
                            "organizer": {
                                "emailAddress": {
                                    "name": "Event Manager",
                                    "address": "philonis@8qzvrj.onmicrosoft.com"
                                }
                            }
                        }
                    }
                ]
            }
        }
    ]
}`

	message := fmt.Sprintf(
		template,
		subject,
		defaultAlias,
		defaultMessageSender,
		defaultMessageFrom,
		defaultMessageTo,
	)

	return []byte(message)
}

View File

@ -0,0 +1,25 @@
package mockconnector
// GetMockPage returns the bytes of a serialized models.SitePageable object.
// The given title is used for the page's "title" field and, with an ".aspx"
// extension appended, for its "name" field; every other field is canned data.
func GetMockPage(title string) []byte {
	pageName := title + ".aspx"

	//nolint:lll
	page := "{\"name\":\"" + pageName + "\",\"title\":\"" + title + "\",\"pageLayout\":\"article\",\"showComments\":true," +
		"\"showRecommendedPages\":false,\"titleArea\":{\"enableGradientEffect\":true,\"imageWebUrl\":\"/_LAYOUTS/IMAGES/VISUALTEMPLATETITLEIMAGE.JPG\"," +
		"\"layout\":\"colorBlock\",\"showAuthor\":true,\"showPublishedDate\":false,\"showTextBlockAboveTitle\":false,\"textAboveTitle\":\"TEXTABOVETITLE\"," +
		"\"textAlignment\":\"left\",\"imageSourceType\":2,\"title\":\"sample1\"}," +
		"\"canvasLayout\":{\"horizontalSections\":[{\"layout\":\"oneThirdRightColumn\",\"id\":\"1\",\"emphasis\":\"none\",\"columns\":[{\"id\":\"1\",\"width\":8," +
		"\"webparts\":[{\"id\":\"6f9230af-2a98-4952-b205-9ede4f9ef548\",\"innerHtml\":\"<p><b>Hello!</b></p>\"}]},{\"id\":\"2\",\"width\":4," +
		"\"webparts\":[{\"id\":\"73d07dde-3474-4545-badb-f28ba239e0e1\",\"webPartType\":\"d1d91016-032f-456d-98a4-721247c305e8\",\"data\":{\"dataVersion\":\"1.9\"," +
		"\"description\":\"Showanimageonyourpage\",\"title\":\"Image\",\"properties\":{\"imageSourceType\":2,\"altText\":\"\",\"overlayText\":\"\"," +
		"\"siteid\":\"0264cabe-6b92-450a-b162-b0c3d54fe5e8\",\"webid\":\"f3989670-cd37-4514-8ccb-0f7c2cbe5314\",\"listid\":\"bdb41041-eb06-474e-ac29-87093386bb14\"," +
		"\"uniqueid\":\"d9f94b40-78ba-48d0-a39f-3cb23c2fe7eb\",\"imgWidth\":4288,\"imgHeight\":2848,\"fixAspectRatio\":false,\"captionText\":\"\",\"alignment\":\"Center\"}," +
		"\"serverProcessedContent\":{\"imageSources\":[{\"key\":\"imageSource\",\"value\":\"/_LAYOUTS/IMAGES/VISUALTEMPLATEIMAGE1.JPG\"}]," +
		"\"customMetadata\":[{\"key\":\"imageSource\",\"value\":{\"siteid\":\"0264cabe-6b92-450a-b162-b0c3d54fe5e8\",\"webid\":\"f3989670-cd37-4514-8ccb-0f7c2cbe5314\"," +
		"\"listid\":\"bdb41041-eb06-474e-ac29-87093386bb14\",\"uniqueid\":\"d9f94b40-78ba-48d0-a39f-3cb23c2fe7eb\",\"width\":\"4288\",\"height\":\"2848\"}}]}}}]}]}]}}"

	return []byte(page)
}

View File

@ -3,6 +3,7 @@ package api
import (
"context"
msdrives "github.com/microsoftgraph/msgraph-sdk-go/drives"
"github.com/microsoftgraph/msgraph-sdk-go/models"
mssites "github.com/microsoftgraph/msgraph-sdk-go/sites"
msusers "github.com/microsoftgraph/msgraph-sdk-go/users"
@ -12,6 +13,75 @@ import (
"github.com/alcionai/corso/src/internal/connector/graph/api"
)
// getValues extracts the typed value slice from a page response. It returns
// a descriptive error when the concrete response type does not expose a
// GetValue() []T accessor.
func getValues[T any](l api.PageLinker) ([]T, error) {
	if vp, ok := l.(interface{ GetValue() []T }); ok {
		return vp.GetValue(), nil
	}

	return nil, errors.Errorf(
		"response of type [%T] does not comply with GetValue() interface",
		l,
	)
}
// pageSize is the number of items requested per page ($top). 999 is the
// maximum the Graph API accepts for these endpoints; larger pages mean
// fewer round trips.
const pageSize = int32(999)
// driveItemPager pages through drive-item delta results for a single drive,
// preserving the request configuration across SetNext calls.
type driveItemPager struct {
	gs graph.Servicer
	builder *msdrives.ItemRootDeltaRequestBuilder
	options *msdrives.ItemRootDeltaRequestBuilderGetRequestConfiguration
}
// NewItemPager constructs a pager over the delta of items in the given
// drive, selecting only the requested fields. When link is non-empty the
// pager resumes from that previously returned delta/next link instead of
// starting at the drive root.
func NewItemPager(
	gs graph.Servicer,
	driveID, link string,
	fields []string,
) *driveItemPager {
	// Top requires an addressable value, so copy the constant to a local.
	top := pageSize

	builder := gs.Client().DrivesById(driveID).Root().Delta()
	if len(link) > 0 {
		builder = msdrives.NewItemRootDeltaRequestBuilder(link, gs.Adapter())
	}

	return &driveItemPager{
		gs: gs,
		options: &msdrives.ItemRootDeltaRequestBuilderGetRequestConfiguration{
			QueryParameters: &msdrives.ItemRootDeltaRequestBuilderGetQueryParameters{
				Top:    &top,
				Select: fields,
			},
		},
		builder: builder,
	}
}
// GetPage fetches the next page of drive-item delta results, routing the
// call through the shared graph retry helper so transient failures are
// retried.
func (p *driveItemPager) GetPage(ctx context.Context) (api.DeltaPageLinker, error) {
	var page api.DeltaPageLinker

	err := graph.RunWithRetry(func() error {
		var getErr error

		page, getErr = p.builder.Get(ctx, p.options)

		return getErr
	})

	return page, err
}
// SetNext repoints the pager at the given next/delta link so the following
// GetPage call fetches that page.
func (p *driveItemPager) SetNext(link string) {
	p.builder = msdrives.NewItemRootDeltaRequestBuilder(link, p.gs.Adapter())
}
// ValuesIn extracts the DriveItemable values contained in the delta page.
func (p *driveItemPager) ValuesIn(l api.DeltaPageLinker) ([]models.DriveItemable, error) {
	return getValues[models.DriveItemable](l)
}
type userDrivePager struct {
gs graph.Servicer
builder *msusers.ItemDrivesRequestBuilder
@ -39,7 +109,17 @@ func NewUserDrivePager(
}
// GetPage fetches the current page of the user's drives, routing the call
// through the shared graph retry helper so transient failures (timeouts,
// 500s) are retried. The merge artifact that left the old, unguarded
// `return p.builder.Get(...)` ahead of the retry-wrapped body is removed.
func (p *userDrivePager) GetPage(ctx context.Context) (api.PageLinker, error) {
	var (
		resp api.PageLinker
		err  error
	)

	err = graph.RunWithRetry(func() error {
		resp, err = p.builder.Get(ctx, p.options)
		return err
	})

	return resp, err
}
func (p *userDrivePager) SetNext(link string) {
@ -47,15 +127,7 @@ func (p *userDrivePager) SetNext(link string) {
}
// ValuesIn extracts the Driveable values from the page response. The merge
// artifact that kept the superseded hand-rolled type assertion above the
// delegating return (two return paths, dead code) is removed; the shared
// getValues helper is the single implementation.
func (p *userDrivePager) ValuesIn(l api.PageLinker) ([]models.Driveable, error) {
	return getValues[models.Driveable](l)
}
type siteDrivePager struct {
@ -85,7 +157,17 @@ func NewSiteDrivePager(
}
// GetPage fetches the current page of the site's drives, routing the call
// through the shared graph retry helper so transient failures (timeouts,
// 500s) are retried. The merge artifact that left the old, unguarded
// `return p.builder.Get(...)` ahead of the retry-wrapped body is removed.
func (p *siteDrivePager) GetPage(ctx context.Context) (api.PageLinker, error) {
	var (
		resp api.PageLinker
		err  error
	)

	err = graph.RunWithRetry(func() error {
		resp, err = p.builder.Get(ctx, p.options)
		return err
	})

	return resp, err
}
func (p *siteDrivePager) SetNext(link string) {
@ -93,13 +175,5 @@ func (p *siteDrivePager) SetNext(link string) {
}
// ValuesIn extracts the Driveable values from the page response. The merge
// artifact that kept the superseded hand-rolled type assertion above the
// delegating return (two return paths, dead code) is removed; the shared
// getValues helper is the single implementation.
func (p *siteDrivePager) ValuesIn(l api.PageLinker) ([]models.Driveable, error) {
	return getValues[models.Driveable](l)
}

View File

@ -5,6 +5,7 @@ import (
"context"
"io"
"net/http"
"strings"
"sync"
"sync/atomic"
"time"
@ -34,6 +35,10 @@ const (
// Max number of retries to get doc from M365
// Seems to timeout at times because of multiple requests
maxRetries = 4 // 1 + 3 retries
MetaFileSuffix = ".meta"
DirMetaFileSuffix = ".dirmeta"
DataFileSuffix = ".data"
)
var (
@ -56,12 +61,13 @@ type Collection struct {
// M365 IDs of file items within this collection
driveItems map[string]models.DriveItemable
// M365 ID of the drive this collection was created from
driveID string
source driveSource
service graph.Servicer
statusUpdater support.StatusUpdater
itemReader itemReaderFunc
ctrl control.Options
driveID string
source driveSource
service graph.Servicer
statusUpdater support.StatusUpdater
itemReader itemReaderFunc
itemMetaReader itemMetaReaderFunc
ctrl control.Options
// should only be true if the old delta token expired
doNotMergeItems bool
@ -73,6 +79,15 @@ type itemReaderFunc func(
item models.DriveItemable,
) (itemInfo details.ItemInfo, itemData io.ReadCloser, err error)
// itemMetaReaderFunc returns a reader for the metadata of the specified
// item, along with the metadata's size in bytes (used for progress
// reporting) and any error encountered while fetching it.
type itemMetaReaderFunc func(
	ctx context.Context,
	service graph.Servicer,
	driveID string,
	item models.DriveItemable,
) (io.ReadCloser, int, error)
// NewCollection creates a Collection
func NewCollection(
itemClient *http.Client,
@ -101,6 +116,7 @@ func NewCollection(
c.itemReader = sharePointItemReader
default:
c.itemReader = oneDriveItemReader
c.itemMetaReader = oneDriveItemMetaReader
}
return c
@ -138,6 +154,21 @@ func (oc Collection) DoNotMergeItems() bool {
return oc.doNotMergeItems
}
// UserPermission stores the permissions a specific user holds on a
// OneDrive item (file or folder).
// NOTE(review): the json tag on Roles is "role" (singular) even though the
// field holds a slice; persisted metadata depends on this spelling, so
// confirm compatibility before renaming the tag.
type UserPermission struct {
	ID string `json:"id,omitempty"`
	Roles []string `json:"role,omitempty"`
	Email string `json:"email,omitempty"`
	Expiration *time.Time `json:"expiration,omitempty"`
}
// Metadata contains metadata about an Item (currently its permissions).
// It gets stored in a separate file in kopia, alongside the item data.
type Metadata struct {
	Permissions []UserPermission `json:"permissions,omitempty"`
}
// Item represents a single item retrieved from OneDrive
type Item struct {
id string
@ -173,18 +204,21 @@ func (od *Item) ModTime() time.Time {
// and uses the collection `itemReader` to read the item
func (oc *Collection) populateItems(ctx context.Context) {
var (
errs error
byteCount int64
itemsRead int64
wg sync.WaitGroup
m sync.Mutex
errs error
byteCount int64
itemsRead int64
dirsRead int64
itemsFound int64
dirsFound int64
wg sync.WaitGroup
m sync.Mutex
)
// Retrieve the OneDrive folder path to set later in
// `details.OneDriveInfo`
parentPathString, err := path.GetDriveFolderPath(oc.folderPath)
if err != nil {
oc.reportAsCompleted(ctx, 0, 0, err)
oc.reportAsCompleted(ctx, 0, 0, 0, err)
return
}
@ -205,16 +239,11 @@ func (oc *Collection) populateItems(ctx context.Context) {
m.Unlock()
}
for id, item := range oc.driveItems {
for _, item := range oc.driveItems {
if oc.ctrl.FailFast && errs != nil {
break
}
if item == nil {
errUpdater(id, errors.New("nil item"))
continue
}
semaphoreCh <- struct{}{}
wg.Add(1)
@ -223,13 +252,64 @@ func (oc *Collection) populateItems(ctx context.Context) {
defer wg.Done()
defer func() { <-semaphoreCh }()
// Read the item
var (
itemID = *item.GetId()
itemName = *item.GetName()
itemSize = *item.GetSize()
itemInfo details.ItemInfo
itemID = *item.GetId()
itemName = *item.GetName()
itemSize = *item.GetSize()
itemInfo details.ItemInfo
itemMeta io.ReadCloser
itemMetaSize int
metaSuffix string
err error
)
isFile := item.GetFile() != nil
if isFile {
atomic.AddInt64(&itemsFound, 1)
metaSuffix = MetaFileSuffix
} else {
atomic.AddInt64(&dirsFound, 1)
metaSuffix = DirMetaFileSuffix
}
if oc.source == OneDriveSource {
// Fetch metadata for the file
for i := 1; i <= maxRetries; i++ {
if !oc.ctrl.ToggleFeatures.EnablePermissionsBackup {
// We are still writing the metadata file but with
// empty permissions as we don't have a way to
// signify that the permissions was explicitly
// not added.
itemMeta = io.NopCloser(strings.NewReader("{}"))
itemMetaSize = 2
break
}
itemMeta, itemMetaSize, err = oc.itemMetaReader(ctx, oc.service, oc.driveID, item)
// retry on Timeout type errors, break otherwise.
if err == nil ||
!graph.IsErrTimeout(err) ||
!graph.IsInternalServerError(err) {
break
}
if i < maxRetries {
time.Sleep(1 * time.Second)
}
}
if err != nil {
errUpdater(*item.GetId(), errors.Wrap(err, "failed to get item permissions"))
return
}
}
switch oc.source {
case SharePointSource:
itemInfo.SharePoint = sharePointItemInfo(item, itemSize)
@ -239,101 +319,127 @@ func (oc *Collection) populateItems(ctx context.Context) {
itemInfo.OneDrive.ParentPath = parentPathString
}
// Construct a new lazy readCloser to feed to the collection consumer.
// This ensures that downloads won't be attempted unless that consumer
// attempts to read bytes. Assumption is that kopia will check things
// like file modtimes before attempting to read.
itemReader := lazy.NewLazyReadCloser(func() (io.ReadCloser, error) {
// Read the item
var (
itemData io.ReadCloser
err error
)
if isFile {
dataSuffix := ""
if oc.source == OneDriveSource {
dataSuffix = DataFileSuffix
}
for i := 1; i <= maxRetries; i++ {
_, itemData, err = oc.itemReader(oc.itemClient, item)
if err == nil {
break
}
// Construct a new lazy readCloser to feed to the collection consumer.
// This ensures that downloads won't be attempted unless that consumer
// attempts to read bytes. Assumption is that kopia will check things
// like file modtimes before attempting to read.
itemReader := lazy.NewLazyReadCloser(func() (io.ReadCloser, error) {
// Read the item
var (
itemData io.ReadCloser
err error
)
if graph.IsErrUnauthorized(err) {
// assume unauthorized requests are a sign of an expired
// jwt token, and that we've overrun the available window
// to download the actual file. Re-downloading the item
// will refresh that download url.
di, diErr := getDriveItem(ctx, oc.service, oc.driveID, itemID)
if diErr != nil {
err = errors.Wrap(diErr, "retrieving expired item")
for i := 1; i <= maxRetries; i++ {
_, itemData, err = oc.itemReader(oc.itemClient, item)
if err == nil {
break
}
item = di
if graph.IsErrUnauthorized(err) {
// assume unauthorized requests are a sign of an expired
// jwt token, and that we've overrun the available window
// to download the actual file. Re-downloading the item
// will refresh that download url.
di, diErr := getDriveItem(ctx, oc.service, oc.driveID, itemID)
if diErr != nil {
err = errors.Wrap(diErr, "retrieving expired item")
break
}
continue
item = di
} else if !graph.IsErrTimeout(err) && !graph.IsErrThrottled(err) && !graph.IsSericeUnavailable(err) {
// TODO: graphAPI will provides headers that state the duration to wait
// in order to succeed again. The one second sleep won't cut it here.
//
// for all non-timeout, non-unauth, non-throttling errors, do not retry
break
continue
} else if !graph.IsErrTimeout(err) &&
!graph.IsInternalServerError(err) {
// Don't retry for non-timeout, on-unauth, as
// we are already retrying it in the default
// retry middleware
break
}
if i < maxRetries {
time.Sleep(1 * time.Second)
}
}
if i < maxRetries {
time.Sleep(1 * time.Second)
// check for errors following retries
if err != nil {
errUpdater(itemID, err)
return nil, err
}
// display/log the item download
progReader, closer := observe.ItemProgress(
ctx,
itemData,
observe.ItemBackupMsg,
observe.PII(itemName+dataSuffix),
itemSize,
)
go closer()
return progReader, nil
})
oc.data <- &Item{
id: itemName + dataSuffix,
data: itemReader,
info: itemInfo,
}
}
// check for errors following retries
if err != nil {
errUpdater(itemID, err)
return nil, err
if oc.source == OneDriveSource {
metaReader := lazy.NewLazyReadCloser(func() (io.ReadCloser, error) {
progReader, closer := observe.ItemProgress(
ctx, itemMeta, observe.ItemBackupMsg,
observe.PII(itemName+metaSuffix), int64(itemMetaSize))
go closer()
return progReader, nil
})
oc.data <- &Item{
id: itemName + metaSuffix,
data: metaReader,
info: itemInfo,
}
}
// display/log the item download
progReader, closer := observe.ItemProgress(ctx, itemData, observe.ItemBackupMsg, observe.PII(itemName), itemSize)
go closer()
return progReader, nil
})
// This can cause inaccurate counts. Right now it counts all the items
// we intend to read. Errors within the lazy readCloser will create a
// conflict: an item is both successful and erroneous. But the async
// control to fix that is more error-prone than helpful.
//
// TODO: transform this into a stats bus so that async control of stats
// aggregation is handled at the backup level, not at the item iteration
// level.
//
// Item read successfully, add to collection
atomic.AddInt64(&itemsRead, 1)
if isFile {
atomic.AddInt64(&itemsRead, 1)
} else {
atomic.AddInt64(&dirsRead, 1)
}
// byteCount iteration
atomic.AddInt64(&byteCount, itemSize)
oc.data <- &Item{
id: itemName,
data: itemReader,
info: itemInfo,
}
folderProgress <- struct{}{}
}(item)
}
wg.Wait()
oc.reportAsCompleted(ctx, int(itemsRead), byteCount, errs)
oc.reportAsCompleted(ctx, int(itemsFound), int(itemsRead), byteCount, errs)
}
func (oc *Collection) reportAsCompleted(ctx context.Context, itemsRead int, byteCount int64, errs error) {
func (oc *Collection) reportAsCompleted(ctx context.Context, itemsFound, itemsRead int, byteCount int64, errs error) {
close(oc.data)
status := support.CreateStatus(ctx, support.Backup,
1, // num folders (always 1)
support.CollectionMetrics{
Objects: len(oc.driveItems), // items to read,
Successes: itemsRead, // items read successfully,
TotalBytes: byteCount, // Number of bytes read in the operation,
Objects: itemsFound, // items to read,
Successes: itemsRead, // items read successfully,
TotalBytes: byteCount, // Number of bytes read in the operation,
},
errs,
oc.folderPath.Folder(), // Additional details

View File

@ -2,8 +2,11 @@ package onedrive
import (
"bytes"
"context"
"encoding/json"
"io"
"net/http"
"strings"
"sync"
"testing"
"time"
@ -60,6 +63,14 @@ func (suite *CollectionUnitTestSuite) TestCollection() {
testItemName = "itemName"
testItemData = []byte("testdata")
now = time.Now()
testItemMeta = Metadata{Permissions: []UserPermission{
{
ID: "testMetaID",
Roles: []string{"read", "write"},
Email: "email@provider.com",
Expiration: &now,
},
}}
)
type nst struct {
@ -157,13 +168,14 @@ func (suite *CollectionUnitTestSuite) TestCollection() {
suite,
suite.testStatusUpdater(&wg, &collStatus),
test.source,
control.Options{})
control.Options{ToggleFeatures: control.Toggles{EnablePermissionsBackup: true}})
require.NotNil(t, coll)
assert.Equal(t, folderPath, coll.FullPath())
// Set a item reader, add an item and validate we get the item back
mockItem := models.NewDriveItem()
mockItem.SetId(&testItemID)
mockItem.SetFile(models.NewFile())
mockItem.SetName(&test.itemDeets.name)
mockItem.SetSize(&test.itemDeets.size)
mockItem.SetCreatedDateTime(&test.itemDeets.time)
@ -174,6 +186,18 @@ func (suite *CollectionUnitTestSuite) TestCollection() {
}
coll.itemReader = test.itemReader
coll.itemMetaReader = func(_ context.Context,
_ graph.Servicer,
_ string,
_ models.DriveItemable,
) (io.ReadCloser, int, error) {
metaJSON, err := json.Marshal(testItemMeta)
if err != nil {
return nil, 0, err
}
return io.NopCloser(bytes.NewReader(metaJSON)), len(metaJSON), nil
}
// Read items from the collection
wg.Add(1)
@ -184,28 +208,54 @@ func (suite *CollectionUnitTestSuite) TestCollection() {
wg.Wait()
if test.source == OneDriveSource {
require.Len(t, readItems, 2) // .data and .meta
} else {
require.Len(t, readItems, 1)
}
// Expect only 1 item
require.Equal(t, 1, collStatus.ObjectCount)
require.Equal(t, 1, collStatus.Successful)
// Validate item info and data
readItem := readItems[0]
readItemInfo := readItem.(data.StreamInfo)
readData, err := io.ReadAll(readItem.ToReader())
require.NoError(t, err)
assert.Equal(t, testItemData, readData)
// Expect only 1 item
require.Len(t, readItems, 1)
require.Equal(t, 1, collStatus.ObjectCount, "items iterated")
require.Equal(t, 1, collStatus.Successful, "items successful")
assert.Equal(t, testItemName, readItem.UUID())
if test.source == OneDriveSource {
assert.Equal(t, testItemName+DataFileSuffix, readItem.UUID())
} else {
assert.Equal(t, testItemName, readItem.UUID())
}
require.Implements(t, (*data.StreamModTime)(nil), readItem)
mt := readItem.(data.StreamModTime)
assert.Equal(t, now, mt.ModTime())
readData, err := io.ReadAll(readItem.ToReader())
require.NoError(t, err)
name, parentPath := test.infoFrom(t, readItemInfo.Info())
assert.Equal(t, testItemData, readData)
assert.Equal(t, testItemName, name)
assert.Equal(t, driveFolderPath, parentPath)
if test.source == OneDriveSource {
readItemMeta := readItems[1]
assert.Equal(t, testItemName+MetaFileSuffix, readItemMeta.UUID())
readMetaData, err := io.ReadAll(readItemMeta.ToReader())
require.NoError(t, err)
tm, err := json.Marshal(testItemMeta)
if err != nil {
t.Fatal("unable to marshall test permissions", err)
}
assert.Equal(t, tm, readMetaData)
}
})
}
}
@ -251,10 +301,11 @@ func (suite *CollectionUnitTestSuite) TestCollectionReadError() {
suite,
suite.testStatusUpdater(&wg, &collStatus),
test.source,
control.Options{})
control.Options{ToggleFeatures: control.Toggles{EnablePermissionsBackup: true}})
mockItem := models.NewDriveItem()
mockItem.SetId(&testItemID)
mockItem.SetFile(models.NewFile())
mockItem.SetName(&name)
mockItem.SetSize(&size)
mockItem.SetCreatedDateTime(&now)
@ -265,6 +316,14 @@ func (suite *CollectionUnitTestSuite) TestCollectionReadError() {
return details.ItemInfo{}, nil, assert.AnError
}
coll.itemMetaReader = func(_ context.Context,
_ graph.Servicer,
_ string,
_ models.DriveItemable,
) (io.ReadCloser, int, error) {
return io.NopCloser(strings.NewReader(`{}`)), 2, nil
}
collItem, ok := <-coll.Items()
assert.True(t, ok)
@ -279,3 +338,87 @@ func (suite *CollectionUnitTestSuite) TestCollectionReadError() {
})
}
}
// TestCollectionDisablePermissionsBackup verifies that when the
// EnablePermissionsBackup toggle is off, the collection still emits a .meta
// item for each file, but with its content replaced by an empty JSON object.
func (suite *CollectionUnitTestSuite) TestCollectionDisablePermissionsBackup() {
	table := []struct {
		name   string
		source driveSource
	}{
		{
			name:   "oneDrive",
			source: OneDriveSource,
		},
	}
	for _, test := range table {
		suite.T().Run(test.name, func(t *testing.T) {
			var (
				testItemID   = "fakeItemID"
				testItemName = "Fake Item"
				testItemSize = int64(10)

				collStatus = support.ConnectorOperationStatus{}
				wg         = sync.WaitGroup{}
			)

			wg.Add(1)

			folderPath, err := GetCanonicalPath("drive/driveID1/root:/folderPath", "a-tenant", "a-user", test.source)
			require.NoError(t, err)

			// EnablePermissionsBackup is deliberately left unset so the
			// permission metadata returned by itemMetaReader is dropped.
			coll := NewCollection(
				graph.HTTPClient(graph.NoTimeout()),
				folderPath,
				"fakeDriveID",
				suite,
				suite.testStatusUpdater(&wg, &collStatus),
				test.source,
				control.Options{ToggleFeatures: control.Toggles{}})

			now := time.Now()

			mockItem := models.NewDriveItem()
			mockItem.SetFile(models.NewFile())
			mockItem.SetId(&testItemID)
			mockItem.SetName(&testItemName)
			mockItem.SetSize(&testItemSize)
			mockItem.SetCreatedDateTime(&now)
			mockItem.SetLastModifiedDateTime(&now)
			coll.Add(mockItem)

			coll.itemReader = func(
				*http.Client,
				models.DriveItemable,
			) (details.ItemInfo, io.ReadCloser, error) {
				return details.ItemInfo{OneDrive: &details.OneDriveInfo{ItemName: "fakeName", Modified: time.Now()}},
					io.NopCloser(strings.NewReader("Fake Data!")),
					nil
			}

			// Returns non-empty permission metadata; it must NOT survive into
			// the emitted .meta item since the toggle is off.
			coll.itemMetaReader = func(_ context.Context,
				_ graph.Servicer,
				_ string,
				_ models.DriveItemable,
			) (io.ReadCloser, int, error) {
				return io.NopCloser(strings.NewReader(`{"key": "value"}`)), 16, nil
			}

			readItems := []data.Stream{}
			for item := range coll.Items() {
				readItems = append(readItems, item)
			}

			wg.Wait()

			// Expect exactly the one data item to be counted.
			require.Equal(t, 1, collStatus.ObjectCount)
			require.Equal(t, 1, collStatus.Successful)

			for _, i := range readItems {
				if strings.HasSuffix(i.UUID(), MetaFileSuffix) {
					content, err := io.ReadAll(i.ToReader())
					require.NoError(t, err)
					// require.Equal takes (expected, actual).
					require.Equal(t, []byte("{}"), content)
				}
			}
		})
	}
}

View File

@ -2,7 +2,9 @@ package onedrive
import (
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"strings"
@ -63,6 +65,19 @@ type Collections struct {
// for a OneDrive folder
CollectionMap map[string]data.Collection
// Not the most ideal, but allows us to change the pager function for testing
// as needed. This will allow us to mock out some scenarios during testing.
drivePagerFunc func(
source driveSource,
servicer graph.Servicer,
resourceOwner string,
fields []string,
) (drivePager, error)
itemPagerFunc func(
servicer graph.Servicer,
driveID, link string,
) itemPager
// Track stats from drive enumeration. Represents the items backed up.
NumItems int
NumFiles int
@ -80,23 +95,169 @@ func NewCollections(
ctrlOpts control.Options,
) *Collections {
return &Collections{
itemClient: itemClient,
tenant: tenant,
resourceOwner: resourceOwner,
source: source,
matcher: matcher,
CollectionMap: map[string]data.Collection{},
service: service,
statusUpdater: statusUpdater,
ctrl: ctrlOpts,
itemClient: itemClient,
tenant: tenant,
resourceOwner: resourceOwner,
source: source,
matcher: matcher,
CollectionMap: map[string]data.Collection{},
drivePagerFunc: PagerForSource,
itemPagerFunc: defaultItemPager,
service: service,
statusUpdater: statusUpdater,
ctrl: ctrlOpts,
}
}
// deserializeMetadata reads the items in the provided metadata collections
// (produced by a prior backup) and rebuilds two lookup tables keyed by drive
// ID: delta URL tokens (from graph.DeltaURLsFileName files) and folder
// ID -> path mappings (from graph.PreviousPathFileName files).
//
// A decode failure for an individual file is logged and the affected drives
// fall back to a full backup; a duplicate drive ID across files aborts with
// an error. Entries missing either half of the (delta, folders) pair are
// pruned so callers never see partial state.
func deserializeMetadata(
	ctx context.Context,
	cols []data.Collection,
) (map[string]string, map[string]map[string]string, error) {
	logger.Ctx(ctx).Infow(
		"deserialzing previous backup metadata",
		"num_collections",
		len(cols),
	)

	prevDeltas := map[string]string{}
	prevFolders := map[string]map[string]string{}

	for _, col := range cols {
		items := col.Items()

		// Drain the collection's item channel; breakLoop is set when the
		// channel closes since `break` alone only exits the select.
		for breakLoop := false; !breakLoop; {
			select {
			case <-ctx.Done():
				return nil, nil, errors.Wrap(ctx.Err(), "deserialzing previous backup metadata")

			case item, ok := <-items:
				if !ok {
					// End of collection items.
					breakLoop = true
					break
				}

				var err error

				switch item.UUID() {
				case graph.PreviousPathFileName:
					err = deserializeMap(item.ToReader(), prevFolders)

				case graph.DeltaURLsFileName:
					err = deserializeMap(item.ToReader(), prevDeltas)

				default:
					logger.Ctx(ctx).Infow(
						"skipping unknown metadata file",
						"file_name",
						item.UUID(),
					)

					continue
				}

				if err == nil {
					// Successful decode.
					continue
				}

				// This is conservative, but report an error if any of the items for
				// any of the deserialized maps have duplicate drive IDs. This will
				// cause the entire backup to fail, but it's not clear if higher
				// layers would have caught this. Worst case if we don't handle this
				// we end up in a situation where we're sourcing items from the wrong
				// base in kopia wrapper.
				if errors.Is(err, errExistingMapping) {
					return nil, nil, errors.Wrapf(
						err,
						"deserializing metadata file %s",
						item.UUID(),
					)
				}

				// Any other decode error: log and continue; the drive simply
				// won't have usable incremental state.
				logger.Ctx(ctx).Errorw(
					"deserializing base backup metadata. Falling back to full backup for selected drives",
					"error",
					err,
					"file_name",
					item.UUID(),
				)
			}
		}

		// Go through and remove partial results (i.e. path mapping but no delta URL
		// or vice-versa).
		//
		// NOTE(review): this pruning runs after every collection, not once after
		// all collections have been processed. If a drive's delta token and its
		// folder map were ever delivered in different collections, the earlier
		// half would be discarded before the later half is seen — confirm both
		// files for a drive always live in the same collection.
		for k, v := range prevDeltas {
			// Remove entries with an empty delta token as it's not useful.
			if len(v) == 0 {
				delete(prevDeltas, k)
				delete(prevFolders, k)
			}

			// Remove entries without a folders map as we can't tell kopia the
			// hierarchy changes.
			if _, ok := prevFolders[k]; !ok {
				delete(prevDeltas, k)
			}
		}

		for k := range prevFolders {
			if _, ok := prevDeltas[k]; !ok {
				delete(prevFolders, k)
			}
		}
	}

	return prevDeltas, prevFolders, nil
}
var errExistingMapping = errors.New("mapping already exists for same drive ID")
// deserializeMap decodes a JSON object from reader and merges the result into
// alreadyFound. The merge is all-or-nothing: if any decoded key is already
// present in alreadyFound, nothing is copied and errExistingMapping is
// returned. reader is always closed before returning.
func deserializeMap[T any](reader io.ReadCloser, alreadyFound map[string]T) error {
	defer reader.Close()

	fresh := map[string]T{}

	if err := json.NewDecoder(reader).Decode(&fresh); err != nil {
		return errors.Wrap(err, "deserializing file contents")
	}

	// Refuse the merge entirely on the first colliding key.
	for key := range fresh {
		if _, seen := alreadyFound[key]; seen {
			return errors.WithStack(errExistingMapping)
		}
	}

	maps.Copy(alreadyFound, fresh)

	return nil
}
// Retrieves drive data as set of `data.Collections` and a set of item names to
// be excluded from the upcoming backup.
func (c *Collections) Get(ctx context.Context) ([]data.Collection, map[string]struct{}, error) {
func (c *Collections) Get(
ctx context.Context,
prevMetadata []data.Collection,
) ([]data.Collection, map[string]struct{}, error) {
_, _, err := deserializeMetadata(ctx, prevMetadata)
if err != nil {
return nil, nil, err
}
// Enumerate drives for the specified resourceOwner
pager, err := PagerForSource(c.source, c.service, c.resourceOwner, nil)
pager, err := c.drivePagerFunc(c.source, c.service, c.resourceOwner, nil)
if err != nil {
return nil, nil, err
}
@ -128,7 +289,11 @@ func (c *Collections) Get(ctx context.Context) ([]data.Collection, map[string]st
delta, paths, excluded, err := collectItems(
ctx,
c.service,
c.itemPagerFunc(
c.service,
driveID,
"",
),
driveID,
driveName,
c.UpdateCollections,
@ -137,17 +302,21 @@ func (c *Collections) Get(ctx context.Context) ([]data.Collection, map[string]st
return nil, nil, err
}
// It's alright to have an empty folders map (i.e. no folders found) but not
// an empty delta token. This is because when deserializing the metadata we
// remove entries for which there is no corresponding delta token/folder. If
// we leave empty delta tokens then we may end up setting the State field
// for collections when not actually getting delta results.
if len(delta) > 0 {
deltaURLs[driveID] = delta
}
if len(paths) > 0 {
folderPaths[driveID] = map[string]string{}
for id, p := range paths {
folderPaths[driveID][id] = p
}
}
// Avoid the edge case where there's no paths but we do have a valid delta
// token. We can accomplish this by adding an empty paths map for this
// drive. If we don't have this then the next backup won't use the delta
// token because it thinks the folder paths weren't persisted.
folderPaths[driveID] = map[string]string{}
maps.Copy(folderPaths[driveID], paths)
maps.Copy(excludedItems, excluded)
}
@ -261,6 +430,12 @@ func (c *Collections) UpdateCollections(
// already created and partially populated.
updatePath(newPaths, *item.GetId(), folderPath.String())
if c.source != OneDriveSource {
continue
}
fallthrough
case item.GetFile() != nil:
if item.GetDeleted() != nil {
excluded[*item.GetId()] = struct{}{}
@ -276,6 +451,7 @@ func (c *Collections) UpdateCollections(
// the exclude list.
col, found := c.CollectionMap[collectionPath.String()]
if !found {
// TODO(ashmrtn): Compare old and new path and set collection state
// accordingly.
@ -290,13 +466,17 @@ func (c *Collections) UpdateCollections(
c.CollectionMap[collectionPath.String()] = col
c.NumContainers++
c.NumItems++
}
collection := col.(*Collection)
collection.Add(item)
c.NumFiles++
c.NumItems++
if item.GetFile() != nil {
// This is necessary as we have a fallthrough for
// folders and packages
c.NumFiles++
}
default:
return errors.Errorf("item type not supported. item name : %s", *item.GetName())

File diff suppressed because it is too large Load Diff

View File

@ -7,7 +7,6 @@ import (
"time"
msdrive "github.com/microsoftgraph/msgraph-sdk-go/drive"
msdrives "github.com/microsoftgraph/msgraph-sdk-go/drives"
"github.com/microsoftgraph/msgraph-sdk-go/models"
"github.com/microsoftgraph/msgraph-sdk-go/models/odataerrors"
"github.com/pkg/errors"
@ -81,7 +80,7 @@ func drives(
page, err = pager.GetPage(ctx)
if err != nil {
// Various error handling. May return an error or perform a retry.
detailedError := support.ConnectorStackErrorTrace(err)
detailedError := err.Error()
if strings.Contains(detailedError, userMysiteURLNotFound) ||
strings.Contains(detailedError, userMysiteNotFound) {
logger.Ctx(ctx).Infof("resource owner does not have a drive")
@ -135,11 +134,42 @@ type itemCollector func(
excluded map[string]struct{},
) error
// itemPager abstracts paged enumeration of drive items through a delta query
// so tests can substitute a mock implementation.
type itemPager interface {
	// GetPage fetches the next page of delta results.
	GetPage(context.Context) (gapi.DeltaPageLinker, error)
	// SetNext points the pager at the provided nextLink URL for the
	// following GetPage call.
	SetNext(nextLink string)
	// ValuesIn extracts the drive items contained in a fetched page.
	ValuesIn(gapi.DeltaPageLinker) ([]models.DriveItemable, error)
}
// defaultItemPager builds the production itemPager for enumerating a drive's
// items, selecting only the fields the backup pipeline consumes.
func defaultItemPager(
	servicer graph.Servicer,
	driveID, link string,
) itemPager {
	// Restrict the response payload to the properties we actually read.
	selectedFields := []string{
		"content.downloadUrl",
		"createdBy",
		"createdDateTime",
		"file",
		"folder",
		"id",
		"lastModifiedDateTime",
		"name",
		"package",
		"parentReference",
		"root",
		"size",
	}

	return api.NewItemPager(servicer, driveID, link, selectedFields)
}
// collectItems will enumerate all items in the specified drive and hand them to the
// provided `collector` method
func collectItems(
ctx context.Context,
service graph.Servicer,
pager itemPager,
driveID, driveName string,
collector itemCollector,
) (string, map[string]string, map[string]struct{}, error) {
@ -154,34 +184,8 @@ func collectItems(
maps.Copy(newPaths, oldPaths)
// TODO: Specify a timestamp in the delta query
// https://docs.microsoft.com/en-us/graph/api/driveitem-delta?
// view=graph-rest-1.0&tabs=http#example-4-retrieving-delta-results-using-a-timestamp
builder := service.Client().DrivesById(driveID).Root().Delta()
pageCount := int32(999) // max we can do is 999
requestFields := []string{
"content.downloadUrl",
"createdBy",
"createdDateTime",
"file",
"folder",
"id",
"lastModifiedDateTime",
"name",
"package",
"parentReference",
"root",
"size",
}
requestConfig := &msdrives.ItemRootDeltaRequestBuilderGetRequestConfiguration{
QueryParameters: &msdrives.ItemRootDeltaRequestBuilderGetQueryParameters{
Top: &pageCount,
Select: requestFields,
},
}
for {
r, err := builder.Get(ctx, requestConfig)
page, err := pager.GetPage(ctx)
if err != nil {
return "", nil, nil, errors.Wrapf(
err,
@ -190,23 +194,29 @@ func collectItems(
)
}
err = collector(ctx, driveID, driveName, r.GetValue(), oldPaths, newPaths, excluded)
vals, err := pager.ValuesIn(page)
if err != nil {
return "", nil, nil, errors.Wrap(err, "extracting items from response")
}
err = collector(ctx, driveID, driveName, vals, oldPaths, newPaths, excluded)
if err != nil {
return "", nil, nil, err
}
if r.GetOdataDeltaLink() != nil && len(*r.GetOdataDeltaLink()) > 0 {
newDeltaURL = *r.GetOdataDeltaLink()
nextLink, deltaLink := gapi.NextAndDeltaLink(page)
if len(deltaLink) > 0 {
newDeltaURL = deltaLink
}
// Check if there are more items
nextLink := r.GetOdataNextLink()
if nextLink == nil {
if len(nextLink) == 0 {
break
}
logger.Ctx(ctx).Debugf("Found %s nextLink", *nextLink)
builder = msdrives.NewItemRootDeltaRequestBuilder(*nextLink, service.Adapter())
logger.Ctx(ctx).Debugw("Found nextLink", "link", nextLink)
pager.SetNext(nextLink)
}
return newDeltaURL, newPaths, excluded, nil
@ -226,7 +236,16 @@ func getFolder(
rawURL := fmt.Sprintf(itemByPathRawURLFmt, driveID, parentFolderID, folderName)
builder := msdrive.NewItemsDriveItemItemRequestBuilder(rawURL, service.Adapter())
foundItem, err := builder.Get(ctx, nil)
var (
foundItem models.DriveItemable
err error
)
err = graph.RunWithRetry(func() error {
foundItem, err = builder.Get(ctx, nil)
return err
})
if err != nil {
var oDataError *odataerrors.ODataError
if errors.As(err, &oDataError) &&
@ -318,7 +337,11 @@ func GetAllFolders(
for _, d := range drives {
_, _, _, err = collectItems(
ctx,
gs,
defaultItemPager(
gs,
*d.GetId(),
"",
),
*d.GetId(),
*d.GetName(),
func(

View File

@ -15,6 +15,7 @@ import (
"github.com/alcionai/corso/src/internal/common"
"github.com/alcionai/corso/src/internal/connector/graph"
"github.com/alcionai/corso/src/internal/connector/graph/api"
"github.com/alcionai/corso/src/internal/connector/support"
"github.com/alcionai/corso/src/internal/tester"
"github.com/alcionai/corso/src/pkg/control"
"github.com/alcionai/corso/src/pkg/logger"
@ -76,6 +77,15 @@ func TestOneDriveUnitSuite(t *testing.T) {
suite.Run(t, new(OneDriveUnitSuite))
}
// odErr builds a minimal ODataError carrying the given error code, shaped
// like the errors the graph API returns.
func odErr(code string) *odataerrors.ODataError {
	mainErr := odataerrors.MainError{}
	mainErr.SetCode(&code)

	oerr := &odataerrors.ODataError{}
	oerr.SetError(&mainErr)

	return oerr
}
func (suite *OneDriveUnitSuite) TestDrives() {
numDriveResults := 4
emptyLink := ""
@ -84,26 +94,18 @@ func (suite *OneDriveUnitSuite) TestDrives() {
// These errors won't be the "correct" format when compared to what graph
// returns, but they're close enough to have the same info when the inner
// details are extracted via support package.
tmp := userMysiteURLNotFound
tmpMySiteURLNotFound := odataerrors.NewMainError()
tmpMySiteURLNotFound.SetMessage(&tmp)
mySiteURLNotFound := odataerrors.NewODataError()
mySiteURLNotFound.SetError(tmpMySiteURLNotFound)
tmp2 := userMysiteNotFound
tmpMySiteNotFound := odataerrors.NewMainError()
tmpMySiteNotFound.SetMessage(&tmp2)
mySiteNotFound := odataerrors.NewODataError()
mySiteNotFound.SetError(tmpMySiteNotFound)
tmp3 := contextDeadlineExceeded
tmpDeadlineExceeded := odataerrors.NewMainError()
tmpDeadlineExceeded.SetMessage(&tmp3)
deadlineExceeded := odataerrors.NewODataError()
deadlineExceeded.SetError(tmpDeadlineExceeded)
mySiteURLNotFound := support.ConnectorStackErrorTraceWrap(
odErr(userMysiteURLNotFound),
"maximum retries or unretryable",
)
mySiteNotFound := support.ConnectorStackErrorTraceWrap(
odErr(userMysiteNotFound),
"maximum retries or unretryable",
)
deadlineExceeded := support.ConnectorStackErrorTraceWrap(
odErr(contextDeadlineExceeded),
"maximum retries or unretryable",
)
resultDrives := make([]models.Driveable, 0, numDriveResults)
@ -462,8 +464,8 @@ func (suite *OneDriveSuite) TestOneDriveNewCollections() {
testFolderMatcher{scope},
service,
service.updateStatus,
control.Options{},
).Get(ctx)
control.Options{ToggleFeatures: control.Toggles{EnablePermissionsBackup: true}},
).Get(ctx, nil)
assert.NoError(t, err)
// Don't expect excludes as this isn't an incremental backup.
assert.Empty(t, excludes)

View File

@ -1,7 +1,9 @@
package onedrive
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
@ -37,6 +39,7 @@ func getDriveItem(
// sharePointItemReader will return a io.ReadCloser for the specified item
// It crafts this by querying M365 for a download URL for the item
// and using a http client to initialize a reader
// TODO: Add metadata fetching to SharePoint
func sharePointItemReader(
hc *http.Client,
item models.DriveItemable,
@ -53,6 +56,25 @@ func sharePointItemReader(
return dii, resp.Body, nil
}
// oneDriveItemMetaReader fetches the metadata (currently just permissions)
// for a drive item, serializes it to JSON, and returns a reader over the
// serialized bytes along with their length.
func oneDriveItemMetaReader(
	ctx context.Context,
	service graph.Servicer,
	driveID string,
	item models.DriveItemable,
) (io.ReadCloser, int, error) {
	meta, err := oneDriveItemMetaInfo(ctx, service, driveID, item)
	if err != nil {
		return nil, 0, err
	}

	serialized, err := json.Marshal(meta)
	if err != nil {
		return nil, 0, err
	}

	return io.NopCloser(bytes.NewReader(serialized)), len(serialized), nil
}
// oneDriveItemReader will return a io.ReadCloser for the specified item
// It crafts this by querying M365 for a download URL for the item
// and using a http client to initialize a reader
@ -60,16 +82,25 @@ func oneDriveItemReader(
hc *http.Client,
item models.DriveItemable,
) (details.ItemInfo, io.ReadCloser, error) {
resp, err := downloadItem(hc, item)
if err != nil {
return details.ItemInfo{}, nil, errors.Wrap(err, "downloading item")
var (
rc io.ReadCloser
isFile = item.GetFile() != nil
)
if isFile {
resp, err := downloadItem(hc, item)
if err != nil {
return details.ItemInfo{}, nil, errors.Wrap(err, "downloading item")
}
rc = resp.Body
}
dii := details.ItemInfo{
OneDrive: oneDriveItemInfo(item, *item.GetSize()),
}
return dii, resp.Body, nil
return dii, rc, nil
}
func downloadItem(hc *http.Client, item models.DriveItemable) (*http.Response, error) {
@ -105,6 +136,10 @@ func downloadItem(hc *http.Client, item models.DriveItemable) (*http.Response, e
return resp, graph.Err401Unauthorized
}
if resp.StatusCode == http.StatusInternalServerError {
return resp, graph.Err500InternalServerError
}
if resp.StatusCode == http.StatusServiceUnavailable {
return resp, graph.Err503ServiceUnavailable
}
@ -145,6 +180,59 @@ func oneDriveItemInfo(di models.DriveItemable, itemSize int64) *details.OneDrive
}
}
// oneDriveItemMetaInfo fetches the meta information for a drive item. As of
// now it only gathers the permissions applicable to a OneDrive item.
func oneDriveItemMetaInfo(
	ctx context.Context, service graph.Servicer,
	driveID string, di models.DriveItemable,
) (Metadata, error) {
	perms, err := service.Client().
		DrivesById(driveID).
		ItemsById(*di.GetId()).
		Permissions().
		Get(ctx, nil)
	if err != nil {
		return Metadata{}, err
	}

	return Metadata{Permissions: filterUserPermissions(perms.GetValue())}, nil
}
// filterUserPermissions converts the graph permission entries on an item into
// Corso's UserPermission representation. Entries that cannot be attributed to
// a concrete user (link shares, group/application grants, or grants with no
// email address) are dropped, as are entries whose only role is "owner".
func filterUserPermissions(perms []models.Permissionable) []UserPermission {
	up := []UserPermission{}

	for _, p := range perms {
		gv2 := p.GetGrantedToV2()
		if gv2 == nil {
			// For link shares, we get permissions without a user
			// specified
			continue
		}

		roles := []string{}

		for _, r := range p.GetRoles() {
			// Skip if the only role available is owner
			if r != "owner" {
				roles = append(roles, r)
			}
		}

		if len(roles) == 0 {
			continue
		}

		// Guard against grants not bound to a user (e.g. groups or
		// applications) or lacking an email address; dereferencing those
		// unconditionally would panic.
		user := gv2.GetUser()
		if user == nil {
			continue
		}

		email, ok := user.GetAdditionalData()["email"].(*string)
		if !ok || email == nil {
			continue
		}

		up = append(up, UserPermission{
			ID:         *p.GetId(),
			Roles:      roles,
			Email:      *email,
			Expiration: p.GetExpirationDateTime(),
		})
	}

	return up
}
// sharePointItemInfo will populate a details.SharePointInfo struct
// with properties from the drive item. ItemSize is specified
// separately for restore processes because the local itemable

View File

@ -8,6 +8,7 @@ import (
msgraphsdk "github.com/microsoftgraph/msgraph-sdk-go"
"github.com/microsoftgraph/msgraph-sdk-go/models"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
@ -115,7 +116,17 @@ func (suite *ItemIntegrationSuite) TestItemReader_oneDrive() {
return nil
}
_, _, _, err := collectItems(ctx, suite, suite.userDriveID, "General", itemCollector)
_, _, _, err := collectItems(
ctx,
defaultItemPager(
suite,
suite.userDriveID,
"",
),
suite.userDriveID,
"General",
itemCollector,
)
require.NoError(suite.T(), err)
// Test Requirement 2: Need a file
@ -128,8 +139,8 @@ func (suite *ItemIntegrationSuite) TestItemReader_oneDrive() {
)
// Read data for the file
itemInfo, itemData, err := oneDriveItemReader(graph.HTTPClient(graph.NoTimeout()), driveItem)
require.NoError(suite.T(), err)
require.NotNil(suite.T(), itemInfo.OneDrive)
require.NotEmpty(suite.T(), itemInfo.OneDrive.ItemName)
@ -247,3 +258,72 @@ func (suite *ItemIntegrationSuite) TestDriveGetFolder() {
})
}
}
// getPermsUperms builds a matched pair: a graph permission entry for the
// given user granting the provided scopes, and the UserPermission that
// filterUserPermissions is expected to produce from it. Both sides honor the
// caller-supplied scopes (previously the argument was ignored and "read" was
// hard-coded).
func getPermsUperms(permID, userID string, scopes []string) (models.Permissionable, UserPermission) {
	identity := models.NewIdentity()
	identity.SetAdditionalData(map[string]any{"email": &userID})

	sharepointIdentity := models.NewSharePointIdentitySet()
	sharepointIdentity.SetUser(identity)

	perm := models.NewPermission()
	perm.SetId(&permID)
	perm.SetRoles(scopes)
	perm.SetGrantedToV2(sharepointIdentity)

	uperm := UserPermission{
		ID:    permID,
		Roles: scopes,
		Email: userID,
	}

	return perm, uperm
}
// TestOneDrivePermissionsFilter exercises filterUserPermissions across
// link-share, single-user, and multi-user permission sets. Each case runs as
// a named subtest so failures are attributed (the name field was previously
// declared but never used).
func TestOneDrivePermissionsFilter(t *testing.T) {
	permID := "fakePermId"
	userID := "fakeuser@provider.com"
	userID2 := "fakeuser2@provider.com"

	readPerm, readUperm := getPermsUperms(permID, userID, []string{"read"})
	readWritePerm, readWriteUperm := getPermsUperms(permID, userID2, []string{"read", "write"})

	noPerm, _ := getPermsUperms(permID, userID, []string{"read"})
	noPerm.SetGrantedToV2(nil) // eg: link shares

	cases := []struct {
		name              string
		graphPermissions  []models.Permissionable
		parsedPermissions []UserPermission
	}{
		{
			name:              "no perms",
			graphPermissions:  []models.Permissionable{},
			parsedPermissions: []UserPermission{},
		},
		{
			name:              "no user bound to perms",
			graphPermissions:  []models.Permissionable{noPerm},
			parsedPermissions: []UserPermission{},
		},
		{
			name:              "user with read permissions",
			graphPermissions:  []models.Permissionable{readPerm},
			parsedPermissions: []UserPermission{readUperm},
		},
		{
			name:              "user with read and write permissions",
			graphPermissions:  []models.Permissionable{readWritePerm},
			parsedPermissions: []UserPermission{readWriteUperm},
		},
		{
			name:              "multiple users with separate permissions",
			graphPermissions:  []models.Permissionable{readPerm, readWritePerm},
			parsedPermissions: []UserPermission{readUperm, readWriteUperm},
		},
	}
	for _, tc := range cases {
		tc := tc

		t.Run(tc.name, func(t *testing.T) {
			actual := filterUserPermissions(tc.graphPermissions)
			assert.ElementsMatch(t, tc.parsedPermissions, actual)
		})
	}
}

View File

@ -2,9 +2,15 @@ package onedrive
import (
"context"
"encoding/json"
"fmt"
"io"
"runtime/trace"
"sort"
"strings"
msdrive "github.com/microsoftgraph/msgraph-sdk-go/drive"
"github.com/microsoftgraph/msgraph-sdk-go/models"
"github.com/pkg/errors"
"github.com/alcionai/corso/src/internal/connector/graph"
@ -23,30 +29,101 @@ const (
// Microsoft recommends 5-10MB buffers
// https://docs.microsoft.com/en-us/graph/api/driveitem-createuploadsession?view=graph-rest-1.0#best-practices
copyBufferSize = 5 * 1024 * 1024
// versionWithDataAndMetaFiles is the corso backup format version
// in which we split from storing just the data to storing both
// the data and metadata in two files.
versionWithDataAndMetaFiles = 1
)
// getParentPermissions looks up the permissions previously recorded for
// parentPath. A missing entry is only acceptable for the drive root (no
// folder segments); for any other path it means the parent folder was not
// restored before its children, which is an error.
func getParentPermissions(
	parentPath path.Path,
	parentPermissions map[string][]UserPermission,
) ([]UserPermission, error) {
	parentPerms, ok := parentPermissions[parentPath.String()]
	if !ok {
		onedrivePath, err := path.ToOneDrivePath(parentPath)
		if err != nil {
			return nil, errors.Wrap(err, "invalid restore path")
		}

		if len(onedrivePath.Folders) != 0 {
			// errors.Wrap returns nil when err is nil, so wrapping here
			// silently returned (nil, nil); errors.New actually reports
			// the failure.
			return nil, errors.New("unable to compute item permissions")
		}

		parentPerms = []UserPermission{}
	}

	return parentPerms, nil
}
// RestoreCollections will restore the specified data collections into OneDrive
func RestoreCollections(
ctx context.Context,
backupVersion int,
service graph.Servicer,
dest control.RestoreDestination,
opts control.Options,
dcs []data.Collection,
deets *details.Builder,
) (*support.ConnectorOperationStatus, error) {
var (
restoreMetrics support.CollectionMetrics
restoreErrors error
metrics support.CollectionMetrics
folderPerms map[string][]UserPermission
canceled bool
// permissionIDMappings is used to map between old and new id
// of permissions as we restore them
permissionIDMappings = map[string]string{}
)
errUpdater := func(id string, err error) {
restoreErrors = support.WrapAndAppend(id, err, restoreErrors)
}
// Reorder collections so that the parents directories are created
// before the child directories
sort.Slice(dcs, func(i, j int) bool {
return dcs[i].FullPath().String() < dcs[j].FullPath().String()
})
parentPermissions := map[string][]UserPermission{}
// Iterate through the data collections and restore the contents of each
for _, dc := range dcs {
temp, canceled := RestoreCollection(ctx, service, dc, OneDriveSource, dest.ContainerName, deets, errUpdater)
var (
parentPerms []UserPermission
err error
)
restoreMetrics.Combine(temp)
if opts.RestorePermissions {
parentPerms, err = getParentPermissions(dc.FullPath(), parentPermissions)
if err != nil {
errUpdater(dc.FullPath().String(), err)
}
}
metrics, folderPerms, permissionIDMappings, canceled = RestoreCollection(
ctx,
backupVersion,
service,
dc,
parentPerms,
OneDriveSource,
dest.ContainerName,
deets,
errUpdater,
permissionIDMappings,
opts.RestorePermissions,
)
for k, v := range folderPerms {
parentPermissions[k] = v
}
restoreMetrics.Combine(metrics)
if canceled {
break
@ -66,29 +143,37 @@ func RestoreCollections(
// RestoreCollection handles restoration of an individual collection.
// returns:
// - the collection's item and byte count metrics
// - the context cancellation state (true if the context is cancelled)
// - the context cancellation state (true if the context is canceled)
func RestoreCollection(
ctx context.Context,
backupVersion int,
service graph.Servicer,
dc data.Collection,
parentPerms []UserPermission,
source driveSource,
restoreContainerName string,
deets *details.Builder,
errUpdater func(string, error),
) (support.CollectionMetrics, bool) {
permissionIDMappings map[string]string,
restorePerms bool,
) (support.CollectionMetrics, map[string][]UserPermission, map[string]string, bool) {
ctx, end := D.Span(ctx, "gc:oneDrive:restoreCollection", D.Label("path", dc.FullPath()))
defer end()
var (
metrics = support.CollectionMetrics{}
copyBuffer = make([]byte, copyBufferSize)
directory = dc.FullPath()
metrics = support.CollectionMetrics{}
copyBuffer = make([]byte, copyBufferSize)
directory = dc.FullPath()
restoredIDs = map[string]string{}
itemInfo details.ItemInfo
itemID string
folderPerms = map[string][]UserPermission{}
)
drivePath, err := path.ToOneDrivePath(directory)
if err != nil {
errUpdater(directory.String(), err)
return metrics, false
return metrics, folderPerms, permissionIDMappings, false
}
// Assemble folder hierarchy we're going to restore into (we recreate the folder hierarchy
@ -108,7 +193,7 @@ func RestoreCollection(
restoreFolderID, err := CreateRestoreFolders(ctx, service, drivePath.DriveID, restoreFolderElements)
if err != nil {
errUpdater(directory.String(), errors.Wrapf(err, "failed to create folders %v", restoreFolderElements))
return metrics, false
return metrics, folderPerms, permissionIDMappings, false
}
// Restore items from the collection
@ -118,50 +203,175 @@ func RestoreCollection(
select {
case <-ctx.Done():
errUpdater("context canceled", ctx.Err())
return metrics, true
return metrics, folderPerms, permissionIDMappings, true
case itemData, ok := <-items:
if !ok {
return metrics, false
}
metrics.Objects++
metrics.TotalBytes += int64(len(copyBuffer))
itemInfo, err := restoreItem(ctx,
service,
itemData,
drivePath.DriveID,
restoreFolderID,
copyBuffer,
source)
if err != nil {
errUpdater(itemData.UUID(), err)
continue
return metrics, folderPerms, permissionIDMappings, false
}
itemPath, err := dc.FullPath().Append(itemData.UUID(), true)
if err != nil {
logger.Ctx(ctx).DPanicw("transforming item to full path", "error", err)
errUpdater(itemData.UUID(), err)
continue
}
deets.Add(
itemPath.String(),
itemPath.ShortRef(),
"",
true,
itemInfo)
if source == OneDriveSource && backupVersion >= versionWithDataAndMetaFiles {
name := itemData.UUID()
if strings.HasSuffix(name, DataFileSuffix) {
metrics.Objects++
metrics.TotalBytes += int64(len(copyBuffer))
trimmedName := strings.TrimSuffix(name, DataFileSuffix)
metrics.Successes++
itemID, itemInfo, err = restoreData(ctx, service, trimmedName, itemData,
drivePath.DriveID, restoreFolderID, copyBuffer, source)
if err != nil {
errUpdater(itemData.UUID(), err)
continue
}
restoredIDs[trimmedName] = itemID
deets.Add(itemPath.String(), itemPath.ShortRef(), "", true, itemInfo)
// Mark it as success without processing .meta
// file if we are not restoring permissions
if !restorePerms {
metrics.Successes++
}
} else if strings.HasSuffix(name, MetaFileSuffix) {
if !restorePerms {
continue
}
meta, err := getMetadata(itemData.ToReader())
if err != nil {
errUpdater(itemData.UUID(), err)
continue
}
trimmedName := strings.TrimSuffix(name, MetaFileSuffix)
restoreID, ok := restoredIDs[trimmedName]
if !ok {
errUpdater(itemData.UUID(), fmt.Errorf("item not available to restore permissions"))
continue
}
permissionIDMappings, err = restorePermissions(
ctx,
service,
drivePath.DriveID,
restoreID,
parentPerms,
meta.Permissions,
permissionIDMappings,
)
if err != nil {
errUpdater(itemData.UUID(), err)
continue
}
// Objects count is incremented when we restore a
// data file and success count is incremented when
// we restore a meta file as every data file
// should have an associated meta file
metrics.Successes++
} else if strings.HasSuffix(name, DirMetaFileSuffix) {
trimmedName := strings.TrimSuffix(name, DirMetaFileSuffix)
folderID, err := createRestoreFolder(
ctx,
service,
drivePath.DriveID,
trimmedName,
restoreFolderID,
)
if err != nil {
errUpdater(itemData.UUID(), err)
continue
}
if !restorePerms {
continue
}
meta, err := getMetadata(itemData.ToReader())
if err != nil {
errUpdater(itemData.UUID(), err)
continue
}
permissionIDMappings, err = restorePermissions(
ctx,
service,
drivePath.DriveID,
folderID,
parentPerms,
meta.Permissions,
permissionIDMappings,
)
if err != nil {
errUpdater(itemData.UUID(), err)
continue
}
trimmedPath := strings.TrimSuffix(itemPath.String(), DirMetaFileSuffix)
folderPerms[trimmedPath] = meta.Permissions
} else {
if !ok {
errUpdater(itemData.UUID(), fmt.Errorf("invalid backup format, you might be using an old backup"))
continue
}
}
} else {
metrics.Objects++
metrics.TotalBytes += int64(len(copyBuffer))
// No permissions stored at the moment for SharePoint
_, itemInfo, err = restoreData(ctx,
service,
itemData.UUID(),
itemData,
drivePath.DriveID,
restoreFolderID,
copyBuffer,
source)
if err != nil {
errUpdater(itemData.UUID(), err)
continue
}
deets.Add(itemPath.String(), itemPath.ShortRef(), "", true, itemInfo)
metrics.Successes++
}
}
}
}
// createRestoreFolder creates a single folder under parentFolderID in the
// given drive and returns the ID of the newly created folder.
//
// NOTE(review): unlike CreateRestoreFolders, this always issues a create
// call; it does not check whether the folder already exists. Permissions
// are not handled here — the caller restores them separately.
func createRestoreFolder(
	ctx context.Context,
	service graph.Servicer,
	driveID, folder, parentFolderID string,
) (string, error) {
	// newItem(folder, true): the boolean marks the item as a folder.
	folderItem, err := createItem(ctx, service, driveID, parentFolderID, newItem(folder, true))
	if err != nil {
		return "", errors.Wrapf(
			err,
			"failed to create folder %s/%s. details: %s", parentFolderID, folder,
			support.ConnectorStackErrorTrace(err),
		)
	}

	logger.Ctx(ctx).Debugf("Resolved %s in %s to %s", folder, parentFolderID, *folderItem.GetId())

	return *folderItem.GetId(), nil
}
// createRestoreFolders creates the restore folder hierarchy in the specified drive and returns the folder ID
// of the last folder entry in the hierarchy
func CreateRestoreFolders(ctx context.Context, service graph.Servicer, driveID string, restoreFolders []string,
) (string, error) {
driveRoot, err := service.Client().DrivesById(driveID).Root().Get(ctx, nil)
@ -209,15 +419,16 @@ func CreateRestoreFolders(ctx context.Context, service graph.Servicer, driveID s
return parentFolderID, nil
}
// restoreItem will create a new item in the specified `parentFolderID` and upload the data.Stream
func restoreItem(
// restoreData will create a new item in the specified `parentFolderID` and upload the data.Stream
func restoreData(
ctx context.Context,
service graph.Servicer,
name string,
itemData data.Stream,
driveID, parentFolderID string,
copyBuffer []byte,
source driveSource,
) (details.ItemInfo, error) {
) (string, details.ItemInfo, error) {
ctx, end := D.Span(ctx, "gc:oneDrive:restoreItem", D.Label("item_uuid", itemData.UUID()))
defer end()
@ -227,19 +438,19 @@ func restoreItem(
// Get the stream size (needed to create the upload session)
ss, ok := itemData.(data.StreamSize)
if !ok {
return details.ItemInfo{}, errors.Errorf("item %q does not implement DataStreamInfo", itemName)
return "", details.ItemInfo{}, errors.Errorf("item %q does not implement DataStreamInfo", itemName)
}
// Create Item
newItem, err := createItem(ctx, service, driveID, parentFolderID, newItem(itemData.UUID(), false))
newItem, err := createItem(ctx, service, driveID, parentFolderID, newItem(name, false))
if err != nil {
return details.ItemInfo{}, errors.Wrapf(err, "failed to create item %s", itemName)
return "", details.ItemInfo{}, errors.Wrapf(err, "failed to create item %s", itemName)
}
// Get a drive item writer
w, err := driveItemWriter(ctx, service, driveID, *newItem.GetId(), ss.Size())
if err != nil {
return details.ItemInfo{}, errors.Wrapf(err, "failed to create item upload session %s", itemName)
return "", details.ItemInfo{}, errors.Wrapf(err, "failed to create item upload session %s", itemName)
}
iReader := itemData.ToReader()
@ -250,7 +461,7 @@ func restoreItem(
// Upload the stream data
written, err := io.CopyBuffer(w, progReader, copyBuffer)
if err != nil {
return details.ItemInfo{}, errors.Wrapf(err, "failed to upload data: item %s", itemName)
return "", details.ItemInfo{}, errors.Wrapf(err, "failed to upload data: item %s", itemName)
}
dii := details.ItemInfo{}
@ -262,5 +473,129 @@ func restoreItem(
dii.OneDrive = oneDriveItemInfo(newItem, written)
}
return dii, nil
return *newItem.GetId(), dii, nil
}
// getMetadata reads and parses the metadata info for an item.
// A nil reader (seen for the top-level container folder) yields a
// zero-value Metadata with no error.
func getMetadata(metar io.ReadCloser) (Metadata, error) {
	var meta Metadata

	if metar == nil {
		// Top-level container folder has no metadata file.
		return meta, nil
	}

	raw, err := io.ReadAll(metar)
	if err != nil {
		return Metadata{}, err
	}

	if err := json.Unmarshal(raw, &meta); err != nil {
		return Metadata{}, err
	}

	return meta, nil
}
// getChildPermissions filters out permissions present in the parent from
// the ones that are available for the child, returning (added, removed):
// added are permissions on the child but not the parent; removed are
// permissions on the parent but not the child. This is necessary as we
// store the nested permissions in the child. We cannot avoid storing the
// nested permissions as it is possible that a file in a folder can remove
// the nested permission that is present on itself.
//
// Membership is determined solely by the permission ID. Uses ID sets for
// O(n+m) instead of the previous O(n*m) nested scans.
func getChildPermissions(childPermissions, parentPermissions []UserPermission) ([]UserPermission, []UserPermission) {
	parentIDs := make(map[string]struct{}, len(parentPermissions))
	for _, pp := range parentPermissions {
		parentIDs[pp.ID] = struct{}{}
	}

	childIDs := make(map[string]struct{}, len(childPermissions))
	for _, cp := range childPermissions {
		childIDs[cp.ID] = struct{}{}
	}

	addedPermissions := []UserPermission{}
	removedPermissions := []UserPermission{}

	for _, cp := range childPermissions {
		if _, ok := parentIDs[cp.ID]; !ok {
			addedPermissions = append(addedPermissions, cp)
		}
	}

	for _, pp := range parentPermissions {
		if _, ok := childIDs[pp.ID]; !ok {
			removedPermissions = append(removedPermissions, pp)
		}
	}

	return addedPermissions, removedPermissions
}
// restorePermissions takes in the permissions that were added and the
// removed ones (present in parent but not in child) and adds/removes
// the necessary permissions on onedrive objects.
//
// permissionIDMappings maps backed-up permission IDs to the IDs of the
// permissions recreated during this restore. It is consumed here (to
// translate IDs for deletes), extended (for newly created permissions),
// and returned so the caller can thread it through subsequent items.
func restorePermissions(
	ctx context.Context,
	service graph.Servicer,
	driveID string,
	itemID string,
	parentPerms []UserPermission,
	childPerms []UserPermission,
	permissionIDMappings map[string]string,
) (map[string]string, error) {
	// Diff child against parent to find what to add and what to delete.
	permAdded, permRemoved := getChildPermissions(childPerms, parentPerms)

	for _, p := range permRemoved {
		// Delete by the restored permission's ID, not the backed-up ID.
		// NOTE(review): if p.ID is absent from permissionIDMappings this
		// issues a delete with an empty ID — confirm the mapping is always
		// populated before a parent permission can be removed.
		err := service.Client().DrivesById(driveID).ItemsById(itemID).
			PermissionsById(permissionIDMappings[p.ID]).Delete(ctx, nil)
		if err != nil {
			return permissionIDMappings, errors.Wrapf(
				err,
				"failed to remove permission for item %s. details: %s",
				itemID,
				support.ConnectorStackErrorTrace(err),
			)
		}
	}

	for _, p := range permAdded {
		// Re-grant via the Invite API, suppressing the notification email
		// and requiring sign-in.
		pbody := msdrive.NewItemsItemInvitePostRequestBody()
		pbody.SetRoles(p.Roles)

		if p.Expiration != nil {
			expiry := p.Expiration.String()
			pbody.SetExpirationDateTime(&expiry)
		}

		si := false
		pbody.SetSendInvitation(&si)

		rs := true
		pbody.SetRequireSignIn(&rs)

		rec := models.NewDriveRecipient()
		rec.SetEmail(&p.Email)
		pbody.SetRecipients([]models.DriveRecipientable{rec})

		np, err := service.Client().DrivesById(driveID).ItemsById(itemID).Invite().Post(ctx, pbody, nil)
		if err != nil {
			return permissionIDMappings, errors.Wrapf(
				err,
				"failed to set permission for item %s. details: %s",
				itemID,
				support.ConnectorStackErrorTrace(err),
			)
		}

		// Record old-ID -> new-ID so later children can translate.
		// NOTE(review): assumes the Invite response always contains at least
		// one created permission — GetValue()[0] panics otherwise; confirm.
		permissionIDMappings[p.ID] = *np.GetValue()[0].GetId()
	}

	return permissionIDMappings, nil
}

View File

@ -0,0 +1,6 @@
package api
// Tuple pairs a SharePoint object's display name with its M365 ID.
type Tuple struct {
	// Name is the human-readable name of the object.
	Name string
	// ID is the M365 identifier used for API lookups.
	ID string
}

View File

@ -0,0 +1,21 @@
package api
import (
"testing"
"github.com/alcionai/corso/src/internal/connector/discovery/api"
"github.com/alcionai/corso/src/internal/connector/graph"
"github.com/alcionai/corso/src/pkg/account"
"github.com/stretchr/testify/require"
)
// createTestBetaService builds a BetaService for integration tests from a
// Graph adapter constructed with the supplied M365 credentials.
// Fails the test immediately if the adapter cannot be created.
func createTestBetaService(t *testing.T, credentials account.M365Config) *api.BetaService {
	adapter, err := graph.CreateAdapter(
		credentials.AzureTenantID,
		credentials.AzureClientID,
		credentials.AzureClientSecret,
	)
	require.NoError(t, err)

	return api.NewBetaService(adapter)
}

View File

@ -0,0 +1,93 @@
package api
import (
"context"
"github.com/alcionai/corso/src/internal/connector/discovery/api"
"github.com/alcionai/corso/src/internal/connector/graph/betasdk/models"
"github.com/alcionai/corso/src/internal/connector/graph/betasdk/sites"
"github.com/alcionai/corso/src/internal/connector/support"
)
// GetSitePage retrieves the given Pages (by ID) belonging to the given
// Site, expanded with their canvasLayout content.
// Returns an error if any individual page fetch fails.
func GetSitePage(
	ctx context.Context,
	serv *api.BetaService,
	siteID string,
	pages []string,
) ([]models.SitePageable, error) {
	col := make([]models.SitePageable, 0)
	opts := retrieveSitePageOptions()

	// Fetch each requested page individually; fail fast on the first error.
	for _, entry := range pages {
		page, err := serv.Client().SitesById(siteID).PagesById(entry).Get(ctx, opts)
		if err != nil {
			return nil, support.ConnectorStackErrorTraceWrap(err, "fetching page: "+entry)
		}

		col = append(col, page)
	}

	return col, nil
}
// FetchPages returns (ID, Name) tuples for every Page in the site,
// following @odata.nextLink pagination until the collection is exhausted.
// A page without a name falls back to using its ID as the name.
func FetchPages(ctx context.Context, bs *api.BetaService, siteID string) ([]Tuple, error) {
	var (
		builder    = bs.Client().SitesById(siteID).Pages()
		opts       = fetchPageOptions()
		pageTuples = make([]Tuple, 0)
	)

	for {
		resp, err := builder.Get(ctx, opts)
		if err != nil {
			return nil, support.ConnectorStackErrorTraceWrap(err, "failed fetching site page")
		}

		for _, entry := range resp.GetValue() {
			pid := *entry.GetId()
			// Default Name to the ID; overwritten below when a name exists.
			temp := Tuple{pid, pid}

			if entry.GetName() != nil {
				temp.Name = *entry.GetName()
			}

			pageTuples = append(pageTuples, temp)
		}

		if resp.GetOdataNextLink() == nil {
			break
		}

		// Continue from the server-provided next-page link.
		builder = sites.NewItemPagesRequestBuilder(*resp.GetOdataNextLink(), bs.Client().Adapter())
	}

	return pageTuples, nil
}
// fetchPageOptions returns request options that trim Site Page responses
// down to the minimal fields ("id", "name") relating to enumeration.
// Pages API: https://learn.microsoft.com/en-us/graph/api/resources/sitepage?view=graph-rest-beta
func fetchPageOptions() *sites.ItemPagesRequestBuilderGetRequestConfiguration {
	return &sites.ItemPagesRequestBuilderGetRequestConfiguration{
		QueryParameters: &sites.ItemPagesRequestBuilderGetQueryParameters{
			Select: []string{"id", "name"},
		},
	}
}
// retrieveSitePageOptions returns request options that expand the
// canvasLayout of a Site Page so its full content is included.
func retrieveSitePageOptions() *sites.ItemPagesSitePageItemRequestBuilderGetRequestConfiguration {
	return &sites.ItemPagesSitePageItemRequestBuilderGetRequestConfiguration{
		QueryParameters: &sites.ItemPagesSitePageItemRequestBuilderGetQueryParameters{
			Expand: []string{"canvasLayout"},
		},
	}
}

View File

@ -0,0 +1,71 @@
package api
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"github.com/alcionai/corso/src/internal/tester"
"github.com/alcionai/corso/src/pkg/account"
)
// SharePointPageSuite exercises SharePoint Page fetch/get calls against a
// live M365 site; it requires M365 account credentials in the environment.
type SharePointPageSuite struct {
	suite.Suite
	// siteID is the M365 site targeted by the tests.
	siteID string
	// creds holds the M365 account configuration used to build services.
	creds account.M365Config
}
// SetupSuite loads M365 credentials and the target site ID from the test
// environment before any test in the suite runs.
func (suite *SharePointPageSuite) SetupSuite() {
	t := suite.T()

	// Skips/fails early if the required env vars are not present.
	tester.MustGetEnvSets(t, tester.M365AcctCredEnvs)

	suite.siteID = tester.M365SiteID(t)
	a := tester.NewM365Account(t)
	m365, err := a.M365Config()
	require.NoError(t, err)
	suite.creds = m365
}
// TestSharePointPageSuite runs the page suite only when CI or the
// SharePoint graph-connector test sets are enabled.
func TestSharePointPageSuite(t *testing.T) {
	tester.RunOnAny(
		t,
		tester.CorsoCITests,
		tester.CorsoGraphConnectorSharePointTests)

	suite.Run(t, new(SharePointPageSuite))
}
// TestFetchPages verifies that page enumeration returns a non-empty set of
// (ID, Name) tuples for the suite's site.
func (suite *SharePointPageSuite) TestFetchPages() {
	ctx, flush := tester.NewContext()
	defer flush()

	t := suite.T()

	service := createTestBetaService(t, suite.creds)

	pgs, err := FetchPages(ctx, service, suite.siteID)
	assert.NoError(t, err)
	require.NotNil(t, pgs)
	assert.NotZero(t, len(pgs))

	for _, entry := range pgs {
		t.Logf("id: %s\t name: %s\n", entry.ID, entry.Name)
	}
}
// TestGetSitePage verifies that a page listed by FetchPages can be
// retrieved individually with its expanded content.
func (suite *SharePointPageSuite) TestGetSitePage() {
	ctx, flush := tester.NewContext()
	defer flush()

	t := suite.T()

	service := createTestBetaService(t, suite.creds)

	tuples, err := FetchPages(ctx, service, suite.siteID)
	require.NoError(t, err)
	// Guard against an empty (but non-nil) result: indexing tuples[0]
	// below would otherwise panic instead of failing with a clear message.
	require.NotEmpty(t, tuples)

	jobs := []string{tuples[0].ID}

	pages, err := GetSitePage(ctx, service, suite.siteID, jobs)
	assert.NoError(t, err)
	assert.NotEmpty(t, pages)
}

View File

@ -9,6 +9,7 @@ import (
kw "github.com/microsoft/kiota-serialization-json-go"
"github.com/microsoftgraph/msgraph-sdk-go/models"
"github.com/alcionai/corso/src/internal/connector/discovery/api"
"github.com/alcionai/corso/src/internal/connector/graph"
"github.com/alcionai/corso/src/internal/connector/support"
"github.com/alcionai/corso/src/internal/data"
@ -46,6 +47,7 @@ type Collection struct {
jobs []string
// M365 IDs of the items of this collection
service graph.Servicer
betaService *api.BetaService
statusUpdater support.StatusUpdater
}

View File

@ -17,11 +17,27 @@ import (
"github.com/alcionai/corso/src/internal/connector/support"
"github.com/alcionai/corso/src/internal/data"
"github.com/alcionai/corso/src/internal/tester"
"github.com/alcionai/corso/src/pkg/account"
"github.com/alcionai/corso/src/pkg/control"
"github.com/alcionai/corso/src/pkg/path"
)
// SharePointCollectionSuite runs integration tests for SharePoint
// collections against a live M365 site.
type SharePointCollectionSuite struct {
	suite.Suite
	// siteID is the M365 site targeted by the tests.
	siteID string
	// creds holds the M365 account configuration used to build services.
	creds account.M365Config
}
// SetupSuite loads M365 credentials and the target site ID from the test
// environment before any test in the suite runs.
func (suite *SharePointCollectionSuite) SetupSuite() {
	t := suite.T()

	// Skips/fails early if the required env vars are not present.
	tester.MustGetEnvSets(t, tester.M365AcctCredEnvs)

	suite.siteID = tester.M365SiteID(t)
	a := tester.NewM365Account(t)
	m365, err := a.M365Config()
	require.NoError(t, err)
	suite.creds = m365
}
func TestSharePointCollectionSuite(t *testing.T) {
@ -95,20 +111,33 @@ func (suite *SharePointCollectionSuite) TestSharePointListCollection() {
assert.Equal(t, testName, shareInfo.Info().SharePoint.ItemName)
}
// TestCollectPages exercises page-collection construction against a live
// site. A nil servicer and nil scope are passed alongside a mock status
// updater; collectPages builds its own beta client internally.
func (suite *SharePointCollectionSuite) TestCollectPages() {
	ctx, flush := tester.NewContext()
	defer flush()

	t := suite.T()

	col, err := collectPages(
		ctx,
		suite.creds,
		nil,
		account.AzureTenantID,
		suite.siteID,
		nil,
		&MockGraphService{},
		control.Defaults(),
	)
	assert.NoError(t, err)
	assert.NotEmpty(t, col)
}
// TestRestoreListCollection verifies Graph Restore API for the List Collection
func (suite *SharePointCollectionSuite) TestRestoreListCollection() {
ctx, flush := tester.NewContext()
defer flush()
t := suite.T()
siteID := tester.M365SiteID(t)
a := tester.NewM365Account(t)
account, err := a.M365Config()
require.NoError(t, err)
service, err := createTestService(account)
require.NoError(t, err)
service := createTestService(t, suite.creds)
listing := mockconnector.GetMockListDefault("Mock List")
testName := "MockListing"
listing.SetDisplayName(&testName)
@ -123,13 +152,13 @@ func (suite *SharePointCollectionSuite) TestRestoreListCollection() {
destName := "Corso_Restore_" + common.FormatNow(common.SimpleTimeTesting)
deets, err := restoreListItem(ctx, service, listData, siteID, destName)
deets, err := restoreListItem(ctx, service, listData, suite.siteID, destName)
assert.NoError(t, err)
t.Logf("List created: %s\n", deets.SharePoint.ItemName)
// Clean-Up
var (
builder = service.Client().SitesById(siteID).Lists()
builder = service.Client().SitesById(suite.siteID).Lists()
isFound bool
deleteID string
)
@ -156,7 +185,7 @@ func (suite *SharePointCollectionSuite) TestRestoreListCollection() {
}
if isFound {
err := DeleteList(ctx, service, siteID, deleteID)
err := DeleteList(ctx, service, suite.siteID, deleteID)
assert.NoError(t, err)
}
}
@ -168,25 +197,18 @@ func (suite *SharePointCollectionSuite) TestRestoreLocation() {
defer flush()
t := suite.T()
a := tester.NewM365Account(t)
account, err := a.M365Config()
require.NoError(t, err)
service, err := createTestService(account)
require.NoError(t, err)
service := createTestService(t, suite.creds)
rootFolder := "General_" + common.FormatNow(common.SimpleTimeTesting)
siteID := tester.M365SiteID(t)
folderID, err := createRestoreFolders(ctx, service, siteID, []string{rootFolder})
folderID, err := createRestoreFolders(ctx, service, suite.siteID, []string{rootFolder})
assert.NoError(t, err)
t.Log("FolderID: " + folderID)
_, err = createRestoreFolders(ctx, service, siteID, []string{rootFolder, "Tsao"})
_, err = createRestoreFolders(ctx, service, suite.siteID, []string{rootFolder, "Tsao"})
assert.NoError(t, err)
// CleanUp
siteDrive, err := service.Client().SitesById(siteID).Drive().Get(ctx, nil)
siteDrive, err := service.Client().SitesById(suite.siteID).Drive().Get(ctx, nil)
require.NoError(t, err)
driveID := *siteDrive.GetId()

View File

@ -6,11 +6,14 @@ import (
"github.com/pkg/errors"
"github.com/alcionai/corso/src/internal/connector/discovery/api"
"github.com/alcionai/corso/src/internal/connector/graph"
"github.com/alcionai/corso/src/internal/connector/onedrive"
sapi "github.com/alcionai/corso/src/internal/connector/sharepoint/api"
"github.com/alcionai/corso/src/internal/connector/support"
"github.com/alcionai/corso/src/internal/data"
"github.com/alcionai/corso/src/internal/observe"
"github.com/alcionai/corso/src/pkg/account"
"github.com/alcionai/corso/src/pkg/control"
"github.com/alcionai/corso/src/pkg/logger"
"github.com/alcionai/corso/src/pkg/path"
@ -152,7 +155,9 @@ func collectLibraries(
updater.UpdateStatus,
ctrlOpts)
odcs, excludes, err := colls.Get(ctx)
// TODO(ashmrtn): Pass previous backup metadata when SharePoint supports delta
// token-based incrementals.
odcs, excludes, err := colls.Get(ctx, nil)
if err != nil {
return nil, nil, support.WrapAndAppend(siteID, err, errs)
}
@ -160,6 +165,55 @@ func collectLibraries(
return append(collections, odcs...), excludes, errs
}
// collectPages constructs a sharepoint Collections struct and Get()s the
// associated M365 IDs for the associated Pages.
//
// A dedicated beta-SDK client is built here because the Pages API is only
// available through the beta Graph endpoint.
// NOTE(review): scope and ctrlOpts are accepted but not consulted in this
// body — presumably page filtering/options are planned; confirm.
func collectPages(
	ctx context.Context,
	creds account.M365Config,
	serv graph.Servicer,
	tenantID, siteID string,
	scope selectors.SharePointScope,
	updater statusUpdater,
	ctrlOpts control.Options,
) ([]data.Collection, error) {
	logger.Ctx(ctx).With("site", siteID).Debug("Creating SharePoint Pages collections")

	spcs := make([]data.Collection, 0)

	// make the betaClient
	adpt, err := graph.CreateAdapter(creds.AzureTenantID, creds.AzureClientID, creds.AzureClientSecret)
	if err != nil {
		return nil, errors.Wrap(err, "adapter for betaservice not created")
	}

	betaService := api.NewBetaService(adpt)

	tuples, err := sapi.FetchPages(ctx, betaService, siteID)
	if err != nil {
		return nil, err
	}

	// One collection, holding one job (the page ID), per discovered page.
	for _, tuple := range tuples {
		dir, err := path.Builder{}.Append(tuple.Name).
			ToDataLayerSharePointPath(
				tenantID,
				siteID,
				path.PagesCategory,
				false)
		if err != nil {
			return nil, errors.Wrapf(err, "failed to create collection path for site: %s", siteID)
		}

		collection := NewCollection(dir, serv, updater.UpdateStatus)
		collection.betaService = betaService
		collection.AddJob(tuple.ID)

		spcs = append(spcs, collection)
	}

	return spcs, nil
}
type folderMatcher struct {
scope selectors.SharePointScope
}

View File

@ -77,7 +77,7 @@ func (suite *SharePointLibrariesSuite) TestUpdateCollections() {
site,
testBaseDrivePath,
),
expectedItemCount: 2,
expectedItemCount: 1,
expectedFileCount: 1,
expectedContainerCount: 1,
},

View File

@ -4,11 +4,11 @@ import (
"testing"
msgraphsdk "github.com/microsoftgraph/msgraph-sdk-go"
"github.com/pkg/errors"
"github.com/stretchr/testify/require"
"github.com/alcionai/corso/src/internal/connector/graph"
"github.com/alcionai/corso/src/internal/connector/onedrive"
"github.com/alcionai/corso/src/internal/connector/support"
"github.com/alcionai/corso/src/pkg/account"
)
@ -29,21 +29,22 @@ func (ms *MockGraphService) Adapter() *msgraphsdk.GraphRequestAdapter {
return nil
}
func (ms *MockGraphService) UpdateStatus(*support.ConnectorOperationStatus) {
}
// ---------------------------------------------------------------------------
// Helper Functions
// ---------------------------------------------------------------------------
func createTestService(credentials account.M365Config) (*graph.Service, error) {
func createTestService(t *testing.T, credentials account.M365Config) *graph.Service {
adapter, err := graph.CreateAdapter(
credentials.AzureTenantID,
credentials.AzureClientID,
credentials.AzureClientSecret,
)
if err != nil {
return nil, errors.Wrap(err, "creating microsoft graph service for exchange")
}
require.NoError(t, err, "creating microsoft graph service for exchange")
return graph.NewService(adapter), nil
return graph.NewService(adapter)
}
func expectedPathAsSlice(t *testing.T, tenant, user string, rest ...string) []string {

View File

@ -49,9 +49,7 @@ func (suite *SharePointSuite) TestLoadList() {
defer flush()
t := suite.T()
service, err := createTestService(suite.creds)
require.NoError(t, err)
service := createTestService(t, suite.creds)
tuples, err := preFetchLists(ctx, service, "root")
require.NoError(t, err)

View File

@ -36,6 +36,7 @@ import (
// RestoreCollections will restore the specified data collections into OneDrive
func RestoreCollections(
ctx context.Context,
backupVersion int,
service graph.Servicer,
dest control.RestoreDestination,
dcs []data.Collection,
@ -59,14 +60,19 @@ func RestoreCollections(
switch dc.FullPath().Category() {
case path.LibrariesCategory:
metrics, canceled = onedrive.RestoreCollection(
metrics, _, _, canceled = onedrive.RestoreCollection(
ctx,
backupVersion,
service,
dc,
[]onedrive.UserPermission{}, // Currently permission data is not stored for sharepoint
onedrive.OneDriveSource,
dest.ContainerName,
deets,
errUpdater)
errUpdater,
map[string]string{},
false,
)
case path.ListsCategory:
metrics, canceled = RestoreCollection(
ctx,

View File

@ -1,6 +1,9 @@
package support
import (
"strings"
bmodels "github.com/alcionai/corso/src/internal/connector/graph/betasdk/models"
absser "github.com/microsoft/kiota-abstractions-go/serialization"
js "github.com/microsoft/kiota-serialization-json-go"
"github.com/microsoftgraph/msgraph-sdk-go/models"
@ -12,7 +15,7 @@ import (
func CreateFromBytes(bytes []byte, createFunc absser.ParsableFactory) (absser.Parsable, error) {
parseNode, err := js.NewJsonParseNodeFactory().GetRootParseNode("application/json", bytes)
if err != nil {
return nil, errors.Wrap(err, "parsing byte array into m365 object")
return nil, errors.Wrap(err, "deserializing bytes into base m365 object")
}
anObject, err := parseNode.GetObjectValue(createFunc)
@ -27,7 +30,7 @@ func CreateFromBytes(bytes []byte, createFunc absser.ParsableFactory) (absser.Pa
func CreateMessageFromBytes(bytes []byte) (models.Messageable, error) {
aMessage, err := CreateFromBytes(bytes, models.CreateMessageFromDiscriminatorValue)
if err != nil {
return nil, errors.Wrap(err, "creating m365 exchange.Mail object from provided bytes")
return nil, errors.Wrap(err, "deserializing bytes to exchange message")
}
message := aMessage.(models.Messageable)
@ -40,7 +43,7 @@ func CreateMessageFromBytes(bytes []byte) (models.Messageable, error) {
func CreateContactFromBytes(bytes []byte) (models.Contactable, error) {
parsable, err := CreateFromBytes(bytes, models.CreateContactFromDiscriminatorValue)
if err != nil {
return nil, errors.Wrap(err, "creating m365 exchange.Contact object from provided bytes")
return nil, errors.Wrap(err, "deserializing bytes to exchange contact")
}
contact := parsable.(models.Contactable)
@ -52,7 +55,7 @@ func CreateContactFromBytes(bytes []byte) (models.Contactable, error) {
func CreateEventFromBytes(bytes []byte) (models.Eventable, error) {
parsable, err := CreateFromBytes(bytes, models.CreateEventFromDiscriminatorValue)
if err != nil {
return nil, errors.Wrap(err, "creating m365 exchange.Event object from provided bytes")
return nil, errors.Wrap(err, "deserializing bytes to exchange event")
}
event := parsable.(models.Eventable)
@ -64,10 +67,33 @@ func CreateEventFromBytes(bytes []byte) (models.Eventable, error) {
func CreateListFromBytes(bytes []byte) (models.Listable, error) {
parsable, err := CreateFromBytes(bytes, models.CreateListFromDiscriminatorValue)
if err != nil {
return nil, errors.Wrap(err, "creating m365 sharepoint.List object from provided bytes")
return nil, errors.Wrap(err, "deserializing bytes to sharepoint list")
}
list := parsable.(models.Listable)
return list, nil
}
// CreatePageFromBytes transforms the given bytes into a
// models.SitePageable object via the beta SDK discriminator factory.
func CreatePageFromBytes(bytes []byte) (bmodels.SitePageable, error) {
	node, err := CreateFromBytes(bytes, bmodels.CreateSitePageFromDiscriminatorValue)
	if err != nil {
		return nil, errors.Wrap(err, "deserializing bytes to sharepoint page")
	}

	return node.(bmodels.SitePageable), nil
}
// HasAttachments reports whether an item body carries an inline
// attachment, detected by a "cid:" image source in HTML content.
// Text-typed, empty, or incompletely populated bodies report false.
func HasAttachments(body models.ItemBodyable) bool {
	content := body.GetContent()
	if content == nil || len(*content) == 0 {
		return false
	}

	if ct := body.GetContentType(); ct == nil || *ct == models.TEXT_BODYTYPE {
		return false
	}

	return strings.Contains(*content, "src=\"cid:")
}

View File

@ -3,10 +3,13 @@ package support
import (
"testing"
kioser "github.com/microsoft/kiota-serialization-json-go"
"github.com/microsoftgraph/msgraph-sdk-go/models"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
bmodels "github.com/alcionai/corso/src/internal/connector/graph/betasdk/models"
"github.com/alcionai/corso/src/internal/connector/mockconnector"
)
@ -18,6 +21,11 @@ func TestDataSupportSuite(t *testing.T) {
suite.Run(t, new(DataSupportSuite))
}
var (
empty = "Empty Bytes"
invalid = "Invalid Bytes"
)
// TestCreateMessageFromBytes verifies approved mockdata bytes can
// be successfully transformed into M365 Message data.
func (suite *DataSupportSuite) TestCreateMessageFromBytes() {
@ -59,13 +67,13 @@ func (suite *DataSupportSuite) TestCreateContactFromBytes() {
isNil assert.ValueAssertionFunc
}{
{
name: "Empty Bytes",
name: empty,
byteArray: make([]byte, 0),
checkError: assert.Error,
isNil: assert.Nil,
},
{
name: "Invalid Bytes",
name: invalid,
byteArray: []byte("A random sentence doesn't make an object"),
checkError: assert.Error,
isNil: assert.Nil,
@ -94,13 +102,13 @@ func (suite *DataSupportSuite) TestCreateEventFromBytes() {
isNil assert.ValueAssertionFunc
}{
{
name: "Empty Byes",
name: empty,
byteArray: make([]byte, 0),
checkError: assert.Error,
isNil: assert.Nil,
},
{
name: "Invalid Bytes",
name: invalid,
byteArray: []byte("Invalid byte stream \"subject:\" Not going to work"),
checkError: assert.Error,
isNil: assert.Nil,
@ -132,13 +140,13 @@ func (suite *DataSupportSuite) TestCreateListFromBytes() {
isNil assert.ValueAssertionFunc
}{
{
name: "Empty Byes",
name: empty,
byteArray: make([]byte, 0),
checkError: assert.Error,
isNil: assert.Nil,
},
{
name: "Invalid Bytes",
name: invalid,
byteArray: []byte("Invalid byte stream \"subject:\" Not going to work"),
checkError: assert.Error,
isNil: assert.Nil,
@ -159,3 +167,111 @@ func (suite *DataSupportSuite) TestCreateListFromBytes() {
})
}
}
// TestCreatePageFromBytes verifies SitePage deserialization: empty and
// garbage inputs must error with a nil result, while a round-tripped
// serialized page must parse back successfully.
func (suite *DataSupportSuite) TestCreatePageFromBytes() {
	tests := []struct {
		name       string
		checkError assert.ErrorAssertionFunc
		isNil      assert.ValueAssertionFunc
		getBytes   func(t *testing.T) []byte
	}{
		{
			empty,
			assert.Error,
			assert.Nil,
			func(t *testing.T) []byte {
				return make([]byte, 0)
			},
		},
		{
			invalid,
			assert.Error,
			assert.Nil,
			func(t *testing.T) []byte {
				return []byte("snarf")
			},
		},
		{
			"Valid Page",
			assert.NoError,
			assert.NotNil,
			func(t *testing.T) []byte {
				// Build a real SitePage and serialize it to produce
				// known-valid input bytes.
				pg := bmodels.NewSitePage()
				title := "Tested"
				pg.SetTitle(&title)
				pg.SetName(&title)
				pg.SetWebUrl(&title)

				writer := kioser.NewJsonSerializationWriter()
				err := pg.Serialize(writer)
				require.NoError(t, err)

				byteArray, err := writer.GetSerializedContent()
				require.NoError(t, err)

				return byteArray
			},
		},
	}

	for _, test := range tests {
		suite.T().Run(test.name, func(t *testing.T) {
			result, err := CreatePageFromBytes(test.getBytes(t))
			test.checkError(t, err)
			test.isNil(t, result)
		})
	}
}
// TestHasAttachments checks inline-attachment detection across three
// bodies: plain text (false), a message with a OneDrive attachment (true),
// and a raw HTML body containing a cid: image source (true).
func (suite *DataSupportSuite) TestHasAttachments() {
	tests := []struct {
		name          string
		hasAttachment assert.BoolAssertionFunc
		getBodyable   func(t *testing.T) models.ItemBodyable
	}{
		{
			name:          "Mock w/out attachment",
			hasAttachment: assert.False,
			getBodyable: func(t *testing.T) models.ItemBodyable {
				byteArray := mockconnector.GetMockMessageWithBodyBytes(
					"Test",
					"This is testing",
					"This is testing",
				)
				message, err := CreateMessageFromBytes(byteArray)
				require.NoError(t, err)
				return message.GetBody()
			},
		},
		{
			name:          "Mock w/ inline attachment",
			hasAttachment: assert.True,
			getBodyable: func(t *testing.T) models.ItemBodyable {
				byteArray := mockconnector.GetMessageWithOneDriveAttachment("Test legacy")
				message, err := CreateMessageFromBytes(byteArray)
				require.NoError(t, err)
				return message.GetBody()
			},
		},
		{
			name:          "Edge Case",
			hasAttachment: assert.True,
			getBodyable: func(t *testing.T) models.ItemBodyable {
				// Real-world HTML body with a pasted inline image (cid: src).
				//nolint:lll
				content := "<html><head>\r\n<meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\"><style type=\"text/css\" style=\"display:none\">\r\n<!--\r\np\r\n\t{margin-top:0;\r\n\tmargin-bottom:0}\r\n-->\r\n</style></head><body dir=\"ltr\"><div class=\"elementToProof\" style=\"font-family:Calibri,Arial,Helvetica,sans-serif; font-size:12pt; color:rgb(0,0,0); background-color:rgb(255,255,255)\">Happy New Year,</div><div class=\"elementToProof\" style=\"font-family:Calibri,Arial,Helvetica,sans-serif; font-size:12pt; color:rgb(0,0,0); background-color:rgb(255,255,255)\"><br></div><div class=\"elementToProof\" style=\"font-family:Calibri,Arial,Helvetica,sans-serif; font-size:12pt; color:rgb(0,0,0); background-color:rgb(255,255,255)\">In accordance with TPS report guidelines, there have been questions about how to address our activities SharePoint Cover page. Do you believe this is the best picture?&nbsp;</div><div class=\"elementToProof\" style=\"font-family:Calibri,Arial,Helvetica,sans-serif; font-size:12pt; color:rgb(0,0,0); background-color:rgb(255,255,255)\"><br></div><div class=\"elementToProof\" style=\"font-family:Calibri,Arial,Helvetica,sans-serif; font-size:12pt; color:rgb(0,0,0); background-color:rgb(255,255,255)\"><img class=\"FluidPluginCopy ContentPasted0 w-2070 h-1380\" size=\"5854817\" data-outlook-trace=\"F:1|T:1\" src=\"cid:85f4faa3-9851-40c7-ba0a-e63dce1185f9\" style=\"max-width:100%\"><br></div><div class=\"elementToProof\" style=\"font-family:Calibri,Arial,Helvetica,sans-serif; font-size:12pt; color:rgb(0,0,0); background-color:rgb(255,255,255)\"><br></div><div class=\"elementToProof\" style=\"font-family:Calibri,Arial,Helvetica,sans-serif; font-size:12pt; color:rgb(0,0,0); background-color:rgb(255,255,255)\">Let me know if this meets our culture requirements.</div><div class=\"elementToProof\" style=\"font-family:Calibri,Arial,Helvetica,sans-serif; font-size:12pt; color:rgb(0,0,0); background-color:rgb(255,255,255)\"><br></div><div class=\"elementToProof\" style=\"font-family:Calibri,Arial,Helvetica,sans-serif; font-size:12pt; color:rgb(0,0,0); background-color:rgb(255,255,255)\">Warm Regards,</div><div class=\"elementToProof\" style=\"font-family:Calibri,Arial,Helvetica,sans-serif; font-size:12pt; color:rgb(0,0,0); background-color:rgb(255,255,255)\"><br></div><div class=\"elementToProof\" style=\"font-family:Calibri,Arial,Helvetica,sans-serif; font-size:12pt; color:rgb(0,0,0); background-color:rgb(255,255,255)\">Dustin</div></body></html>"
				body := models.NewItemBody()
				body.SetContent(&content)
				cat := models.HTML_BODYTYPE
				body.SetContentType(&cat)
				return body
			},
		},
	}
	for _, test := range tests {
		suite.T().Run(test.name, func(t *testing.T) {
			found := HasAttachments(test.getBodyable(t))
			test.hasAttachment(t, found)
		})
	}
}

View File

@ -1,11 +1,14 @@
package support
import (
"fmt"
"strings"
"github.com/microsoftgraph/msgraph-sdk-go/models"
)
const itemAttachment = "#microsoft.graph.itemAttachment"
// CloneMessageableFields places data from original data into new message object.
// SingleLegacyValueProperty is not populated during this operation
func CloneMessageableFields(orig, message models.Messageable) models.Messageable {
@ -278,3 +281,90 @@ func cloneColumnDefinitionable(orig models.ColumnDefinitionable) models.ColumnDe
return newColumn
}
// ToItemAttachment transforms internal item, OutlookItemables, into
// objects that are able to be uploaded into M365.
// Supported Internal Items:
// - Events
//
// Returns an error (instead of panicking, as the previous version could)
// when the attachment carries no embedded item or no @odata.type.
func ToItemAttachment(orig models.Attachmentable) (models.Attachmentable, error) {
	transform, ok := orig.(models.ItemAttachmentable)
	if !ok { // Shouldn't ever happen
		return nil, fmt.Errorf("transforming attachment to item attachment")
	}

	item := transform.GetItem()
	if item == nil {
		return nil, fmt.Errorf("item attachment has no embedded item")
	}

	itemType := item.GetOdataType()
	if itemType == nil {
		return nil, fmt.Errorf("item attachment embedded item has no odata type")
	}

	const supported = "#microsoft.graph.event"

	switch *itemType {
	case supported:
		event := item.(models.Eventable)

		// Strip fields that M365 rejects on upload before re-attaching.
		newEvent, err := sanitizeEvent(event)
		if err != nil {
			return nil, err
		}

		transform.SetItem(newEvent)

		return transform, nil
	default:
		return nil, fmt.Errorf("exiting ToItemAttachment: %s not supported", *itemType)
	}
}
// sanitizeEvent transfers data into a fresh event object and
// removes unique IDs from the M365 object so it can be re-uploaded.
func sanitizeEvent(orig models.Eventable) (models.Eventable, error) {
	newEvent := models.NewEvent()
	newEvent.SetAttendees(orig.GetAttendees())
	newEvent.SetBody(orig.GetBody())
	newEvent.SetBodyPreview(orig.GetBodyPreview())
	newEvent.SetCalendar(orig.GetCalendar())
	newEvent.SetCreatedDateTime(orig.GetCreatedDateTime())
	newEvent.SetEnd(orig.GetEnd())
	newEvent.SetHasAttachments(orig.GetHasAttachments())
	newEvent.SetHideAttendees(orig.GetHideAttendees())
	newEvent.SetImportance(orig.GetImportance())
	newEvent.SetIsAllDay(orig.GetIsAllDay())
	newEvent.SetIsOnlineMeeting(orig.GetIsOnlineMeeting())
	newEvent.SetLocation(orig.GetLocation())
	newEvent.SetLocations(orig.GetLocations())
	newEvent.SetSensitivity(orig.GetSensitivity())
	newEvent.SetReminderMinutesBeforeStart(orig.GetReminderMinutesBeforeStart())
	newEvent.SetStart(orig.GetStart())
	newEvent.SetSubject(orig.GetSubject())
	newEvent.SetType(orig.GetType())

	// Sanitation
	// isDraft and isOrganizer *bool ptr's have to be removed completely
	// from JSON in order for POST method to succeed.
	// Current as of 2/2/2023
	newEvent.SetIsOrganizer(nil)
	newEvent.SetIsDraft(nil)
	newEvent.SetAdditionalData(orig.GetAdditionalData())

	attached := orig.GetAttachments()
	// BUGFIX: allocate with zero length and len(attached) capacity.
	// make([]..., len(attached)) followed by append produced a slice with
	// len(attached) leading nil entries before the real attachments.
	attachments := make([]models.Attachmentable, 0, len(attached))

	for _, ax := range attached {
		// Guard the OData type pointer: a missing type is treated as a
		// plain attachment rather than dereferencing nil.
		if t := ax.GetOdataType(); t != nil && *t == itemAttachment {
			newAttachment, err := ToItemAttachment(ax)
			if err != nil {
				return nil, err
			}

			attachments = append(attachments, newAttachment)

			continue
		}

		attachments = append(attachments, ax)
	}

	newEvent.SetAttachments(attachments)

	return newEvent, nil
}

View File

@ -4,8 +4,8 @@ import (
"context"
"fmt"
"github.com/dustin/go-humanize"
multierror "github.com/hashicorp/go-multierror"
bytesize "github.com/inhies/go-bytesize"
"github.com/alcionai/corso/src/pkg/logger"
)
@ -66,6 +66,7 @@ func CreateStatus(
hasErrors := err != nil
numErr := GetNumberOfErrors(err)
status := ConnectorOperationStatus{
lastOperation: op,
ObjectCount: cm.Objects,
@ -142,7 +143,7 @@ func (cos *ConnectorOperationStatus) String() string {
cos.lastOperation.String(),
cos.Successful,
cos.ObjectCount,
bytesize.New(float64(cos.bytes)),
humanize.Bytes(uint64(cos.bytes)),
cos.FolderCount,
)

View File

@ -177,7 +177,7 @@ func MessageWithCompletion(
completionCh := make(chan struct{}, 1)
if cfg.hidden() {
return completionCh, func() {}
return completionCh, func() { log.Info("done - " + clean) }
}
wg.Add(1)
@ -232,7 +232,7 @@ func ItemProgress(
log.Debug(header)
if cfg.hidden() || rc == nil || totalBytes == 0 {
return rc, func() {}
return rc, func() { log.Debug("done - " + header) }
}
wg.Add(1)
@ -286,7 +286,7 @@ func ProgressWithCount(
}
}(progressCh)
return progressCh, func() {}
return progressCh, func() { log.Info("done - " + lmsg) }
}
wg.Add(1)
@ -381,16 +381,24 @@ func CollectionProgress(
if cfg.hidden() || len(user.String()) == 0 || len(dirName.String()) == 0 {
ch := make(chan struct{})
counted := 0
go func(ci <-chan struct{}) {
for {
_, ok := <-ci
if !ok {
return
}
counted++
// Log every 1000 items that are processed
if counted%1000 == 0 {
log.Infow("uploading", "count", counted)
}
}
}(ch)
return ch, func() {}
return ch, func() { log.Infow("done - "+message, "count", counted) }
}
wg.Add(1)
@ -432,6 +440,11 @@ func CollectionProgress(
counted++
// Log every 1000 items that are processed
if counted%1000 == 0 {
log.Infow("uploading", "count", counted)
}
bar.Increment()
}
}

View File

@ -2,6 +2,8 @@ package operations
import (
"context"
"fmt"
"runtime/debug"
"time"
"github.com/alcionai/clues"
@ -40,6 +42,9 @@ type BackupOperation struct {
Version string `json:"version"`
account account.Account
// when true, this allows for incremental backups instead of full data pulls
incremental bool
}
// BackupResults aggregate the details of the result of the operation.
@ -66,6 +71,7 @@ func NewBackupOperation(
Selectors: selector,
Version: "v0",
account: acct,
incremental: useIncrementalBackup(selector, opts),
}
if err := op.validate(); err != nil {
return BackupOperation{}, err
@ -103,29 +109,52 @@ type detailsWriter interface {
// Run begins a synchronous backup operation.
func (op *BackupOperation) Run(ctx context.Context) (err error) {
defer func() {
if r := recover(); r != nil {
var rerr error
if re, ok := r.(error); ok {
rerr = re
} else if re, ok := r.(string); ok {
rerr = clues.New(re)
} else {
rerr = clues.New(fmt.Sprintf("%v", r))
}
err = clues.Wrap(rerr, "panic recovery").
WithClues(ctx).
With("stacktrace", string(debug.Stack()))
logger.Ctx(ctx).
With("err", err).
Errorw("backup panic", clues.InErr(err).Slice()...)
}
}()
ctx, end := D.Span(ctx, "operations:backup:run")
defer end()
defer func() {
end()
// wait for the progress display to clean up
observe.Complete()
}()
// -----
// Setup
// -----
var (
opStats backupStats
backupDetails *details.Builder
toMerge map[string]path.Path
tenantID = op.account.ID()
startTime = time.Now()
detailsStore = streamstore.New(op.kopia, tenantID, op.Selectors.PathService())
reasons = selectorToReasons(op.Selectors)
uib = useIncrementalBackup(op.Selectors, op.Options)
opStats backupStats
startTime = time.Now()
detailsStore = streamstore.New(op.kopia, op.account.ID(), op.Selectors.PathService())
)
op.Results.BackupID = model.StableID(uuid.NewString())
ctx = clues.AddAll(
ctx,
"tenant_id", tenantID, // TODO: pii
"tenant_id", op.account.ID(), // TODO: pii
"resource_owner", op.ResourceOwner, // TODO: pii
"backup_id", op.Results.BackupID,
"service", op.Selectors.Service,
"incremental", uib)
"incremental", op.incremental)
op.bus.Event(
ctx,
@ -134,101 +163,128 @@ func (op *BackupOperation) Run(ctx context.Context) (err error) {
events.StartTime: startTime,
events.Service: op.Selectors.Service.String(),
events.BackupID: op.Results.BackupID,
},
)
})
// persist operation results to the model store on exit
defer func() {
// wait for the progress display to clean up
observe.Complete()
// -----
// Execution
// -----
err = op.persistResults(startTime, &opStats)
if err != nil {
return
}
deets, err := op.do(
ctx,
&opStats,
detailsStore,
op.Results.BackupID)
if err != nil {
// No return here! We continue down to persistResults, even in case of failure.
logger.Ctx(ctx).
With("err", err).
Errorw("doing backup", clues.InErr(err).Slice()...)
op.Errors.Fail(errors.Wrap(err, "doing backup"))
opStats.readErr = op.Errors.Err()
}
err = op.createBackupModels(
ctx,
detailsStore,
opStats.k.SnapshotID,
backupDetails.Details())
if err != nil {
opStats.writeErr = err
}
}()
// -----
// Persistence
// -----
err = op.persistResults(startTime, &opStats)
if err != nil {
op.Errors.Fail(errors.Wrap(err, "persisting backup results"))
opStats.writeErr = op.Errors.Err()
return op.Errors.Err()
}
err = op.createBackupModels(
ctx,
detailsStore,
opStats.k.SnapshotID,
op.Results.BackupID,
deets.Details())
if err != nil {
op.Errors.Fail(errors.Wrap(err, "persisting backup"))
opStats.writeErr = op.Errors.Err()
return op.Errors.Err()
}
logger.Ctx(ctx).Infow("completed backup", "results", op.Results)
return nil
}
// do is purely the action of running a backup. All pre/post behavior
// is found in Run().
func (op *BackupOperation) do(
ctx context.Context,
opStats *backupStats,
detailsStore detailsReader,
backupID model.StableID,
) (*details.Builder, error) {
reasons := selectorToReasons(op.Selectors)
// should always be 1, since backups are 1:1 with resourceOwners.
opStats.resourceCount = 1
mans, mdColls, canUseMetaData, err := produceManifestsAndMetadata(
ctx,
op.kopia,
op.store,
reasons,
tenantID,
uib,
)
op.account.ID(),
op.incremental,
op.Errors)
if err != nil {
opStats.readErr = errors.Wrap(err, "connecting to M365")
return opStats.readErr
return nil, errors.Wrap(err, "producing manifests and metadata")
}
gc, err := connectToM365(ctx, op.Selectors, op.account)
if err != nil {
opStats.readErr = errors.Wrap(err, "connecting to M365")
return opStats.readErr
return nil, errors.Wrap(err, "connectng to m365")
}
cs, err := produceBackupDataCollections(ctx, gc, op.Selectors, mdColls, op.Options)
if err != nil {
opStats.readErr = errors.Wrap(err, "retrieving data to backup")
return opStats.readErr
return nil, errors.Wrap(err, "producing backup data collections")
}
ctx = clues.Add(ctx, "collections", len(cs))
ctx = clues.Add(ctx, "coll_count", len(cs))
opStats.k, backupDetails, toMerge, err = consumeBackupDataCollections(
writeStats, deets, toMerge, err := consumeBackupDataCollections(
ctx,
op.kopia,
tenantID,
op.account.ID(),
reasons,
mans,
cs,
op.Results.BackupID,
uib && canUseMetaData)
backupID,
op.incremental && canUseMetaData)
if err != nil {
opStats.writeErr = errors.Wrap(err, "backing up service data")
return opStats.writeErr
return nil, errors.Wrap(err, "persisting collection backups")
}
logger.Ctx(ctx).Debugf(
"Backed up %d directories and %d files",
opStats.k.TotalDirectoryCount, opStats.k.TotalFileCount,
)
opStats.k = writeStats
if err = mergeDetails(
err = mergeDetails(
ctx,
op.store,
detailsStore,
mans,
toMerge,
backupDetails,
); err != nil {
opStats.writeErr = errors.Wrap(err, "merging backup details")
return opStats.writeErr
deets)
if err != nil {
return nil, errors.Wrap(err, "merging details")
}
opStats.gc = gc.AwaitStatus()
// TODO(keepers): remove when fault.Errors handles all iterable error aggregation.
if opStats.gc.ErrorCount > 0 {
merr := multierror.Append(opStats.readErr, errors.Wrap(opStats.gc.Err, "retrieving data"))
opStats.readErr = merr.ErrorOrNil()
// Need to exit before we set started to true else we'll report no errors.
return opStats.readErr
return nil, opStats.gc.Err
}
// should always be 1, since backups are 1:1 with resourceOwners.
opStats.resourceCount = 1
logger.Ctx(ctx).Debug(gc.PrintableStatus())
return err
return deets, nil
}
// checker to see if conditions are correct for incremental backup behavior such as
@ -307,7 +363,9 @@ func selectorToReasons(sel selectors.Selector) []kopia.Reason {
return reasons
}
func builderFromReason(tenant string, r kopia.Reason) (*path.Builder, error) {
func builderFromReason(ctx context.Context, tenant string, r kopia.Reason) (*path.Builder, error) {
ctx = clues.Add(ctx, "category", r.Category.String())
// This is hacky, but we want the path package to format the path the right
// way (e.x. proper order for service, category, etc), but we don't care about
// the folders after the prefix.
@ -319,12 +377,7 @@ func builderFromReason(tenant string, r kopia.Reason) (*path.Builder, error) {
false,
)
if err != nil {
return nil, errors.Wrapf(
err,
"building path for service %s category %s",
r.Service.String(),
r.Category.String(),
)
return nil, clues.Wrap(err, "building path").WithClues(ctx)
}
return p.ToBuilder().Dir(), nil
@ -367,7 +420,7 @@ func consumeBackupDataCollections(
categories := map[string]struct{}{}
for _, reason := range m.Reasons {
pb, err := builderFromReason(tenantID, reason)
pb, err := builderFromReason(ctx, tenantID, reason)
if err != nil {
return nil, nil, nil, errors.Wrap(err, "getting subtree paths for bases")
}
@ -394,13 +447,9 @@ func consumeBackupDataCollections(
logger.Ctx(ctx).Infow(
"using base for backup",
"snapshot_id",
m.ID,
"services",
svcs,
"categories",
cats,
)
"snapshot_id", m.ID,
"services", svcs,
"categories", cats)
}
kopiaStats, deets, itemsSourcedFromBase, err := bu.BackupCollections(
@ -409,24 +458,22 @@ func consumeBackupDataCollections(
cs,
nil,
tags,
isIncremental,
)
isIncremental)
if err != nil {
if kopiaStats == nil {
return nil, nil, nil, err
}
return nil, nil, nil, errors.Wrapf(
err,
"kopia snapshot failed with %v catastrophic errors and %v ignored errors",
kopiaStats.ErrorCount, kopiaStats.IgnoredErrorCount)
}
if kopiaStats.ErrorCount > 0 || kopiaStats.IgnoredErrorCount > 0 {
if err != nil {
err = errors.Wrapf(
err,
"kopia snapshot failed with %v catastrophic errors and %v ignored errors",
kopiaStats.ErrorCount,
kopiaStats.IgnoredErrorCount,
)
} else {
err = errors.Errorf(
"kopia snapshot failed with %v catastrophic errors and %v ignored errors",
kopiaStats.ErrorCount,
kopiaStats.IgnoredErrorCount,
)
}
err = errors.Errorf(
"kopia snapshot failed with %v catastrophic errors and %v ignored errors",
kopiaStats.ErrorCount, kopiaStats.IgnoredErrorCount)
}
return kopiaStats, deets, itemsSourcedFromBase, err
@ -461,6 +508,8 @@ func mergeDetails(
var addedEntries int
for _, man := range mans {
mctx := clues.Add(ctx, "manifest_id", man.ID)
// For now skip snapshots that aren't complete. We will need to revisit this
// when we tackle restartability.
if len(man.IncompleteReason) > 0 {
@ -469,28 +518,26 @@ func mergeDetails(
bID, ok := man.GetTag(kopia.TagBackupID)
if !ok {
return errors.Errorf("no backup ID in snapshot manifest with ID %s", man.ID)
return clues.New("no backup ID in snapshot manifest").WithClues(mctx)
}
mctx = clues.Add(mctx, "manifest_backup_id", bID)
_, baseDeets, err := getBackupAndDetailsFromID(
ctx,
model.StableID(bID),
ms,
detailsStore,
)
detailsStore)
if err != nil {
return errors.Wrapf(err, "backup fetching base details for backup %s", bID)
return clues.New("fetching base details for backup").WithClues(mctx)
}
for _, entry := range baseDeets.Items() {
rr, err := path.FromDataLayerPath(entry.RepoRef, true)
if err != nil {
return errors.Wrapf(
err,
"parsing base item info path %s in backup %s",
entry.RepoRef,
bID,
)
return clues.New("parsing base item info path").
WithClues(mctx).
With("repo_ref", entry.RepoRef) // todo: pii
}
// Although this base has an entry it may not be the most recent. Check
@ -513,11 +560,7 @@ func mergeDetails(
// Fixup paths in the item.
item := entry.ItemInfo
if err := details.UpdateItem(&item, newPath); err != nil {
return errors.Wrapf(
err,
"updating item info for entry from backup %s",
bID,
)
return clues.New("updating item details").WithClues(mctx)
}
// TODO(ashmrtn): This may need updated if we start using this merge
@ -529,8 +572,7 @@ func mergeDetails(
newPath.ShortRef(),
newPath.ToBuilder().Dir().ShortRef(),
itemUpdated,
item,
)
item)
folders := details.FolderEntriesForPath(newPath.ToBuilder().Dir())
deets.AddFoldersForItem(folders, item, itemUpdated)
@ -542,11 +584,9 @@ func mergeDetails(
}
if addedEntries != len(shortRefsFromPrevBackup) {
return errors.Errorf(
"incomplete migration of backup details: found %v of %v expected items",
addedEntries,
len(shortRefsFromPrevBackup),
)
return clues.New("incomplete migration of backup details").
WithClues(ctx).
WithAll("item_count", addedEntries, "expected_item_count", len(shortRefsFromPrevBackup))
}
return nil
@ -568,21 +608,28 @@ func (op *BackupOperation) persistResults(
if opStats.readErr != nil || opStats.writeErr != nil {
op.Status = Failed
// TODO(keepers): replace with fault.Errors handling.
return multierror.Append(
errors.New("errors prevented the operation from processing"),
opStats.readErr,
opStats.writeErr)
}
if opStats.readErr == nil && opStats.writeErr == nil && opStats.gc.Successful == 0 {
op.Results.BytesRead = opStats.k.TotalHashedBytes
op.Results.BytesUploaded = opStats.k.TotalUploadedBytes
op.Results.ItemsWritten = opStats.k.TotalFileCount
op.Results.ResourceOwners = opStats.resourceCount
if opStats.gc == nil {
op.Status = Failed
return errors.New("backup population never completed")
}
if opStats.gc.Successful == 0 {
op.Status = NoData
}
op.Results.BytesRead = opStats.k.TotalHashedBytes
op.Results.BytesUploaded = opStats.k.TotalUploadedBytes
op.Results.ItemsRead = opStats.gc.Successful
op.Results.ItemsWritten = opStats.k.TotalFileCount
op.Results.ResourceOwners = opStats.resourceCount
return nil
}
@ -592,29 +639,32 @@ func (op *BackupOperation) createBackupModels(
ctx context.Context,
detailsStore detailsWriter,
snapID string,
backupID model.StableID,
backupDetails *details.Details,
) error {
ctx = clues.Add(ctx, "snapshot_id", snapID)
if backupDetails == nil {
return errors.New("no backup details to record")
return clues.New("no backup details to record").WithClues(ctx)
}
detailsID, err := detailsStore.WriteBackupDetails(ctx, backupDetails)
if err != nil {
return errors.Wrap(err, "creating backupdetails model")
return clues.Wrap(err, "creating backupDetails model").WithClues(ctx)
}
ctx = clues.Add(ctx, "details_id", detailsID)
b := backup.New(
snapID, detailsID, op.Status.String(),
op.Results.BackupID,
backupID,
op.Selectors,
op.Results.ReadWrites,
op.Results.StartAndEndTime,
op.Errors,
)
err = op.store.Put(ctx, model.BackupSchema, b)
if err != nil {
return errors.Wrap(err, "creating backup model")
if err = op.store.Put(ctx, model.BackupSchema, b); err != nil {
return clues.Wrap(err, "creating backup model").WithClues(ctx)
}
dur := op.Results.CompletedAt.Sub(op.Results.StartedAt)

View File

@ -339,7 +339,15 @@ func generateContainerOfItems(
dest,
collections)
deets, err := gc.RestoreDataCollections(ctx, acct, sel, dest, dataColls)
deets, err := gc.RestoreDataCollections(
ctx,
backup.Version,
acct,
sel,
dest,
control.Options{RestorePermissions: true},
dataColls,
)
require.NoError(t, err)
return deets
@ -1073,7 +1081,7 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_oneDrive() {
sel.Include(sel.AllData())
bo, _, _, _, closer := prepNewTestBackupOp(t, ctx, mb, sel.Selector, control.Toggles{})
bo, _, _, _, closer := prepNewTestBackupOp(t, ctx, mb, sel.Selector, control.Toggles{EnablePermissionsBackup: true})
defer closer()
runAndCheckBackup(t, ctx, &bo, mb)

View File

@ -432,258 +432,6 @@ func (suite *BackupOpSuite) TestBackupOperation_PersistResults() {
}
}
func (suite *BackupOpSuite) TestBackupOperation_VerifyDistinctBases() {
const user = "a-user"
table := []struct {
name string
input []*kopia.ManifestEntry
errCheck assert.ErrorAssertionFunc
}{
{
name: "SingleManifestMultipleReasons",
input: []*kopia.ManifestEntry{
{
Manifest: &snapshot.Manifest{
ID: "id1",
},
Reasons: []kopia.Reason{
{
ResourceOwner: user,
Service: path.ExchangeService,
Category: path.EmailCategory,
},
{
ResourceOwner: user,
Service: path.ExchangeService,
Category: path.EventsCategory,
},
},
},
},
errCheck: assert.NoError,
},
{
name: "MultipleManifestsDistinctReason",
input: []*kopia.ManifestEntry{
{
Manifest: &snapshot.Manifest{
ID: "id1",
},
Reasons: []kopia.Reason{
{
ResourceOwner: user,
Service: path.ExchangeService,
Category: path.EmailCategory,
},
},
},
{
Manifest: &snapshot.Manifest{
ID: "id2",
},
Reasons: []kopia.Reason{
{
ResourceOwner: user,
Service: path.ExchangeService,
Category: path.EventsCategory,
},
},
},
},
errCheck: assert.NoError,
},
{
name: "MultipleManifestsSameReason",
input: []*kopia.ManifestEntry{
{
Manifest: &snapshot.Manifest{
ID: "id1",
},
Reasons: []kopia.Reason{
{
ResourceOwner: user,
Service: path.ExchangeService,
Category: path.EmailCategory,
},
},
},
{
Manifest: &snapshot.Manifest{
ID: "id2",
},
Reasons: []kopia.Reason{
{
ResourceOwner: user,
Service: path.ExchangeService,
Category: path.EmailCategory,
},
},
},
},
errCheck: assert.Error,
},
{
name: "MultipleManifestsSameReasonOneIncomplete",
input: []*kopia.ManifestEntry{
{
Manifest: &snapshot.Manifest{
ID: "id1",
},
Reasons: []kopia.Reason{
{
ResourceOwner: user,
Service: path.ExchangeService,
Category: path.EmailCategory,
},
},
},
{
Manifest: &snapshot.Manifest{
ID: "id2",
IncompleteReason: "checkpoint",
},
Reasons: []kopia.Reason{
{
ResourceOwner: user,
Service: path.ExchangeService,
Category: path.EmailCategory,
},
},
},
},
errCheck: assert.NoError,
},
}
for _, test := range table {
suite.T().Run(test.name, func(t *testing.T) {
test.errCheck(t, verifyDistinctBases(test.input))
})
}
}
func (suite *BackupOpSuite) TestBackupOperation_CollectMetadata() {
var (
tenant = "a-tenant"
resourceOwner = "a-user"
fileNames = []string{
"delta",
"paths",
}
emailDeltaPath = makeMetadataPath(
suite.T(),
tenant,
path.ExchangeService,
resourceOwner,
path.EmailCategory,
fileNames[0],
)
emailPathsPath = makeMetadataPath(
suite.T(),
tenant,
path.ExchangeService,
resourceOwner,
path.EmailCategory,
fileNames[1],
)
contactsDeltaPath = makeMetadataPath(
suite.T(),
tenant,
path.ExchangeService,
resourceOwner,
path.ContactsCategory,
fileNames[0],
)
contactsPathsPath = makeMetadataPath(
suite.T(),
tenant,
path.ExchangeService,
resourceOwner,
path.ContactsCategory,
fileNames[1],
)
)
table := []struct {
name string
inputMan *kopia.ManifestEntry
inputFiles []string
expected []path.Path
}{
{
name: "SingleReasonSingleFile",
inputMan: &kopia.ManifestEntry{
Manifest: &snapshot.Manifest{},
Reasons: []kopia.Reason{
{
ResourceOwner: resourceOwner,
Service: path.ExchangeService,
Category: path.EmailCategory,
},
},
},
inputFiles: []string{fileNames[0]},
expected: []path.Path{emailDeltaPath},
},
{
name: "SingleReasonMultipleFiles",
inputMan: &kopia.ManifestEntry{
Manifest: &snapshot.Manifest{},
Reasons: []kopia.Reason{
{
ResourceOwner: resourceOwner,
Service: path.ExchangeService,
Category: path.EmailCategory,
},
},
},
inputFiles: fileNames,
expected: []path.Path{emailDeltaPath, emailPathsPath},
},
{
name: "MultipleReasonsMultipleFiles",
inputMan: &kopia.ManifestEntry{
Manifest: &snapshot.Manifest{},
Reasons: []kopia.Reason{
{
ResourceOwner: resourceOwner,
Service: path.ExchangeService,
Category: path.EmailCategory,
},
{
ResourceOwner: resourceOwner,
Service: path.ExchangeService,
Category: path.ContactsCategory,
},
},
},
inputFiles: fileNames,
expected: []path.Path{
emailDeltaPath,
emailPathsPath,
contactsDeltaPath,
contactsPathsPath,
},
},
}
for _, test := range table {
suite.T().Run(test.name, func(t *testing.T) {
ctx, flush := tester.NewContext()
defer flush()
mr := &mockRestorer{}
_, err := collectMetadata(ctx, mr, test.inputMan, test.inputFiles, tenant)
assert.NoError(t, err)
checkPaths(t, test.expected, mr.gotPaths)
})
}
}
func (suite *BackupOpSuite) TestBackupOperation_ConsumeBackupDataCollections_Paths() {
var (
tenant = "a-tenant"

View File

@ -3,7 +3,7 @@ package operations
import (
"context"
multierror "github.com/hashicorp/go-multierror"
"github.com/alcionai/clues"
"github.com/kopia/kopia/repo/manifest"
"github.com/pkg/errors"
@ -12,6 +12,7 @@ import (
"github.com/alcionai/corso/src/internal/kopia"
"github.com/alcionai/corso/src/internal/model"
"github.com/alcionai/corso/src/pkg/backup"
"github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/logger"
"github.com/alcionai/corso/src/pkg/path"
)
@ -44,6 +45,7 @@ func produceManifestsAndMetadata(
reasons []kopia.Reason,
tenantID string,
getMetadata bool,
errs fault.Adder,
) ([]*kopia.ManifestEntry, []data.Collection, bool, error) {
var (
metadataFiles = graph.AllMetadataFileNames()
@ -68,12 +70,10 @@ func produceManifestsAndMetadata(
//
// TODO(ashmrtn): This may need updating if we start sourcing item backup
// details from previous snapshots when using kopia-assisted incrementals.
if err := verifyDistinctBases(ms); err != nil {
logger.Ctx(ctx).Warnw(
if err := verifyDistinctBases(ctx, ms, errs); err != nil {
logger.Ctx(ctx).With("error", err).Infow(
"base snapshot collision, falling back to full backup",
"error",
err,
)
clues.In(ctx).Slice()...)
return ms, nil, false, nil
}
@ -83,40 +83,41 @@ func produceManifestsAndMetadata(
continue
}
mctx := clues.Add(ctx, "manifest_id", man.ID)
bID, ok := man.GetTag(kopia.TagBackupID)
if !ok {
return nil, nil, false, errors.New("snapshot manifest missing backup ID")
err = clues.New("snapshot manifest missing backup ID").WithClues(ctx)
return nil, nil, false, err
}
dID, _, err := gdi.GetDetailsIDFromBackupID(ctx, model.StableID(bID))
mctx = clues.Add(mctx, "manifest_backup_id", man.ID)
dID, _, err := gdi.GetDetailsIDFromBackupID(mctx, model.StableID(bID))
if err != nil {
// if no backup exists for any of the complete manifests, we want
// to fall back to a complete backup.
if errors.Is(err, kopia.ErrNotFound) {
logger.Ctx(ctx).Infow(
"backup missing, falling back to full backup",
"backup_id", bID)
logger.Ctx(ctx).Infow("backup missing, falling back to full backup", clues.In(mctx).Slice()...)
return ms, nil, false, nil
}
return nil, nil, false, errors.Wrap(err, "retrieving prior backup data")
}
mctx = clues.Add(mctx, "manifest_details_id", dID)
// if no detailsID exists for any of the complete manifests, we want
// to fall back to a complete backup. This is a temporary prevention
// mechanism to keep backups from falling into a perpetually bad state.
// This makes an assumption that the ID points to a populated set of
// details; we aren't doing the work to look them up.
if len(dID) == 0 {
logger.Ctx(ctx).Infow(
"backup missing details ID, falling back to full backup",
"backup_id", bID)
logger.Ctx(ctx).Infow("backup missing details ID, falling back to full backup", clues.In(mctx).Slice()...)
return ms, nil, false, nil
}
colls, err := collectMetadata(ctx, mr, man, metadataFiles, tenantID)
colls, err := collectMetadata(mctx, mr, man, metadataFiles, tenantID)
if err != nil && !errors.Is(err, kopia.ErrNotFound) {
// prior metadata isn't guaranteed to exist.
// if it doesn't, we'll just have to do a
@ -134,9 +135,9 @@ func produceManifestsAndMetadata(
// of manifests, that each manifest's Reason (owner, service, category) is only
// included once. If a reason is duplicated by any two manifests, an error is
// returned.
func verifyDistinctBases(mans []*kopia.ManifestEntry) error {
func verifyDistinctBases(ctx context.Context, mans []*kopia.ManifestEntry, errs fault.Adder) error {
var (
errs *multierror.Error
failed bool
reasons = map[string]manifest.ID{}
)
@ -155,10 +156,11 @@ func verifyDistinctBases(mans []*kopia.ManifestEntry) error {
reasonKey := reason.ResourceOwner + reason.Service.String() + reason.Category.String()
if b, ok := reasons[reasonKey]; ok {
errs = multierror.Append(errs, errors.Errorf(
"multiple base snapshots source data for %s %s. IDs: %s, %s",
reason.Service, reason.Category, b, man.ID,
))
failed = true
errs.Add(clues.New("manifests have overlapping reasons").
WithClues(ctx).
With("other_manifest_id", b))
continue
}
@ -167,7 +169,11 @@ func verifyDistinctBases(mans []*kopia.ManifestEntry) error {
}
}
return errs.ErrorOrNil()
if failed {
return clues.New("multiple base snapshots qualify").WithClues(ctx)
}
return nil
}
// collectMetadata retrieves all metadata files associated with the manifest.
@ -191,7 +197,9 @@ func collectMetadata(
reason.Category,
true)
if err != nil {
return nil, errors.Wrapf(err, "building metadata path")
return nil, clues.
Wrap(err, "building metadata path").
WithAll("metadata_file", fn, "category", reason.Category)
}
paths = append(paths, p)

View File

@ -14,6 +14,7 @@ import (
"github.com/alcionai/corso/src/internal/model"
"github.com/alcionai/corso/src/internal/tester"
"github.com/alcionai/corso/src/pkg/backup"
"github.com/alcionai/corso/src/pkg/fault/mock"
"github.com/alcionai/corso/src/pkg/path"
)
@ -400,7 +401,10 @@ func (suite *OperationsManifestsUnitSuite) TestVerifyDistinctBases() {
}
for _, test := range table {
suite.T().Run(test.name, func(t *testing.T) {
err := verifyDistinctBases(test.mans)
ctx, flush := tester.NewContext()
defer flush()
err := verifyDistinctBases(ctx, test.mans, mock.NewAdder())
test.expect(t, err)
})
}
@ -646,6 +650,8 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() {
ctx, flush := tester.NewContext()
defer flush()
ma := mock.NewAdder()
mans, dcs, b, err := produceManifestsAndMetadata(
ctx,
&test.mr,
@ -653,7 +659,7 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() {
test.reasons,
tid,
test.getMeta,
)
ma)
test.assertErr(t, err)
test.assertB(t, b)
@ -683,3 +689,270 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() {
})
}
}
// ---------------------------------------------------------------------------
// older tests
// ---------------------------------------------------------------------------
// BackupManifestSuite aggregates the older unit tests covering base-snapshot
// manifest verification and metadata collection for backup operations.
type BackupManifestSuite struct {
	suite.Suite
}
// TestBackupManifestSuite registers and runs the BackupManifestSuite.
func TestBackupManifestSuite(t *testing.T) {
	// Bug fix: this previously ran new(BackupOpSuite), a copy-paste error
	// which meant the BackupManifestSuite methods were never executed
	// (and BackupOpSuite was run twice).
	suite.Run(t, new(BackupManifestSuite))
}
// TestBackupOperation_VerifyDistinctBases checks that verifyDistinctBases
// only errors when two complete manifests share the same Reason
// (resource owner + service + category); incomplete manifests are
// excluded from collision detection.
func (suite *BackupManifestSuite) TestBackupOperation_VerifyDistinctBases() {
	const user = "a-user"

	table := []struct {
		name     string
		input    []*kopia.ManifestEntry
		errCheck assert.ErrorAssertionFunc
	}{
		{
			// one manifest may legitimately cover multiple categories.
			name: "SingleManifestMultipleReasons",
			input: []*kopia.ManifestEntry{
				{
					Manifest: &snapshot.Manifest{
						ID: "id1",
					},
					Reasons: []kopia.Reason{
						{
							ResourceOwner: user,
							Service:       path.ExchangeService,
							Category:      path.EmailCategory,
						},
						{
							ResourceOwner: user,
							Service:       path.ExchangeService,
							Category:      path.EventsCategory,
						},
					},
				},
			},
			errCheck: assert.NoError,
		},
		{
			// distinct categories across manifests do not collide.
			name: "MultipleManifestsDistinctReason",
			input: []*kopia.ManifestEntry{
				{
					Manifest: &snapshot.Manifest{
						ID: "id1",
					},
					Reasons: []kopia.Reason{
						{
							ResourceOwner: user,
							Service:       path.ExchangeService,
							Category:      path.EmailCategory,
						},
					},
				},
				{
					Manifest: &snapshot.Manifest{
						ID: "id2",
					},
					Reasons: []kopia.Reason{
						{
							ResourceOwner: user,
							Service:       path.ExchangeService,
							Category:      path.EventsCategory,
						},
					},
				},
			},
			errCheck: assert.NoError,
		},
		{
			// two complete manifests for the same reason is a collision.
			name: "MultipleManifestsSameReason",
			input: []*kopia.ManifestEntry{
				{
					Manifest: &snapshot.Manifest{
						ID: "id1",
					},
					Reasons: []kopia.Reason{
						{
							ResourceOwner: user,
							Service:       path.ExchangeService,
							Category:      path.EmailCategory,
						},
					},
				},
				{
					Manifest: &snapshot.Manifest{
						ID: "id2",
					},
					Reasons: []kopia.Reason{
						{
							ResourceOwner: user,
							Service:       path.ExchangeService,
							Category:      path.EmailCategory,
						},
					},
				},
			},
			errCheck: assert.Error,
		},
		{
			// the incomplete (checkpoint) manifest is ignored, so no collision.
			name: "MultipleManifestsSameReasonOneIncomplete",
			input: []*kopia.ManifestEntry{
				{
					Manifest: &snapshot.Manifest{
						ID: "id1",
					},
					Reasons: []kopia.Reason{
						{
							ResourceOwner: user,
							Service:       path.ExchangeService,
							Category:      path.EmailCategory,
						},
					},
				},
				{
					Manifest: &snapshot.Manifest{
						ID:               "id2",
						IncompleteReason: "checkpoint",
					},
					Reasons: []kopia.Reason{
						{
							ResourceOwner: user,
							Service:       path.ExchangeService,
							Category:      path.EmailCategory,
						},
					},
				},
			},
			errCheck: assert.NoError,
		},
	}
	for _, test := range table {
		suite.T().Run(test.name, func(t *testing.T) {
			ctx, flush := tester.NewContext()
			defer flush()

			test.errCheck(t, verifyDistinctBases(ctx, test.input, mock.NewAdder()))
		})
	}
}
// TestBackupOperation_CollectMetadata verifies that collectMetadata
// requests one metadata path per (reason, file) pair from the restorer:
// the expected paths are the cross product of the manifest's Reasons and
// the input file names.
func (suite *BackupManifestSuite) TestBackupOperation_CollectMetadata() {
	var (
		tenant        = "a-tenant"
		resourceOwner = "a-user"
		fileNames     = []string{
			"delta",
			"paths",
		}

		// pre-built expected metadata paths for each category/file combo.
		emailDeltaPath = makeMetadataPath(
			suite.T(),
			tenant,
			path.ExchangeService,
			resourceOwner,
			path.EmailCategory,
			fileNames[0],
		)
		emailPathsPath = makeMetadataPath(
			suite.T(),
			tenant,
			path.ExchangeService,
			resourceOwner,
			path.EmailCategory,
			fileNames[1],
		)
		contactsDeltaPath = makeMetadataPath(
			suite.T(),
			tenant,
			path.ExchangeService,
			resourceOwner,
			path.ContactsCategory,
			fileNames[0],
		)
		contactsPathsPath = makeMetadataPath(
			suite.T(),
			tenant,
			path.ExchangeService,
			resourceOwner,
			path.ContactsCategory,
			fileNames[1],
		)
	)

	table := []struct {
		name       string
		inputMan   *kopia.ManifestEntry
		inputFiles []string
		expected   []path.Path
	}{
		{
			name: "SingleReasonSingleFile",
			inputMan: &kopia.ManifestEntry{
				Manifest: &snapshot.Manifest{},
				Reasons: []kopia.Reason{
					{
						ResourceOwner: resourceOwner,
						Service:       path.ExchangeService,
						Category:      path.EmailCategory,
					},
				},
			},
			inputFiles: []string{fileNames[0]},
			expected:   []path.Path{emailDeltaPath},
		},
		{
			name: "SingleReasonMultipleFiles",
			inputMan: &kopia.ManifestEntry{
				Manifest: &snapshot.Manifest{},
				Reasons: []kopia.Reason{
					{
						ResourceOwner: resourceOwner,
						Service:       path.ExchangeService,
						Category:      path.EmailCategory,
					},
				},
			},
			inputFiles: fileNames,
			expected:   []path.Path{emailDeltaPath, emailPathsPath},
		},
		{
			// 2 reasons x 2 files -> 4 expected paths.
			name: "MultipleReasonsMultipleFiles",
			inputMan: &kopia.ManifestEntry{
				Manifest: &snapshot.Manifest{},
				Reasons: []kopia.Reason{
					{
						ResourceOwner: resourceOwner,
						Service:       path.ExchangeService,
						Category:      path.EmailCategory,
					},
					{
						ResourceOwner: resourceOwner,
						Service:       path.ExchangeService,
						Category:      path.ContactsCategory,
					},
				},
			},
			inputFiles: fileNames,
			expected: []path.Path{
				emailDeltaPath,
				emailPathsPath,
				contactsDeltaPath,
				contactsPathsPath,
			},
		},
	}
	for _, test := range table {
		suite.T().Run(test.name, func(t *testing.T) {
			ctx, flush := tester.NewContext()
			defer flush()

			// mockRestorer records the paths it is asked to restore;
			// only the requested paths are asserted, not the returned data.
			mr := &mockRestorer{}

			_, err := collectMetadata(ctx, mr, test.inputMan, test.inputFiles, tenant)
			assert.NoError(t, err)

			checkPaths(t, test.expected, mr.gotPaths)
		})
	}
}

View File

@ -3,6 +3,8 @@ package operations
import (
"context"
"fmt"
"runtime/debug"
"sort"
"time"
"github.com/alcionai/clues"
@ -106,35 +108,89 @@ type restorer interface {
// Run begins a synchronous restore operation.
func (op *RestoreOperation) Run(ctx context.Context) (restoreDetails *details.Details, err error) {
ctx, end := D.Span(ctx, "operations:restore:run")
defer end()
defer func() {
if r := recover(); r != nil {
var rerr error
if re, ok := r.(error); ok {
rerr = re
} else if re, ok := r.(string); ok {
rerr = clues.New(re)
} else {
rerr = clues.New(fmt.Sprintf("%v", r))
}
err = clues.Wrap(rerr, "panic recovery").
WithClues(ctx).
With("stacktrace", string(debug.Stack()))
logger.Ctx(ctx).
With("err", err).
Errorw("backup panic", clues.InErr(err).Slice()...)
}
}()
var (
opStats = restoreStats{
bytesRead: &stats.ByteCounter{},
restoreID: uuid.NewString(),
}
startTime = time.Now()
start = time.Now()
detailsStore = streamstore.New(op.kopia, op.account.ID(), op.Selectors.PathService())
)
// -----
// Setup
// -----
ctx, end := D.Span(ctx, "operations:restore:run")
defer func() {
end()
// wait for the progress display to clean up
observe.Complete()
err = op.persistResults(ctx, startTime, &opStats)
if err != nil {
return
}
}()
detailsStore := streamstore.New(op.kopia, op.account.ID(), op.Selectors.PathService())
ctx = clues.AddAll(
ctx,
"tenant_id", op.account.ID(), // TODO: pii
"backup_id", op.BackupID,
"service", op.Selectors.Service)
// -----
// Execution
// -----
deets, err := op.do(ctx, &opStats, detailsStore, start)
if err != nil {
// No return here! We continue down to persistResults, even in case of failure.
logger.Ctx(ctx).
With("err", err).
Errorw("doing restore", clues.InErr(err).Slice()...)
op.Errors.Fail(errors.Wrap(err, "doing restore"))
opStats.readErr = op.Errors.Err()
}
// -----
// Persistence
// -----
err = op.persistResults(ctx, start, &opStats)
if err != nil {
op.Errors.Fail(errors.Wrap(err, "persisting restore results"))
opStats.writeErr = op.Errors.Err()
return nil, op.Errors.Err()
}
logger.Ctx(ctx).Infow("completed restore", "results", op.Results)
return deets, nil
}
func (op *RestoreOperation) do(
ctx context.Context,
opStats *restoreStats,
detailsStore detailsReader,
start time.Time,
) (*details.Details, error) {
bup, deets, err := getBackupAndDetailsFromID(
ctx,
op.BackupID,
@ -142,30 +198,28 @@ func (op *RestoreOperation) Run(ctx context.Context) (restoreDetails *details.De
detailsStore,
)
if err != nil {
opStats.readErr = errors.Wrap(err, "restore")
return nil, opStats.readErr
return nil, errors.Wrap(err, "getting backup and details")
}
ctx = clues.Add(ctx, "resource_owner", bup.Selector.DiscreteOwner)
paths, err := formatDetailsForRestoration(ctx, op.Selectors, deets)
if err != nil {
return nil, errors.Wrap(err, "formatting paths from details")
}
ctx = clues.AddAll(
ctx,
"resource_owner", bup.Selector.DiscreteOwner,
"details_paths", len(paths))
op.bus.Event(
ctx,
events.RestoreStart,
map[string]any{
events.StartTime: startTime,
events.StartTime: start,
events.BackupID: op.BackupID,
events.BackupCreateTime: bup.CreationTime,
events.RestoreID: opStats.restoreID,
},
)
paths, err := formatDetailsForRestoration(ctx, op.Selectors, deets)
if err != nil {
opStats.readErr = err
return nil, err
}
ctx = clues.Add(ctx, "details_paths", len(paths))
})
observe.Message(ctx, observe.Safe(fmt.Sprintf("Discovered %d items in backup %s to restore", len(paths), op.BackupID)))
@ -175,39 +229,45 @@ func (op *RestoreOperation) Run(ctx context.Context) (restoreDetails *details.De
dcs, err := op.kopia.RestoreMultipleItems(ctx, bup.SnapshotID, paths, opStats.bytesRead)
if err != nil {
opStats.readErr = errors.Wrap(err, "retrieving service data")
return nil, opStats.readErr
return nil, errors.Wrap(err, "retrieving collections from repository")
}
kopiaComplete <- struct{}{}
ctx = clues.Add(ctx, "collections", len(dcs))
ctx = clues.Add(ctx, "coll_count", len(dcs))
// should always be 1, since backups are 1:1 with resourceOwners.
opStats.resourceCount = 1
opStats.cs = dcs
opStats.resourceCount = len(data.ResourceOwnerSet(dcs))
gc, err := connectToM365(ctx, op.Selectors, op.account)
if err != nil {
opStats.readErr = errors.Wrap(err, "connecting to M365")
return nil, opStats.readErr
return nil, errors.Wrap(err, "connecting to M365")
}
restoreComplete, closer := observe.MessageWithCompletion(ctx, observe.Safe("Restoring data"))
defer closer()
defer close(restoreComplete)
restoreDetails, err = gc.RestoreDataCollections(
restoreDetails, err := gc.RestoreDataCollections(
ctx,
bup.Version,
op.account,
op.Selectors,
op.Destination,
op.Options,
dcs)
if err != nil {
opStats.writeErr = errors.Wrap(err, "restoring service data")
return nil, opStats.writeErr
return nil, errors.Wrap(err, "restoring collections")
}
restoreComplete <- struct{}{}
opStats.gc = gc.AwaitStatus()
// TODO(keepers): remove when fault.Errors handles all iterable error aggregation.
if opStats.gc.ErrorCount > 0 {
return nil, opStats.gc.Err
}
logger.Ctx(ctx).Debug(gc.PrintableStatus())
@ -236,14 +296,20 @@ func (op *RestoreOperation) persistResults(
opStats.writeErr)
}
if opStats.readErr == nil && opStats.writeErr == nil && opStats.gc.Successful == 0 {
op.Results.BytesRead = opStats.bytesRead.NumBytes
op.Results.ItemsRead = len(opStats.cs) // TODO: file count, not collection count
op.Results.ResourceOwners = opStats.resourceCount
if opStats.gc == nil {
op.Status = Failed
return errors.New("restoration never completed")
}
if opStats.gc.Successful == 0 {
op.Status = NoData
}
op.Results.BytesRead = opStats.bytesRead.NumBytes
op.Results.ItemsRead = len(opStats.cs) // TODO: file count, not collection count
op.Results.ItemsWritten = opStats.gc.Successful
op.Results.ResourceOwners = opStats.resourceCount
dur := op.Results.CompletedAt.Sub(op.Results.StartedAt)
@ -300,6 +366,17 @@ func formatDetailsForRestoration(
paths[i] = p
}
// TODO(meain): Move this to onedrive specific component, but as
// of now the paths can technically be from multiple services
// This sort is done primarily to order `.meta` files after `.data`
// files. This is only a necessity for OneDrive as we are storing
// metadata for files/folders in separate meta files and we need the
// data to be restored before we can restore the metadata.
sort.Slice(paths, func(i, j int) bool {
return paths[i].String() < paths[j].String()
})
if errs != nil {
return nil, errs
}

View File

@ -14,6 +14,8 @@ import (
"github.com/alcionai/corso/src/pkg/selectors"
)
const Version = 1
// Backup represents the result of a backup operation
type Backup struct {
model.BaseModel
@ -32,6 +34,9 @@ type Backup struct {
// Selector used in this operation
Selector selectors.Selector `json:"selectors"`
// Version represents the version of the backup format
Version int `json:"version"`
// Errors contains all errors aggregated during a backup operation.
Errors fault.ErrorsData `json:"errors"`
@ -67,6 +72,7 @@ func New(
Errors: errs.Data(),
ReadWrites: rw,
StartAndEndTime: se,
Version: Version,
}
}

View File

@ -6,10 +6,11 @@ import (
// Options holds the optional configurations for a process
type Options struct {
Collision CollisionPolicy `json:"-"`
DisableMetrics bool `json:"disableMetrics"`
FailFast bool `json:"failFast"`
ToggleFeatures Toggles `json:"ToggleFeatures"`
Collision CollisionPolicy `json:"-"`
DisableMetrics bool `json:"disableMetrics"`
FailFast bool `json:"failFast"`
RestorePermissions bool `json:"restorePermissions"`
ToggleFeatures Toggles `json:"ToggleFeatures"`
}
// Defaults provides an Options with the default values set.
@ -74,4 +75,9 @@ type Toggles struct {
// DisableIncrementals prevents backups from using incremental lookups,
// forcing a new, complete backup of all data regardless of prior state.
DisableIncrementals bool `json:"exchangeIncrementals,omitempty"`
// EnablePermissionsBackup is used to enable backups of item
// permissions. Permission metadata increases graph api call count,
// so disabling their retrieval when not needed is advised.
EnablePermissionsBackup bool `json:"enablePermissionsBackup,omitempty"`
}

View File

@ -87,16 +87,19 @@ func (e *Errors) Fail(err error) *Errors {
// setErr handles setting errors.err. Sync locking gets
// handled upstream of this call.
func (e *Errors) setErr(err error) *Errors {
if e.err != nil {
return e.addErr(err)
if e.err == nil {
e.err = err
return e
}
e.err = err
e.errs = append(e.errs, err)
return e
}
// Adder accumulates recoverable errors: Add appends the given error to
// the receiver's iterated-error set and returns the receiver so calls
// can be chained.
// NOTE(review): the original TODO ("introduce Adder interface") sits
// directly above this definition even though the interface now exists —
// confirm whether call sites have been migrated to accept an Adder.
type Adder interface {
	Add(err error) *Errors
}
// Add appends the error to the slice of recoverable and
// iterated errors (ie: errors.errs). If failFast is true,

View File

@ -73,6 +73,8 @@ func (suite *FaultErrorsUnitSuite) TestErr() {
suite.T().Run(test.name, func(t *testing.T) {
n := fault.New(test.failFast)
require.NotNil(t, n)
require.NoError(t, n.Err())
require.Empty(t, n.Errs())
e := n.Fail(test.fail)
require.NotNil(t, e)
@ -90,6 +92,8 @@ func (suite *FaultErrorsUnitSuite) TestFail() {
n := fault.New(false)
require.NotNil(t, n)
require.NoError(t, n.Err())
require.Empty(t, n.Errs())
n.Fail(assert.AnError)
assert.Error(t, n.Err())

Some files were not shown because too many files have changed in this diff Show More