Merge branch 'main' of https://github.com/alcionai/corso into nestedAttachment
commit cb621bb359

CHANGELOG.md (12 changes)
@@ -7,6 +7,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 ## [Unreleased] (beta)
 
+## [v0.9.0] (beta) - 2023-06-05
+
 ### Added
 - Added ProtectedResourceName to the backup list json output. ProtectedResourceName holds either a UPN or a WebURL, depending on the resource type.
 - Rework base selection logic for incremental backups so it's more likely to find a valid base.
@@ -20,8 +22,6 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 ### Changed
 - Do not display all the items that we restored at the end if there are more than 15. You can override this with `--verbose`.
 
 ### Known Issues
 
 ## [v0.8.0] (beta) - 2023-05-15
 
 ### Added
@@ -170,7 +170,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 ### Changed
 - Item.Attachments are disabled from being restored for the patching of ([#2353](https://github.com/alcionai/corso/issues/2353))
 - BetaClient introduced. Enables Corso to be able to interact with SharePoint Page objects. Package located `/internal/connector/graph/betasdk`
 - Handle case where user's drive has not been initialized
 - Inline attachments (e.g. copy/paste) are discovered and backed up correctly ([#2163](https://github.com/alcionai/corso/issues/2163))
 - Guest and External users (for cloud accounts) and non-on-premise users (for systems that use on-prem AD syncs) are now excluded from backup and restore operations.
@@ -207,7 +207,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 ### Added
 
 - Incremental backup support for Exchange ([#1777](https://github.com/alcionai/corso/issues/1777)). This is currently enabled by specifying the `--enable-incrementals`
 with the `backup create` command. This functionality will be enabled by default in an upcoming release.
 - Folder entries in backup details now include size and modified time for the hierarchy ([#1896](https://github.com/alcionai/corso/issues/1896))
 
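The entry above implies a concrete invocation shape; as a hedged sketch, only `backup create` and `--enable-incrementals` come from the changelog, while the mailbox flag and address are illustrative:

    corso backup create exchange --mailbox alice@example.com --enable-incrementals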
@@ -290,7 +290,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 - Miscellaneous
   - Optional usage statistics reporting ([RM-35](https://github.com/alcionai/corso-roadmap/issues/35))
 
-[Unreleased]: https://github.com/alcionai/corso/compare/v0.7.0...HEAD
+[Unreleased]: https://github.com/alcionai/corso/compare/v0.9.0...HEAD
+[v0.9.0]: https://github.com/alcionai/corso/compare/v0.8.1...v0.9.0
+[v0.8.0]: https://github.com/alcionai/corso/compare/v0.7.1...v0.8.0
 [v0.7.0]: https://github.com/alcionai/corso/compare/v0.6.1...v0.7.0
 [v0.6.1]: https://github.com/alcionai/corso/compare/v0.5.0...v0.6.1
 [v0.5.0]: https://github.com/alcionai/corso/compare/v0.4.0...v0.5.0
@@ -51,7 +51,6 @@ type dataBuilderFunc func(id, now, subject, body string) []byte
 func generateAndRestoreItems(
     ctx context.Context,
     gc *connector.GraphConnector,
-    acct account.Account,
     service path.ServiceType,
     cat path.CategoryType,
     sel selectors.Selector,
@@ -99,7 +98,7 @@ func generateAndRestoreItems(
 
     print.Infof(ctx, "Generating %d %s items in %s\n", howMany, cat, Destination)
 
-    return gc.ConsumeRestoreCollections(ctx, version.Backup, acct, sel, dest, opts, dataColls, errs)
+    return gc.ConsumeRestoreCollections(ctx, version.Backup, sel, dest, opts, dataColls, errs)
 }
 
 // ------------------------------------------------------------------------------------------
@@ -188,7 +187,7 @@ func buildCollections(
             mc.Data[i] = c.items[i].data
         }
 
-        collections = append(collections, data.NotFoundRestoreCollection{Collection: mc})
+        collections = append(collections, data.NoFetchRestoreCollection{Collection: mc})
     }
 
     return collections, nil
@@ -233,14 +232,14 @@ func generateAndRestoreDriveItems(
 
     switch service {
     case path.SharePointService:
-        d, err := gc.Service.Client().Sites().BySiteId(resourceOwner).Drive().Get(ctx, nil)
+        d, err := gc.AC.Stable.Client().Sites().BySiteId(resourceOwner).Drive().Get(ctx, nil)
         if err != nil {
             return nil, clues.Wrap(err, "getting site's default drive")
         }
 
        driveID = ptr.Val(d.GetId())
    default:
-        d, err := gc.Service.Client().Users().ByUserId(resourceOwner).Drive().Get(ctx, nil)
+        d, err := gc.AC.Stable.Client().Users().ByUserId(resourceOwner).Drive().Get(ctx, nil)
         if err != nil {
             return nil, clues.Wrap(err, "getting user's default drive")
         }
@@ -390,7 +389,6 @@ func generateAndRestoreDriveItems(
     }
 
     config := connector.ConfigInfo{
-        Acct:     acct,
         Opts:     opts,
         Resource: connector.Users,
         Service:  service,
@@ -407,5 +405,5 @@ func generateAndRestoreDriveItems(
         return nil, err
     }
 
-    return gc.ConsumeRestoreCollections(ctx, version.Backup, acct, sel, dest, opts, collections, errs)
+    return gc.ConsumeRestoreCollections(ctx, version.Backup, sel, dest, opts, collections, errs)
 }
@@ -52,7 +52,7 @@ func handleExchangeEmailFactory(cmd *cobra.Command, args []string) error {
         return nil
     }
 
-    gc, acct, _, err := getGCAndVerifyResourceOwner(ctx, connector.Users, User)
+    gc, _, _, err := getGCAndVerifyResourceOwner(ctx, connector.Users, User)
     if err != nil {
         return Only(ctx, err)
     }
@@ -60,7 +60,6 @@ func handleExchangeEmailFactory(cmd *cobra.Command, args []string) error {
     deets, err := generateAndRestoreItems(
         ctx,
         gc,
-        acct,
         service,
         category,
         selectors.NewExchangeRestore([]string{User}).Selector,
@@ -99,7 +98,7 @@ func handleExchangeCalendarEventFactory(cmd *cobra.Command, args []string) error
         return nil
     }
 
-    gc, acct, _, err := getGCAndVerifyResourceOwner(ctx, connector.Users, User)
+    gc, _, _, err := getGCAndVerifyResourceOwner(ctx, connector.Users, User)
     if err != nil {
         return Only(ctx, err)
     }
@@ -107,7 +106,6 @@ func handleExchangeCalendarEventFactory(cmd *cobra.Command, args []string) error
     deets, err := generateAndRestoreItems(
         ctx,
         gc,
-        acct,
         service,
         category,
         selectors.NewExchangeRestore([]string{User}).Selector,
@@ -145,7 +143,7 @@ func handleExchangeContactFactory(cmd *cobra.Command, args []string) error {
         return nil
     }
 
-    gc, acct, _, err := getGCAndVerifyResourceOwner(ctx, connector.Users, User)
+    gc, _, _, err := getGCAndVerifyResourceOwner(ctx, connector.Users, User)
     if err != nil {
         return Only(ctx, err)
     }
@@ -153,7 +151,6 @@ func handleExchangeContactFactory(cmd *cobra.Command, args []string) error {
     deets, err := generateAndRestoreItems(
         ctx,
         gc,
-        acct,
         service,
         category,
         selectors.NewExchangeRestore([]string{User}).Selector,
@@ -71,16 +71,14 @@ func handleOneDriveCmd(cmd *cobra.Command, args []string) error {
         AzureTenantID: tid,
     }
 
-    // todo: swap to drive api client, when finished.
-    adpt, err := graph.CreateAdapter(tid, creds.AzureClientID, creds.AzureClientSecret)
-    if err != nil {
-        return Only(ctx, clues.Wrap(err, "creating graph adapter"))
-    }
-
-    svc := graph.NewService(adpt)
     gr := graph.NewNoTimeoutHTTPWrapper()
 
-    err = runDisplayM365JSON(ctx, svc, gr, creds, user, m365ID)
+    ac, err := api.NewClient(creds)
+    if err != nil {
+        return Only(ctx, clues.Wrap(err, "getting api client"))
+    }
+
+    err = runDisplayM365JSON(ctx, ac, gr, creds, user, m365ID)
     if err != nil {
         cmd.SilenceUsage = true
         cmd.SilenceErrors = true
@@ -107,12 +105,12 @@ func (i itemPrintable) MinimumPrintable() any {
 
 func runDisplayM365JSON(
     ctx context.Context,
-    srv graph.Servicer,
+    ac api.Client,
     gr graph.Requester,
     creds account.M365Config,
-    user, itemID string,
+    userID, itemID string,
 ) error {
-    drive, err := api.GetUsersDrive(ctx, srv, user)
+    drive, err := ac.Users().GetDefaultDrive(ctx, userID)
     if err != nil {
         return err
     }
@@ -121,7 +119,7 @@ func runDisplayM365JSON(
 
     it := itemPrintable{}
 
-    item, err := api.GetDriveItem(ctx, srv, driveID, itemID)
+    item, err := ac.Drives().GetItem(ctx, driveID, itemID)
     if err != nil {
         return err
     }
@@ -148,7 +146,7 @@ func runDisplayM365JSON(
         return err
     }
 
-    perms, err := api.GetItemPermission(ctx, srv, driveID, itemID)
+    perms, err := ac.Drives().GetItemPermission(ctx, driveID, itemID)
     if err != nil {
         return err
     }
@@ -8,7 +8,7 @@ require (
     github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0
     github.com/alcionai/clues v0.0.0-20230406223931-f48777f4773c
     github.com/armon/go-metrics v0.4.1
-    github.com/aws/aws-sdk-go v1.44.273
+    github.com/aws/aws-sdk-go v1.44.275
     github.com/aws/aws-xray-sdk-go v1.8.1
     github.com/cenkalti/backoff/v4 v4.2.1
     github.com/google/uuid v1.3.0
@@ -19,7 +19,7 @@ require (
     github.com/microsoft/kiota-http-go v1.0.0
     github.com/microsoft/kiota-serialization-form-go v1.0.0
     github.com/microsoft/kiota-serialization-json-go v1.0.1
-    github.com/microsoftgraph/msgraph-sdk-go v1.3.0
+    github.com/microsoftgraph/msgraph-sdk-go v1.4.0
     github.com/microsoftgraph/msgraph-sdk-go-core v1.0.0
     github.com/pkg/errors v0.9.1
     github.com/rudderlabs/analytics-go v3.3.3+incompatible
@@ -34,7 +34,7 @@ require (
     go.uber.org/zap v1.24.0
     golang.org/x/exp v0.0.0-20230213192124-5e25df0256eb
     golang.org/x/time v0.3.0
-    golang.org/x/tools v0.9.2
+    golang.org/x/tools v0.9.3
 )
 
 require (
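The three version bumps above follow the standard Go module workflow; reproducing them would look roughly like this (standard Go tooling, not commands recorded in this commit):

    go get github.com/aws/aws-sdk-go@v1.44.275
    go get github.com/microsoftgraph/msgraph-sdk-go@v1.4.0
    go get golang.org/x/tools@v0.9.3
    go mod tidy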
src/go.sum (12 changes)
@@ -66,8 +66,8 @@ github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY
 github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
 github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
 github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
-github.com/aws/aws-sdk-go v1.44.273 h1:CX8O0gK+cGrgUyv7bgJ6QQP9mQg7u5mweHdNzULH47c=
-github.com/aws/aws-sdk-go v1.44.273/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
+github.com/aws/aws-sdk-go v1.44.275 h1:VqRULgqrigvQLll4e4hXuc568EQAtZQ6jmBzLlQHzSI=
+github.com/aws/aws-sdk-go v1.44.275/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
 github.com/aws/aws-xray-sdk-go v1.8.1 h1:O4pXV+hnCskaamGsZnFpzHyAmgPGusBMN6i7nnsy0Fo=
 github.com/aws/aws-xray-sdk-go v1.8.1/go.mod h1:wMmVYzej3sykAttNBkXQHK/+clAPWTOrPiajEk7Cp3A=
 github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
@@ -285,8 +285,8 @@ github.com/microsoft/kiota-serialization-json-go v1.0.1 h1:nI3pLpqep7L6BLJPT7teC
 github.com/microsoft/kiota-serialization-json-go v1.0.1/go.mod h1:KS+eFtwtJGsosXRQr/Qilep7ZD1MRF+VtO7LnL7Oyuw=
 github.com/microsoft/kiota-serialization-text-go v1.0.0 h1:XOaRhAXy+g8ZVpcq7x7a0jlETWnWrEum0RhmbYrTFnA=
 github.com/microsoft/kiota-serialization-text-go v1.0.0/go.mod h1:sM1/C6ecnQ7IquQOGUrUldaO5wj+9+v7G2W3sQ3fy6M=
-github.com/microsoftgraph/msgraph-sdk-go v1.3.0 h1:SjMxt7Kg4l0Zxp2mLDmMrIiC6Um5rvKYwck6x6AVqP4=
-github.com/microsoftgraph/msgraph-sdk-go v1.3.0/go.mod h1:U/B+zwNy0auluG4ZPhMWKyxHtqnxRqbg9RyerKXQqXw=
+github.com/microsoftgraph/msgraph-sdk-go v1.4.0 h1:ibNwMDEZ6HikA9BVXu+TljCzCiE+yFsD6wLpJbTc1tc=
+github.com/microsoftgraph/msgraph-sdk-go v1.4.0/go.mod h1:JIDL1xENx92B60NjO2ACyqGeKvtYkdl9rirgajIgryw=
 github.com/microsoftgraph/msgraph-sdk-go-core v1.0.0 h1:7NWTfyXvOjoizW7PmxNp3+8wCKPgpODs/D1cUZ3fkAY=
 github.com/microsoftgraph/msgraph-sdk-go-core v1.0.0/go.mod h1:tQb4q3YMIj2dWhhXhQSJ4ELpol931ANKzHSYK5kX1qE=
 github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
@@ -672,8 +672,8 @@ golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4f
 golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
 golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
-golang.org/x/tools v0.9.2 h1:UXbndbirwCAx6TULftIfie/ygDNCwxEie+IiNP1IcNc=
-golang.org/x/tools v0.9.2/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc=
+golang.org/x/tools v0.9.3 h1:Gn1I8+64MsuTb/HpH+LmQtNas23LhUVr3rYZ0eKuaMM=
+golang.org/x/tools v0.9.3/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -3,7 +3,6 @@ package connector
 import (
     "context"
-    "strings"
     "sync"
 
     "github.com/alcionai/clues"
@@ -17,7 +16,6 @@ import (
     "github.com/alcionai/corso/src/internal/connector/support"
     "github.com/alcionai/corso/src/internal/data"
     "github.com/alcionai/corso/src/internal/diagnostics"
-    "github.com/alcionai/corso/src/pkg/account"
     "github.com/alcionai/corso/src/pkg/backup/details"
     "github.com/alcionai/corso/src/pkg/control"
     "github.com/alcionai/corso/src/pkg/fault"
@@ -27,13 +25,6 @@ import (
     "github.com/alcionai/corso/src/pkg/selectors"
 )
 
-const (
-    // copyBufferSize is used for chunked upload
-    // Microsoft recommends 5-10MB buffers
-    // https://docs.microsoft.com/en-us/graph/api/driveitem-createuploadsession?view=graph-rest-1.0#best-practices
-    copyBufferSize = 5 * 1024 * 1024
-)
-
 // ---------------------------------------------------------------------------
 // Data Collections
 // ---------------------------------------------------------------------------
@@ -51,7 +42,7 @@ func (gc *GraphConnector) ProduceBackupCollections(
     lastBackupVersion int,
     ctrlOpts control.Options,
     errs *fault.Bus,
-) ([]data.BackupCollection, prefixmatcher.StringSetReader, error) {
+) ([]data.BackupCollection, prefixmatcher.StringSetReader, bool, error) {
     ctx, end := diagnostics.Span(
         ctx,
         "gc:produceBackupCollections",
@@ -66,25 +57,26 @@ func (gc *GraphConnector) ProduceBackupCollections(
 
     err := verifyBackupInputs(sels, gc.IDNameLookup.IDs())
     if err != nil {
-        return nil, nil, clues.Stack(err).WithClues(ctx)
+        return nil, nil, false, clues.Stack(err).WithClues(ctx)
     }
 
     serviceEnabled, canMakeDeltaQueries, err := checkServiceEnabled(
         ctx,
-        gc.Discovery.Users(),
+        gc.AC.Users(),
         path.ServiceType(sels.Service),
         sels.DiscreteOwner)
     if err != nil {
-        return nil, nil, err
+        return nil, nil, false, err
     }
 
     if !serviceEnabled {
-        return []data.BackupCollection{}, nil, nil
+        return []data.BackupCollection{}, nil, false, nil
     }
 
     var (
-        colls []data.BackupCollection
-        ssmb  *prefixmatcher.StringSetMatcher
+        colls                []data.BackupCollection
+        ssmb                 *prefixmatcher.StringSetMatcher
+        canUsePreviousBackup bool
     )
 
     if !canMakeDeltaQueries {
@@ -95,9 +87,9 @@ func (gc *GraphConnector) ProduceBackupCollections(
 
     switch sels.Service {
     case selectors.ServiceExchange:
-        colls, ssmb, err = exchange.DataCollections(
+        colls, ssmb, canUsePreviousBackup, err = exchange.DataCollections(
             ctx,
-            gc.Discovery,
+            gc.AC,
             sels,
             gc.credentials.AzureTenantID,
             owner,
@@ -106,44 +98,42 @@ func (gc *GraphConnector) ProduceBackupCollections(
             ctrlOpts,
             errs)
         if err != nil {
-            return nil, nil, err
+            return nil, nil, false, err
         }
 
     case selectors.ServiceOneDrive:
-        colls, ssmb, err = onedrive.DataCollections(
+        colls, ssmb, canUsePreviousBackup, err = onedrive.DataCollections(
             ctx,
+            gc.AC,
             sels,
             owner,
             metadata,
             lastBackupVersion,
             gc.credentials.AzureTenantID,
             gc.itemClient,
-            gc.Service,
             gc.UpdateStatus,
             ctrlOpts,
             errs)
         if err != nil {
-            return nil, nil, err
+            return nil, nil, false, err
         }
 
     case selectors.ServiceSharePoint:
-        colls, ssmb, err = sharepoint.DataCollections(
+        colls, ssmb, canUsePreviousBackup, err = sharepoint.DataCollections(
             ctx,
             gc.itemClient,
+            gc.AC,
             sels,
             owner,
             metadata,
             gc.credentials,
-            gc.Service,
             gc,
             ctrlOpts,
             errs)
         if err != nil {
-            return nil, nil, err
+            return nil, nil, false, err
         }
 
     default:
-        return nil, nil, clues.Wrap(clues.New(sels.Service.String()), "service not supported").WithClues(ctx)
+        return nil, nil, false, clues.Wrap(clues.New(sels.Service.String()), "service not supported").WithClues(ctx)
     }
 
     for _, c := range colls {
@@ -158,7 +148,7 @@ func (gc *GraphConnector) ProduceBackupCollections(
         }
     }
 
-    return colls, ssmb, nil
+    return colls, ssmb, canUsePreviousBackup, nil
 }
 // IsBackupRunnable verifies that the users provided has the services enabled and
@@ -174,7 +164,7 @@ func (gc *GraphConnector) IsBackupRunnable(
         return true, nil
     }
 
-    info, err := gc.Discovery.Users().GetInfo(ctx, resourceOwner)
+    info, err := gc.AC.Users().GetInfo(ctx, resourceOwner)
     if err != nil {
         return false, err
     }
@@ -242,7 +232,6 @@ func checkServiceEnabled(
 func (gc *GraphConnector) ConsumeRestoreCollections(
     ctx context.Context,
     backupVersion int,
-    acct account.Account,
     sels selectors.Selector,
     dest control.RestoreDestination,
     opts control.Options,
@@ -257,52 +246,31 @@ func (gc *GraphConnector) ConsumeRestoreCollections(
     var (
         status *support.ConnectorOperationStatus
         deets  = &details.Builder{}
         err    error
     )
 
-    creds, err := acct.M365Config()
-    if err != nil {
-        return nil, clues.Wrap(err, "malformed azure credentials")
-    }
-
     // Buffer pool for uploads
     pool := sync.Pool{
         New: func() interface{} {
             b := make([]byte, copyBufferSize)
             return &b
         },
     }
 
     switch sels.Service {
     case selectors.ServiceExchange:
-        status, err = exchange.RestoreCollections(ctx,
-            creds,
-            gc.Discovery,
-            gc.Service,
-            dest,
-            dcs,
-            deets,
-            errs)
+        status, err = exchange.RestoreCollections(ctx, gc.AC, dest, dcs, deets, errs)
     case selectors.ServiceOneDrive:
-        status, err = onedrive.RestoreCollections(ctx,
-            creds,
+        status, err = onedrive.RestoreCollections(
+            ctx,
+            onedrive.NewRestoreHandler(gc.AC),
             backupVersion,
-            gc.Service,
             dest,
             opts,
             dcs,
             deets,
             &pool,
             errs)
     case selectors.ServiceSharePoint:
-        status, err = sharepoint.RestoreCollections(ctx,
+        status, err = sharepoint.RestoreCollections(
+            ctx,
             backupVersion,
-            creds,
-            gc.Service,
+            gc.AC,
             dest,
             opts,
             dcs,
             deets,
             &pool,
             errs)
     default:
         err = clues.Wrap(clues.New(sels.Service.String()), "service not supported")
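The sync.Pool kept as context above amortizes the 5MB upload buffers across restore calls instead of allocating one per item. Reduced to a standalone sketch (illustrative only, not Corso's exact code):

    package main

    import (
        "fmt"
        "sync"
    )

    const copyBufferSize = 5 * 1024 * 1024 // 5MB, per Microsoft's upload-session guidance

    // pool hands out reusable 5MB buffers; storing pointers avoids an
    // extra allocation on every Get/Put round trip.
    var pool = sync.Pool{
        New: func() interface{} {
            b := make([]byte, copyBufferSize)
            return &b
        },
    }

    func main() {
        buf := pool.Get().(*[]byte) // allocates only when the pool is empty
        defer pool.Put(buf)         // return the buffer for the next upload

        fmt.Println(len(*buf))
    }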
@@ -12,7 +12,6 @@ import (
 
     inMock "github.com/alcionai/corso/src/internal/common/idname/mock"
     "github.com/alcionai/corso/src/internal/connector/exchange"
     "github.com/alcionai/corso/src/internal/connector/graph"
     "github.com/alcionai/corso/src/internal/connector/sharepoint"
     "github.com/alcionai/corso/src/internal/tester"
     "github.com/alcionai/corso/src/internal/version"
@@ -128,7 +127,7 @@ func (suite *DataCollectionIntgSuite) TestExchangeDataCollection() {
     ctrlOpts := control.Defaults()
     ctrlOpts.ToggleFeatures.DisableDelta = !canMakeDeltaQueries
 
-    collections, excludes, err := exchange.DataCollections(
+    collections, excludes, canUsePreviousBackup, err := exchange.DataCollections(
         ctx,
         suite.ac,
         sel,
@@ -139,6 +138,7 @@ func (suite *DataCollectionIntgSuite) TestExchangeDataCollection() {
         ctrlOpts,
         fault.New(true))
     require.NoError(t, err, clues.ToCore(err))
+    assert.True(t, canUsePreviousBackup, "can use previous backup")
     assert.True(t, excludes.Empty())
 
     for range collections {
@@ -238,7 +238,7 @@ func (suite *DataCollectionIntgSuite) TestDataCollections_invalidResourceOwner()
         ctx, flush := tester.NewContext(t)
         defer flush()
 
-        collections, excludes, err := connector.ProduceBackupCollections(
+        collections, excludes, canUsePreviousBackup, err := connector.ProduceBackupCollections(
             ctx,
             test.getSelector(t),
             test.getSelector(t),
@@ -247,6 +247,7 @@ func (suite *DataCollectionIntgSuite) TestDataCollections_invalidResourceOwner()
             control.Defaults(),
             fault.New(true))
         assert.Error(t, err, clues.ToCore(err))
+        assert.False(t, canUsePreviousBackup, "can use previous backup")
         assert.Empty(t, collections)
         assert.Nil(t, excludes)
     })
@@ -296,18 +297,18 @@ func (suite *DataCollectionIntgSuite) TestSharePointDataCollection() {
 
     sel := test.getSelector()
 
-    collections, excludes, err := sharepoint.DataCollections(
+    collections, excludes, canUsePreviousBackup, err := sharepoint.DataCollections(
         ctx,
         graph.NewNoTimeoutHTTPWrapper(),
+        suite.ac,
         sel,
         sel,
         nil,
         connector.credentials,
-        connector.Service,
         connector,
         control.Defaults(),
         fault.New(true))
     require.NoError(t, err, clues.ToCore(err))
+    assert.True(t, canUsePreviousBackup, "can use previous backup")
     // Not expecting excludes as this isn't an incremental backup.
     assert.True(t, excludes.Empty())
 
@@ -383,7 +384,7 @@ func (suite *SPCollectionIntgSuite) TestCreateSharePointCollection_Libraries() {
 
     sel.SetDiscreteOwnerIDName(id, name)
 
-    cols, excludes, err := gc.ProduceBackupCollections(
+    cols, excludes, canUsePreviousBackup, err := gc.ProduceBackupCollections(
         ctx,
         inMock.NewProvider(id, name),
         sel.Selector,
@@ -392,6 +393,7 @@ func (suite *SPCollectionIntgSuite) TestCreateSharePointCollection_Libraries() {
         control.Defaults(),
         fault.New(true))
     require.NoError(t, err, clues.ToCore(err))
+    assert.True(t, canUsePreviousBackup, "can use previous backup")
     require.Len(t, cols, 2) // 1 collection, 1 path prefix directory to ensure the root path exists.
     // No excludes yet as this isn't an incremental backup.
     assert.True(t, excludes.Empty())
@@ -429,7 +431,7 @@ func (suite *SPCollectionIntgSuite) TestCreateSharePointCollection_Lists() {
 
     sel.SetDiscreteOwnerIDName(id, name)
 
-    cols, excludes, err := gc.ProduceBackupCollections(
+    cols, excludes, canUsePreviousBackup, err := gc.ProduceBackupCollections(
         ctx,
         inMock.NewProvider(id, name),
         sel.Selector,
@@ -438,6 +440,7 @@ func (suite *SPCollectionIntgSuite) TestCreateSharePointCollection_Lists() {
         control.Defaults(),
         fault.New(true))
     require.NoError(t, err, clues.ToCore(err))
+    assert.True(t, canUsePreviousBackup, "can use previous backup")
     assert.Less(t, 0, len(cols))
     // No excludes yet as this isn't an incremental backup.
     assert.True(t, excludes.Empty())
@@ -14,6 +14,7 @@ import (
     "github.com/alcionai/corso/src/internal/observe"
     "github.com/alcionai/corso/src/pkg/control"
     "github.com/alcionai/corso/src/pkg/fault"
+    "github.com/alcionai/corso/src/pkg/logger"
     "github.com/alcionai/corso/src/pkg/path"
     "github.com/alcionai/corso/src/pkg/selectors"
     "github.com/alcionai/corso/src/pkg/services/m365/api"
@@ -64,8 +65,7 @@ type DeltaPath struct {
 func parseMetadataCollections(
     ctx context.Context,
     colls []data.RestoreCollection,
-    errs *fault.Bus,
-) (CatDeltaPaths, error) {
+) (CatDeltaPaths, bool, error) {
     // cdp stores metadata
     cdp := CatDeltaPaths{
         path.ContactsCategory: {},
@@ -81,6 +81,10 @@ func parseMetadataCollections(
         path.EventsCategory: {},
     }
 
+    // errors from metadata items should not stop the backup,
+    // but it should prevent us from using previous backups
+    errs := fault.New(true)
+
     for _, coll := range colls {
         var (
             breakLoop bool
@@ -91,10 +95,10 @@ func parseMetadataCollections(
         for {
             select {
             case <-ctx.Done():
-                return nil, clues.Wrap(ctx.Err(), "parsing collection metadata").WithClues(ctx)
+                return nil, false, clues.Wrap(ctx.Err(), "parsing collection metadata").WithClues(ctx)
 
             case item, ok := <-items:
-                if !ok {
+                if !ok || errs.Failure() != nil {
                     breakLoop = true
                     break
                 }
@@ -106,13 +110,13 @@ func parseMetadataCollections(
 
                 err := json.NewDecoder(item.ToReader()).Decode(&m)
                 if err != nil {
-                    return nil, clues.New("decoding metadata json").WithClues(ctx)
+                    return nil, false, clues.New("decoding metadata json").WithClues(ctx)
                 }
 
                 switch item.UUID() {
                 case graph.PreviousPathFileName:
                     if _, ok := found[category]["path"]; ok {
-                        return nil, clues.Wrap(clues.New(category.String()), "multiple versions of path metadata").WithClues(ctx)
+                        return nil, false, clues.Wrap(clues.New(category.String()), "multiple versions of path metadata").WithClues(ctx)
                     }
 
                     for k, p := range m {
@@ -123,7 +127,7 @@ func parseMetadataCollections(
 
                 case graph.DeltaURLsFileName:
                     if _, ok := found[category]["delta"]; ok {
-                        return nil, clues.Wrap(clues.New(category.String()), "multiple versions of delta metadata").WithClues(ctx)
+                        return nil, false, clues.Wrap(clues.New(category.String()), "multiple versions of delta metadata").WithClues(ctx)
                     }
 
                     for k, d := range m {
@@ -142,6 +146,16 @@ func parseMetadataCollections(
         }
     }
 
+    if errs.Failure() != nil {
+        logger.CtxErr(ctx, errs.Failure()).Info("reading metadata collection items")
+
+        return CatDeltaPaths{
+            path.ContactsCategory: {},
+            path.EmailCategory:    {},
+            path.EventsCategory:   {},
+        }, false, nil
+    }
+
     // Remove any entries that contain a path or a delta, but not both.
     // That metadata is considered incomplete, and needs to incur a
     // complete backup on the next run.
@@ -153,7 +167,7 @@ func parseMetadataCollections(
         }
     }
 
-    return cdp, nil
+    return cdp, true, nil
 }
 
 // DataCollections returns a DataCollection which the caller can
@@ -168,10 +182,10 @@ func DataCollections(
     su support.StatusUpdater,
     ctrlOpts control.Options,
     errs *fault.Bus,
-) ([]data.BackupCollection, *prefixmatcher.StringSetMatcher, error) {
+) ([]data.BackupCollection, *prefixmatcher.StringSetMatcher, bool, error) {
     eb, err := selector.ToExchangeBackup()
     if err != nil {
-        return nil, nil, clues.Wrap(err, "exchange dataCollection selector").WithClues(ctx)
+        return nil, nil, false, clues.Wrap(err, "exchange dataCollection selector").WithClues(ctx)
     }
 
     var (
@@ -187,9 +201,9 @@ func DataCollections(
         graph.InitializeConcurrencyLimiter(ctrlOpts.Parallelism.ItemFetch)
     }
 
-    cdps, err := parseMetadataCollections(ctx, metadata, errs)
+    cdps, canUsePreviousBackup, err := parseMetadataCollections(ctx, metadata)
     if err != nil {
-        return nil, nil, err
+        return nil, nil, false, err
     }
 
     for _, scope := range eb.Scopes() {
@@ -228,13 +242,13 @@ func DataCollections(
             su,
             errs)
         if err != nil {
-            return nil, nil, err
+            return nil, nil, false, err
         }
 
         collections = append(collections, baseCols...)
     }
 
-    return collections, nil, el.Failure()
+    return collections, nil, canUsePreviousBackup, el.Failure()
 }
 
 // createCollections - utility function that retrieves M365
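The parseMetadataCollections rework above demotes recoverable metadata-read errors into a cleared "can use previous backup" flag instead of failing the whole backup. The shape of that pattern, stripped of Corso's fault.Bus plumbing (all names below are illustrative stand-ins):

    package main

    import (
        "errors"
        "fmt"
    )

    // parse returns its results plus a usable flag; read errors zero the
    // results and clear the flag rather than aborting the caller.
    func parse(items []string) (map[string]string, bool, error) {
        out := map[string]string{}

        var readErr error
        for _, it := range items {
            if it == "" { // stand-in for a failed item read
                readErr = errors.New("bad metadata item")
                break
            }
            out[it] = it
        }

        if readErr != nil {
            // log-and-degrade: force a full (non-incremental) backup instead
            return map[string]string{}, false, nil
        }

        return out, true, nil
    }

    func main() {
        _, ok, err := parse([]string{"delta", ""})
        fmt.Println(ok, err) // false <nil>
    }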
@@ -2,6 +2,7 @@ package exchange
 
 import (
     "bytes"
+    "context"
     "sync"
     "testing"
 
@@ -42,18 +43,20 @@ func (suite *DataCollectionsUnitSuite) TestParseMetadataCollections() {
     }
 
     table := []struct {
-        name        string
-        data        []fileValues
-        expect      map[string]DeltaPath
-        expectError assert.ErrorAssertionFunc
+        name                 string
+        data                 []fileValues
+        expect               map[string]DeltaPath
+        canUsePreviousBackup bool
+        expectError          assert.ErrorAssertionFunc
     }{
         {
             name: "delta urls only",
             data: []fileValues{
                 {graph.DeltaURLsFileName, "delta-link"},
             },
-            expect:      map[string]DeltaPath{},
-            expectError: assert.NoError,
+            expect:               map[string]DeltaPath{},
+            canUsePreviousBackup: true,
+            expectError:          assert.NoError,
         },
         {
             name: "multiple delta urls",
@@ -61,7 +64,8 @@ func (suite *DataCollectionsUnitSuite) TestParseMetadataCollections() {
                 {graph.DeltaURLsFileName, "delta-link"},
                 {graph.DeltaURLsFileName, "delta-link-2"},
             },
-            expectError: assert.Error,
+            canUsePreviousBackup: false,
+            expectError:          assert.Error,
         },
         {
             name: "previous path only",
@@ -74,7 +78,8 @@ func (suite *DataCollectionsUnitSuite) TestParseMetadataCollections() {
                     Path: "prev-path",
                 },
             },
-            expectError: assert.NoError,
+            canUsePreviousBackup: true,
+            expectError:          assert.NoError,
         },
         {
             name: "multiple previous paths",
@@ -82,7 +87,8 @@ func (suite *DataCollectionsUnitSuite) TestParseMetadataCollections() {
                 {graph.PreviousPathFileName, "prev-path"},
                 {graph.PreviousPathFileName, "prev-path-2"},
             },
-            expectError: assert.Error,
+            canUsePreviousBackup: false,
+            expectError:          assert.Error,
         },
         {
             name: "delta urls and previous paths",
@@ -96,7 +102,8 @@ func (suite *DataCollectionsUnitSuite) TestParseMetadataCollections() {
                     Path: "prev-path",
                 },
             },
-            expectError: assert.NoError,
+            canUsePreviousBackup: true,
+            expectError:          assert.NoError,
         },
         {
             name: "delta urls and empty previous paths",
@@ -104,8 +111,9 @@ func (suite *DataCollectionsUnitSuite) TestParseMetadataCollections() {
                 {graph.DeltaURLsFileName, "delta-link"},
                 {graph.PreviousPathFileName, ""},
             },
-            expect:      map[string]DeltaPath{},
-            expectError: assert.NoError,
+            expect:               map[string]DeltaPath{},
+            canUsePreviousBackup: true,
+            expectError:          assert.NoError,
         },
         {
             name: "empty delta urls and previous paths",
@@ -119,7 +127,8 @@ func (suite *DataCollectionsUnitSuite) TestParseMetadataCollections() {
                     Path: "prev-path",
                 },
             },
-            expectError: assert.NoError,
+            canUsePreviousBackup: true,
+            expectError:          assert.NoError,
         },
         {
             name: "delta urls with special chars",
@@ -133,7 +142,8 @@ func (suite *DataCollectionsUnitSuite) TestParseMetadataCollections() {
                     Path: "prev-path",
                 },
             },
-            expectError: assert.NoError,
+            canUsePreviousBackup: true,
+            expectError:          assert.NoError,
         },
         {
             name: "delta urls with escaped chars",
@@ -147,7 +157,8 @@ func (suite *DataCollectionsUnitSuite) TestParseMetadataCollections() {
                    Path: "prev-path",
                 },
             },
-            expectError: assert.NoError,
+            canUsePreviousBackup: true,
+            expectError:          assert.NoError,
         },
         {
             name: "delta urls with newline char runes",
@@ -164,7 +175,8 @@ func (suite *DataCollectionsUnitSuite) TestParseMetadataCollections() {
                     Path: "prev-path",
                 },
             },
-            expectError: assert.NoError,
+            canUsePreviousBackup: true,
+            expectError:          assert.NoError,
         },
     }
     for _, test := range table {
@@ -191,11 +203,13 @@ func (suite *DataCollectionsUnitSuite) TestParseMetadataCollections() {
             )
             require.NoError(t, err, clues.ToCore(err))
 
-            cdps, err := parseMetadataCollections(ctx, []data.RestoreCollection{
-                data.NotFoundRestoreCollection{Collection: coll},
-            }, fault.New(true))
+            cdps, canUsePreviousBackup, err := parseMetadataCollections(ctx, []data.RestoreCollection{
+                data.NoFetchRestoreCollection{Collection: coll},
+            })
             test.expectError(t, err, clues.ToCore(err))
 
+            assert.Equal(t, test.canUsePreviousBackup, canUsePreviousBackup, "can use previous backup")
+
             emails := cdps[path.EmailCategory]
 
             assert.Len(t, emails, len(test.expect))
@@ -208,6 +222,52 @@ func (suite *DataCollectionsUnitSuite) TestParseMetadataCollections() {
     }
 }
 
+type failingColl struct {
+    t *testing.T
+}
+
+func (f failingColl) Items(ctx context.Context, errs *fault.Bus) <-chan data.Stream {
+    ic := make(chan data.Stream)
+    defer close(ic)
+
+    errs.AddRecoverable(assert.AnError)
+
+    return ic
+}
+
+func (f failingColl) FullPath() path.Path {
+    tmp, err := path.Build(
+        "tenant",
+        "user",
+        path.ExchangeService,
+        path.EmailCategory,
+        false,
+        "inbox")
+    require.NoError(f.t, err, clues.ToCore(err))
+
+    return tmp
+}
+
+func (f failingColl) FetchItemByName(context.Context, string) (data.Stream, error) {
+    // no fetch calls will be made
+    return nil, nil
+}
+
+// This check is to ensure that we don't error out, but still return
+// canUsePreviousBackup as false on read errors
+func (suite *DataCollectionsUnitSuite) TestParseMetadataCollections_ReadFailure() {
+    t := suite.T()
+
+    ctx, flush := tester.NewContext(t)
+    defer flush()
+
+    fc := failingColl{t}
+
+    _, canUsePreviousBackup, err := parseMetadataCollections(ctx, []data.RestoreCollection{fc})
+    require.NoError(t, err)
+    require.False(t, canUsePreviousBackup)
+}
+
 // ---------------------------------------------------------------------------
 // Integration tests
 // ---------------------------------------------------------------------------
@@ -401,10 +461,11 @@ func (suite *DataCollectionsIntegrationSuite) TestDelta() {
 
     require.NotNil(t, metadata, "collections contains a metadata collection")
 
-    cdps, err := parseMetadataCollections(ctx, []data.RestoreCollection{
-        data.NotFoundRestoreCollection{Collection: metadata},
-    }, fault.New(true))
+    cdps, canUsePreviousBackup, err := parseMetadataCollections(ctx, []data.RestoreCollection{
+        data.NoFetchRestoreCollection{Collection: metadata},
+    })
     require.NoError(t, err, clues.ToCore(err))
+    assert.True(t, canUsePreviousBackup, "can use previous backup")
 
     dps := cdps[test.scope.Category().PathType()]
@@ -11,7 +11,6 @@ import (
 
     "github.com/alcionai/corso/src/internal/common/ptr"
     exchMock "github.com/alcionai/corso/src/internal/connector/exchange/mock"
-    "github.com/alcionai/corso/src/internal/connector/graph"
     "github.com/alcionai/corso/src/internal/tester"
     "github.com/alcionai/corso/src/pkg/account"
     "github.com/alcionai/corso/src/pkg/fault"
@@ -21,7 +20,6 @@ import (
 
 type RestoreIntgSuite struct {
     tester.Suite
-    gs          graph.Servicer
     credentials account.M365Config
     ac          api.Client
 }
@@ -44,14 +42,6 @@ func (suite *RestoreIntgSuite) SetupSuite() {
     suite.credentials = m365
     suite.ac, err = api.NewClient(m365)
     require.NoError(t, err, clues.ToCore(err))
-
-    adpt, err := graph.CreateAdapter(
-        m365.AzureTenantID,
-        m365.AzureClientID,
-        m365.AzureClientSecret)
-    require.NoError(t, err, clues.ToCore(err))
-
-    suite.gs = graph.NewService(adpt)
 }
 
 // TestRestoreContact ensures contact object can be created, placed into
@@ -425,10 +425,9 @@ func checkMetadata(
     expect DeltaPaths,
     c data.BackupCollection,
 ) {
-    catPaths, err := parseMetadataCollections(
+    catPaths, _, err := parseMetadataCollections(
         ctx,
-        []data.RestoreCollection{data.NotFoundRestoreCollection{Collection: c}},
-        fault.New(true))
+        []data.RestoreCollection{data.NoFetchRestoreCollection{Collection: c}})
     if !assert.NoError(t, err, "getting metadata", clues.ToCore(err)) {
         return
     }
@@ -14,7 +14,6 @@ import (
     "github.com/alcionai/corso/src/internal/data"
     "github.com/alcionai/corso/src/internal/diagnostics"
     "github.com/alcionai/corso/src/internal/observe"
-    "github.com/alcionai/corso/src/pkg/account"
     "github.com/alcionai/corso/src/pkg/backup/details"
     "github.com/alcionai/corso/src/pkg/control"
     "github.com/alcionai/corso/src/pkg/fault"
@@ -27,9 +26,7 @@ import (
 // store through GraphAPI.
 func RestoreCollections(
     ctx context.Context,
-    creds account.M365Config,
-    gs graph.Servicer,
+    ac api.Client,
     dest control.RestoreDestination,
     dcs []data.RestoreCollection,
     deets *details.Builder,
@@ -15,6 +15,11 @@ const (
     // number of uploads, but the max that can be specified. This is
     // added as a safeguard in case we misconfigure the values.
     maxConccurrentUploads = 20
+
+    // CopyBufferSize is used for chunked upload
+    // Microsoft recommends 5-10MB buffers
+    // https://docs.microsoft.com/en-us/graph/api/driveitem-createuploadsession?view=graph-rest-1.0#best-practices
+    CopyBufferSize = 5 * 1024 * 1024
 )
 
 // ---------------------------------------------------------------------------
@@ -169,9 +169,13 @@ func IsMalware(err error) bool {
 }
 
 func IsMalwareResp(ctx context.Context, resp *http.Response) bool {
+    if resp == nil {
+        return false
+    }
+
     // https://learn.microsoft.com/en-us/openspecs/sharepoint_protocols/ms-wsshp/ba4ee7a8-704c-4e9c-ab14-fa44c574bdf4
     // https://learn.microsoft.com/en-us/openspecs/sharepoint_protocols/ms-wdvmoduu/6fa6d4a9-ac18-4cd7-b696-8a3b14a98291
-    if resp.Header.Get("X-Virus-Infected") == "true" {
+    if len(resp.Header) > 0 && resp.Header.Get("X-Virus-Infected") == "true" {
         return true
     }
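The guard added above protects against dereferencing a nil response, e.g. the result of a failed round trip. A self-contained illustration of the same check (simplified sketch, not the code from this diff):

    package main

    import (
        "fmt"
        "net/http"
    )

    // isMalwareResp mirrors the nil guard above: a nil *http.Response
    // must be rejected before any header access.
    func isMalwareResp(resp *http.Response) bool {
        if resp == nil {
            return false
        }

        return resp.Header.Get("X-Virus-Infected") == "true"
    }

    func main() {
        fmt.Println(isMalwareResp(nil)) // false, no panic
    }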
@@ -15,7 +15,7 @@ import (
     "github.com/alcionai/corso/src/internal/data"
     "github.com/alcionai/corso/src/internal/operations/inject"
     "github.com/alcionai/corso/src/pkg/account"
-    m365api "github.com/alcionai/corso/src/pkg/services/m365/api"
+    "github.com/alcionai/corso/src/pkg/services/m365/api"
 )
 
 // ---------------------------------------------------------------------------
@@ -32,9 +32,7 @@ var (
 // GraphRequestAdapter from the msgraph-sdk-go. Additional fields are for
 // bookkeeping and interfacing with other component.
 type GraphConnector struct {
-    Service    graph.Servicer
-    Discovery  m365api.Client
     itemClient graph.Requester // configured to handle large item downloads
+    AC         api.Client
 
     tenant      string
     credentials account.M365Config
@@ -64,12 +62,7 @@ func NewGraphConnector(
         return nil, clues.Wrap(err, "retrieving m365 account configuration").WithClues(ctx)
     }
 
-    service, err := createService(creds)
-    if err != nil {
-        return nil, clues.Wrap(err, "creating service connection").WithClues(ctx)
-    }
-
-    ac, err := m365api.NewClient(creds)
+    ac, err := api.NewClient(creds)
     if err != nil {
         return nil, clues.Wrap(err, "creating api client").WithClues(ctx)
     }
@@ -80,12 +73,10 @@ func NewGraphConnector(
     }
 
     gc := GraphConnector{
-        Discovery:    ac,
+        AC:           ac,
         IDNameLookup: idname.NewCache(nil),
-        Service:      service,
 
         credentials: creds,
         itemClient:  graph.NewNoTimeoutHTTPWrapper(),
         ownerLookup: rc,
         tenant:      acct.ID(),
         wg:          &sync.WaitGroup{},
@@ -94,23 +85,6 @@ func NewGraphConnector(
     return &gc, nil
 }
 
-// ---------------------------------------------------------------------------
-// Service Client
-// ---------------------------------------------------------------------------
-
-// createService constructor for graphService component
-func createService(creds account.M365Config) (*graph.Service, error) {
-    adapter, err := graph.CreateAdapter(
-        creds.AzureTenantID,
-        creds.AzureClientID,
-        creds.AzureClientSecret)
-    if err != nil {
-        return &graph.Service{}, err
-    }
-
-    return graph.NewService(adapter), nil
-}
-
 // ---------------------------------------------------------------------------
 // Processing Status
 // ---------------------------------------------------------------------------
@@ -180,7 +154,7 @@ const (
     Sites
 )
 
-func (r Resource) resourceClient(ac m365api.Client) (*resourceClient, error) {
+func (r Resource) resourceClient(ac api.Client) (*resourceClient, error) {
     switch r {
     case Users:
         return &resourceClient{enum: r, getter: ac.Users()}, nil
@@ -209,7 +183,7 @@ var _ getOwnerIDAndNamer = &resourceClient{}
 type getOwnerIDAndNamer interface {
     getOwnerIDAndNameFrom(
         ctx context.Context,
-        discovery m365api.Client,
+        discovery api.Client,
         owner string,
         ins idname.Cacher,
     ) (
@@ -227,7 +201,7 @@ type getOwnerIDAndNamer interface {
 // (PrincipalName for users, WebURL for sites).
 func (r resourceClient) getOwnerIDAndNameFrom(
     ctx context.Context,
-    discovery m365api.Client,
+    discovery api.Client,
     owner string,
     ins idname.Cacher,
 ) (string, string, error) {
@@ -275,7 +249,7 @@ func (gc *GraphConnector) PopulateOwnerIDAndNamesFrom(
     owner string, // input value, can be either id or name
     ins idname.Cacher,
 ) (string, string, error) {
-    id, name, err := gc.ownerLookup.getOwnerIDAndNameFrom(ctx, gc.Discovery, owner, ins)
+    id, name, err := gc.ownerLookup.getOwnerIDAndNameFrom(ctx, gc.AC, owner, ins)
     if err != nil {
         return "", "", clues.Wrap(err, "identifying resource owner")
     }
@@ -18,7 +18,6 @@ import (
     "github.com/alcionai/corso/src/internal/connector/onedrive/metadata"
     "github.com/alcionai/corso/src/internal/tester"
     "github.com/alcionai/corso/src/internal/version"
-    "github.com/alcionai/corso/src/pkg/account"
     "github.com/alcionai/corso/src/pkg/control"
     "github.com/alcionai/corso/src/pkg/path"
     "github.com/alcionai/corso/src/pkg/services/m365/api"
@@ -44,8 +43,8 @@ var (
 func mustGetDefaultDriveID(
     t *testing.T,
     ctx context.Context, //revive:disable-line:context-as-argument
-    backupService path.ServiceType,
-    service graph.Servicer,
+    ac api.Client,
+    service path.ServiceType,
     resourceOwner string,
 ) string {
     var (
@@ -53,13 +52,13 @@ func mustGetDefaultDriveID(
         d   models.Driveable
     )
 
-    switch backupService {
+    switch service {
     case path.OneDriveService:
-        d, err = api.GetUsersDrive(ctx, service, resourceOwner)
+        d, err = ac.Users().GetDefaultDrive(ctx, resourceOwner)
     case path.SharePointService:
-        d, err = api.GetSitesDefaultDrive(ctx, service, resourceOwner)
+        d, err = ac.Sites().GetDefaultDrive(ctx, resourceOwner)
     default:
-        assert.FailNowf(t, "unknown service type %s", backupService.String())
+        assert.FailNowf(t, "unknown service type %s", service.String())
     }
 
     if err != nil {
@@ -75,19 +74,18 @@ func mustGetDefaultDriveID(
 }
 
 type suiteInfo interface {
-    Service() graph.Servicer
-    Account() account.Account
+    APIClient() api.Client
     Tenant() string
     // Returns (username, user ID) for the user. These values are used for
     // permissions.
     PrimaryUser() (string, string)
     SecondaryUser() (string, string)
     TertiaryUser() (string, string)
-    // BackupResourceOwner returns the resource owner to run the backup/restore
+    // ResourceOwner returns the resource owner to run the backup/restore
     // with. This can be different from the values used for permissions and it can
     // also be a site.
-    BackupResourceOwner() string
-    BackupService() path.ServiceType
+    ResourceOwner() string
+    Service() path.ServiceType
     Resource() Resource
 }
@@ -97,25 +95,46 @@ type oneDriveSuite interface {
 }
 
 type suiteInfoImpl struct {
+    ac              api.Client
     connector       *GraphConnector
+    resourceOwner   string
+    user            string
+    userID          string
+    resourceType    Resource
     secondaryUser   string
     secondaryUserID string
+    service         path.ServiceType
     tertiaryUser    string
     tertiaryUserID  string
-    acct            account.Account
-    service         path.ServiceType
-    resourceType    Resource
-    user            string
-    userID          string
 }
 
-func (si suiteInfoImpl) Service() graph.Servicer {
-    return si.connector.Service
-}
+func NewSuiteInfoImpl(
+    t *testing.T,
+    ctx context.Context, //revive:disable-line:context-as-argument
+    resourceOwner string,
+    service path.ServiceType,
+) suiteInfoImpl {
+    resource := Users
+    if service == path.SharePointService {
+        resource = Sites
+    }
+
+    gc := loadConnector(ctx, t, resource)
+
+    return suiteInfoImpl{
+        ac:            gc.AC,
+        connector:     gc,
+        resourceOwner: resourceOwner,
+        resourceType:  resource,
+        secondaryUser: tester.SecondaryM365UserID(t),
+        service:       service,
+        tertiaryUser:  tester.TertiaryM365UserID(t),
+        user:          tester.M365UserID(t),
+    }
+}
 
-func (si suiteInfoImpl) Account() account.Account {
-    return si.acct
-}
+func (si suiteInfoImpl) APIClient() api.Client {
+    return si.ac
+}
 
 func (si suiteInfoImpl) Tenant() string {
@@ -134,11 +153,11 @@ func (si suiteInfoImpl) TertiaryUser() (string, string) {
     return si.tertiaryUser, si.tertiaryUserID
 }
 
-func (si suiteInfoImpl) BackupResourceOwner() string {
+func (si suiteInfoImpl) ResourceOwner() string {
     return si.resourceOwner
 }
 
-func (si suiteInfoImpl) BackupService() path.ServiceType {
+func (si suiteInfoImpl) Service() path.ServiceType {
     return si.service
 }
@@ -162,8 +181,7 @@ func TestGraphConnectorSharePointIntegrationSuite(t *testing.T) {
     suite.Run(t, &GraphConnectorSharePointIntegrationSuite{
         Suite: tester.NewIntegrationSuite(
             t,
-            [][]string{tester.M365AcctCredEnvs},
-        ),
+            [][]string{tester.M365AcctCredEnvs}),
     })
 }
@@ -173,27 +191,18 @@ func (suite *GraphConnectorSharePointIntegrationSuite) SetupSuite() {
     ctx, flush := tester.NewContext(t)
     defer flush()
 
-    si := suiteInfoImpl{
-        connector:     loadConnector(ctx, suite.T(), Sites),
-        user:          tester.M365UserID(suite.T()),
-        secondaryUser: tester.SecondaryM365UserID(suite.T()),
-        tertiaryUser:  tester.TertiaryM365UserID(suite.T()),
-        acct:          tester.NewM365Account(suite.T()),
-        service:       path.SharePointService,
-        resourceType:  Sites,
-    }
-
-    si.resourceOwner = tester.M365SiteID(suite.T())
+    si := NewSuiteInfoImpl(suite.T(), ctx, tester.M365SiteID(suite.T()), path.SharePointService)
 
-    user, err := si.connector.Discovery.Users().GetByID(ctx, si.user)
+    // users needed for permissions
+    user, err := si.connector.AC.Users().GetByID(ctx, si.user)
     require.NoError(t, err, "fetching user", si.user, clues.ToCore(err))
     si.userID = ptr.Val(user.GetId())
 
-    secondaryUser, err := si.connector.Discovery.Users().GetByID(ctx, si.secondaryUser)
+    secondaryUser, err := si.connector.AC.Users().GetByID(ctx, si.secondaryUser)
     require.NoError(t, err, "fetching user", si.secondaryUser, clues.ToCore(err))
     si.secondaryUserID = ptr.Val(secondaryUser.GetId())
 
-    tertiaryUser, err := si.connector.Discovery.Users().GetByID(ctx, si.tertiaryUser)
+    tertiaryUser, err := si.connector.AC.Users().GetByID(ctx, si.tertiaryUser)
     require.NoError(t, err, "fetching user", si.tertiaryUser, clues.ToCore(err))
     si.tertiaryUserID = ptr.Val(tertiaryUser.GetId())
 
@@ -233,8 +242,7 @@ func TestGraphConnectorOneDriveIntegrationSuite(t *testing.T) {
     suite.Run(t, &GraphConnectorOneDriveIntegrationSuite{
         Suite: tester.NewIntegrationSuite(
             t,
-            [][]string{tester.M365AcctCredEnvs},
-        ),
+            [][]string{tester.M365AcctCredEnvs}),
     })
 }
@@ -244,25 +252,20 @@ func (suite *GraphConnectorOneDriveIntegrationSuite) SetupSuite() {
     ctx, flush := tester.NewContext(t)
     defer flush()
 
-    si := suiteInfoImpl{
-        connector:     loadConnector(ctx, t, Users),
-        user:          tester.M365UserID(t),
-        secondaryUser: tester.SecondaryM365UserID(t),
-        acct:          tester.NewM365Account(t),
-        service:       path.OneDriveService,
-        resourceType:  Users,
-    }
-
-    si.resourceOwner = si.user
+    si := NewSuiteInfoImpl(t, ctx, tester.M365UserID(t), path.OneDriveService)
 
-    user, err := si.connector.Discovery.Users().GetByID(ctx, si.user)
+    user, err := si.connector.AC.Users().GetByID(ctx, si.user)
     require.NoError(t, err, "fetching user", si.user, clues.ToCore(err))
     si.userID = ptr.Val(user.GetId())
 
-    secondaryUser, err := si.connector.Discovery.Users().GetByID(ctx, si.secondaryUser)
+    secondaryUser, err := si.connector.AC.Users().GetByID(ctx, si.secondaryUser)
     require.NoError(t, err, "fetching user", si.secondaryUser, clues.ToCore(err))
     si.secondaryUserID = ptr.Val(secondaryUser.GetId())
 
+    tertiaryUser, err := si.connector.AC.Users().GetByID(ctx, si.tertiaryUser)
+    require.NoError(t, err, "fetching user", si.tertiaryUser, clues.ToCore(err))
+    si.tertiaryUserID = ptr.Val(tertiaryUser.GetId())
+
     suite.suiteInfo = si
 }
@@ -299,8 +302,7 @@ func TestGraphConnectorOneDriveNightlySuite(t *testing.T) {
     suite.Run(t, &GraphConnectorOneDriveNightlySuite{
         Suite: tester.NewNightlySuite(
             t,
-            [][]string{tester.M365AcctCredEnvs},
-        ),
+            [][]string{tester.M365AcctCredEnvs}),
     })
 }
@@ -310,25 +312,20 @@ func (suite *GraphConnectorOneDriveNightlySuite) SetupSuite() {
     ctx, flush := tester.NewContext(t)
     defer flush()
 
-    si := suiteInfoImpl{
-        connector:     loadConnector(ctx, t, Users),
-        user:          tester.M365UserID(t),
-        secondaryUser: tester.SecondaryM365UserID(t),
-        acct:          tester.NewM365Account(t),
-        service:       path.OneDriveService,
-        resourceType:  Users,
-    }
-
-    si.resourceOwner = si.user
+    si := NewSuiteInfoImpl(t, ctx, tester.M365UserID(t), path.OneDriveService)
 
-    user, err := si.connector.Discovery.Users().GetByID(ctx, si.user)
+    user, err := si.connector.AC.Users().GetByID(ctx, si.user)
     require.NoError(t, err, "fetching user", si.user, clues.ToCore(err))
     si.userID = ptr.Val(user.GetId())
 
-    secondaryUser, err := si.connector.Discovery.Users().GetByID(ctx, si.secondaryUser)
+    secondaryUser, err := si.connector.AC.Users().GetByID(ctx, si.secondaryUser)
     require.NoError(t, err, "fetching user", si.secondaryUser, clues.ToCore(err))
     si.secondaryUserID = ptr.Val(secondaryUser.GetId())
 
+    tertiaryUser, err := si.connector.AC.Users().GetByID(ctx, si.tertiaryUser)
+    require.NoError(t, err, "fetching user", si.tertiaryUser, clues.ToCore(err))
+    si.tertiaryUserID = ptr.Val(tertiaryUser.GetId())
+
     suite.suiteInfo = si
 }
@@ -367,9 +364,9 @@ func testRestoreAndBackupMultipleFilesAndFoldersNoPermissions(
	driveID := mustGetDefaultDriveID(
		t,
		ctx,
		suite.BackupService(),
		suite.APIClient(),
		suite.Service(),
		suite.BackupResourceOwner())
		suite.ResourceOwner())

	rootPath := []string{
		odConsts.DrivesPathDir,
@@ -470,17 +467,17 @@ func testRestoreAndBackupMultipleFilesAndFoldersNoPermissions(
		},
	}

	expected, err := DataForInfo(suite.BackupService(), cols, version.Backup)
	expected, err := DataForInfo(suite.Service(), cols, version.Backup)
	require.NoError(suite.T(), err)

	for vn := startVersion; vn <= version.Backup; vn++ {
		suite.Run(fmt.Sprintf("Version%d", vn), func() {
			t := suite.T()
			input, err := DataForInfo(suite.BackupService(), cols, vn)
			input, err := DataForInfo(suite.Service(), cols, vn)
			require.NoError(suite.T(), err)

			testData := restoreBackupInfoMultiVersion{
				service:             suite.BackupService(),
				service:             suite.Service(),
				resource:            suite.Resource(),
				backupVersion:       vn,
				collectionsPrevious: input,
@@ -489,10 +486,9 @@ func testRestoreAndBackupMultipleFilesAndFoldersNoPermissions(

			runRestoreBackupTestVersions(
				t,
				suite.Account(),
				testData,
				suite.Tenant(),
				[]string{suite.BackupResourceOwner()},
				[]string{suite.ResourceOwner()},
				control.Options{
					RestorePermissions: true,
					ToggleFeatures:     control.Toggles{},
@@ -513,9 +509,9 @@ func testPermissionsRestoreAndBackup(suite oneDriveSuite, startVersion int) {
	driveID := mustGetDefaultDriveID(
		t,
		ctx,
		suite.BackupService(),
		suite.APIClient(),
		suite.Service(),
		suite.BackupResourceOwner())
		suite.ResourceOwner())

	fileName2 := "test-file2.txt"
	folderCName := "folder-c"
@@ -683,9 +679,9 @@ func testPermissionsRestoreAndBackup(suite oneDriveSuite, startVersion int) {
		},
	}

	expected, err := DataForInfo(suite.BackupService(), cols, version.Backup)
	expected, err := DataForInfo(suite.Service(), cols, version.Backup)
	require.NoError(suite.T(), err)
	bss := suite.BackupService().String()
	bss := suite.Service().String()

	for vn := startVersion; vn <= version.Backup; vn++ {
		suite.Run(fmt.Sprintf("%s-Version%d", bss, vn), func() {
@@ -693,11 +689,11 @@ func testPermissionsRestoreAndBackup(suite oneDriveSuite, startVersion int) {
			// Ideally this can always be true or false and still
			// work, but limiting older versions to use emails so as
			// to validate that flow as well.
			input, err := DataForInfo(suite.BackupService(), cols, vn)
			input, err := DataForInfo(suite.Service(), cols, vn)
			require.NoError(suite.T(), err)

			testData := restoreBackupInfoMultiVersion{
				service:             suite.BackupService(),
				service:             suite.Service(),
				resource:            suite.Resource(),
				backupVersion:       vn,
				collectionsPrevious: input,
@@ -706,10 +702,9 @@ func testPermissionsRestoreAndBackup(suite oneDriveSuite, startVersion int) {

			runRestoreBackupTestVersions(
				t,
				suite.Account(),
				testData,
				suite.Tenant(),
				[]string{suite.BackupResourceOwner()},
				[]string{suite.ResourceOwner()},
				control.Options{
					RestorePermissions: true,
					ToggleFeatures:     control.Toggles{},
@@ -730,9 +725,9 @@ func testPermissionsBackupAndNoRestore(suite oneDriveSuite, startVersion int) {
	driveID := mustGetDefaultDriveID(
		t,
		ctx,
		suite.BackupService(),
		suite.APIClient(),
		suite.Service(),
		suite.BackupResourceOwner())
		suite.ResourceOwner())

	inputCols := []OnedriveColInfo{
		{
@@ -772,18 +767,18 @@ func testPermissionsBackupAndNoRestore(suite oneDriveSuite, startVersion int) {
		},
	}

	expected, err := DataForInfo(suite.BackupService(), expectedCols, version.Backup)
	expected, err := DataForInfo(suite.Service(), expectedCols, version.Backup)
	require.NoError(suite.T(), err)
	bss := suite.BackupService().String()
	bss := suite.Service().String()

	for vn := startVersion; vn <= version.Backup; vn++ {
		suite.Run(fmt.Sprintf("%s-Version%d", bss, vn), func() {
			t := suite.T()
			input, err := DataForInfo(suite.BackupService(), inputCols, vn)
			input, err := DataForInfo(suite.Service(), inputCols, vn)
			require.NoError(suite.T(), err)

			testData := restoreBackupInfoMultiVersion{
				service:             suite.BackupService(),
				service:             suite.Service(),
				resource:            suite.Resource(),
				backupVersion:       vn,
				collectionsPrevious: input,
@@ -792,10 +787,9 @@ func testPermissionsBackupAndNoRestore(suite oneDriveSuite, startVersion int) {

			runRestoreBackupTestVersions(
				t,
				suite.Account(),
				testData,
				suite.Tenant(),
				[]string{suite.BackupResourceOwner()},
				[]string{suite.ResourceOwner()},
				control.Options{
					RestorePermissions: false,
					ToggleFeatures:     control.Toggles{},
@@ -819,9 +813,9 @@ func testPermissionsInheritanceRestoreAndBackup(suite oneDriveSuite, startVersio
	driveID := mustGetDefaultDriveID(
		t,
		ctx,
		suite.BackupService(),
		suite.APIClient(),
		suite.Service(),
		suite.BackupResourceOwner())
		suite.ResourceOwner())

	folderAName := "custom"
	folderBName := "inherited"
@@ -953,9 +947,9 @@ func testPermissionsInheritanceRestoreAndBackup(suite oneDriveSuite, startVersio
		},
	}

	expected, err := DataForInfo(suite.BackupService(), cols, version.Backup)
	expected, err := DataForInfo(suite.Service(), cols, version.Backup)
	require.NoError(suite.T(), err)
	bss := suite.BackupService().String()
	bss := suite.Service().String()

	for vn := startVersion; vn <= version.Backup; vn++ {
		suite.Run(fmt.Sprintf("%s-Version%d", bss, vn), func() {
@@ -963,11 +957,11 @@ func testPermissionsInheritanceRestoreAndBackup(suite oneDriveSuite, startVersio
			// Ideally this can always be true or false and still
			// work, but limiting older versions to use emails so as
			// to validate that flow as well.
			input, err := DataForInfo(suite.BackupService(), cols, vn)
			input, err := DataForInfo(suite.Service(), cols, vn)
			require.NoError(suite.T(), err)

			testData := restoreBackupInfoMultiVersion{
				service:             suite.BackupService(),
				service:             suite.Service(),
				resource:            suite.Resource(),
				backupVersion:       vn,
				collectionsPrevious: input,
@@ -976,10 +970,9 @@ func testPermissionsInheritanceRestoreAndBackup(suite oneDriveSuite, startVersio

			runRestoreBackupTestVersions(
				t,
				suite.Account(),
				testData,
				suite.Tenant(),
				[]string{suite.BackupResourceOwner()},
				[]string{suite.ResourceOwner()},
				control.Options{
					RestorePermissions: true,
					ToggleFeatures:     control.Toggles{},
@@ -1001,9 +994,9 @@ func testRestoreFolderNamedFolderRegression(
	driveID := mustGetDefaultDriveID(
		suite.T(),
		ctx,
		suite.BackupService(),
		suite.APIClient(),
		suite.Service(),
		suite.BackupResourceOwner())
		suite.ResourceOwner())

	rootPath := []string{
		odConsts.DrivesPathDir,
@@ -1072,30 +1065,29 @@ func testRestoreFolderNamedFolderRegression(
		},
	}

	expected, err := DataForInfo(suite.BackupService(), cols, version.Backup)
	expected, err := DataForInfo(suite.Service(), cols, version.Backup)
	require.NoError(suite.T(), err)
	bss := suite.BackupService().String()
	bss := suite.Service().String()

	for vn := startVersion; vn <= version.Backup; vn++ {
		suite.Run(fmt.Sprintf("%s-Version%d", bss, vn), func() {
			t := suite.T()
			input, err := DataForInfo(suite.BackupService(), cols, vn)
			input, err := DataForInfo(suite.Service(), cols, vn)
			require.NoError(suite.T(), err)

			testData := restoreBackupInfoMultiVersion{
				service:             suite.BackupService(),
				service:             suite.Service(),
				resource:            suite.Resource(),
				backupVersion:       vn,
				collectionsPrevious: input,
				collectionsLatest:   expected,
			}

			runRestoreTestWithVerion(
			runRestoreTestWithVersion(
				t,
				suite.Account(),
				testData,
				suite.Tenant(),
				[]string{suite.BackupResourceOwner()},
				[]string{suite.ResourceOwner()},
				control.Options{
					RestorePermissions: true,
					ToggleFeatures:     control.Toggles{},

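All of the helpers above now read their configuration through suite accessors rather than per-service parameters. A sketch of the accessor set they rely on, inferred from the call sites in this diff (the repo's actual oneDriveSuite interface may declare more methods, and APIClient's return type is an assumption):

// Sketch only; reconstructed from the suite.X() calls used above.
type oneDriveSuite interface {
	tester.Suite

	Account() account.Account
	Tenant() string
	APIClient() api.Client // assumed return type
	Service() path.ServiceType
	Resource() Resource
	ResourceOwner() string
}
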
@@ -19,7 +19,6 @@ import (
	"github.com/alcionai/corso/src/internal/data"
	"github.com/alcionai/corso/src/internal/tester"
	"github.com/alcionai/corso/src/internal/version"
	"github.com/alcionai/corso/src/pkg/account"
	"github.com/alcionai/corso/src/pkg/control"
	"github.com/alcionai/corso/src/pkg/fault"
	"github.com/alcionai/corso/src/pkg/path"
@@ -263,7 +262,6 @@ type GraphConnectorIntegrationSuite struct {
	connector     *GraphConnector
	user          string
	secondaryUser string
	acct          account.Account
}

func TestGraphConnectorIntegrationSuite(t *testing.T) {
@@ -284,7 +282,6 @@ func (suite *GraphConnectorIntegrationSuite) SetupSuite() {
	suite.connector = loadConnector(ctx, t, Users)
	suite.user = tester.M365UserID(t)
	suite.secondaryUser = tester.SecondaryM365UserID(t)
	suite.acct = tester.NewM365Account(t)

	tester.LogTimeOfTest(t)
}
@@ -296,7 +293,6 @@ func (suite *GraphConnectorIntegrationSuite) TestRestoreFailsBadService() {
	defer flush()

	var (
		acct = tester.NewM365Account(t)
		dest = tester.DefaultTestRestoreDestination("")
		sel  = selectors.Selector{
			Service: selectors.ServiceUnknown,
@@ -306,7 +302,6 @@ func (suite *GraphConnectorIntegrationSuite) TestRestoreFailsBadService() {
	deets, err := suite.connector.ConsumeRestoreCollections(
		ctx,
		version.Backup,
		acct,
		sel,
		dest,
		control.Options{
@@ -385,7 +380,6 @@ func (suite *GraphConnectorIntegrationSuite) TestEmptyCollections() {
			deets, err := suite.connector.ConsumeRestoreCollections(
				ctx,
				version.Backup,
				suite.acct,
				test.sel,
				dest,
				control.Options{
@@ -429,7 +423,6 @@ func runRestore(
	deets, err := restoreGC.ConsumeRestoreCollections(
		ctx,
		backupVersion,
		config.Acct,
		restoreSel,
		config.Dest,
		config.Opts,
@@ -494,7 +487,7 @@ func runBackupAndCompare(
	t.Logf("Selective backup of %s\n", backupSel)

	start := time.Now()
	dcs, excludes, err := backupGC.ProduceBackupCollections(
	dcs, excludes, canUsePreviousBackup, err := backupGC.ProduceBackupCollections(
		ctx,
		backupSel,
		backupSel,
@@ -503,6 +496,7 @@ func runBackupAndCompare(
		config.Opts,
		fault.New(true))
	require.NoError(t, err, clues.ToCore(err))
	assert.True(t, canUsePreviousBackup, "can use previous backup")
	// No excludes yet because this isn't an incremental backup.
	assert.True(t, excludes.Empty())

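ProduceBackupCollections now returns a fourth value, canUsePreviousBackup, ahead of the error, and full (non-incremental) test runs assert that it is true. A self-contained sketch of the caller-side contract (an illustrative helper, not repo code): when the flag is false, prior backup state must not be merged.

// collectionsOrFull is a sketch showing how a caller can honor the new flag:
// when canUsePreviousBackup is false, prior metadata is dropped so the
// operation degrades to a full, non-incremental backup.
func collectionsOrFull(
	cols []data.BackupCollection,
	canUsePreviousBackup bool,
	prevMetadata []data.RestoreCollection,
) ([]data.BackupCollection, []data.RestoreCollection) {
	if !canUsePreviousBackup {
		// nothing from the previous backup can be trusted
		return cols, nil
	}

	return cols, prevMetadata
}
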
@@ -528,7 +522,6 @@ func runBackupAndCompare(

func runRestoreBackupTest(
	t *testing.T,
	acct account.Account,
	test restoreBackupInfo,
	tenant string,
	resourceOwners []string,
@@ -538,7 +531,6 @@ func runRestoreBackupTest(
	defer flush()

	config := ConfigInfo{
		Acct:     acct,
		Opts:     opts,
		Resource: test.resource,
		Service:  test.service,
@@ -573,9 +565,8 @@ func runRestoreBackupTest(
}

// runRestoreTestWithVersion restores data using the test's backup version
func runRestoreTestWithVerion(
func runRestoreTestWithVersion(
	t *testing.T,
	acct account.Account,
	test restoreBackupInfoMultiVersion,
	tenant string,
	resourceOwners []string,
@@ -585,7 +576,6 @@ func runRestoreTestWithVerion(
	defer flush()

	config := ConfigInfo{
		Acct:     acct,
		Opts:     opts,
		Resource: test.resource,
		Service:  test.service,
@@ -614,7 +604,6 @@ func runRestoreTestWithVerion(
// something that would be in the form of a newer backup.
func runRestoreBackupTestVersions(
	t *testing.T,
	acct account.Account,
	test restoreBackupInfoMultiVersion,
	tenant string,
	resourceOwners []string,
@@ -624,7 +613,6 @@ func runRestoreBackupTestVersions(
	defer flush()

	config := ConfigInfo{
		Acct:     acct,
		Opts:     opts,
		Resource: test.resource,
		Service:  test.service,
@@ -920,15 +908,13 @@ func (suite *GraphConnectorIntegrationSuite) TestRestoreAndBackup() {
		suite.Run(test.name, func() {
			runRestoreBackupTest(
				suite.T(),
				suite.acct,
				test,
				suite.connector.tenant,
				[]string{suite.user},
				control.Options{
					RestorePermissions: true,
					ToggleFeatures:     control.Toggles{},
				},
			)
		})
	})
}
}
@@ -1044,7 +1030,6 @@ func (suite *GraphConnectorIntegrationSuite) TestMultiFolderBackupDifferentNames
	deets, err := restoreGC.ConsumeRestoreCollections(
		ctx,
		version.Backup,
		suite.acct,
		restoreSel,
		dest,
		control.Options{
@@ -1075,7 +1060,7 @@ func (suite *GraphConnectorIntegrationSuite) TestMultiFolderBackupDifferentNames
	backupSel := backupSelectorForExpected(t, test.service, expectedDests)
	t.Log("Selective backup of", backupSel)

	dcs, excludes, err := backupGC.ProduceBackupCollections(
	dcs, excludes, canUsePreviousBackup, err := backupGC.ProduceBackupCollections(
		ctx,
		backupSel,
		backupSel,
@@ -1087,6 +1072,7 @@ func (suite *GraphConnectorIntegrationSuite) TestMultiFolderBackupDifferentNames
		},
		fault.New(true))
	require.NoError(t, err, clues.ToCore(err))
	assert.True(t, canUsePreviousBackup, "can use previous backup")
	// No excludes yet because this isn't an incremental backup.
	assert.True(t, excludes.Empty())

@@ -1135,7 +1121,6 @@ func (suite *GraphConnectorIntegrationSuite) TestRestoreAndBackup_largeMailAttac

	runRestoreBackupTest(
		suite.T(),
		suite.acct,
		test,
		suite.connector.tenant,
		[]string{suite.user},
@@ -1231,7 +1216,7 @@ func (suite *GraphConnectorIntegrationSuite) TestBackup_CreatesPrefixCollections

	backupSel.SetDiscreteOwnerIDName(id, name)

	dcs, excludes, err := backupGC.ProduceBackupCollections(
	dcs, excludes, canUsePreviousBackup, err := backupGC.ProduceBackupCollections(
		ctx,
		inMock.NewProvider(id, name),
		backupSel,
@@ -1243,6 +1228,7 @@ func (suite *GraphConnectorIntegrationSuite) TestBackup_CreatesPrefixCollections
		},
		fault.New(true))
	require.NoError(t, err)
	assert.True(t, canUsePreviousBackup, "can use previous backup")
	// No excludes yet because this isn't an incremental backup.
	assert.True(t, excludes.Empty())


@@ -8,7 +8,6 @@ import (
	exchMock "github.com/alcionai/corso/src/internal/connector/exchange/mock"
	"github.com/alcionai/corso/src/internal/connector/onedrive/metadata"
	"github.com/alcionai/corso/src/internal/data"
	"github.com/alcionai/corso/src/pkg/account"
	"github.com/alcionai/corso/src/pkg/control"
	"github.com/alcionai/corso/src/pkg/path"
)
@@ -39,7 +38,6 @@ type ItemInfo struct {
}

type ConfigInfo struct {
	Acct     account.Account
	Opts     control.Options
	Resource Resource
	Service  path.ServiceType
@@ -104,7 +102,7 @@ type mockRestoreCollection struct {
	auxItems map[string]data.Stream
}

func (rc mockRestoreCollection) Fetch(
func (rc mockRestoreCollection) FetchItemByName(
	ctx context.Context,
	name string,
) (data.Stream, error) {

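The rename from Fetch to FetchItemByName tracks the fetch-by-name lookup used elsewhere in this commit (see failingColl's FetchItemByName further down). A plausible body for the mock, assuming lookups go through the auxItems map shown above (the repo's actual implementation may differ):

// Sketch only; assumes auxItems is the lookup table for fetched items.
func (rc mockRestoreCollection) FetchItemByName(
	ctx context.Context,
	name string,
) (data.Stream, error) {
	item, ok := rc.auxItems[name]
	if !ok {
		return nil, clues.New("mock item not found").With("item_name", name)
	}

	return item, nil
}
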
@@ -7,7 +7,6 @@ import (
	"github.com/alcionai/corso/src/internal/common/prefixmatcher"
	"github.com/alcionai/corso/src/internal/data"
	"github.com/alcionai/corso/src/internal/operations/inject"
	"github.com/alcionai/corso/src/pkg/account"
	"github.com/alcionai/corso/src/pkg/backup/details"
	"github.com/alcionai/corso/src/pkg/control"
	"github.com/alcionai/corso/src/pkg/fault"
@@ -39,9 +38,10 @@ func (gc GraphConnector) ProduceBackupCollections(
) (
	[]data.BackupCollection,
	prefixmatcher.StringSetReader,
	bool,
	error,
) {
	return gc.Collections, gc.Exclude, gc.Err
	return gc.Collections, gc.Exclude, gc.Err == nil, gc.Err
}

func (gc GraphConnector) IsBackupRunnable(
@@ -59,7 +59,6 @@ func (gc GraphConnector) Wait() *data.CollectionStats {
func (gc GraphConnector) ConsumeRestoreCollections(
	_ context.Context,
	_ int,
	_ account.Account,
	_ selectors.Selector,
	_ control.RestoreDestination,
	_ control.Options,

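The mock derives the new boolean directly from its configured error: gc.Err == nil. A self-contained toy illustrating the same convention (illustrative names, not repo code):

package main

import (
	"errors"
	"fmt"
)

// mockProducer mirrors the pattern above: success implies the previous
// backup is usable, a configured failure implies it is not.
type mockProducer struct{ err error }

func (m mockProducer) produce() (canUsePreviousBackup bool, err error) {
	return m.err == nil, m.err
}

func main() {
	ok, err := mockProducer{err: errors.New("boom")}.produce()
	fmt.Println(ok, err) // false boom
}
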
@@ -43,8 +43,7 @@ var (

// Collection represents a set of OneDrive objects retrieved from M365
type Collection struct {
	// configured to handle large item downloads
	itemClient graph.Requester
	handler BackupHandler

	// data is used to share data streams with the collection consumer
	data chan data.Stream
@@ -55,16 +54,11 @@ type Collection struct {
	driveItems map[string]models.DriveItemable

	// Primary M365 ID of the drive this collection was created from
	driveID string
	// Display Name of the associated drive
	driveName string
	source driveSource
	service graph.Servicer
	statusUpdater support.StatusUpdater
	itemGetter itemGetterFunc
	itemReader itemReaderFunc
	itemMetaReader itemMetaReaderFunc
	ctrl control.Options
	driveID string
	driveName string

	statusUpdater support.StatusUpdater
	ctrl control.Options

	// PrevPath is the previous hierarchical path used by this collection.
	// It may be the same as fullPath, if the folder was not renamed or
@@ -92,29 +86,6 @@ type Collection struct {
	doNotMergeItems bool
}

// itemGetterFunc gets a specified item
type itemGetterFunc func(
	ctx context.Context,
	srv graph.Servicer,
	driveID, itemID string,
) (models.DriveItemable, error)

// itemReaderFunc returns a reader for the specified item
type itemReaderFunc func(
	ctx context.Context,
	client graph.Requester,
	item models.DriveItemable,
) (details.ItemInfo, io.ReadCloser, error)

// itemMetaReaderFunc returns a reader for the metadata of the
// specified item
type itemMetaReaderFunc func(
	ctx context.Context,
	service graph.Servicer,
	driveID string,
	item models.DriveItemable,
) (io.ReadCloser, int, error)

func pathToLocation(p path.Path) (*path.Builder, error) {
	if p == nil {
		return nil, nil
@@ -130,13 +101,11 @@ func pathToLocation(p path.Path) (*path.Builder, error) {

// NewCollection creates a Collection
func NewCollection(
	itemClient graph.Requester,
	handler BackupHandler,
	currPath path.Path,
	prevPath path.Path,
	driveID string,
	service graph.Servicer,
	statusUpdater support.StatusUpdater,
	source driveSource,
	ctrlOpts control.Options,
	colScope collectionScope,
	doNotMergeItems bool,
@@ -156,13 +125,11 @@ func NewCollection(
	}

	c := newColl(
		itemClient,
		handler,
		currPath,
		prevPath,
		driveID,
		service,
		statusUpdater,
		source,
		ctrlOpts,
		colScope,
		doNotMergeItems)
@@ -174,26 +141,21 @@ func NewCollection(
}

func newColl(
	gr graph.Requester,
	handler BackupHandler,
	currPath path.Path,
	prevPath path.Path,
	driveID string,
	service graph.Servicer,
	statusUpdater support.StatusUpdater,
	source driveSource,
	ctrlOpts control.Options,
	colScope collectionScope,
	doNotMergeItems bool,
) *Collection {
	c := &Collection{
		itemClient: gr,
		itemGetter: api.GetDriveItem,
		handler: handler,
		folderPath: currPath,
		prevPath: prevPath,
		driveItems: map[string]models.DriveItemable{},
		driveID: driveID,
		source: source,
		service: service,
		data: make(chan data.Stream, graph.Parallelism(path.OneDriveMetadataService).CollectionBufferSize()),
		statusUpdater: statusUpdater,
		ctrl: ctrlOpts,
@@ -202,16 +164,6 @@ func newColl(
		doNotMergeItems: doNotMergeItems,
	}

	// Allows tests to set a mock populator
	switch source {
	case SharePointSource:
		c.itemReader = sharePointItemReader
		c.itemMetaReader = sharePointItemMetaReader
	default:
		c.itemReader = oneDriveItemReader
		c.itemMetaReader = oneDriveItemMetaReader
	}

	return c
}

@@ -222,7 +174,8 @@ func (oc *Collection) Add(item models.DriveItemable) bool {
	_, found := oc.driveItems[ptr.Val(item.GetId())]
	oc.driveItems[ptr.Val(item.GetId())] = item

	return !found // !found = new
	// if !found, it's a new addition
	return !found
}

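This refactor swaps the trio of injected functions (itemGetter, itemReader, itemMetaReader) plus the source/service fields for a single handler seam. A sketch of that seam, reconstructed from the call sites visible in this commit; the repo's real BackupHandler may declare more methods, and the pager return types here are assumptions:

// Sketch only; method set inferred from oc.handler.X and c.handler.X calls
// in this diff.
type BackupHandler interface {
	GetItemer  // GetItem(ctx, driveID, itemID), used by the expired-url retry
	api.Getter // raw content download

	PathPrefix(tenantID, resourceOwner, driveID string) (path.Path, error)
	CanonicalPath(pb *path.Builder, tenantID, resourceOwner string) (path.Path, error)
	ServiceCat() (path.ServiceType, path.CategoryType)
	NewDrivePager(resourceOwner string, fields []string) api.DrivePager
	NewItemPager(driveID, link string, fields []string) itemPager // assumed return type
	FormatDisplayPath(driveName string, pb *path.Builder) string
	NewLocationIDer(driveID string, elems ...string) details.LocationIDer
	AugmentItemInfo(dii details.ItemInfo, item models.DriveItemable, size int64, parentPath *path.Builder) details.ItemInfo
}
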
// Remove removes an item from the collection
@@ -246,7 +199,7 @@ func (oc *Collection) IsEmpty() bool {
// Items() returns the channel containing M365 objects
func (oc *Collection) Items(
	ctx context.Context,
	errs *fault.Bus, // TODO: currently unused while onedrive isn't up to date with clues/fault
	errs *fault.Bus,
) <-chan data.Stream {
	go oc.populateItems(ctx, errs)
	return oc.data
@@ -274,21 +227,7 @@ func (oc Collection) PreviousLocationPath() details.LocationIDer {
		return nil
	}

	var ider details.LocationIDer

	switch oc.source {
	case OneDriveSource:
		ider = details.NewOneDriveLocationIDer(
			oc.driveID,
			oc.prevLocPath.Elements()...)

	default:
		ider = details.NewSharePointLocationIDer(
			oc.driveID,
			oc.prevLocPath.Elements()...)
	}

	return ider
	return oc.handler.NewLocationIDer(oc.driveID, oc.prevLocPath.Elements()...)
}

func (oc Collection) State() data.CollectionState {
@@ -328,14 +267,7 @@ func (oc *Collection) getDriveItemContent(
		el = errs.Local()
	)

	itemData, err := downloadContent(
		ctx,
		oc.service,
		oc.itemGetter,
		oc.itemReader,
		oc.itemClient,
		item,
		oc.driveID)
	itemData, err := downloadContent(ctx, oc.handler, item, oc.driveID)
	if err != nil {
		if clues.HasLabel(err, graph.LabelsMalware) || (item != nil && item.GetMalware() != nil) {
			logger.CtxErr(ctx, err).With("skipped_reason", fault.SkipMalware).Info("item flagged as malware")
@@ -377,19 +309,21 @@ func (oc *Collection) getDriveItemContent(
	return itemData, nil
}

type itemAndAPIGetter interface {
	GetItemer
	api.Getter
}

// downloadContent attempts to fetch the item content. If the content url
// is expired (ie, returns a 401), it re-fetches the item to get a new download
// url and tries again.
func downloadContent(
	ctx context.Context,
	svc graph.Servicer,
	igf itemGetterFunc,
	irf itemReaderFunc,
	gr graph.Requester,
	iaag itemAndAPIGetter,
	item models.DriveItemable,
	driveID string,
) (io.ReadCloser, error) {
	_, content, err := irf(ctx, gr, item)
	content, err := downloadItem(ctx, iaag, item)
	if err == nil {
		return content, nil
	} else if !graph.IsErrUnauthorized(err) {
@@ -400,12 +334,12 @@ func downloadContent(
	// token, and that we've overrun the available window to
	// download the actual file. Re-downloading the item will
	// refresh that download url.
	di, err := igf(ctx, svc, driveID, ptr.Val(item.GetId()))
	di, err := iaag.GetItem(ctx, driveID, ptr.Val(item.GetId()))
	if err != nil {
		return nil, clues.Wrap(err, "retrieving expired item")
	}

	_, content, err = irf(ctx, gr, di)
	content, err = downloadItem(ctx, iaag, di)
	if err != nil {
		return nil, clues.Wrap(err, "content download retry")
	}
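The retry above is a single refresh-then-retry, not a loop: a 401 on a cached pre-signed url is treated as expiry, the item is re-fetched to mint a fresh url, and the download is attempted exactly once more. The same pattern in generic form (a sketch; fetch, refresh, and isUnauthorized are illustrative stand-ins for downloadItem, GetItem, and graph.IsErrUnauthorized):

// fetchWithRefresh retries a download once after refreshing an expired url.
func fetchWithRefresh(
	fetch func() (io.ReadCloser, error),
	refresh func() error,
	isUnauthorized func(error) bool,
) (io.ReadCloser, error) {
	rc, err := fetch()
	if err == nil {
		return rc, nil
	}

	if !isUnauthorized(err) {
		return nil, err
	}

	// a 401 here means the pre-signed download url expired; refresh it
	// and retry exactly once.
	if err := refresh(); err != nil {
		return nil, fmt.Errorf("refreshing download url: %w", err)
	}

	return fetch()
}
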
@@ -428,16 +362,13 @@ func (oc *Collection) populateItems(ctx context.Context, errs *fault.Bus) {

	// Retrieve the OneDrive folder path to set later in
	// `details.OneDriveInfo`
	parentPathString, err := path.GetDriveFolderPath(oc.folderPath)
	parentPath, err := path.GetDriveFolderPath(oc.folderPath)
	if err != nil {
		oc.reportAsCompleted(ctx, 0, 0, 0)
		return
	}

	queuedPath := "/" + parentPathString
	if oc.source == SharePointSource && len(oc.driveName) > 0 {
		queuedPath = "/" + oc.driveName + queuedPath
	}
	queuedPath := oc.handler.FormatDisplayPath(oc.driveName, parentPath)

	folderProgress := observe.ProgressWithCount(
		ctx,
@@ -498,25 +429,13 @@ func (oc *Collection) populateItems(ctx context.Context, errs *fault.Bus) {
	}

	// Fetch metadata for the file
	itemMeta, itemMetaSize, err = oc.itemMetaReader(
		ctx,
		oc.service,
		oc.driveID,
		item)

	itemMeta, itemMetaSize, err = downloadItemMeta(ctx, oc.handler, oc.driveID, item)
	if err != nil {
		el.AddRecoverable(clues.Wrap(err, "getting item metadata").Label(fault.LabelForceNoBackupCreation))
		return
	}

	switch oc.source {
	case SharePointSource:
		itemInfo.SharePoint = sharePointItemInfo(item, itemSize)
		itemInfo.SharePoint.ParentPath = parentPathString
	default:
		itemInfo.OneDrive = oneDriveItemInfo(item, itemSize)
		itemInfo.OneDrive.ParentPath = parentPathString
	}
	itemInfo = oc.handler.AugmentItemInfo(itemInfo, item, itemSize, parentPath)

	ctx = clues.Add(ctx, "item_info", itemInfo)

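FormatDisplayPath folds the removed source-conditional branch into the handler. A sketch of the two behaviors it replaces, reconstructed from the removed code above (type and method shapes are assumptions):

// Sketch only; mirrors the removed queuedPath branch.
type oneDrivePather struct{}

func (oneDrivePather) FormatDisplayPath(_ string, pb *path.Builder) string {
	return "/" + pb.String()
}

type sharePointPather struct{}

func (sharePointPather) FormatDisplayPath(driveName string, pb *path.Builder) string {
	if len(driveName) == 0 {
		return "/" + pb.String()
	}

	return "/" + driveName + "/" + pb.String()
}
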
(File diff suppressed because it is too large.)
@@ -14,6 +14,7 @@ import (
	"github.com/alcionai/corso/src/internal/common/prefixmatcher"
	"github.com/alcionai/corso/src/internal/common/ptr"
	"github.com/alcionai/corso/src/internal/connector/graph"
	odConsts "github.com/alcionai/corso/src/internal/connector/onedrive/consts"
	"github.com/alcionai/corso/src/internal/connector/onedrive/metadata"
	"github.com/alcionai/corso/src/internal/connector/support"
	"github.com/alcionai/corso/src/internal/data"
@@ -25,14 +26,6 @@ import (
	"github.com/alcionai/corso/src/pkg/services/m365/api"
)

type driveSource int

const (
	unknownDriveSource driveSource = iota
	OneDriveSource
	SharePointSource
)

type collectionScope int

const (
@@ -47,21 +40,7 @@ const (
	CollectionScopePackage
)

const (
	restrictedDirectory = "Site Pages"
	rootDrivePattern    = "/drives/%s/root:"
)

func (ds driveSource) toPathServiceCat() (path.ServiceType, path.CategoryType) {
	switch ds {
	case OneDriveSource:
		return path.OneDriveService, path.FilesCategory
	case SharePointSource:
		return path.SharePointService, path.LibrariesCategory
	default:
		return path.UnknownService, path.UnknownCategory
	}
}
const restrictedDirectory = "Site Pages"

type folderMatcher interface {
	IsAny() bool
@@ -71,14 +50,11 @@ type folderMatcher interface {
// Collections is used to retrieve drive data for a
// resource owner, which can be either a user or a sharepoint site.
type Collections struct {
	// configured to handle large item downloads
	itemClient graph.Requester
	handler BackupHandler

	tenant string
	tenantID string
	resourceOwner string
	source driveSource
	matcher folderMatcher
	service graph.Servicer
	statusUpdater support.StatusUpdater

	ctrl control.Options
@@ -88,17 +64,6 @@ type Collections struct {
	// driveID -> itemID -> collection
	CollectionMap map[string]map[string]*Collection

	// Not the most ideal, but allows us to change the pager function for testing
	// as needed. This will allow us to mock out some scenarios during testing.
	drivePagerFunc func(
		source driveSource,
		servicer graph.Servicer,
		resourceOwner string,
		fields []string,
	) (api.DrivePager, error)
	itemPagerFunc driveItemPagerFunc
	servicePathPfxFunc pathPrefixerFunc

	// Track stats from drive enumeration. Represents the items backed up.
	NumItems int
	NumFiles int
@@ -106,36 +71,28 @@ type Collections struct {
}

func NewCollections(
	itemClient graph.Requester,
	tenant string,
	bh BackupHandler,
	tenantID string,
	resourceOwner string,
	source driveSource,
	matcher folderMatcher,
	service graph.Servicer,
	statusUpdater support.StatusUpdater,
	ctrlOpts control.Options,
) *Collections {
	return &Collections{
		itemClient: itemClient,
		tenant: tenant,
		resourceOwner: resourceOwner,
		source: source,
		matcher: matcher,
		CollectionMap: map[string]map[string]*Collection{},
		drivePagerFunc: PagerForSource,
		itemPagerFunc: defaultItemPager,
		servicePathPfxFunc: pathPrefixerForSource(tenant, resourceOwner, source),
		service: service,
		statusUpdater: statusUpdater,
		ctrl: ctrlOpts,
		handler: bh,
		tenantID: tenantID,
		resourceOwner: resourceOwner,
		matcher: matcher,
		CollectionMap: map[string]map[string]*Collection{},
		statusUpdater: statusUpdater,
		ctrl: ctrlOpts,
	}
}

func deserializeMetadata(
	ctx context.Context,
	cols []data.RestoreCollection,
	errs *fault.Bus,
) (map[string]string, map[string]map[string]string, error) {
) (map[string]string, map[string]map[string]string, bool, error) {
	logger.Ctx(ctx).Infow(
		"deserializing previous backup metadata",
		"num_collections", len(cols))
@@ -143,11 +100,11 @@ func deserializeMetadata(
	var (
		prevDeltas  = map[string]string{}
		prevFolders = map[string]map[string]string{}
		el = errs.Local()
		errs = fault.New(true) // metadata item reads should not fail backup
	)

	for _, col := range cols {
		if el.Failure() != nil {
		if errs.Failure() != nil {
			break
		}

@@ -156,7 +113,7 @@ func deserializeMetadata(
		for breakLoop := false; !breakLoop; {
			select {
			case <-ctx.Done():
				return nil, nil, clues.Wrap(ctx.Err(), "deserializing previous backup metadata").WithClues(ctx)
				return nil, nil, false, clues.Wrap(ctx.Err(), "deserializing previous backup metadata").WithClues(ctx)

			case item, ok := <-items:
				if !ok {
@@ -196,7 +153,7 @@ func deserializeMetadata(
				// these cases. We can make the logic for deciding when to continue vs.
				// when to fail less strict in the future if needed.
				if err != nil {
					return nil, nil, clues.Stack(err).WithClues(ictx)
					return nil, nil, false, clues.Stack(err).WithClues(ictx)
				}
			}
		}
@@ -228,7 +185,14 @@ func deserializeMetadata(
		}
	}

	return prevDeltas, prevFolders, el.Failure()
	// if reads from items failed, return empty but no error
	if errs.Failure() != nil {
		logger.CtxErr(ctx, errs.Failure()).Info("reading metadata collection items")

		return map[string]string{}, map[string]map[string]string{}, false, nil
	}

	return prevDeltas, prevFolders, true, nil
}

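deserializeMetadata now distinguishes three outcomes rather than folding everything into the error. A sketch of a caller classifying them (an illustrative wrapper, not repo code):

// classifyMetadata shows the three return paths above: hard failure,
// unreadable-but-recoverable metadata, and usable incremental state.
func classifyMetadata(
	ctx context.Context,
	cols []data.RestoreCollection,
) (runFullBackup bool, err error) {
	deltas, paths, canUsePrevious, err := deserializeMetadata(ctx, cols)
	if err != nil {
		// hard failure (corrupt or duplicated metadata): fail the backup.
		return false, err
	}

	if !canUsePrevious {
		// soft failure: items could not be read; deltas and paths are empty,
		// so run a full backup instead of an incremental one.
		return true, nil
	}

	// incremental backup may rely on deltas and paths.
	_, _ = deltas, paths

	return false, nil
}
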
var errExistingMapping = clues.New("mapping already exists for same drive ID")
@@ -271,10 +235,10 @@ func (c *Collections) Get(
	prevMetadata []data.RestoreCollection,
	ssmb *prefixmatcher.StringSetMatchBuilder,
	errs *fault.Bus,
) ([]data.BackupCollection, error) {
	prevDeltas, oldPathsByDriveID, err := deserializeMetadata(ctx, prevMetadata, errs)
) ([]data.BackupCollection, bool, error) {
	prevDeltas, oldPathsByDriveID, canUsePreviousBackup, err := deserializeMetadata(ctx, prevMetadata)
	if err != nil {
		return nil, err
		return nil, false, err
	}

	driveTombstones := map[string]struct{}{}
@@ -287,14 +251,11 @@ func (c *Collections) Get(
	defer close(driveComplete)

	// Enumerate drives for the specified resourceOwner
	pager, err := c.drivePagerFunc(c.source, c.service, c.resourceOwner, nil)
	if err != nil {
		return nil, graph.Stack(ctx, err)
	}
	pager := c.handler.NewDrivePager(c.resourceOwner, nil)

	drives, err := api.GetAllDrives(ctx, pager, true, maxDrivesRetries)
	if err != nil {
		return nil, err
		return nil, false, err
	}

	var (
@@ -331,7 +292,7 @@ func (c *Collections) Get(

		delta, paths, excluded, err := collectItems(
			ictx,
			c.itemPagerFunc(c.service, driveID, ""),
			c.handler.NewItemPager(driveID, "", api.DriveItemSelectDefault()),
			driveID,
			driveName,
			c.UpdateCollections,
@@ -339,7 +300,7 @@ func (c *Collections) Get(
			prevDelta,
			errs)
		if err != nil {
			return nil, err
			return nil, false, err
		}

		// Used for logging below.
@@ -370,16 +331,14 @@ func (c *Collections) Get(

		// For both cases we don't need to do set difference on folder map if the
		// delta token was valid because we should see all the changes.
		if !delta.Reset && len(excluded) == 0 {
			continue
		} else if !delta.Reset {
			p, err := GetCanonicalPath(
				fmt.Sprintf(rootDrivePattern, driveID),
				c.tenant,
				c.resourceOwner,
				c.source)
		if !delta.Reset {
			if len(excluded) == 0 {
				continue
			}

			p, err := c.handler.CanonicalPath(odConsts.DriveFolderPrefixBuilder(driveID), c.tenantID, c.resourceOwner)
			if err != nil {
				return nil, clues.Wrap(err, "making exclude prefix").WithClues(ictx)
				return nil, false, clues.Wrap(err, "making exclude prefix").WithClues(ictx)
			}

			ssmb.Add(p.String(), excluded)
@@ -407,22 +366,20 @@ func (c *Collections) Get(
			prevPath, err := path.FromDataLayerPath(p, false)
			if err != nil {
				err = clues.Wrap(err, "invalid previous path").WithClues(ictx).With("deleted_path", p)
				return nil, err
				return nil, false, err
			}

			col, err := NewCollection(
				c.itemClient,
				c.handler,
				nil, // delete the folder
				prevPath,
				driveID,
				c.service,
				c.statusUpdater,
				c.source,
				c.ctrl,
				CollectionScopeUnknown,
				true)
			if err != nil {
				return nil, clues.Wrap(err, "making collection").WithClues(ictx)
				return nil, false, clues.Wrap(err, "making collection").WithClues(ictx)
			}

			c.CollectionMap[driveID][fldID] = col
@@ -442,33 +399,31 @@ func (c *Collections) Get(

	// generate tombstones for drives that were removed.
	for driveID := range driveTombstones {
		prevDrivePath, err := c.servicePathPfxFunc(driveID)
		prevDrivePath, err := c.handler.PathPrefix(c.tenantID, c.resourceOwner, driveID)
		if err != nil {
			return nil, clues.Wrap(err, "making drive tombstone previous path").WithClues(ctx)
			return nil, false, clues.Wrap(err, "making drive tombstone for previous path").WithClues(ctx)
		}

		coll, err := NewCollection(
			c.itemClient,
			c.handler,
			nil, // delete the drive
			prevDrivePath,
			driveID,
			c.service,
			c.statusUpdater,
			c.source,
			c.ctrl,
			CollectionScopeUnknown,
			true)
		if err != nil {
			return nil, clues.Wrap(err, "making drive tombstone").WithClues(ctx)
			return nil, false, clues.Wrap(err, "making drive tombstone").WithClues(ctx)
		}

		collections = append(collections, coll)
	}

	// add metadata collections
	service, category := c.source.toPathServiceCat()
	service, category := c.handler.ServiceCat()
	md, err := graph.MakeMetadataCollection(
		c.tenant,
		c.tenantID,
		c.resourceOwner,
		service,
		category,
@@ -487,7 +442,7 @@ func (c *Collections) Get(
		collections = append(collections, md)
	}

	return collections, nil
	return collections, canUsePreviousBackup, nil
}

func updateCollectionPaths(
@@ -601,13 +556,11 @@ func (c *Collections) handleDelete(
	}

	col, err := NewCollection(
		c.itemClient,
		nil,
		c.handler,
		nil, // deletes the collection
		prevPath,
		driveID,
		c.service,
		c.statusUpdater,
		c.source,
		c.ctrl,
		CollectionScopeUnknown,
		// DoNotMerge is not checked for deleted items.
@@ -629,14 +582,12 @@ func (c *Collections) getCollectionPath(
	item models.DriveItemable,
) (path.Path, error) {
	var (
		collectionPathStr string
		isRoot = item.GetRoot() != nil
		isFile = item.GetFile() != nil
		pb = odConsts.DriveFolderPrefixBuilder(driveID)
		isRoot = item.GetRoot() != nil
		isFile = item.GetFile() != nil
	)

	if isRoot {
		collectionPathStr = fmt.Sprintf(rootDrivePattern, driveID)
	} else {
	if !isRoot {
		if item.GetParentReference() == nil ||
			item.GetParentReference().GetPath() == nil {
			err := clues.New("no parent reference").
@@ -645,15 +596,10 @@ func (c *Collections) getCollectionPath(
			return nil, err
		}

		collectionPathStr = ptr.Val(item.GetParentReference().GetPath())
		pb = path.Builder{}.Append(path.Split(ptr.Val(item.GetParentReference().GetPath()))...)
	}

	collectionPath, err := GetCanonicalPath(
		collectionPathStr,
		c.tenant,
		c.resourceOwner,
		c.source,
	)
	collectionPath, err := c.handler.CanonicalPath(pb, c.tenantID, c.resourceOwner)
	if err != nil {
		return nil, clues.Wrap(err, "making item path")
	}
@@ -794,17 +740,14 @@ func (c *Collections) UpdateCollections(
	}

	col, err := NewCollection(
		c.itemClient,
		c.handler,
		collectionPath,
		prevPath,
		driveID,
		c.service,
		c.statusUpdater,
		c.source,
		c.ctrl,
		colScope,
		invalidPrevDelta,
	)
		invalidPrevDelta)
	if err != nil {
		return clues.Stack(err).WithClues(ictx)
	}
@@ -889,33 +832,9 @@ func shouldSkipDrive(ctx context.Context, drivePath path.Path, m folderMatcher,
		(drivePath.Category() == path.LibrariesCategory && restrictedDirectory == driveName)
}

// GetCanonicalPath constructs the standard path for the given source.
func GetCanonicalPath(p, tenant, resourceOwner string, source driveSource) (path.Path, error) {
	var (
		pathBuilder = path.Builder{}.Append(strings.Split(p, "/")...)
		result      path.Path
		err         error
	)

	switch source {
	case OneDriveSource:
		result, err = pathBuilder.ToDataLayerOneDrivePath(tenant, resourceOwner, false)
	case SharePointSource:
		result, err = pathBuilder.ToDataLayerSharePointPath(tenant, resourceOwner, path.LibrariesCategory, false)
	default:
		return nil, clues.New("unrecognized data source")
	}

	if err != nil {
		return nil, clues.Wrap(err, "converting to canonical path")
	}

	return result, nil
}

func includePath(ctx context.Context, m folderMatcher, folderPath path.Path) bool {
	// Check if the folder is allowed by the scope.
	folderPathString, err := path.GetDriveFolderPath(folderPath)
	pb, err := path.GetDriveFolderPath(folderPath)
	if err != nil {
		logger.Ctx(ctx).With("err", err).Error("getting drive folder path")
		return true
@@ -923,11 +842,11 @@ func includePath(ctx context.Context, m folderMatcher, folderPath path.Path) boo

	// Hack for the edge case where we're looking at the root folder and can
	// select any folder. Right now the root folder has an empty folder path.
	if len(folderPathString) == 0 && m.IsAny() {
	if len(pb.Elements()) == 0 && m.IsAny() {
		return true
	}

	return m.Matches(folderPathString)
	return m.Matches(pb.String())
}

func updatePath(paths map[string]string, id, newPath string) {

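odConsts.DriveFolderPrefixBuilder replaces the old fmt.Sprintf(rootDrivePattern, driveID) string throughout this file. A sketch of what it plausibly builds, given that rootDrivePattern was "/drives/%s/root:" (the real helper lives in internal/connector/onedrive/consts and may differ):

// Sketch only; splits the old "/drives/%s/root:" pattern into builder elements.
func DriveFolderPrefixBuilder(driveID string) *path.Builder {
	return path.Builder{}.Append("drives", driveID, "root:")
}
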
@@ -2,8 +2,6 @@ package onedrive

import (
	"context"
	"fmt"
	"strings"
	"testing"

	"github.com/alcionai/clues"
@@ -18,7 +16,9 @@ import (
	"github.com/alcionai/corso/src/internal/common/prefixmatcher"
	pmMock "github.com/alcionai/corso/src/internal/common/prefixmatcher/mock"
	"github.com/alcionai/corso/src/internal/connector/graph"
	odConsts "github.com/alcionai/corso/src/internal/connector/onedrive/consts"
	"github.com/alcionai/corso/src/internal/connector/onedrive/metadata"
	"github.com/alcionai/corso/src/internal/connector/onedrive/mock"
	"github.com/alcionai/corso/src/internal/connector/support"
	"github.com/alcionai/corso/src/internal/data"
	"github.com/alcionai/corso/src/internal/tester"
@@ -27,7 +27,7 @@ import (
	"github.com/alcionai/corso/src/pkg/path"
	"github.com/alcionai/corso/src/pkg/selectors"
	"github.com/alcionai/corso/src/pkg/services/m365/api"
	"github.com/alcionai/corso/src/pkg/services/m365/api/mock"
	apiMock "github.com/alcionai/corso/src/pkg/services/m365/api/mock"
)

type statePath struct {
@@ -38,6 +38,7 @@ type statePath struct {

func getExpectedStatePathGenerator(
	t *testing.T,
	bh BackupHandler,
	tenant, user, base string,
) func(data.CollectionState, ...string) statePath {
	return func(state data.CollectionState, pths ...string) statePath {
@@ -53,11 +54,13 @@ func getExpectedStatePathGenerator(
			require.Len(t, pths, 1, "invalid number of paths to getExpectedStatePathGenerator")
		} else {
			require.Len(t, pths, 2, "invalid number of paths to getExpectedStatePathGenerator")
			p2, err = GetCanonicalPath(base+pths[1], tenant, user, OneDriveSource)
			pb := path.Builder{}.Append(path.Split(base + pths[1])...)
			p2, err = bh.CanonicalPath(pb, tenant, user)
			require.NoError(t, err, clues.ToCore(err))
		}

		p1, err = GetCanonicalPath(base+pths[0], tenant, user, OneDriveSource)
		pb := path.Builder{}.Append(path.Split(base + pths[0])...)
		p1, err = bh.CanonicalPath(pb, tenant, user)
		require.NoError(t, err, clues.ToCore(err))

		switch state {
@@ -81,14 +84,17 @@ func getExpectedStatePathGenerator(
	}
}

func getExpectedPathGenerator(t *testing.T,
func getExpectedPathGenerator(
	t *testing.T,
	bh BackupHandler,
	tenant, user, base string,
) func(string) string {
	return func(path string) string {
		p, err := GetCanonicalPath(base+path, tenant, user, OneDriveSource)
	return func(p string) string {
		pb := path.Builder{}.Append(path.Split(base + p)...)
		cp, err := bh.CanonicalPath(pb, tenant, user)
		require.NoError(t, err, clues.ToCore(err))

		return p.String()
		return cp.String()
	}
}

@@ -100,52 +106,6 @@ func TestOneDriveCollectionsUnitSuite(t *testing.T) {
	suite.Run(t, &OneDriveCollectionsUnitSuite{Suite: tester.NewUnitSuite(t)})
}

func (suite *OneDriveCollectionsUnitSuite) TestGetCanonicalPath() {
	tenant, resourceOwner := "tenant", "resourceOwner"

	table := []struct {
		name      string
		source    driveSource
		dir       []string
		expect    string
		expectErr assert.ErrorAssertionFunc
	}{
		{
			name:      "onedrive",
			source:    OneDriveSource,
			dir:       []string{"onedrive"},
			expect:    "tenant/onedrive/resourceOwner/files/onedrive",
			expectErr: assert.NoError,
		},
		{
			name:      "sharepoint",
			source:    SharePointSource,
			dir:       []string{"sharepoint"},
			expect:    "tenant/sharepoint/resourceOwner/libraries/sharepoint",
			expectErr: assert.NoError,
		},
		{
			name:      "unknown",
			source:    unknownDriveSource,
			dir:       []string{"unknown"},
			expectErr: assert.Error,
		},
	}
	for _, test := range table {
		suite.Run(test.name, func() {
			t := suite.T()
			p := strings.Join(test.dir, "/")

			result, err := GetCanonicalPath(p, tenant, resourceOwner, test.source)
			test.expectErr(t, err, clues.ToCore(err))

			if result != nil {
				assert.Equal(t, test.expect, result.String())
			}
		})
	}
}

func getDelList(files ...string) map[string]struct{} {
	delList := map[string]struct{}{}
	for _, file := range files {
@@ -168,9 +128,10 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
		pkg = "/package"
	)

	testBaseDrivePath := fmt.Sprintf(rootDrivePattern, "driveID1")
	expectedPath := getExpectedPathGenerator(suite.T(), tenant, user, testBaseDrivePath)
	expectedStatePath := getExpectedStatePathGenerator(suite.T(), tenant, user, testBaseDrivePath)
	bh := itemBackupHandler{}
	testBaseDrivePath := odConsts.DriveFolderPrefixBuilder("driveID1").String()
	expectedPath := getExpectedPathGenerator(suite.T(), bh, tenant, user, testBaseDrivePath)
	expectedStatePath := getExpectedStatePathGenerator(suite.T(), bh, tenant, user, testBaseDrivePath)

	tests := []struct {
		testCase string
@@ -782,12 +743,10 @@ func (suite *OneDriveCollectionsUnitSuite) TestUpdateCollections() {
			maps.Copy(outputFolderMap, tt.inputFolderMap)

			c := NewCollections(
				graph.NewNoTimeoutHTTPWrapper(),
				&itemBackupHandler{api.Drives{}},
				tenant,
				user,
				OneDriveSource,
				testFolderMatcher{tt.scope},
				&MockGraphService{},
				nil,
				control.Options{ToggleFeatures: control.Toggles{}})

@@ -844,10 +803,11 @@ func (suite *OneDriveCollectionsUnitSuite) TestDeserializeMetadata() {
	table := []struct {
		name string
		// Each function returns the set of files for a single data.Collection.
		cols           []func() []graph.MetadataCollectionEntry
		expectedDeltas map[string]string
		expectedPaths  map[string]map[string]string
		errCheck       assert.ErrorAssertionFunc
		cols                 []func() []graph.MetadataCollectionEntry
		expectedDeltas       map[string]string
		expectedPaths        map[string]map[string]string
		canUsePreviousBackup bool
		errCheck             assert.ErrorAssertionFunc
	}{
		{
			name: "SuccessOneDriveAllOneCollection",
@@ -877,7 +837,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestDeserializeMetadata() {
					folderID1: path1,
				},
			},
			errCheck: assert.NoError,
			canUsePreviousBackup: true,
			errCheck:             assert.NoError,
		},
		{
			name: "MissingPaths",
@@ -891,9 +852,10 @@ func (suite *OneDriveCollectionsUnitSuite) TestDeserializeMetadata() {
					}
				},
			},
			expectedDeltas: map[string]string{},
			expectedPaths:  map[string]map[string]string{},
			errCheck:       assert.NoError,
			expectedDeltas:       map[string]string{},
			expectedPaths:        map[string]map[string]string{},
			canUsePreviousBackup: true,
			errCheck:             assert.NoError,
		},
		{
			name: "MissingDeltas",
@@ -917,7 +879,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestDeserializeMetadata() {
					folderID1: path1,
				},
			},
			errCheck: assert.NoError,
			canUsePreviousBackup: true,
			errCheck:             assert.NoError,
		},
		{
			// An empty path map but valid delta results in metadata being returned
@@ -940,9 +903,10 @@ func (suite *OneDriveCollectionsUnitSuite) TestDeserializeMetadata() {
					}
				},
			},
			expectedDeltas: map[string]string{},
			expectedPaths:  map[string]map[string]string{driveID1: {}},
			errCheck:       assert.NoError,
			expectedDeltas:       map[string]string{},
			expectedPaths:        map[string]map[string]string{driveID1: {}},
			canUsePreviousBackup: true,
			errCheck:             assert.NoError,
		},
		{
			// An empty delta map but valid path results in no metadata for that drive
@@ -975,7 +939,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestDeserializeMetadata() {
					folderID1: path1,
				},
			},
			errCheck: assert.NoError,
			canUsePreviousBackup: true,
			errCheck:             assert.NoError,
		},
		{
			name: "SuccessTwoDrivesTwoCollections",
@@ -1025,7 +990,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestDeserializeMetadata() {
					folderID2: path2,
				},
			},
			errCheck: assert.NoError,
			canUsePreviousBackup: true,
			errCheck:             assert.NoError,
		},
		{
			// Bad formats are logged but skip adding entries to the maps and don't
@@ -1041,7 +1007,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestDeserializeMetadata() {
					}
				},
			},
			errCheck: assert.Error,
			canUsePreviousBackup: false,
			errCheck:             assert.Error,
		},
		{
			// Unexpected files are logged and skipped. They don't cause an error to
@@ -1077,7 +1044,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestDeserializeMetadata() {
					folderID1: path1,
				},
			},
			errCheck: assert.NoError,
			canUsePreviousBackup: true,
			errCheck:             assert.NoError,
		},
		{
			name: "DriveAlreadyFound_Paths",
@@ -1111,9 +1079,10 @@ func (suite *OneDriveCollectionsUnitSuite) TestDeserializeMetadata() {
					}
				},
			},
			expectedDeltas: nil,
			expectedPaths:  nil,
			errCheck:       assert.Error,
			expectedDeltas:       nil,
			expectedPaths:        nil,
			canUsePreviousBackup: false,
			errCheck:             assert.Error,
		},
		{
			name: "DriveAlreadyFound_Deltas",
@@ -1143,9 +1112,10 @@ func (suite *OneDriveCollectionsUnitSuite) TestDeserializeMetadata() {
					}
				},
			},
			expectedDeltas: nil,
			expectedPaths:  nil,
			errCheck:       assert.Error,
			expectedDeltas:       nil,
			expectedPaths:        nil,
			canUsePreviousBackup: false,
			errCheck:             assert.Error,
		},
	}

@@ -1168,11 +1138,12 @@ func (suite *OneDriveCollectionsUnitSuite) TestDeserializeMetadata() {
				func(*support.ConnectorOperationStatus) {})
			require.NoError(t, err, clues.ToCore(err))

			cols = append(cols, data.NotFoundRestoreCollection{Collection: mc})
			cols = append(cols, data.NoFetchRestoreCollection{Collection: mc})
		}

		deltas, paths, err := deserializeMetadata(ctx, cols, fault.New(true))
		deltas, paths, canUsePreviousBackup, err := deserializeMetadata(ctx, cols)
		test.errCheck(t, err)
		assert.Equal(t, test.canUsePreviousBackup, canUsePreviousBackup, "can use previous backup")

		assert.Equal(t, test.expectedDeltas, deltas, "deltas")
		assert.Equal(t, test.expectedPaths, paths, "paths")
@@ -1180,6 +1151,34 @@ func (suite *OneDriveCollectionsUnitSuite) TestDeserializeMetadata() {
	}
}

type failingColl struct{}

func (f failingColl) Items(ctx context.Context, errs *fault.Bus) <-chan data.Stream {
	ic := make(chan data.Stream)
	defer close(ic)

	errs.AddRecoverable(assert.AnError)

	return ic
}
func (f failingColl) FullPath() path.Path { return nil }
func (f failingColl) FetchItemByName(context.Context, string) (data.Stream, error) { return nil, nil }

// This check is to ensure that we don't error out, but still return
// canUsePreviousBackup as false on read errors
func (suite *OneDriveCollectionsUnitSuite) TestDeserializeMetadata_ReadFailure() {
	t := suite.T()

	ctx, flush := tester.NewContext(t)
	defer flush()

	fc := failingColl{}

	_, _, canUsePreviousBackup, err := deserializeMetadata(ctx, []data.RestoreCollection{fc})
	require.NoError(t, err)
	require.False(t, canUsePreviousBackup)
}

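failingColl leans on the new swallow-recoverable-errors behavior in deserializeMetadata: its Items channel closes immediately after recording a recoverable error. A hypothetical extension (not in the repo) that also counts Items calls, which a test could use to assert the reader is invoked exactly once per collection:

// countingFailingColl is a sketch; it embeds failingColl and tallies reads.
type countingFailingColl struct {
	failingColl
	calls int
}

func (c *countingFailingColl) Items(ctx context.Context, errs *fault.Bus) <-chan data.Stream {
	c.calls++
	return c.failingColl.Items(ctx, errs)
}
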
type mockDeltaPageLinker struct {
	link  *string
	delta *string
@ -1267,11 +1266,13 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
|
||||
drive2.SetName(&driveID2)
|
||||
|
||||
var (
|
||||
driveBasePath1 = fmt.Sprintf(rootDrivePattern, driveID1)
|
||||
driveBasePath2 = fmt.Sprintf(rootDrivePattern, driveID2)
|
||||
bh = itemBackupHandler{}
|
||||
|
||||
expectedPath1 = getExpectedPathGenerator(suite.T(), tenant, user, driveBasePath1)
|
||||
expectedPath2 = getExpectedPathGenerator(suite.T(), tenant, user, driveBasePath2)
|
||||
driveBasePath1 = odConsts.DriveFolderPrefixBuilder(driveID1).String()
|
||||
driveBasePath2 = odConsts.DriveFolderPrefixBuilder(driveID2).String()
|
||||
|
||||
expectedPath1 = getExpectedPathGenerator(suite.T(), bh, tenant, user, driveBasePath1)
|
||||
expectedPath2 = getExpectedPathGenerator(suite.T(), bh, tenant, user, driveBasePath2)
|
||||
|
||||
rootFolderPath1 = expectedPath1("")
|
||||
folderPath1 = expectedPath1("/folder")
|
||||
@ -1281,11 +1282,12 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
|
||||
)
|
||||
|
||||
table := []struct {
|
||||
name string
|
||||
drives []models.Driveable
|
||||
items map[string][]deltaPagerResult
|
||||
errCheck assert.ErrorAssertionFunc
|
||||
prevFolderPaths map[string]map[string]string
|
||||
name string
|
||||
drives []models.Driveable
|
||||
items map[string][]deltaPagerResult
|
||||
canUsePreviousBackup bool
|
||||
errCheck assert.ErrorAssertionFunc
|
||||
prevFolderPaths map[string]map[string]string
|
||||
// Collection name -> set of item IDs. We can't check item data because
|
||||
// that's not mocked out. Metadata is checked separately.
|
||||
expectedCollections map[string]map[data.CollectionState][]string
|
||||
@@ -1312,7 +1314,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
},
},
},
errCheck: assert.NoError,
canUsePreviousBackup: true,
errCheck: assert.NoError,
prevFolderPaths: map[string]map[string]string{
driveID1: {"root": rootFolderPath1},
},
@@ -1343,7 +1346,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
},
},
},
errCheck: assert.NoError,
canUsePreviousBackup: true,
errCheck: assert.NoError,
prevFolderPaths: map[string]map[string]string{
driveID1: {"root": rootFolderPath1},
},
@@ -1375,8 +1379,9 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
},
},
},
errCheck: assert.NoError,
prevFolderPaths: map[string]map[string]string{},
canUsePreviousBackup: true,
errCheck: assert.NoError,
prevFolderPaths: map[string]map[string]string{},
expectedCollections: map[string]map[data.CollectionState][]string{
rootFolderPath1: {data.NewState: {}},
folderPath1: {data.NewState: {"folder", "file"}},
@@ -1412,8 +1417,9 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
},
},
},
errCheck: assert.NoError,
prevFolderPaths: map[string]map[string]string{},
canUsePreviousBackup: true,
errCheck: assert.NoError,
prevFolderPaths: map[string]map[string]string{},
expectedCollections: map[string]map[data.CollectionState][]string{
rootFolderPath1: {data.NewState: {}},
folderPath1: {data.NewState: {"folder", "file"}},
@@ -1449,7 +1455,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
},
},
},
errCheck: assert.NoError,
canUsePreviousBackup: true,
errCheck: assert.NoError,
prevFolderPaths: map[string]map[string]string{
driveID1: {
"root": rootFolderPath1,
@@ -1487,7 +1494,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
},
},
},
errCheck: assert.NoError,
canUsePreviousBackup: true,
errCheck: assert.NoError,
prevFolderPaths: map[string]map[string]string{
driveID1: {},
},
@@ -1531,7 +1539,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
},
},
},
errCheck: assert.NoError,
canUsePreviousBackup: true,
errCheck: assert.NoError,
prevFolderPaths: map[string]map[string]string{
driveID1: {},
},
@@ -1582,7 +1591,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
},
},
},
errCheck: assert.NoError,
canUsePreviousBackup: true,
errCheck: assert.NoError,
prevFolderPaths: map[string]map[string]string{
driveID1: {},
driveID2: {},
@@ -1643,7 +1653,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
},
},
},
errCheck: assert.NoError,
canUsePreviousBackup: true,
errCheck: assert.NoError,
prevFolderPaths: map[string]map[string]string{
driveID1: {},
driveID2: {},
@@ -1686,7 +1697,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
},
},
},
errCheck: assert.Error,
canUsePreviousBackup: false,
errCheck: assert.Error,
prevFolderPaths: map[string]map[string]string{
driveID1: {},
},
@@ -1712,7 +1724,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
},
},
},
errCheck: assert.NoError,
canUsePreviousBackup: true,
errCheck: assert.NoError,
expectedCollections: map[string]map[data.CollectionState][]string{
rootFolderPath1: {data.NotMovedState: {"file"}},
},
@@ -1754,7 +1767,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
},
},
},
errCheck: assert.NoError,
canUsePreviousBackup: true,
errCheck: assert.NoError,
expectedCollections: map[string]map[data.CollectionState][]string{
rootFolderPath1: {data.NotMovedState: {"file"}},
expectedPath1("/folder"): {data.NewState: {"folder", "file2"}},
@@ -1796,7 +1810,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
},
},
},
errCheck: assert.NoError,
canUsePreviousBackup: true,
errCheck: assert.NoError,
prevFolderPaths: map[string]map[string]string{
driveID1: {
"root": rootFolderPath1,
@@ -1838,7 +1853,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
},
},
},
errCheck: assert.NoError,
canUsePreviousBackup: true,
errCheck: assert.NoError,
prevFolderPaths: map[string]map[string]string{
driveID1: {
"root": rootFolderPath1,
@@ -1884,7 +1900,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
},
},
},
errCheck: assert.NoError,
canUsePreviousBackup: true,
errCheck: assert.NoError,
prevFolderPaths: map[string]map[string]string{
driveID1: {
"root": rootFolderPath1,
@@ -1940,7 +1957,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
},
},
},
errCheck: assert.NoError,
canUsePreviousBackup: true,
errCheck: assert.NoError,
prevFolderPaths: map[string]map[string]string{
driveID1: {},
},
@@ -1992,7 +2010,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
},
},
},
errCheck: assert.NoError,
canUsePreviousBackup: true,
errCheck: assert.NoError,
prevFolderPaths: map[string]map[string]string{
driveID1: {
"root": rootFolderPath1,
@@ -2038,7 +2057,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
},
},
},
errCheck: assert.NoError,
canUsePreviousBackup: true,
errCheck: assert.NoError,
prevFolderPaths: map[string]map[string]string{
driveID1: {
"root": rootFolderPath1,
@@ -2080,7 +2100,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
},
},
},
errCheck: assert.NoError,
canUsePreviousBackup: true,
errCheck: assert.NoError,
prevFolderPaths: map[string]map[string]string{
driveID1: {
"root": rootFolderPath1,
@@ -2125,7 +2146,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
},
},
},
errCheck: assert.NoError,
canUsePreviousBackup: true,
errCheck: assert.NoError,
prevFolderPaths: map[string]map[string]string{
driveID1: {},
},
@@ -2167,7 +2189,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
},
},
},
errCheck: assert.NoError,
canUsePreviousBackup: true,
errCheck: assert.NoError,
prevFolderPaths: map[string]map[string]string{
driveID1: {},
},
@@ -2204,7 +2227,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
},
},
},
errCheck: assert.NoError,
canUsePreviousBackup: true,
errCheck: assert.NoError,
prevFolderPaths: map[string]map[string]string{
driveID1: {},
},
@@ -2238,7 +2262,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
},
},
},
errCheck: assert.NoError,
canUsePreviousBackup: true,
errCheck: assert.NoError,
prevFolderPaths: map[string]map[string]string{
driveID1: {},
},
@@ -2271,7 +2296,8 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
},
},
},
errCheck: assert.NoError,
canUsePreviousBackup: true,
errCheck: assert.NoError,
prevFolderPaths: map[string]map[string]string{
driveID1: {"root": rootFolderPath1},
driveID2: {"root": rootFolderPath2},
@@ -2297,42 +2323,31 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
ctx, flush := tester.NewContext(t)
defer flush()

drivePagerFunc := func(
source driveSource,
servicer graph.Servicer,
resourceOwner string,
fields []string,
) (api.DrivePager, error) {
return &mock.DrivePager{
ToReturn: []mock.PagerResult{
{
Drives: test.drives,
},
},
}, nil
mockDrivePager := &apiMock.DrivePager{
ToReturn: []apiMock.PagerResult{
{Drives: test.drives},
},
}

itemPagerFunc := func(
servicer graph.Servicer,
driveID, link string,
) itemPager {
return &mockItemPager{
itemPagers := map[string]api.DriveItemEnumerator{}

for driveID := range test.items {
itemPagers[driveID] = &mockItemPager{
toReturn: test.items[driveID],
}
}

mbh := mock.DefaultOneDriveBH()
mbh.DrivePagerV = mockDrivePager
mbh.ItemPagerV = itemPagers

c := NewCollections(
graph.NewNoTimeoutHTTPWrapper(),
mbh,
tenant,
user,
OneDriveSource,
testFolderMatcher{anyFolder},
&MockGraphService{},
func(*support.ConnectorOperationStatus) {},
control.Options{ToggleFeatures: control.Toggles{}},
)
c.drivePagerFunc = drivePagerFunc
c.itemPagerFunc = itemPagerFunc
control.Options{ToggleFeatures: control.Toggles{}})

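The rewrite above replaces the per-function pager injection (c.drivePagerFunc / c.itemPagerFunc) with a mock BackupHandler that owns the pagers. A minimal, self-contained sketch of that dependency-injection pattern with simplified stand-in types (names here are illustrative, not Corso's):

package main

import "fmt"

// pager is a stand-in for api.DrivePager / api.DriveItemEnumerator.
type pager interface{ next() (string, bool) }

// handler is a stand-in for the BackupHandler seam: production code
// asks the handler for pagers instead of constructing them itself.
type handler interface{ newPager() pager }

type fakePager struct{ items []string }

func (p *fakePager) next() (string, bool) {
	if len(p.items) == 0 {
		return "", false
	}
	v := p.items[0]
	p.items = p.items[1:]
	return v, true
}

type fakeHandler struct{ p pager }

func (h fakeHandler) newPager() pager { return h.p }

// collect drains the pager; the unit under test only sees the interface,
// so a test can swap in fakeHandler without touching any other seam.
func collect(h handler) []string {
	var out []string
	pg := h.newPager()
	for v, ok := pg.next(); ok; v, ok = pg.next() {
		out = append(out, v)
	}
	return out
}

func main() {
	h := fakeHandler{p: &fakePager{items: []string{"folder", "file"}}}
	fmt.Println(collect(h)) // [folder file]
}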
prevDelta := "prev-delta"
|
||||
mc, err := graph.MakeMetadataCollection(
|
||||
@ -2355,13 +2370,14 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
|
||||
)
|
||||
assert.NoError(t, err, "creating metadata collection", clues.ToCore(err))
|
||||
|
||||
prevMetadata := []data.RestoreCollection{data.NotFoundRestoreCollection{Collection: mc}}
|
||||
prevMetadata := []data.RestoreCollection{data.NoFetchRestoreCollection{Collection: mc}}
|
||||
errs := fault.New(true)
|
||||
|
||||
delList := prefixmatcher.NewStringSetBuilder()
|
||||
|
||||
cols, err := c.Get(ctx, prevMetadata, delList, errs)
|
||||
cols, canUsePreviousBackup, err := c.Get(ctx, prevMetadata, delList, errs)
|
||||
test.errCheck(t, err)
|
||||
assert.Equal(t, test.canUsePreviousBackup, canUsePreviousBackup, "can use previous backup")
|
||||
assert.Equal(t, test.expectedSkippedCount, len(errs.Skipped()))
|
||||
|
||||
if err != nil {
|
||||
@ -2378,12 +2394,11 @@ func (suite *OneDriveCollectionsUnitSuite) TestGet() {
|
||||
}
|
||||
|
||||
if folderPath == metadataPath.String() {
|
||||
deltas, paths, err := deserializeMetadata(
|
||||
deltas, paths, _, err := deserializeMetadata(
|
||||
ctx,
|
||||
[]data.RestoreCollection{
|
||||
data.NotFoundRestoreCollection{Collection: baseCol},
|
||||
},
|
||||
fault.New(true))
|
||||
data.NoFetchRestoreCollection{Collection: baseCol},
|
||||
})
|
||||
if !assert.NoError(t, err, "deserializing metadata", clues.ToCore(err)) {
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -1,5 +1,7 @@
package onedrive

import "github.com/alcionai/corso/src/pkg/path"

const (
// const used as the root dir for the drive portion of a path prefix.
// eg: tid/onedrive/ro/files/drives/driveid/...
@@ -10,3 +12,7 @@ const (
// root id for drive items
RootID = "root"
)

func DriveFolderPrefixBuilder(driveID string) *path.Builder {
return path.Builder{}.Append(DrivesPathDir, driveID, RootPathDir)
}

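For reference, the new builder composes the drive portion of a path prefix. A rough stand-alone sketch of the string it yields, assuming DrivesPathDir is "drives" and RootPathDir is "root:" (the actual constant values live in this consts package, so treat these as assumptions):

package main

import (
	"fmt"
	"strings"
)

// driveFolderPrefix mirrors Append(DrivesPathDir, driveID, RootPathDir)
// with plain strings; the real code returns a *path.Builder.
func driveFolderPrefix(driveID string) string {
	return strings.Join([]string{"drives", driveID, "root:"}, "/")
}

func main() {
	// e.g. tid/onedrive/ro/files/<this prefix>/...
	fmt.Println(driveFolderPrefix("drive-1")) // drives/drive-1/root:
}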
@@ -16,6 +16,7 @@ import (
"github.com/alcionai/corso/src/pkg/logger"
"github.com/alcionai/corso/src/pkg/path"
"github.com/alcionai/corso/src/pkg/selectors"
"github.com/alcionai/corso/src/pkg/services/m365/api"
)

type odFolderMatcher struct {
@@ -34,27 +35,28 @@ func (fm odFolderMatcher) Matches(dir string) bool {
// for the specified user
func DataCollections(
ctx context.Context,
ac api.Client,
selector selectors.Selector,
user idname.Provider,
metadata []data.RestoreCollection,
lastBackupVersion int,
tenant string,
itemClient graph.Requester,
service graph.Servicer,
su support.StatusUpdater,
ctrlOpts control.Options,
errs *fault.Bus,
) ([]data.BackupCollection, *prefixmatcher.StringSetMatcher, error) {
) ([]data.BackupCollection, *prefixmatcher.StringSetMatcher, bool, error) {
odb, err := selector.ToOneDriveBackup()
if err != nil {
return nil, nil, clues.Wrap(err, "parsing selector").WithClues(ctx)
return nil, nil, false, clues.Wrap(err, "parsing selector").WithClues(ctx)
}

var (
el = errs.Local()
categories = map[path.CategoryType]struct{}{}
collections = []data.BackupCollection{}
ssmb = prefixmatcher.NewStringSetBuilder()
el = errs.Local()
categories = map[path.CategoryType]struct{}{}
collections = []data.BackupCollection{}
ssmb = prefixmatcher.NewStringSetBuilder()
odcs []data.BackupCollection
canUsePreviousBackup bool
)

// for each scope that includes oneDrive items, get all
@@ -66,16 +68,14 @@ func DataCollections(
logger.Ctx(ctx).Debug("creating OneDrive collections")

nc := NewCollections(
itemClient,
&itemBackupHandler{ac.Drives()},
tenant,
user.ID(),
OneDriveSource,
odFolderMatcher{scope},
service,
su,
ctrlOpts)

odcs, err := nc.Get(ctx, metadata, ssmb, errs)
odcs, canUsePreviousBackup, err = nc.Get(ctx, metadata, ssmb, errs)
if err != nil {
el.AddRecoverable(clues.Stack(err).Label(fault.LabelForceNoBackupCreation))
}
@@ -86,14 +86,13 @@
}

mcs, err := migrationCollections(
service,
lastBackupVersion,
tenant,
user,
su,
ctrlOpts)
if err != nil {
return nil, nil, err
return nil, nil, false, err
}

collections = append(collections, mcs...)
@@ -109,18 +108,17 @@
su,
errs)
if err != nil {
return nil, nil, err
return nil, nil, false, err
}

collections = append(collections, baseCols...)
}

return collections, ssmb.ToReader(), el.Failure()
return collections, ssmb.ToReader(), canUsePreviousBackup, el.Failure()
}

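DataCollections now reports, alongside its collections, whether the previous backup's metadata was usable. An illustrative, self-contained sketch of the calling contract (toy function; the fallback shown is an assumption about how a caller might react, not Corso's exact logic):

package main

import "fmt"

// get mimics the new Collections.Get / DataCollections contract: it
// returns results plus a flag saying whether prior backup state held up.
func get(prevMetadataValid bool) (cols []string, canUsePreviousBackup bool, err error) {
	if !prevMetadataValid {
		// Same shape as the real code: still succeed, but tell the
		// caller not to build on top of the previous backup.
		return []string{"full-enumeration"}, false, nil
	}
	return []string{"incremental"}, true, nil
}

func main() {
	cols, canUse, err := get(false)
	if err != nil {
		panic(err)
	}
	if !canUse {
		// caller falls back to a non-incremental merge strategy
		fmt.Println("dropping incremental bases")
	}
	fmt.Println(cols)
}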
// adds data migrations to the collection set.
func migrationCollections(
svc graph.Servicer,
lastBackupVersion int,
tenant string,
user idname.Provider,

@@ -85,7 +85,7 @@ func (suite *DataCollectionsUnitSuite) TestMigrationCollections() {
ToggleFeatures: control.Toggles{},
}

mc, err := migrationCollections(nil, test.version, "t", u, nil, opts)
mc, err := migrationCollections(test.version, "t", u, nil, opts)
require.NoError(t, err, clues.ToCore(err))

if test.expectLen == 0 {

@@ -2,32 +2,20 @@ package onedrive

import (
"context"
"fmt"
"strings"

"github.com/alcionai/clues"
"github.com/microsoftgraph/msgraph-sdk-go/drives"
"github.com/microsoftgraph/msgraph-sdk-go/models"
"golang.org/x/exp/maps"

"github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/internal/connector/graph"
odConsts "github.com/alcionai/corso/src/internal/connector/onedrive/consts"
"github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/logger"
"github.com/alcionai/corso/src/pkg/path"
"github.com/alcionai/corso/src/pkg/services/m365/api"
)

const (
maxDrivesRetries = 3

// nextLinkKey is used to find the next link in a paged
// graph response
nextLinkKey = "@odata.nextLink"
itemChildrenRawURLFmt = "https://graph.microsoft.com/v1.0/drives/%s/items/%s/children"
itemNotFoundErrorCode = "itemNotFound"
)
const maxDrivesRetries = 3

// DeltaUpdate holds the results of a current delta token. It normally
// gets produced when aggregating the addition and removal of items in
@@ -40,41 +28,6 @@ type DeltaUpdate struct {
Reset bool
}

func PagerForSource(
source driveSource,
servicer graph.Servicer,
resourceOwner string,
fields []string,
) (api.DrivePager, error) {
switch source {
case OneDriveSource:
return api.NewUserDrivePager(servicer, resourceOwner, fields), nil
case SharePointSource:
return api.NewSiteDrivePager(servicer, resourceOwner, fields), nil
default:
return nil, clues.New("unrecognized drive data source")
}
}

type pathPrefixerFunc func(driveID string) (path.Path, error)

func pathPrefixerForSource(
tenantID, resourceOwner string,
source driveSource,
) pathPrefixerFunc {
cat := path.FilesCategory
serv := path.OneDriveService

if source == SharePointSource {
cat = path.LibrariesCategory
serv = path.SharePointService
}

return func(driveID string) (path.Path, error) {
return path.Build(tenantID, resourceOwner, serv, cat, false, odConsts.DrivesPathDir, driveID, odConsts.RootPathDir)
}
}

// itemCollector functions collect the items found in a drive
type itemCollector func(
ctx context.Context,
@@ -88,36 +41,22 @@ type itemCollector func(
errs *fault.Bus,
) error

type driveItemPagerFunc func(
servicer graph.Servicer,
driveID, link string,
) itemPager

type itemPager interface {
GetPage(context.Context) (api.DeltaPageLinker, error)
SetNext(nextLink string)
Reset()
ValuesIn(api.DeltaPageLinker) ([]models.DriveItemable, error)
}

func defaultItemPager(
servicer graph.Servicer,
driveID, link string,
) itemPager {
return api.NewItemPager(servicer, driveID, link, api.DriveItemSelectDefault())
}

// collectItems will enumerate all items in the specified drive and hand them to the
// provided `collector` method
func collectItems(
ctx context.Context,
pager itemPager,
pager api.DriveItemEnumerator,
driveID, driveName string,
collector itemCollector,
oldPaths map[string]string,
prevDelta string,
errs *fault.Bus,
) (DeltaUpdate, map[string]string, map[string]struct{}, error) {
) (
DeltaUpdate,
map[string]string, // newPaths
map[string]struct{}, // excluded
error,
) {
var (
newDeltaURL = ""
newPaths = map[string]string{}
@@ -196,28 +135,8 @@ func collectItems(
return DeltaUpdate{URL: newDeltaURL, Reset: invalidPrevDelta}, newPaths, excluded, nil
}

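collectItems drives a Graph delta pager: follow next links page by page, and keep the final delta token for the next incremental run (the real pager also offers Reset() for when a previous delta token is rejected). A compact, self-contained sketch of that loop over a toy pager (the real api.DriveItemEnumerator works against Graph responses):

package main

import "fmt"

// page is a toy Graph delta response: items plus either a next link
// (more pages follow) or a delta link (enumeration finished).
type page struct {
	items     []string
	nextLink  string
	deltaLink string
}

// pager is a minimal stand-in for api.DriveItemEnumerator.
type pager struct {
	pages []page
	idx   int
}

func (p *pager) getPage() page    { return p.pages[p.idx] }
func (p *pager) setNext(_ string) { p.idx++ }

// collect mirrors the collectItems loop: drain every page, then hand
// back the delta token to persist for the next incremental backup.
func collect(p *pager) (items []string, newDelta string) {
	for {
		pg := p.getPage()
		items = append(items, pg.items...)
		if pg.nextLink == "" {
			return items, pg.deltaLink
		}
		p.setNext(pg.nextLink)
	}
}

func main() {
	p := &pager{pages: []page{
		{items: []string{"root", "folder"}, nextLink: "page2"},
		{items: []string{"file"}, deltaLink: "delta-abc"},
	}}
	items, delta := collect(p)
	fmt.Println(items, delta) // [root folder file] delta-abc
}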
// Create a new item in the specified folder
func CreateItem(
ctx context.Context,
service graph.Servicer,
driveID, parentFolderID string,
newItem models.DriveItemable,
) (models.DriveItemable, error) {
// Graph SDK doesn't yet provide a POST method for `/children` so we set the `rawUrl` ourselves as recommended
// here: https://github.com/microsoftgraph/msgraph-sdk-go/issues/155#issuecomment-1136254310
rawURL := fmt.Sprintf(itemChildrenRawURLFmt, driveID, parentFolderID)
builder := drives.NewItemItemsRequestBuilder(rawURL, service.Adapter())

newItem, err := builder.Post(ctx, newItem, nil)
if err != nil {
return nil, graph.Wrap(ctx, err, "creating item")
}

return newItem, nil
}

// newItem initializes a `models.DriveItemable` that can be used as input to `createItem`
func newItem(name string, folder bool) models.DriveItemable {
func newItem(name string, folder bool) *models.DriveItem {
itemToCreate := models.NewDriveItem()
itemToCreate.SetName(&name)

@@ -243,12 +162,12 @@ func (op *Displayable) GetDisplayName() *string {
// are a subfolder or top-level folder in the hierarchy.
func GetAllFolders(
ctx context.Context,
gs graph.Servicer,
bh BackupHandler,
pager api.DrivePager,
prefix string,
errs *fault.Bus,
) ([]*Displayable, error) {
drvs, err := api.GetAllDrives(ctx, pager, true, maxDrivesRetries)
ds, err := api.GetAllDrives(ctx, pager, true, maxDrivesRetries)
if err != nil {
return nil, clues.Wrap(err, "getting OneDrive folders")
}
@@ -258,14 +177,14 @@
el = errs.Local()
)

for _, d := range drvs {
for _, drive := range ds {
if el.Failure() != nil {
break
}

var (
id = ptr.Val(d.GetId())
name = ptr.Val(d.GetName())
id = ptr.Val(drive.GetId())
name = ptr.Val(drive.GetName())
)

ictx := clues.Add(ctx, "drive_id", id, "drive_name", clues.Hide(name))
@@ -311,7 +230,7 @@ func GetAllFolders(

_, _, _, err = collectItems(
ictx,
defaultItemPager(gs, id, ""),
bh.NewItemPager(id, "", nil),
id,
name,
collector,

@@ -286,6 +286,7 @@ type OneDriveIntgSuite struct {
tester.Suite
userID string
creds account.M365Config
ac api.Client
}

func TestOneDriveSuite(t *testing.T) {
@@ -303,9 +304,12 @@ func (suite *OneDriveIntgSuite) SetupSuite() {

acct := tester.NewM365Account(t)
creds, err := acct.M365Config()
require.NoError(t, err)
require.NoError(t, err, clues.ToCore(err))

suite.creds = creds

suite.ac, err = api.NewClient(creds)
require.NoError(t, err, clues.ToCore(err))
}

func (suite *OneDriveIntgSuite) TestCreateGetDeleteFolder() {
@@ -318,11 +322,9 @@ func (suite *OneDriveIntgSuite) TestCreateGetDeleteFolder() {
folderIDs = []string{}
folderName1 = "Corso_Folder_Test_" + dttm.FormatNow(dttm.SafeForTesting)
folderElements = []string{folderName1}
gs = loadTestService(t)
)

pager, err := PagerForSource(OneDriveSource, gs, suite.userID, nil)
require.NoError(t, err, clues.ToCore(err))
pager := suite.ac.Drives().NewUserDrivePager(suite.userID, nil)

drives, err := api.GetAllDrives(ctx, pager, true, maxDrivesRetries)
require.NoError(t, err, clues.ToCore(err))
@@ -337,14 +339,14 @@

// deletes require unique http clients
// https://github.com/alcionai/corso/issues/2707
err := api.DeleteDriveItem(ictx, loadTestService(t), driveID, id)
err := suite.ac.Drives().DeleteItem(ictx, driveID, id)
if err != nil {
logger.CtxErr(ictx, err).Errorw("deleting folder")
}
}
}()

rootFolder, err := api.GetDriveRoot(ctx, gs, driveID)
rootFolder, err := suite.ac.Drives().GetRootFolder(ctx, driveID)
require.NoError(t, err, clues.ToCore(err))

restoreDir := path.Builder{}.Append(folderElements...)
@@ -357,7 +359,9 @@
caches := NewRestoreCaches()
caches.DriveIDToRootFolderID[driveID] = ptr.Val(rootFolder.GetId())

folderID, err := createRestoreFolders(ctx, gs, &drivePath, restoreDir, caches)
rh := NewRestoreHandler(suite.ac)

folderID, err := createRestoreFolders(ctx, rh, &drivePath, restoreDir, caches)
require.NoError(t, err, clues.ToCore(err))

folderIDs = append(folderIDs, folderID)
@@ -365,7 +369,7 @@
folderName2 := "Corso_Folder_Test_" + dttm.FormatNow(dttm.SafeForTesting)
restoreDir = restoreDir.Append(folderName2)

folderID, err = createRestoreFolders(ctx, gs, &drivePath, restoreDir, caches)
folderID, err = createRestoreFolders(ctx, rh, &drivePath, restoreDir, caches)
require.NoError(t, err, clues.ToCore(err))

folderIDs = append(folderIDs, folderID)
@@ -387,11 +391,13 @@
for _, test := range table {
suite.Run(test.name, func() {
t := suite.T()
bh := itemBackupHandler{suite.ac.Drives()}
pager := suite.ac.Drives().NewUserDrivePager(suite.userID, nil)

pager, err := PagerForSource(OneDriveSource, gs, suite.userID, nil)
require.NoError(t, err, clues.ToCore(err))
ctx, flush := tester.NewContext(t)
defer flush()

allFolders, err := GetAllFolders(ctx, gs, pager, test.prefix, fault.New(true))
allFolders, err := GetAllFolders(ctx, bh, pager, test.prefix, fault.New(true))
require.NoError(t, err, clues.ToCore(err))

foundFolderIDs := []string{}
@@ -454,12 +460,10 @@ func (suite *OneDriveIntgSuite) TestOneDriveNewCollections() {
)

colls := NewCollections(
graph.NewNoTimeoutHTTPWrapper(),
&itemBackupHandler{suite.ac.Drives()},
creds.AzureTenantID,
test.user,
OneDriveSource,
testFolderMatcher{scope},
service,
service.updateStatus,
control.Options{
ToggleFeatures: control.Toggles{},
@@ -467,7 +471,7 @@

ssmb := prefixmatcher.NewStringSetBuilder()

odcs, err := colls.Get(ctx, nil, ssmb, fault.New(true))
odcs, _, err := colls.Get(ctx, nil, ssmb, fault.New(true))
assert.NoError(t, err, clues.ToCore(err))
// Don't expect excludes as this isn't an incremental backup.
assert.True(t, ssmb.Empty())

@@ -1,25 +0,0 @@
// Code generated by "stringer -type=driveSource"; DO NOT EDIT.

package onedrive

import "strconv"

func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[unknownDriveSource-0]
_ = x[OneDriveSource-1]
_ = x[SharePointSource-2]
}

const _driveSource_name = "unknownDriveSourceOneDriveSourceSharePointSource"

var _driveSource_index = [...]uint8{0, 18, 32, 48}

func (i driveSource) String() string {
if i < 0 || i >= driveSource(len(_driveSource_index)-1) {
return "driveSource(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _driveSource_name[_driveSource_index[i]:_driveSource_index[i+1]]
}

src/internal/connector/onedrive/handlers.go (new file, +132)
@@ -0,0 +1,132 @@
package onedrive

import (
"context"

"github.com/microsoftgraph/msgraph-sdk-go/drives"
"github.com/microsoftgraph/msgraph-sdk-go/models"

"github.com/alcionai/corso/src/pkg/backup/details"
"github.com/alcionai/corso/src/pkg/path"
"github.com/alcionai/corso/src/pkg/services/m365/api"
)

type ItemInfoAugmenter interface {
// AugmentItemInfo will populate a details.<Service>Info struct
// with properties from the drive item. ItemSize is passed in
// separately for restore processes because the local itemable
// doesn't have its size value updated as a side effect of creation,
// and kiota drops any SetSize update.
AugmentItemInfo(
dii details.ItemInfo,
item models.DriveItemable,
size int64,
parentPath *path.Builder,
) details.ItemInfo
}

// ---------------------------------------------------------------------------
// backup
// ---------------------------------------------------------------------------

type BackupHandler interface {
ItemInfoAugmenter
api.Getter
GetItemPermissioner
GetItemer

// PathPrefix constructs the service and category specific path prefix for
// the given values.
PathPrefix(tenantID, resourceOwner, driveID string) (path.Path, error)

// CanonicalPath constructs the service and category specific path for
// the given values.
CanonicalPath(
folders *path.Builder,
tenantID, resourceOwner string,
) (path.Path, error)

// ServiceCat returns the service and category used by this implementation.
ServiceCat() (path.ServiceType, path.CategoryType)
NewDrivePager(resourceOwner string, fields []string) api.DrivePager
NewItemPager(driveID, link string, fields []string) api.DriveItemEnumerator
// FormatDisplayPath creates a human-readable string to represent the
// provided path.
FormatDisplayPath(driveName string, parentPath *path.Builder) string
NewLocationIDer(driveID string, elems ...string) details.LocationIDer
}

type GetItemPermissioner interface {
GetItemPermission(
ctx context.Context,
driveID, itemID string,
) (models.PermissionCollectionResponseable, error)
}

type GetItemer interface {
GetItem(
ctx context.Context,
driveID, itemID string,
) (models.DriveItemable, error)
}

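These one-method interfaces keep each consumer's dependency surface minimal: a function that only needs to fetch an item can accept just GetItemer, and a test can satisfy it with a tiny fake. An illustrative, self-contained sketch of that interface-segregation pattern (fake types, not the real api.Drives client):

package main

import (
	"context"
	"errors"
	"fmt"
)

// itemGetter mirrors the shape of GetItemer with a simplified item type.
type itemGetter interface {
	GetItem(ctx context.Context, driveID, itemID string) (string, error)
}

type fakeGetter struct{ items map[string]string }

func (f fakeGetter) GetItem(_ context.Context, _, itemID string) (string, error) {
	it, ok := f.items[itemID]
	if !ok {
		return "", errors.New("not found")
	}
	return it, nil
}

// describeItem depends only on the single method it actually uses,
// so any implementation of itemGetter (real or fake) will do.
func describeItem(ctx context.Context, ig itemGetter, driveID, itemID string) (string, error) {
	it, err := ig.GetItem(ctx, driveID, itemID)
	if err != nil {
		return "", err
	}
	return "item: " + it, nil
}

func main() {
	fg := fakeGetter{items: map[string]string{"id-1": "report.docx"}}
	out, err := describeItem(context.Background(), fg, "drive-1", "id-1")
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // item: report.docx
}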
// ---------------------------------------------------------------------------
// restore
// ---------------------------------------------------------------------------

type RestoreHandler interface {
DeleteItemPermissioner
GetFolderByNamer
GetRootFolderer
ItemInfoAugmenter
NewItemContentUploader
PostItemInContainerer
UpdateItemPermissioner
}

type NewItemContentUploader interface {
// NewItemContentUpload creates an upload session which is used as a writer
// for large item content.
NewItemContentUpload(
ctx context.Context,
driveID, itemID string,
) (models.UploadSessionable, error)
}

type DeleteItemPermissioner interface {
DeleteItemPermission(
ctx context.Context,
driveID, itemID, permissionID string,
) error
}

type UpdateItemPermissioner interface {
PostItemPermissionUpdate(
ctx context.Context,
driveID, itemID string,
body *drives.ItemItemsItemInvitePostRequestBody,
) (drives.ItemItemsItemInviteResponseable, error)
}

type PostItemInContainerer interface {
PostItemInContainer(
ctx context.Context,
driveID, parentFolderID string,
newItem models.DriveItemable,
) (models.DriveItemable, error)
}

type GetFolderByNamer interface {
GetFolderByName(
ctx context.Context,
driveID, parentFolderID, folderID string,
) (models.DriveItemable, error)
}

type GetRootFolderer interface {
// GetRootFolder gets the root folder for the drive.
GetRootFolder(
ctx context.Context,
driveID string,
) (models.DriveItemable, error)
}
@@ -5,17 +5,14 @@ import (
"context"
"encoding/json"
"io"
"net/http"
"strings"

"github.com/alcionai/clues"
"github.com/microsoftgraph/msgraph-sdk-go/models"

"github.com/alcionai/corso/src/internal/common/ptr"
"github.com/alcionai/corso/src/internal/common/str"
"github.com/alcionai/corso/src/internal/connector/graph"
"github.com/alcionai/corso/src/internal/connector/onedrive/metadata"
"github.com/alcionai/corso/src/pkg/backup/details"
"github.com/alcionai/corso/src/pkg/logger"
"github.com/alcionai/corso/src/pkg/services/m365/api"
)

@@ -25,58 +22,80 @@ var downloadURLKeys = []string{
"@content.downloadUrl",
}

// sharePointItemReader will return a io.ReadCloser for the specified item
// It crafts this by querying M365 for a download URL for the item
// and using a http client to initialize a reader
// TODO: Add metadata fetching to SharePoint
func sharePointItemReader(
func downloadItem(
ctx context.Context,
client graph.Requester,
ag api.Getter,
item models.DriveItemable,
) (details.ItemInfo, io.ReadCloser, error) {
resp, err := downloadItem(ctx, client, item)
if err != nil {
return details.ItemInfo{}, nil, clues.Wrap(err, "sharepoint reader")
) (io.ReadCloser, error) {
if item == nil {
return nil, clues.New("nil item")
}

dii := details.ItemInfo{
SharePoint: sharePointItemInfo(item, ptr.Val(item.GetSize())),
}

return dii, resp.Body, nil
}

func oneDriveItemMetaReader(
ctx context.Context,
service graph.Servicer,
driveID string,
item models.DriveItemable,
) (io.ReadCloser, int, error) {
return baseItemMetaReader(ctx, service, driveID, item)
}

func sharePointItemMetaReader(
ctx context.Context,
service graph.Servicer,
driveID string,
item models.DriveItemable,
) (io.ReadCloser, int, error) {
// TODO: include permissions
return baseItemMetaReader(ctx, service, driveID, item)
}

func baseItemMetaReader(
ctx context.Context,
service graph.Servicer,
driveID string,
item models.DriveItemable,
) (io.ReadCloser, int, error) {
var (
perms []metadata.Permission
err error
meta = metadata.Metadata{FileName: ptr.Val(item.GetName())}
rc io.ReadCloser
isFile = item.GetFile() != nil
err error
)

if isFile {
var (
url string
ad = item.GetAdditionalData()
)

for _, key := range downloadURLKeys {
if v, err := str.AnyValueToString(key, ad); err == nil {
url = v
break
}
}

rc, err = downloadFile(ctx, ag, url)
if err != nil {
return nil, clues.Stack(err)
}
}

return rc, nil
}

func downloadFile(
ctx context.Context,
ag api.Getter,
url string,
) (io.ReadCloser, error) {
if len(url) == 0 {
return nil, clues.New("empty file url")
}

resp, err := ag.Get(ctx, url, nil)
if err != nil {
return nil, clues.Wrap(err, "getting file")
}

if graph.IsMalwareResp(ctx, resp) {
return nil, clues.New("malware detected").Label(graph.LabelsMalware)
}

if (resp.StatusCode / 100) != 2 {
// upstream error checks can compare the status with
// clues.HasLabel(err, graph.LabelStatus(http.KnownStatusCode))
return nil, clues.
Wrap(clues.New(resp.Status), "non-2xx http response").
Label(graph.LabelStatus(resp.StatusCode))
}

return resp.Body, nil
}

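downloadFile labels non-2xx failures with the response status, so upstream code can branch on HTTP status without string-matching error text. A self-contained toy sketch of that labeled-error pattern (the stand-in statusErr type is illustrative; the real code uses the clues helpers named in the comment above):

package main

import (
	"errors"
	"fmt"
	"net/http"
)

// statusErr is a toy stand-in for an error carrying a status label,
// mimicking clues' Label(graph.LabelStatus(code)) pattern.
type statusErr struct {
	status int
	msg    string
}

func (e statusErr) Error() string { return e.msg }

func hasStatus(err error, code int) bool {
	var se statusErr
	return errors.As(err, &se) && se.status == code
}

func download(code int) error {
	if code/100 != 2 {
		return statusErr{status: code, msg: "non-2xx http response"}
	}
	return nil
}

func main() {
	err := download(http.StatusUnauthorized)
	// upstream: e.g. refresh credentials on 401, fail fast otherwise
	fmt.Println(hasStatus(err, http.StatusUnauthorized)) // true
}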
func downloadItemMeta(
ctx context.Context,
gip GetItemPermissioner,
driveID string,
item models.DriveItemable,
) (io.ReadCloser, int, error) {
meta := metadata.Metadata{FileName: ptr.Val(item.GetName())}

if item.GetShared() == nil {
meta.SharingMode = metadata.SharingModeInherited
} else {
@@ -84,12 +103,12 @@ func baseItemMetaReader(
}

if meta.SharingMode == metadata.SharingModeCustom {
perms, err = driveItemPermissionInfo(ctx, service, driveID, ptr.Val(item.GetId()))
perm, err := gip.GetItemPermission(ctx, driveID, ptr.Val(item.GetId()))
if err != nil {
return nil, 0, err
}

meta.Permissions = perms
meta.Permissions = metadata.FilterPermissions(ctx, perm.GetValue())
}

metaJSON, err := json.Marshal(meta)
@@ -100,283 +119,25 @@ func baseItemMetaReader(
return io.NopCloser(bytes.NewReader(metaJSON)), len(metaJSON), nil
}

// oneDriveItemReader will return a io.ReadCloser for the specified item
// It crafts this by querying M365 for a download URL for the item
// and using a http client to initialize a reader
func oneDriveItemReader(
// driveItemWriter is used to initialize and return an io.Writer to upload data for the specified item
// It does so by creating an upload session and using that URL to initialize an `itemWriter`
// TODO: @vkamra verify if var session is the desired input
func driveItemWriter(
ctx context.Context,
client graph.Requester,
item models.DriveItemable,
) (details.ItemInfo, io.ReadCloser, error) {
var (
rc io.ReadCloser
isFile = item.GetFile() != nil
)
nicu NewItemContentUploader,
driveID, itemID string,
itemSize int64,
) (io.Writer, string, error) {
ctx = clues.Add(ctx, "upload_item_id", itemID)

if isFile {
resp, err := downloadItem(ctx, client, item)
if err != nil {
return details.ItemInfo{}, nil, clues.Wrap(err, "onedrive reader")
}

rc = resp.Body
}

dii := details.ItemInfo{
OneDrive: oneDriveItemInfo(item, ptr.Val(item.GetSize())),
}

return dii, rc, nil
}

func downloadItem(
ctx context.Context,
client graph.Requester,
item models.DriveItemable,
) (*http.Response, error) {
var url string

for _, key := range downloadURLKeys {
tmp, ok := item.GetAdditionalData()[key].(*string)
if ok {
url = ptr.Val(tmp)
break
}
}

if len(url) == 0 {
return nil, clues.New("extracting file url").With("item_id", ptr.Val(item.GetId()))
}

resp, err := client.Request(ctx, http.MethodGet, url, nil, nil)
icu, err := nicu.NewItemContentUpload(ctx, driveID, itemID)
if err != nil {
return nil, err
return nil, "", clues.Stack(err)
}

if (resp.StatusCode / 100) == 2 {
return resp, nil
}
iw := graph.NewLargeItemWriter(itemID, ptr.Val(icu.GetUploadUrl()), itemSize)

if graph.IsMalwareResp(ctx, resp) {
return nil, clues.New("malware detected").Label(graph.LabelsMalware)
}

// upstream error checks can compare the status with
// clues.HasLabel(err, graph.LabelStatus(http.KnownStatusCode))
cerr := clues.Wrap(clues.New(resp.Status), "non-2xx http response").
Label(graph.LabelStatus(resp.StatusCode))

return resp, cerr
}

// oneDriveItemInfo will populate a details.OneDriveInfo struct
// with properties from the drive item. ItemSize is specified
// separately for restore processes because the local itemable
// doesn't have its size value updated as a side effect of creation,
// and kiota drops any SetSize update.
func oneDriveItemInfo(di models.DriveItemable, itemSize int64) *details.OneDriveInfo {
var email, driveName, driveID string

if di.GetCreatedBy() != nil && di.GetCreatedBy().GetUser() != nil {
// User is sometimes not available when created via some
// external applications (like backup/restore solutions)
ed, ok := di.GetCreatedBy().GetUser().GetAdditionalData()["email"]
if ok {
email = *ed.(*string)
}
}

if di.GetParentReference() != nil {
driveID = ptr.Val(di.GetParentReference().GetDriveId())
driveName = strings.TrimSpace(ptr.Val(di.GetParentReference().GetName()))
}

return &details.OneDriveInfo{
Created: ptr.Val(di.GetCreatedDateTime()),
DriveID: driveID,
DriveName: driveName,
ItemName: ptr.Val(di.GetName()),
ItemType: details.OneDriveItem,
Modified: ptr.Val(di.GetLastModifiedDateTime()),
Owner: email,
Size: itemSize,
}
}

// driveItemPermissionInfo will fetch the permission information
// for a drive item given a drive and item id.
func driveItemPermissionInfo(
ctx context.Context,
service graph.Servicer,
driveID string,
itemID string,
) ([]metadata.Permission, error) {
perm, err := api.GetItemPermission(ctx, service, driveID, itemID)
if err != nil {
return nil, err
}

uperms := filterUserPermissions(ctx, perm.GetValue())

return uperms, nil
}

func filterUserPermissions(ctx context.Context, perms []models.Permissionable) []metadata.Permission {
up := []metadata.Permission{}

for _, p := range perms {
if p.GetGrantedToV2() == nil {
// For link shares, we get permissions without a user
// specified
continue
}

var (
// Below are the mapping from roles to "Advanced" permissions
// screen entries:
//
// owner - Full Control
// write - Design | Edit | Contribute (no difference in /permissions api)
// read - Read
// empty - Restricted View
//
// helpful docs:
// https://devblogs.microsoft.com/microsoft365dev/controlling-app-access-on-specific-sharepoint-site-collections/
roles = p.GetRoles()
gv2 = p.GetGrantedToV2()
entityID string
gv2t metadata.GV2Type
)

switch true {
case gv2.GetUser() != nil:
gv2t = metadata.GV2User
entityID = ptr.Val(gv2.GetUser().GetId())
case gv2.GetSiteUser() != nil:
gv2t = metadata.GV2SiteUser
entityID = ptr.Val(gv2.GetSiteUser().GetId())
case gv2.GetGroup() != nil:
gv2t = metadata.GV2Group
entityID = ptr.Val(gv2.GetGroup().GetId())
case gv2.GetSiteGroup() != nil:
gv2t = metadata.GV2SiteGroup
entityID = ptr.Val(gv2.GetSiteGroup().GetId())
case gv2.GetApplication() != nil:
gv2t = metadata.GV2App
entityID = ptr.Val(gv2.GetApplication().GetId())
case gv2.GetDevice() != nil:
gv2t = metadata.GV2Device
entityID = ptr.Val(gv2.GetDevice().GetId())
default:
logger.Ctx(ctx).Info("untracked permission")
}

// Technically GrantedToV2 can also contain devices, but the
// documentation does not mention about devices in permissions
if entityID == "" {
// This should ideally not be hit
continue
}

up = append(up, metadata.Permission{
ID: ptr.Val(p.GetId()),
Roles: roles,
EntityID: entityID,
EntityType: gv2t,
Expiration: p.GetExpirationDateTime(),
})
}

return up
}

// sharePointItemInfo will populate a details.SharePointInfo struct
// with properties from the drive item. ItemSize is specified
// separately for restore processes because the local itemable
// doesn't have its size value updated as a side effect of creation,
// and kiota drops any SetSize update.
// TODO: Update drive name during Issue #2071
func sharePointItemInfo(di models.DriveItemable, itemSize int64) *details.SharePointInfo {
var driveName, siteID, driveID, weburl, creatorEmail string

// TODO: we rely on this info for details/restore lookups,
// so if it's nil we have an issue, and will need an alternative
// way to source the data.
if di.GetCreatedBy() != nil && di.GetCreatedBy().GetUser() != nil {
// User is sometimes not available when created via some
// external applications (like backup/restore solutions)
additionalData := di.GetCreatedBy().GetUser().GetAdditionalData()
ed, ok := additionalData["email"]

if !ok {
ed = additionalData["displayName"]
}

if ed != nil {
creatorEmail = *ed.(*string)
}
}

gsi := di.GetSharepointIds()
if gsi != nil {
siteID = ptr.Val(gsi.GetSiteId())
weburl = ptr.Val(gsi.GetSiteUrl())

if len(weburl) == 0 {
weburl = constructWebURL(di.GetAdditionalData())
}
}

if di.GetParentReference() != nil {
driveID = ptr.Val(di.GetParentReference().GetDriveId())
driveName = strings.TrimSpace(ptr.Val(di.GetParentReference().GetName()))
}

return &details.SharePointInfo{
ItemType: details.SharePointLibrary,
ItemName: ptr.Val(di.GetName()),
Created: ptr.Val(di.GetCreatedDateTime()),
Modified: ptr.Val(di.GetLastModifiedDateTime()),
DriveID: driveID,
DriveName: driveName,
Size: itemSize,
Owner: creatorEmail,
WebURL: weburl,
SiteID: siteID,
}
}

// constructWebURL helper function for recreating the webURL
// for the originating SharePoint site. Uses additional data map
// from a models.DriveItemable that possesses a downloadURL within the map.
// Returns "" if map nil or key is not present.
func constructWebURL(adtl map[string]any) string {
var (
desiredKey = "@microsoft.graph.downloadUrl"
sep = `/_layouts`
url string
)

if adtl == nil {
return url
}

r := adtl[desiredKey]
point, ok := r.(*string)

if !ok {
return url
}

value := ptr.Val(point)
if len(value) == 0 {
return url
}

temp := strings.Split(value, sep)
url = temp[0]

return url
return iw, ptr.Val(icu.GetUploadUrl()), nil
}

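driveItemWriter pairs an upload session URL with a chunked writer; usage follows the integration test further below: create the writer for the item's size, then stream content through a bounded buffer so large files upload in parts. A hedged, self-contained sketch of the streaming side (toy writer; the real one targets the Graph upload session URL):

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// chunkCountingWriter is a toy stand-in for graph's large-item writer;
// it records how many Write calls (upload parts) it received.
type chunkCountingWriter struct {
	parts int
	buf   bytes.Buffer
}

func (w *chunkCountingWriter) Write(p []byte) (int, error) {
	w.parts++
	return w.buf.Write(p)
}

func main() {
	payload := strings.NewReader(strings.Repeat("x", 100*1024)) // ~100KB

	// wrap the reader so io.CopyBuffer can't shortcut through WriteTo
	// and the 32KB buffer actually bounds each write, mirroring the
	// multi-part upload behavior the integration test validates.
	src := struct{ io.Reader }{payload}

	w := &chunkCountingWriter{}
	n, err := io.CopyBuffer(w, src, make([]byte, 32*1024))
	if err != nil {
		panic(err)
	}
	fmt.Printf("wrote %d bytes in %d parts\n", n, w.parts) // 4 parts
}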
func setName(orig models.ItemReferenceable, driveName string) models.ItemReferenceable {

src/internal/connector/onedrive/item_handler.go (new file, +227)
@@ -0,0 +1,227 @@
package onedrive

import (
"context"
"net/http"
"strings"

"github.com/microsoftgraph/msgraph-sdk-go/drives"
"github.com/microsoftgraph/msgraph-sdk-go/models"

"github.com/alcionai/corso/src/internal/common/ptr"
odConsts "github.com/alcionai/corso/src/internal/connector/onedrive/consts"
"github.com/alcionai/corso/src/pkg/backup/details"
"github.com/alcionai/corso/src/pkg/path"
"github.com/alcionai/corso/src/pkg/services/m365/api"
)

// ---------------------------------------------------------------------------
// backup
// ---------------------------------------------------------------------------

var _ BackupHandler = &itemBackupHandler{}

type itemBackupHandler struct {
ac api.Drives
}

func (h itemBackupHandler) Get(
ctx context.Context,
url string,
headers map[string]string,
) (*http.Response, error) {
return h.ac.Get(ctx, url, headers)
}

func (h itemBackupHandler) PathPrefix(
tenantID, resourceOwner, driveID string,
) (path.Path, error) {
return path.Build(
tenantID,
resourceOwner,
path.OneDriveService,
path.FilesCategory,
false,
odConsts.DrivesPathDir,
driveID,
odConsts.RootPathDir)
}

func (h itemBackupHandler) CanonicalPath(
folders *path.Builder,
tenantID, resourceOwner string,
) (path.Path, error) {
return folders.ToDataLayerOneDrivePath(tenantID, resourceOwner, false)
}

func (h itemBackupHandler) ServiceCat() (path.ServiceType, path.CategoryType) {
return path.OneDriveService, path.FilesCategory
}

func (h itemBackupHandler) NewDrivePager(
resourceOwner string, fields []string,
) api.DrivePager {
return h.ac.NewUserDrivePager(resourceOwner, fields)
}

func (h itemBackupHandler) NewItemPager(
driveID, link string,
fields []string,
) api.DriveItemEnumerator {
return h.ac.NewItemPager(driveID, link, fields)
}

func (h itemBackupHandler) AugmentItemInfo(
dii details.ItemInfo,
item models.DriveItemable,
size int64,
parentPath *path.Builder,
) details.ItemInfo {
return augmentItemInfo(dii, item, size, parentPath)
}

func (h itemBackupHandler) FormatDisplayPath(
_ string, // drive name not displayed for onedrive
pb *path.Builder,
) string {
return "/" + pb.String()
}

func (h itemBackupHandler) NewLocationIDer(
driveID string,
elems ...string,
) details.LocationIDer {
return details.NewOneDriveLocationIDer(driveID, elems...)
}

func (h itemBackupHandler) GetItemPermission(
ctx context.Context,
driveID, itemID string,
) (models.PermissionCollectionResponseable, error) {
return h.ac.GetItemPermission(ctx, driveID, itemID)
}

func (h itemBackupHandler) GetItem(
ctx context.Context,
driveID, itemID string,
) (models.DriveItemable, error) {
return h.ac.GetItem(ctx, driveID, itemID)
}

// ---------------------------------------------------------------------------
// Restore
// ---------------------------------------------------------------------------

var _ RestoreHandler = &itemRestoreHandler{}

type itemRestoreHandler struct {
ac api.Drives
}

func NewRestoreHandler(ac api.Client) *itemRestoreHandler {
return &itemRestoreHandler{ac.Drives()}
}

// AugmentItemInfo will populate a details.OneDriveInfo struct
// with properties from the drive item. ItemSize is specified
// separately for restore processes because the local itemable
// doesn't have its size value updated as a side effect of creation,
// and kiota drops any SetSize update.
func (h itemRestoreHandler) AugmentItemInfo(
dii details.ItemInfo,
item models.DriveItemable,
size int64,
parentPath *path.Builder,
) details.ItemInfo {
return augmentItemInfo(dii, item, size, parentPath)
}

func (h itemRestoreHandler) NewItemContentUpload(
ctx context.Context,
driveID, itemID string,
) (models.UploadSessionable, error) {
return h.ac.NewItemContentUpload(ctx, driveID, itemID)
}

func (h itemRestoreHandler) DeleteItemPermission(
ctx context.Context,
driveID, itemID, permissionID string,
) error {
return h.ac.DeleteItemPermission(ctx, driveID, itemID, permissionID)
}

func (h itemRestoreHandler) PostItemPermissionUpdate(
ctx context.Context,
driveID, itemID string,
body *drives.ItemItemsItemInvitePostRequestBody,
) (drives.ItemItemsItemInviteResponseable, error) {
return h.ac.PostItemPermissionUpdate(ctx, driveID, itemID, body)
}

func (h itemRestoreHandler) PostItemInContainer(
ctx context.Context,
driveID, parentFolderID string,
newItem models.DriveItemable,
) (models.DriveItemable, error) {
return h.ac.PostItemInContainer(ctx, driveID, parentFolderID, newItem)
}

func (h itemRestoreHandler) GetFolderByName(
ctx context.Context,
driveID, parentFolderID, folderName string,
) (models.DriveItemable, error) {
return h.ac.GetFolderByName(ctx, driveID, parentFolderID, folderName)
}

func (h itemRestoreHandler) GetRootFolder(
ctx context.Context,
driveID string,
) (models.DriveItemable, error) {
return h.ac.GetRootFolder(ctx, driveID)
}

// ---------------------------------------------------------------------------
// Common
// ---------------------------------------------------------------------------

func augmentItemInfo(
dii details.ItemInfo,
item models.DriveItemable,
size int64,
parentPath *path.Builder,
) details.ItemInfo {
var email, driveName, driveID string

if item.GetCreatedBy() != nil && item.GetCreatedBy().GetUser() != nil {
// User is sometimes not available when created via some
// external applications (like backup/restore solutions)
ed, ok := item.GetCreatedBy().GetUser().GetAdditionalData()["email"]
if ok {
email = *ed.(*string)
}
}

if item.GetParentReference() != nil {
driveID = ptr.Val(item.GetParentReference().GetDriveId())
driveName = strings.TrimSpace(ptr.Val(item.GetParentReference().GetName()))
}

var pps string
if parentPath != nil {
pps = parentPath.String()
}

dii.OneDrive = &details.OneDriveInfo{
Created: ptr.Val(item.GetCreatedDateTime()),
DriveID: driveID,
DriveName: driveName,
ItemName: ptr.Val(item.GetName()),
ItemType: details.OneDriveItem,
Modified: ptr.Val(item.GetLastModifiedDateTime()),
Owner: email,
ParentPath: pps,
Size: size,
}

return dii
}
src/internal/connector/onedrive/item_handler_test.go (new file, +58)
@@ -0,0 +1,58 @@
package onedrive

import (
"testing"

"github.com/alcionai/clues"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/suite"

"github.com/alcionai/corso/src/internal/tester"
"github.com/alcionai/corso/src/pkg/path"
)

type ItemBackupHandlerUnitSuite struct {
tester.Suite
}

func TestItemBackupHandlerUnitSuite(t *testing.T) {
suite.Run(t, &ItemBackupHandlerUnitSuite{Suite: tester.NewUnitSuite(t)})
}

func (suite *ItemBackupHandlerUnitSuite) TestCanonicalPath() {
tenantID, resourceOwner := "tenant", "resourceOwner"

table := []struct {
name string
expect string
expectErr assert.ErrorAssertionFunc
}{
{
name: "onedrive",
expect: "tenant/onedrive/resourceOwner/files/prefix",
expectErr: assert.NoError,
},
}
for _, test := range table {
suite.Run(test.name, func() {
t := suite.T()
h := itemBackupHandler{}
p := path.Builder{}.Append("prefix")

result, err := h.CanonicalPath(p, tenantID, resourceOwner)
test.expectErr(t, err, clues.ToCore(err))

if result != nil {
assert.Equal(t, test.expect, result.String())
}
})
}
}

func (suite *ItemBackupHandlerUnitSuite) TestServiceCat() {
t := suite.T()

s, c := itemBackupHandler{}.ServiceCat()
assert.Equal(t, path.OneDriveService, s)
assert.Equal(t, path.FilesCategory, c)
}
@ -4,18 +4,16 @@ import (
	"bytes"
	"context"
	"io"
	"net/http"
	"testing"

	"github.com/alcionai/clues"
	"github.com/microsoftgraph/msgraph-sdk-go/models"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/stretchr/testify/suite"

	"github.com/alcionai/corso/src/internal/common/dttm"
	"github.com/alcionai/corso/src/internal/common/ptr"
	"github.com/alcionai/corso/src/internal/connector/graph"
	"github.com/alcionai/corso/src/internal/connector/onedrive/metadata"
	"github.com/alcionai/corso/src/internal/tester"
	"github.com/alcionai/corso/src/pkg/fault"
	"github.com/alcionai/corso/src/pkg/services/m365/api"
@ -25,7 +23,7 @@ type ItemIntegrationSuite struct {
	tester.Suite
	user        string
	userDriveID string
	service graph.Servicer
	service *oneDriveService
}

func TestItemIntegrationSuite(t *testing.T) {
@ -46,8 +44,7 @@ func (suite *ItemIntegrationSuite) SetupSuite() {
	suite.service = loadTestService(t)
	suite.user = tester.SecondaryM365UserID(t)

	pager, err := PagerForSource(OneDriveSource, suite.service, suite.user, nil)
	require.NoError(t, err, clues.ToCore(err))
	pager := suite.service.ac.Drives().NewUserDrivePager(suite.user, nil)

	odDrives, err := api.GetAllDrives(ctx, pager, true, maxDrivesRetries)
	require.NoError(t, err, clues.ToCore(err))
@ -83,6 +80,10 @@ func (suite *ItemIntegrationSuite) TestItemReader_oneDrive() {
		_ bool,
		_ *fault.Bus,
	) error {
		if driveItem != nil {
			return nil
		}

		for _, item := range items {
			if item.GetFile() != nil && ptr.Val(item.GetSize()) > 0 {
				driveItem = item
@ -92,12 +93,14 @@ func (suite *ItemIntegrationSuite) TestItemReader_oneDrive() {

		return nil
	}

	ip := suite.service.ac.
		Drives().
		NewItemPager(suite.userDriveID, "", api.DriveItemSelectDefault())

	_, _, _, err := collectItems(
		ctx,
		defaultItemPager(
			suite.service,
			suite.userDriveID,
			""),
		ip,
		suite.userDriveID,
		"General",
		itemCollector,
@ -114,19 +117,15 @@ func (suite *ItemIntegrationSuite) TestItemReader_oneDrive() {
		suite.user,
		suite.userDriveID)

	// Read data for the file
	itemInfo, itemData, err := oneDriveItemReader(ctx, graph.NewNoTimeoutHTTPWrapper(), driveItem)
	bh := itemBackupHandler{suite.service.ac.Drives()}

	// Read data for the file
	itemData, err := downloadItem(ctx, bh, driveItem)
	require.NoError(t, err, clues.ToCore(err))
	require.NotNil(t, itemInfo.OneDrive)
	require.NotEmpty(t, itemInfo.OneDrive.ItemName)

	size, err := io.Copy(io.Discard, itemData)
	require.NoError(t, err, clues.ToCore(err))
	require.NotZero(t, size)
	require.Equal(t, size, itemInfo.OneDrive.Size)

	t.Logf("Read %d bytes from file %s.", size, itemInfo.OneDrive.ItemName)
}

// TestItemWriter is an integration test for uploading data to OneDrive
@ -148,21 +147,19 @@ func (suite *ItemIntegrationSuite) TestItemWriter() {
	for _, test := range table {
		suite.Run(test.name, func() {
			t := suite.T()
			rh := NewRestoreHandler(suite.service.ac)

			ctx, flush := tester.NewContext(t)
			defer flush()

			srv := suite.service

			root, err := api.GetDriveRoot(ctx, srv, test.driveID)
			root, err := suite.service.ac.Drives().GetRootFolder(ctx, test.driveID)
			require.NoError(t, err, clues.ToCore(err))

			newFolderName := tester.DefaultTestRestoreDestination("folder").ContainerName
			t.Logf("creating folder %s", newFolderName)

			newFolder, err := CreateItem(
			newFolder, err := rh.PostItemInContainer(
				ctx,
				srv,
				test.driveID,
				ptr.Val(root.GetId()),
				newItem(newFolderName, true))
@ -172,9 +169,8 @@ func (suite *ItemIntegrationSuite) TestItemWriter() {
			newItemName := "testItem_" + dttm.FormatNow(dttm.SafeForTesting)
			t.Logf("creating item %s", newItemName)

			newItem, err := CreateItem(
			newItem, err := rh.PostItemInContainer(
				ctx,
				srv,
				test.driveID,
				ptr.Val(newFolder.GetId()),
				newItem(newItemName, false))
@ -183,19 +179,24 @@ func (suite *ItemIntegrationSuite) TestItemWriter() {

			// HACK: Leveraging this to test getFolder behavior for a file. `getFolder()` on the
			// newly created item should fail because it's a file not a folder
			_, err = api.GetFolderByName(ctx, srv, test.driveID, ptr.Val(newFolder.GetId()), newItemName)
			_, err = suite.service.ac.Drives().GetFolderByName(
				ctx,
				test.driveID,
				ptr.Val(newFolder.GetId()),
				newItemName)
			require.ErrorIs(t, err, api.ErrFolderNotFound, clues.ToCore(err))

			// Initialize a 100KB mockDataProvider
			td, writeSize := mockDataReader(int64(100 * 1024))

			itemID := ptr.Val(newItem.GetId())

			r, err := api.PostDriveItem(ctx, srv, test.driveID, itemID)
			w, _, err := driveItemWriter(
				ctx,
				rh,
				test.driveID,
				ptr.Val(newItem.GetId()),
				writeSize)
			require.NoError(t, err, clues.ToCore(err))

			w := graph.NewLargeItemWriter(itemID, ptr.Val(r.GetUploadUrl()), writeSize)

			// Using a 32 KB buffer for the copy allows us to validate the
			// multi-part upload. `io.CopyBuffer` will only write 32 KB at
			// a time
@ -235,72 +236,40 @@ func (suite *ItemIntegrationSuite) TestDriveGetFolder() {
			ctx, flush := tester.NewContext(t)
			defer flush()

			srv := suite.service

			root, err := api.GetDriveRoot(ctx, srv, test.driveID)
			root, err := suite.service.ac.Drives().GetRootFolder(ctx, test.driveID)
			require.NoError(t, err, clues.ToCore(err))

			// Lookup a folder that doesn't exist
			_, err = api.GetFolderByName(ctx, srv, test.driveID, ptr.Val(root.GetId()), "FolderDoesNotExist")
			_, err = suite.service.ac.Drives().GetFolderByName(
				ctx,
				test.driveID,
				ptr.Val(root.GetId()),
				"FolderDoesNotExist")
			require.ErrorIs(t, err, api.ErrFolderNotFound, clues.ToCore(err))

			// Lookup a folder that does exist
			_, err = api.GetFolderByName(ctx, srv, test.driveID, ptr.Val(root.GetId()), "")
			_, err = suite.service.ac.Drives().GetFolderByName(
				ctx,
				test.driveID,
				ptr.Val(root.GetId()),
				"")
			require.NoError(t, err, clues.ToCore(err))
		})
	}
}

func getPermsAndResourceOwnerPerms(
	permID, resourceOwner string,
	gv2t metadata.GV2Type,
	scopes []string,
) (models.Permissionable, metadata.Permission) {
	sharepointIdentitySet := models.NewSharePointIdentitySet()
// Unit tests

	switch gv2t {
	case metadata.GV2App, metadata.GV2Device, metadata.GV2Group, metadata.GV2User:
		identity := models.NewIdentity()
		identity.SetId(&resourceOwner)
		identity.SetAdditionalData(map[string]any{"email": &resourceOwner})
type mockGetter struct {
	GetFunc func(ctx context.Context, url string) (*http.Response, error)
}

		switch gv2t {
		case metadata.GV2User:
			sharepointIdentitySet.SetUser(identity)
		case metadata.GV2Group:
			sharepointIdentitySet.SetGroup(identity)
		case metadata.GV2App:
			sharepointIdentitySet.SetApplication(identity)
		case metadata.GV2Device:
			sharepointIdentitySet.SetDevice(identity)
		}

	case metadata.GV2SiteUser, metadata.GV2SiteGroup:
		spIdentity := models.NewSharePointIdentity()
		spIdentity.SetId(&resourceOwner)
		spIdentity.SetAdditionalData(map[string]any{"email": &resourceOwner})

		switch gv2t {
		case metadata.GV2SiteUser:
			sharepointIdentitySet.SetSiteUser(spIdentity)
		case metadata.GV2SiteGroup:
			sharepointIdentitySet.SetSiteGroup(spIdentity)
		}
	}

	perm := models.NewPermission()
	perm.SetId(&permID)
	perm.SetRoles([]string{"read"})
	perm.SetGrantedToV2(sharepointIdentitySet)

	ownersPerm := metadata.Permission{
		ID:         permID,
		Roles:      []string{"read"},
		EntityID:   resourceOwner,
		EntityType: gv2t,
	}

	return perm, ownersPerm
func (m mockGetter) Get(
	ctx context.Context,
	url string,
	headers map[string]string,
) (*http.Response, error) {
	return m.GetFunc(ctx, url)
}

type ItemUnitTestSuite struct {
@ -311,134 +280,153 @@ func TestItemUnitTestSuite(t *testing.T) {
	suite.Run(t, &ItemUnitTestSuite{Suite: tester.NewUnitSuite(t)})
}

func (suite *ItemUnitTestSuite) TestDrivePermissionsFilter() {
	var (
		pID  = "fakePermId"
		uID  = "fakeuser@provider.com"
		uID2 = "fakeuser2@provider.com"
		own  = []string{"owner"}
		r    = []string{"read"}
		rw   = []string{"read", "write"}
	)
func (suite *ItemUnitTestSuite) TestDownloadItem() {
	testRc := io.NopCloser(bytes.NewReader([]byte("test")))
	url := "https://example.com"

	userOwnerPerm, userOwnerROperm := getPermsAndResourceOwnerPerms(pID, uID, metadata.GV2User, own)
	userReadPerm, userReadROperm := getPermsAndResourceOwnerPerms(pID, uID, metadata.GV2User, r)
	userReadWritePerm, userReadWriteROperm := getPermsAndResourceOwnerPerms(pID, uID2, metadata.GV2User, rw)
	siteUserOwnerPerm, siteUserOwnerROperm := getPermsAndResourceOwnerPerms(pID, uID, metadata.GV2SiteUser, own)
	siteUserReadPerm, siteUserReadROperm := getPermsAndResourceOwnerPerms(pID, uID, metadata.GV2SiteUser, r)
	siteUserReadWritePerm, siteUserReadWriteROperm := getPermsAndResourceOwnerPerms(pID, uID2, metadata.GV2SiteUser, rw)

	groupReadPerm, groupReadROperm := getPermsAndResourceOwnerPerms(pID, uID, metadata.GV2Group, r)
	groupReadWritePerm, groupReadWriteROperm := getPermsAndResourceOwnerPerms(pID, uID2, metadata.GV2Group, rw)
	siteGroupReadPerm, siteGroupReadROperm := getPermsAndResourceOwnerPerms(pID, uID, metadata.GV2SiteGroup, r)
	siteGroupReadWritePerm, siteGroupReadWriteROperm := getPermsAndResourceOwnerPerms(pID, uID2, metadata.GV2SiteGroup, rw)

	noPerm, _ := getPermsAndResourceOwnerPerms(pID, uID, "user", []string{"read"})
	noPerm.SetGrantedToV2(nil) // eg: link shares

	cases := []struct {
		name              string
		graphPermissions  []models.Permissionable
		parsedPermissions []metadata.Permission
	table := []struct {
		name          string
		itemFunc      func() models.DriveItemable
		GetFunc       func(ctx context.Context, url string) (*http.Response, error)
		errorExpected require.ErrorAssertionFunc
		rcExpected    require.ValueAssertionFunc
		label         string
	}{
		{
			name:              "no perms",
			graphPermissions:  []models.Permissionable{},
			parsedPermissions: []metadata.Permission{},
			name: "nil item",
			itemFunc: func() models.DriveItemable {
				return nil
			},
			GetFunc: func(ctx context.Context, url string) (*http.Response, error) {
				return nil, nil
			},
			errorExpected: require.Error,
			rcExpected:    require.Nil,
		},
		{
			name:              "no user bound to perms",
			graphPermissions:  []models.Permissionable{noPerm},
			parsedPermissions: []metadata.Permission{},
		},
			name: "success",
			itemFunc: func() models.DriveItemable {
				di := newItem("test", false)
				di.SetAdditionalData(map[string]interface{}{
					"@microsoft.graph.downloadUrl": url,
				})

		// user
		{
			name:              "user with read permissions",
			graphPermissions:  []models.Permissionable{userReadPerm},
			parsedPermissions: []metadata.Permission{userReadROperm},
				return di
			},
			GetFunc: func(ctx context.Context, url string) (*http.Response, error) {
				return &http.Response{
					StatusCode: http.StatusOK,
					Body:       testRc,
				}, nil
			},
			errorExpected: require.NoError,
			rcExpected:    require.NotNil,
		},
		{
			name:              "user with owner permissions",
			graphPermissions:  []models.Permissionable{userOwnerPerm},
			parsedPermissions: []metadata.Permission{userOwnerROperm},
		},
		{
			name:              "user with read and write permissions",
			graphPermissions:  []models.Permissionable{userReadWritePerm},
			parsedPermissions: []metadata.Permission{userReadWriteROperm},
		},
		{
			name:              "multiple users with separate permissions",
			graphPermissions:  []models.Permissionable{userReadPerm, userReadWritePerm},
			parsedPermissions: []metadata.Permission{userReadROperm, userReadWriteROperm},
		},
			name: "success, content url set instead of download url",
			itemFunc: func() models.DriveItemable {
				di := newItem("test", false)
				di.SetAdditionalData(map[string]interface{}{
					"@content.downloadUrl": url,
				})

		// site-user
		{
			name:              "site user with read permissions",
			graphPermissions:  []models.Permissionable{siteUserReadPerm},
			parsedPermissions: []metadata.Permission{siteUserReadROperm},
				return di
			},
			GetFunc: func(ctx context.Context, url string) (*http.Response, error) {
				return &http.Response{
					StatusCode: http.StatusOK,
					Body:       testRc,
				}, nil
			},
			errorExpected: require.NoError,
			rcExpected:    require.NotNil,
		},
		{
			name:              "site user with owner permissions",
			graphPermissions:  []models.Permissionable{siteUserOwnerPerm},
			parsedPermissions: []metadata.Permission{siteUserOwnerROperm},
		},
		{
			name:              "site user with read and write permissions",
			graphPermissions:  []models.Permissionable{siteUserReadWritePerm},
			parsedPermissions: []metadata.Permission{siteUserReadWriteROperm},
		},
		{
			name:              "multiple site users with separate permissions",
			graphPermissions:  []models.Permissionable{siteUserReadPerm, siteUserReadWritePerm},
			parsedPermissions: []metadata.Permission{siteUserReadROperm, siteUserReadWriteROperm},
		},
			name: "api getter returns error",
			itemFunc: func() models.DriveItemable {
				di := newItem("test", false)
				di.SetAdditionalData(map[string]interface{}{
					"@microsoft.graph.downloadUrl": url,
				})

		// group
		{
			name:              "group with read permissions",
			graphPermissions:  []models.Permissionable{groupReadPerm},
			parsedPermissions: []metadata.Permission{groupReadROperm},
				return di
			},
			GetFunc: func(ctx context.Context, url string) (*http.Response, error) {
				return nil, clues.New("test error")
			},
			errorExpected: require.Error,
			rcExpected:    require.Nil,
		},
		{
			name:              "group with read and write permissions",
			graphPermissions:  []models.Permissionable{groupReadWritePerm},
			parsedPermissions: []metadata.Permission{groupReadWriteROperm},
			name: "download url is empty",
			itemFunc: func() models.DriveItemable {
				di := newItem("test", false)
				return di
			},
			GetFunc: func(ctx context.Context, url string) (*http.Response, error) {
				return &http.Response{
					StatusCode: http.StatusOK,
					Body:       testRc,
				}, nil
			},
			errorExpected: require.Error,
			rcExpected:    require.Nil,
		},
		{
			name:              "multiple groups with separate permissions",
			graphPermissions:  []models.Permissionable{groupReadPerm, groupReadWritePerm},
			parsedPermissions: []metadata.Permission{groupReadROperm, groupReadWriteROperm},
		},
			name: "malware",
			itemFunc: func() models.DriveItemable {
				di := newItem("test", false)
				di.SetAdditionalData(map[string]interface{}{
					"@microsoft.graph.downloadUrl": url,
				})

		// site-group
		{
			name:              "site group with read permissions",
			graphPermissions:  []models.Permissionable{siteGroupReadPerm},
			parsedPermissions: []metadata.Permission{siteGroupReadROperm},
				return di
			},
			GetFunc: func(ctx context.Context, url string) (*http.Response, error) {
				return &http.Response{
					Header: http.Header{
						"X-Virus-Infected": []string{"true"},
					},
					StatusCode: http.StatusOK,
					Body:       testRc,
				}, nil
			},
			errorExpected: require.Error,
			rcExpected:    require.Nil,
		},
		{
			name:              "site group with read and write permissions",
			graphPermissions:  []models.Permissionable{siteGroupReadWritePerm},
			parsedPermissions: []metadata.Permission{siteGroupReadWriteROperm},
		},
		{
			name:              "multiple site groups with separate permissions",
			graphPermissions:  []models.Permissionable{siteGroupReadPerm, siteGroupReadWritePerm},
			parsedPermissions: []metadata.Permission{siteGroupReadROperm, siteGroupReadWriteROperm},
			name: "non-2xx http response",
			itemFunc: func() models.DriveItemable {
				di := newItem("test", false)
				di.SetAdditionalData(map[string]interface{}{
					"@microsoft.graph.downloadUrl": url,
				})

				return di
			},
			GetFunc: func(ctx context.Context, url string) (*http.Response, error) {
				return &http.Response{
					StatusCode: http.StatusNotFound,
					Body:       nil,
				}, nil
			},
			errorExpected: require.Error,
			rcExpected:    require.Nil,
		},
	}
	for _, tc := range cases {
		suite.Run(tc.name, func() {
			t := suite.T()

	for _, test := range table {
		suite.Run(test.name, func() {
			t := suite.T()
			ctx, flush := tester.NewContext(t)
			defer flush()

			actual := filterUserPermissions(ctx, tc.graphPermissions)
			assert.ElementsMatch(t, tc.parsedPermissions, actual)
			mg := mockGetter{
				GetFunc: test.GetFunc,
			}
			rc, err := downloadItem(ctx, mg, test.itemFunc())
			test.errorExpected(t, err, clues.ToCore(err))
			test.rcExpected(t, rc)
		})
	}
}
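
The table above fully pins down the contract the new downloadItem is expected to honor. A minimal sketch consistent with those cases follows; the name downloadItemSketch, the URL-extraction details, and the getter type are assumptions for illustration, since the real implementation lives in the onedrive package and may differ.

// downloadItemSketch is a hypothetical reconstruction of downloadItem's
// behavior, derived only from the test table above: nil items and items
// without a download URL error out, the injected getter fetches the URL,
// and malware-flagged or non-2xx responses are rejected.
func downloadItemSketch(ctx context.Context, g mockGetter, item models.DriveItemable) (io.ReadCloser, error) {
	if item == nil {
		return nil, clues.New("nil drive item")
	}

	// Either annotation may carry the pre-authenticated download URL.
	var url string

	for _, key := range []string{"@microsoft.graph.downloadUrl", "@content.downloadUrl"} {
		if v, ok := item.GetAdditionalData()[key]; ok {
			if s, ok := v.(string); ok {
				url = s
				break
			}
		}
	}

	if len(url) == 0 {
		return nil, clues.New("no download url")
	}

	resp, err := g.Get(ctx, url, nil)
	if err != nil {
		return nil, clues.Wrap(err, "downloading item")
	}

	// Graph flags infected content with this header (per the "malware" case).
	if resp.Header.Get("X-Virus-Infected") == "true" {
		return nil, clues.New("item flagged as malware")
	}

	if resp.StatusCode < 200 || resp.StatusCode > 299 {
		return nil, clues.New("non-2xx download response")
	}

	return resp.Body, nil
}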

@ -1,9 +1,14 @@
package metadata

import (
	"context"
	"time"

	"github.com/microsoftgraph/msgraph-sdk-go/models"
	"golang.org/x/exp/slices"

	"github.com/alcionai/corso/src/internal/common/ptr"
	"github.com/alcionai/corso/src/pkg/logger"
)

type SharingMode int
@ -100,3 +105,72 @@ func DiffPermissions(before, after []Permission) ([]Permission, []Permission) {

	return added, removed
}

func FilterPermissions(ctx context.Context, perms []models.Permissionable) []Permission {
	up := []Permission{}

	for _, p := range perms {
		if p.GetGrantedToV2() == nil {
			// For link shares, we get permissions without a user
			// specified
			continue
		}

		var (
			// Below is the mapping from roles to "Advanced" permissions
			// screen entries:
			//
			//   owner - Full Control
			//   write - Design | Edit | Contribute (no difference in /permissions api)
			//   read  - Read
			//   empty - Restricted View
			//
			// helpful docs:
			// https://devblogs.microsoft.com/microsoft365dev/controlling-app-access-on-specific-sharepoint-site-collections/
			roles    = p.GetRoles()
			gv2      = p.GetGrantedToV2()
			entityID string
			gv2t     GV2Type
		)

		switch true {
		case gv2.GetUser() != nil:
			gv2t = GV2User
			entityID = ptr.Val(gv2.GetUser().GetId())
		case gv2.GetSiteUser() != nil:
			gv2t = GV2SiteUser
			entityID = ptr.Val(gv2.GetSiteUser().GetId())
		case gv2.GetGroup() != nil:
			gv2t = GV2Group
			entityID = ptr.Val(gv2.GetGroup().GetId())
		case gv2.GetSiteGroup() != nil:
			gv2t = GV2SiteGroup
			entityID = ptr.Val(gv2.GetSiteGroup().GetId())
		case gv2.GetApplication() != nil:
			gv2t = GV2App
			entityID = ptr.Val(gv2.GetApplication().GetId())
		case gv2.GetDevice() != nil:
			gv2t = GV2Device
			entityID = ptr.Val(gv2.GetDevice().GetId())
		default:
			logger.Ctx(ctx).Info("untracked permission")
		}

		// Technically GrantedToV2 can also contain devices, but the
		// documentation does not mention devices in permissions
		if entityID == "" {
			// This should ideally not be hit
			continue
		}

		up = append(up, Permission{
			ID:         ptr.Val(p.GetId()),
			Roles:      roles,
			EntityID:   entityID,
			EntityType: gv2t,
			Expiration: p.GetExpirationDateTime(),
		})
	}

	return up
}
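
For reference, a hypothetical call site (pcr stands in for a models.PermissionCollectionResponseable fetched elsewhere; the snippet is illustrative, not lifted from the repo):

// Hypothetical usage of FilterPermissions: normalize the raw Graph
// permission set on a drive item into Corso's Permission records.
perms := FilterPermissions(ctx, pcr.GetValue())

// Link shares (nil GrantedToV2) and unrecognized grantee types are
// dropped, so len(perms) may be smaller than len(pcr.GetValue()).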

@ -3,6 +3,7 @@ package metadata

import (
	"testing"

	"github.com/microsoftgraph/msgraph-sdk-go/models"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/suite"

@ -147,3 +148,187 @@ func (suite *PermissionsUnitTestSuite) TestDiffPermissions() {
		})
	}
}

func getPermsAndResourceOwnerPerms(
	permID, resourceOwner string,
	gv2t GV2Type,
	scopes []string,
) (models.Permissionable, Permission) {
	sharepointIdentitySet := models.NewSharePointIdentitySet()

	switch gv2t {
	case GV2App, GV2Device, GV2Group, GV2User:
		identity := models.NewIdentity()
		identity.SetId(&resourceOwner)
		identity.SetAdditionalData(map[string]any{"email": &resourceOwner})

		switch gv2t {
		case GV2User:
			sharepointIdentitySet.SetUser(identity)
		case GV2Group:
			sharepointIdentitySet.SetGroup(identity)
		case GV2App:
			sharepointIdentitySet.SetApplication(identity)
		case GV2Device:
			sharepointIdentitySet.SetDevice(identity)
		}

	case GV2SiteUser, GV2SiteGroup:
		spIdentity := models.NewSharePointIdentity()
		spIdentity.SetId(&resourceOwner)
		spIdentity.SetAdditionalData(map[string]any{"email": &resourceOwner})

		switch gv2t {
		case GV2SiteUser:
			sharepointIdentitySet.SetSiteUser(spIdentity)
		case GV2SiteGroup:
			sharepointIdentitySet.SetSiteGroup(spIdentity)
		}
	}

	perm := models.NewPermission()
	perm.SetId(&permID)
	perm.SetRoles([]string{"read"})
	perm.SetGrantedToV2(sharepointIdentitySet)

	ownersPerm := Permission{
		ID:         permID,
		Roles:      []string{"read"},
		EntityID:   resourceOwner,
		EntityType: gv2t,
	}

	return perm, ownersPerm
}

func (suite *PermissionsUnitTestSuite) TestDrivePermissionsFilter() {
	var (
		pID  = "fakePermId"
		uID  = "fakeuser@provider.com"
		uID2 = "fakeuser2@provider.com"
		own  = []string{"owner"}
		r    = []string{"read"}
		rw   = []string{"read", "write"}
	)

	userOwnerPerm, userOwnerROperm := getPermsAndResourceOwnerPerms(pID, uID, GV2User, own)
	userReadPerm, userReadROperm := getPermsAndResourceOwnerPerms(pID, uID, GV2User, r)
	userReadWritePerm, userReadWriteROperm := getPermsAndResourceOwnerPerms(pID, uID2, GV2User, rw)
	siteUserOwnerPerm, siteUserOwnerROperm := getPermsAndResourceOwnerPerms(pID, uID, GV2SiteUser, own)
	siteUserReadPerm, siteUserReadROperm := getPermsAndResourceOwnerPerms(pID, uID, GV2SiteUser, r)
	siteUserReadWritePerm, siteUserReadWriteROperm := getPermsAndResourceOwnerPerms(pID, uID2, GV2SiteUser, rw)

	groupReadPerm, groupReadROperm := getPermsAndResourceOwnerPerms(pID, uID, GV2Group, r)
	groupReadWritePerm, groupReadWriteROperm := getPermsAndResourceOwnerPerms(pID, uID2, GV2Group, rw)
	siteGroupReadPerm, siteGroupReadROperm := getPermsAndResourceOwnerPerms(pID, uID, GV2SiteGroup, r)
	siteGroupReadWritePerm, siteGroupReadWriteROperm := getPermsAndResourceOwnerPerms(pID, uID2, GV2SiteGroup, rw)

	noPerm, _ := getPermsAndResourceOwnerPerms(pID, uID, "user", []string{"read"})
	noPerm.SetGrantedToV2(nil) // eg: link shares

	cases := []struct {
		name              string
		graphPermissions  []models.Permissionable
		parsedPermissions []Permission
	}{
		{
			name:              "no perms",
			graphPermissions:  []models.Permissionable{},
			parsedPermissions: []Permission{},
		},
		{
			name:              "no user bound to perms",
			graphPermissions:  []models.Permissionable{noPerm},
			parsedPermissions: []Permission{},
		},

		// user
		{
			name:              "user with read permissions",
			graphPermissions:  []models.Permissionable{userReadPerm},
			parsedPermissions: []Permission{userReadROperm},
		},
		{
			name:              "user with owner permissions",
			graphPermissions:  []models.Permissionable{userOwnerPerm},
			parsedPermissions: []Permission{userOwnerROperm},
		},
		{
			name:              "user with read and write permissions",
			graphPermissions:  []models.Permissionable{userReadWritePerm},
			parsedPermissions: []Permission{userReadWriteROperm},
		},
		{
			name:              "multiple users with separate permissions",
			graphPermissions:  []models.Permissionable{userReadPerm, userReadWritePerm},
			parsedPermissions: []Permission{userReadROperm, userReadWriteROperm},
		},

		// site-user
		{
			name:              "site user with read permissions",
			graphPermissions:  []models.Permissionable{siteUserReadPerm},
			parsedPermissions: []Permission{siteUserReadROperm},
		},
		{
			name:              "site user with owner permissions",
			graphPermissions:  []models.Permissionable{siteUserOwnerPerm},
			parsedPermissions: []Permission{siteUserOwnerROperm},
		},
		{
			name:              "site user with read and write permissions",
			graphPermissions:  []models.Permissionable{siteUserReadWritePerm},
			parsedPermissions: []Permission{siteUserReadWriteROperm},
		},
		{
			name:              "multiple site users with separate permissions",
			graphPermissions:  []models.Permissionable{siteUserReadPerm, siteUserReadWritePerm},
			parsedPermissions: []Permission{siteUserReadROperm, siteUserReadWriteROperm},
		},

		// group
		{
			name:              "group with read permissions",
			graphPermissions:  []models.Permissionable{groupReadPerm},
			parsedPermissions: []Permission{groupReadROperm},
		},
		{
			name:              "group with read and write permissions",
			graphPermissions:  []models.Permissionable{groupReadWritePerm},
			parsedPermissions: []Permission{groupReadWriteROperm},
		},
		{
			name:              "multiple groups with separate permissions",
			graphPermissions:  []models.Permissionable{groupReadPerm, groupReadWritePerm},
			parsedPermissions: []Permission{groupReadROperm, groupReadWriteROperm},
		},

		// site-group
		{
			name:              "site group with read permissions",
			graphPermissions:  []models.Permissionable{siteGroupReadPerm},
			parsedPermissions: []Permission{siteGroupReadROperm},
		},
		{
			name:              "site group with read and write permissions",
			graphPermissions:  []models.Permissionable{siteGroupReadWritePerm},
			parsedPermissions: []Permission{siteGroupReadWriteROperm},
		},
		{
			name:              "multiple site groups with separate permissions",
			graphPermissions:  []models.Permissionable{siteGroupReadPerm, siteGroupReadWritePerm},
			parsedPermissions: []Permission{siteGroupReadROperm, siteGroupReadWriteROperm},
		},
	}
	for _, tc := range cases {
		suite.Run(tc.name, func() {
			t := suite.T()

			ctx, flush := tester.NewContext(t)
			defer flush()

			actual := FilterPermissions(ctx, tc.graphPermissions)
			assert.ElementsMatch(t, tc.parsedPermissions, actual)
		})
	}
}

src/internal/connector/onedrive/metadata/testdata/permissions.go (vendored, new file, 54 lines)
@ -0,0 +1,54 @@
package testdata

import (
	"testing"

	"github.com/microsoftgraph/msgraph-sdk-go/models"
	"github.com/stretchr/testify/assert"

	"github.com/alcionai/corso/src/internal/connector/onedrive/metadata"
)

func AssertMetadataEqual(t *testing.T, expect, got metadata.Metadata) {
	assert.Equal(t, expect.FileName, got.FileName, "fileName")
	assert.Equal(t, expect.SharingMode, got.SharingMode, "sharingMode")
	assert.Equal(t, len(expect.Permissions), len(got.Permissions), "permissions count")

	for i, ep := range expect.Permissions {
		gp := got.Permissions[i]

		assert.Equal(t, ep.EntityType, gp.EntityType, "permission %d entityType", i)
		assert.Equal(t, ep.EntityID, gp.EntityID, "permission %d entityID", i)
		assert.Equal(t, ep.ID, gp.ID, "permission %d ID", i)
		assert.ElementsMatch(t, ep.Roles, gp.Roles, "permission %d roles", i)
	}
}

func NewStubPermissionResponse(
	gv2 metadata.GV2Type,
	permID, entityID string,
	roles []string,
) models.PermissionCollectionResponseable {
	var (
		p    = models.NewPermission()
		pcr  = models.NewPermissionCollectionResponse()
		spis = models.NewSharePointIdentitySet()
	)

	switch gv2 {
	case metadata.GV2User:
		i := models.NewIdentity()
		i.SetId(&entityID)
		i.SetDisplayName(&entityID)

		spis.SetUser(i)
	}

	p.SetGrantedToV2(spis)
	p.SetId(&permID)
	p.SetRoles(roles)

	pcr.SetValue([]models.Permissionable{p})

	return pcr
}

src/internal/connector/onedrive/mock/handlers.go (new file, 217 lines)
@ -0,0 +1,217 @@
package mock

import (
	"context"
	"net/http"

	"github.com/alcionai/clues"
	"github.com/microsoftgraph/msgraph-sdk-go/models"

	odConsts "github.com/alcionai/corso/src/internal/connector/onedrive/consts"
	"github.com/alcionai/corso/src/pkg/backup/details"
	"github.com/alcionai/corso/src/pkg/path"
	"github.com/alcionai/corso/src/pkg/services/m365/api"
)

// ---------------------------------------------------------------------------
// Backup Handler
// ---------------------------------------------------------------------------

type BackupHandler struct {
	ItemInfo details.ItemInfo

	GI  GetsItem
	GIP GetsItemPermission

	PathPrefixFn  pathPrefixer
	PathPrefixErr error

	CanonPathFn  canonPather
	CanonPathErr error

	Service  path.ServiceType
	Category path.CategoryType

	DrivePagerV api.DrivePager
	// driveID -> itemPager
	ItemPagerV map[string]api.DriveItemEnumerator

	LocationIDFn locationIDer

	getCall  int
	GetResps []*http.Response
	GetErrs  []error
}

func DefaultOneDriveBH() *BackupHandler {
	return &BackupHandler{
		ItemInfo:     details.ItemInfo{OneDrive: &details.OneDriveInfo{}},
		GI:           GetsItem{Err: clues.New("not defined")},
		GIP:          GetsItemPermission{Err: clues.New("not defined")},
		PathPrefixFn: defaultOneDrivePathPrefixer,
		CanonPathFn:  defaultOneDriveCanonPather,
		Service:      path.OneDriveService,
		Category:     path.FilesCategory,
		LocationIDFn: defaultOneDriveLocationIDer,
		GetResps:     []*http.Response{nil},
		GetErrs:      []error{clues.New("not defined")},
	}
}

func DefaultSharePointBH() *BackupHandler {
	return &BackupHandler{
		ItemInfo:     details.ItemInfo{SharePoint: &details.SharePointInfo{}},
		GI:           GetsItem{Err: clues.New("not defined")},
		GIP:          GetsItemPermission{Err: clues.New("not defined")},
		PathPrefixFn: defaultSharePointPathPrefixer,
		CanonPathFn:  defaultSharePointCanonPather,
		Service:      path.SharePointService,
		Category:     path.LibrariesCategory,
		LocationIDFn: defaultSharePointLocationIDer,
		GetResps:     []*http.Response{nil},
		GetErrs:      []error{clues.New("not defined")},
	}
}

func (h BackupHandler) PathPrefix(tID, ro, driveID string) (path.Path, error) {
	pp, err := h.PathPrefixFn(tID, ro, driveID)
	if err != nil {
		return nil, err
	}

	return pp, h.PathPrefixErr
}

func (h BackupHandler) CanonicalPath(pb *path.Builder, tID, ro string) (path.Path, error) {
	cp, err := h.CanonPathFn(pb, tID, ro)
	if err != nil {
		return nil, err
	}

	return cp, h.CanonPathErr
}

func (h BackupHandler) ServiceCat() (path.ServiceType, path.CategoryType) {
	return h.Service, h.Category
}

func (h BackupHandler) NewDrivePager(string, []string) api.DrivePager {
	return h.DrivePagerV
}

func (h BackupHandler) NewItemPager(driveID string, _ string, _ []string) api.DriveItemEnumerator {
	return h.ItemPagerV[driveID]
}

func (h BackupHandler) FormatDisplayPath(_ string, pb *path.Builder) string {
	return "/" + pb.String()
}

func (h BackupHandler) NewLocationIDer(driveID string, elems ...string) details.LocationIDer {
	return h.LocationIDFn(driveID, elems...)
}

func (h BackupHandler) AugmentItemInfo(details.ItemInfo, models.DriveItemable, int64, *path.Builder) details.ItemInfo {
	return h.ItemInfo
}

func (h *BackupHandler) Get(context.Context, string, map[string]string) (*http.Response, error) {
	c := h.getCall
	h.getCall++

	// allows mockers to only populate the errors slice
	if h.GetErrs[c] != nil {
		return nil, h.GetErrs[c]
	}

	return h.GetResps[c], h.GetErrs[c]
}

func (h BackupHandler) GetItem(ctx context.Context, _, _ string) (models.DriveItemable, error) {
	return h.GI.GetItem(ctx, "", "")
}

func (h BackupHandler) GetItemPermission(
	ctx context.Context,
	_, _ string,
) (models.PermissionCollectionResponseable, error) {
	return h.GIP.GetItemPermission(ctx, "", "")
}

type canonPather func(*path.Builder, string, string) (path.Path, error)

var defaultOneDriveCanonPather = func(pb *path.Builder, tID, ro string) (path.Path, error) {
	return pb.ToDataLayerOneDrivePath(tID, ro, false)
}

var defaultSharePointCanonPather = func(pb *path.Builder, tID, ro string) (path.Path, error) {
	return pb.ToDataLayerSharePointPath(tID, ro, path.LibrariesCategory, false)
}

type pathPrefixer func(tID, ro, driveID string) (path.Path, error)

var defaultOneDrivePathPrefixer = func(tID, ro, driveID string) (path.Path, error) {
	return path.Build(
		tID,
		ro,
		path.OneDriveService,
		path.FilesCategory,
		false,
		odConsts.DrivesPathDir,
		driveID,
		odConsts.RootPathDir)
}

var defaultSharePointPathPrefixer = func(tID, ro, driveID string) (path.Path, error) {
	return path.Build(
		tID,
		ro,
		path.SharePointService,
		path.LibrariesCategory,
		false,
		odConsts.DrivesPathDir,
		driveID,
		odConsts.RootPathDir)
}

type locationIDer func(string, ...string) details.LocationIDer

var defaultOneDriveLocationIDer = func(driveID string, elems ...string) details.LocationIDer {
	return details.NewOneDriveLocationIDer(driveID, elems...)
}

var defaultSharePointLocationIDer = func(driveID string, elems ...string) details.LocationIDer {
	return details.NewSharePointLocationIDer(driveID, elems...)
}

// ---------------------------------------------------------------------------
// Get Itemer
// ---------------------------------------------------------------------------

type GetsItem struct {
	Item models.DriveItemable
	Err  error
}

func (m GetsItem) GetItem(
	_ context.Context,
	_, _ string,
) (models.DriveItemable, error) {
	return m.Item, m.Err
}

// ---------------------------------------------------------------------------
// Get Item Permissioner
// ---------------------------------------------------------------------------

type GetsItemPermission struct {
	Perm models.PermissionCollectionResponseable
	Err  error
}

func (m GetsItemPermission) GetItemPermission(
	_ context.Context,
	_, _ string,
) (models.PermissionCollectionResponseable, error) {
	return m.Perm, m.Err
}
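
A sketch of how a test might wire this mock up (hypothetical snippet; the defaults return "not defined" errors, so a test only overrides the calls it actually exercises):

// Hypothetical test setup using the mock BackupHandler above.
bh := mock.DefaultOneDriveBH()
bh.GI = mock.GetsItem{Item: stubItem} // succeed item lookups
bh.GetResps = []*http.Response{
	{StatusCode: http.StatusOK, Body: io.NopCloser(strings.NewReader("file body"))},
}
bh.GetErrs = []error{nil} // Get consumes resps/errs pairwise, per call count

item, err := bh.GetItem(ctx, "driveID", "itemID")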

src/internal/connector/onedrive/mock/item.go (new file, 47 lines)
@ -0,0 +1,47 @@
package mock

//nolint:lll
const DriveFilePayloadData = `{
	"@odata.context": "https://graph.microsoft.com/v1.0/$metadata#drives('b%22-8wC6Jt04EWvKr1fQUDOyw5Gk8jIUJdEjzqonlSRf48i67LJdwopT4-6kiycJ5AV')/items/$entity",
	"@microsoft.graph.downloadUrl": "https://test-my.sharepoint.com/personal/brunhilda_test_onmicrosoft_com/_layouts/15/download.aspx?UniqueId=deadbeef-1b6a-4d13-aae6-bf5f9b07d424&Translate=false&tempauth=eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJhdWQiOiIwMDAwMDAwMy0wMDAwLTBmZjEtY2UwMC0wMDAwMDAwMDAwMDAvMTBycWMyLW15LnNoYXJlcG9pbnQuY29tQGZiOGFmYmFhLWU5NGMtNGVhNS04YThhLTI0YWZmMDRkNzg3NCIsImlzcyI6IjAwMDAwMDAzLTAwMDAtMGZmMS1jZTAwLTAwMDAwMDAwMDAwMCIsIm5iZiI6IjE2ODUxMjk1MzIiLCJleHAiOiIxNjg1MTMzMTMyIiwiZW5kcG9pbnR1cmwiOiJkTStxblBIQitkNDMzS0ErTHVTUVZMRi9IaVliSkI2eHJWN0tuYk45aXQ0PSIsImVuZHBvaW50dXJsTGVuZ3RoIjoiMTYxIiwiaXNsb29wYmFjayI6IlRydWUiLCJjaWQiOiJOVFl4TXpNMFkyWXRZVFk0TVMwMFpXUmxMVGt5TjJZdFlXVmpNVGMwTldWbU16TXgiLCJ2ZXIiOiJoYXNoZWRwcm9vZnRva2VuIiwic2l0ZWlkIjoiWlRnd01tTmpabUl0TnpRNVlpMDBOV1V3TFdGbU1tRXRZbVExWmpReE5EQmpaV05pIiwiYXBwX2Rpc3BsYXluYW1lIjoiS2VlcGVyc19Mb2NhbCIsIm5hbWVpZCI6ImFkYjk3MTQ2LTcxYTctNDkxYS05YWMwLWUzOGFkNzdkZWViNkBmYjhhZmJhYS1lOTRjLTRlYTUtOGE4YS0yNGFmZjA0ZDc4NzQiLCJyb2xlcyI6ImFsbHNpdGVzLndyaXRlIGFsbHNpdGVzLm1hbmFnZSBhbGxmaWxlcy53cml0ZSBhbGxzaXRlcy5mdWxsY29udHJvbCBhbGxwcm9maWxlcy5yZWFkIiwidHQiOiIxIiwidXNlUGVyc2lzdGVudENvb2tpZSI6bnVsbCwiaXBhZGRyIjoiMjA1MTkwLjE1Ny4zMCJ9.lN7Vpfzk1abEyE0M3gyRyZXEaGQ3JMXCyaXUBNbD5Vo&ApiVersion=2.0",
	"createdDateTime": "2023-04-25T21:32:58Z",
	"eTag": "\"{DEADBEEF-1B6A-4D13-AAE6-BF5F9B07D424},1\"",
	"id": "017W47IH3FQVEFI23QCNG2VZV7L6NQPVBE",
	"lastModifiedDateTime": "2023-04-25T21:32:58Z",
	"name": "huehuehue.GIF",
	"webUrl": "https://test-my.sharepoint.com/personal/brunhilda_test_onmicrosoft_com/Documents/test/huehuehue.GIF",
	"cTag": "\"c:{DEADBEEF-1B6A-4D13-AAE6-BF5F9B07D424},1\"",
	"size": 88843,
	"createdBy": {
		"user": {
			"email": "brunhilda@test.onmicrosoft.com",
			"id": "DEADBEEF-4c80-4da4-86ef-a08d8d6f0f94",
			"displayName": "BrunHilda"
		}
	},
	"lastModifiedBy": {
		"user": {
			"email": "brunhilda@10rqc2.onmicrosoft.com",
			"id": "DEADBEEF-4c80-4da4-86ef-a08d8d6f0f94",
			"displayName": "BrunHilda"
		}
	},
	"parentReference": {
		"driveType": "business",
		"driveId": "b!-8wC6Jt04EWvKr1fQUDOyw5Gk8jIUJdEjzqonlSRf48i67LJdwopT4-6kiycJ5VA",
		"id": "017W47IH6DRQF2GS2N6NGWLZRS7RUJ2DIP",
		"path": "/drives/b!-8wC6Jt04EWvKr1fQUDOyw5Gk8jIUJdEjzqonlSRf48i67LJdwopT4-6kiycJ5VA/root:/test",
		"siteId": "DEADBEEF-749b-45e0-af2a-bd5f4140cecb"
	},
	"file": {
		"mimeType": "image/gif",
		"hashes": {
			"quickXorHash": "sU5rmXOvVFn6zJHpCPro9cYaK+Q="
		}
	},
	"fileSystemInfo": {
		"createdDateTime": "2023-04-25T21:32:58Z",
		"lastModifiedDateTime": "2023-04-25T21:32:58Z"
	},
	"image": {}
}`
@ -8,13 +8,10 @@ import (
	"github.com/microsoftgraph/msgraph-sdk-go/models"

	"github.com/alcionai/corso/src/internal/common/ptr"
	"github.com/alcionai/corso/src/internal/connector/graph"
	"github.com/alcionai/corso/src/internal/connector/onedrive/metadata"
	"github.com/alcionai/corso/src/internal/data"
	"github.com/alcionai/corso/src/internal/version"
	"github.com/alcionai/corso/src/pkg/account"
	"github.com/alcionai/corso/src/pkg/path"
	"github.com/alcionai/corso/src/pkg/services/m365/api"
)

func getParentMetadata(
@ -132,12 +129,16 @@ func computeParentPermissions(
		}
	}

type updateDeleteItemPermissioner interface {
	DeleteItemPermissioner
	UpdateItemPermissioner
}

// UpdatePermissions takes in the set of permissions to be added and
// removed from an item to bring it to the desired state.
func UpdatePermissions(
	ctx context.Context,
	creds account.M365Config,
	service graph.Servicer,
	udip updateDeleteItemPermissioner,
	driveID string,
	itemID string,
	permAdded, permRemoved []metadata.Permission,
@ -161,9 +162,8 @@ func UpdatePermissions(
		return clues.New("no new permission id").WithClues(ctx)
	}

	err := api.DeleteDriveItemPermission(
	err := udip.DeleteItemPermission(
		ictx,
		creds,
		driveID,
		itemID,
		pid)
@ -216,7 +216,7 @@ func UpdatePermissions(

	pbody.SetRecipients([]models.DriveRecipientable{rec})

	newPerm, err := api.PostItemPermissionUpdate(ictx, service, driveID, itemID, pbody)
	newPerm, err := udip.PostItemPermissionUpdate(ictx, driveID, itemID, pbody)
	if err != nil {
		return clues.Stack(err)
	}
@ -233,8 +233,7 @@ func UpdatePermissions(
// on onedrive items.
func RestorePermissions(
	ctx context.Context,
	creds account.M365Config,
	service graph.Servicer,
	rh RestoreHandler,
	driveID string,
	itemID string,
	itemPath path.Path,
@ -256,8 +255,7 @@ func RestorePermissions(

	return UpdatePermissions(
		ctx,
		creds,
		service,
		rh,
		driveID,
		itemID,
		permAdded,

@ -1,7 +1,6 @@
package onedrive

import (
	"fmt"
	"strings"
	"testing"

@ -9,6 +8,7 @@ import (
	"github.com/stretchr/testify/require"
	"github.com/stretchr/testify/suite"

	odConsts "github.com/alcionai/corso/src/internal/connector/onedrive/consts"
	"github.com/alcionai/corso/src/internal/connector/onedrive/metadata"
	"github.com/alcionai/corso/src/internal/tester"
	"github.com/alcionai/corso/src/pkg/path"
@ -36,8 +36,8 @@ func runComputeParentPermissionsTest(
	category path.CategoryType,
	resourceOwner string,
) {
	entryPath := fmt.Sprintf(rootDrivePattern, "drive-id") + "/level0/level1/level2/entry"
	rootEntryPath := fmt.Sprintf(rootDrivePattern, "drive-id") + "/entry"
	entryPath := odConsts.DriveFolderPrefixBuilder("drive-id").String() + "/level0/level1/level2/entry"
	rootEntryPath := odConsts.DriveFolderPrefixBuilder("drive-id").String() + "/entry"

	entry, err := path.Build(
		"tenant",

@ -22,7 +22,6 @@ import (
	"github.com/alcionai/corso/src/internal/diagnostics"
	"github.com/alcionai/corso/src/internal/observe"
	"github.com/alcionai/corso/src/internal/version"
	"github.com/alcionai/corso/src/pkg/account"
	"github.com/alcionai/corso/src/pkg/backup/details"
	"github.com/alcionai/corso/src/pkg/control"
	"github.com/alcionai/corso/src/pkg/fault"
@ -41,6 +40,7 @@ type restoreCaches struct {
	ParentDirToMeta       map[string]metadata.Metadata
	OldPermIDToNewID      map[string]string
	DriveIDToRootFolderID map[string]string
	pool sync.Pool
}

func NewRestoreCaches() *restoreCaches {
@ -49,20 +49,25 @@ func NewRestoreCaches() *restoreCaches {
		ParentDirToMeta:       map[string]metadata.Metadata{},
		OldPermIDToNewID:      map[string]string{},
		DriveIDToRootFolderID: map[string]string{},
		// Buffer pool for uploads
		pool: sync.Pool{
			New: func() interface{} {
				b := make([]byte, graph.CopyBufferSize)
				return &b
			},
		},
	}
}
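
The pool replaces the buffer pool that used to be threaded through as a parameter: buffers of graph.CopyBufferSize are reused across item uploads instead of being reallocated per item. The checkout/return pattern, as used later in RestoreCollection:

// Buffers are handed out as *[]byte so the slice header isn't
// copied through the pool's interface{} boundary on every Get/Put.
copyBufferPtr := caches.pool.Get().(*[]byte)
defer caches.pool.Put(copyBufferPtr)

copyBuffer := *copyBufferPtr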

// RestoreCollections will restore the specified data collections into OneDrive
func RestoreCollections(
	ctx context.Context,
	creds account.M365Config,
	rh RestoreHandler,
	backupVersion int,
	service graph.Servicer,
	dest control.RestoreDestination,
	opts control.Options,
	dcs []data.RestoreCollection,
	deets *details.Builder,
	pool *sync.Pool,
	errs *fault.Bus,
) (*support.ConnectorOperationStatus, error) {
	var (
@ -99,16 +104,13 @@ func RestoreCollections(

		metrics, err = RestoreCollection(
			ictx,
			creds,
			rh,
			backupVersion,
			service,
			dc,
			caches,
			OneDriveSource,
			dest.ContainerName,
			deets,
			opts.RestorePermissions,
			pool,
			errs)
		if err != nil {
			el.AddRecoverable(err)
@ -138,16 +140,13 @@ func RestoreCollections(
// - error, if any besides recoverable
func RestoreCollection(
	ctx context.Context,
	creds account.M365Config,
	rh RestoreHandler,
	backupVersion int,
	service graph.Servicer,
	dc data.RestoreCollection,
	caches *restoreCaches,
	source driveSource,
	restoreContainerName string,
	deets *details.Builder,
	restorePerms bool,
	pool *sync.Pool,
	errs *fault.Bus,
) (support.CollectionMetrics, error) {
	var (
@ -170,7 +169,7 @@ func RestoreCollection(
	}

	if _, ok := caches.DriveIDToRootFolderID[drivePath.DriveID]; !ok {
		root, err := api.GetDriveRoot(ctx, service, drivePath.DriveID)
		root, err := rh.GetRootFolder(ctx, drivePath.DriveID)
		if err != nil {
			return metrics, clues.Wrap(err, "getting drive root id")
		}
@ -207,8 +206,7 @@ func RestoreCollection(
	// Create restore folders and get the folder ID of the folder the data stream will be restored in
	restoreFolderID, err := CreateRestoreFolders(
		ctx,
		creds,
		service,
		rh,
		drivePath,
		restoreDir,
		dc.FullPath(),
@ -267,11 +265,10 @@ func RestoreCollection(
			defer wg.Done()
			defer func() { <-semaphoreCh }()

			copyBufferPtr := pool.Get().(*[]byte)
			defer pool.Put(copyBufferPtr)
			copyBufferPtr := caches.pool.Get().(*[]byte)
			defer caches.pool.Put(copyBufferPtr)

			copyBuffer := *copyBufferPtr

			ictx := clues.Add(ctx, "restore_item_id", itemData.UUID())

			itemPath, err := dc.FullPath().AppendItem(itemData.UUID())
@ -282,11 +279,9 @@ func RestoreCollection(

			itemInfo, skipped, err := restoreItem(
				ictx,
				creds,
				rh,
				dc,
				backupVersion,
				source,
				service,
				drivePath,
				restoreFolderID,
				copyBuffer,
@ -332,11 +327,9 @@ func RestoreCollection(
// returns the item info, a bool (true = restore was skipped), and an error
func restoreItem(
	ctx context.Context,
	creds account.M365Config,
	dc data.RestoreCollection,
	rh RestoreHandler,
	fibn data.FetchItemByNamer,
	backupVersion int,
	source driveSource,
	service graph.Servicer,
	drivePath *path.DrivePath,
	restoreFolderID string,
	copyBuffer []byte,
@ -351,10 +344,9 @@ func restoreItem(
	if backupVersion < version.OneDrive1DataAndMetaFiles {
		itemInfo, err := restoreV0File(
			ctx,
			source,
			service,
			rh,
			drivePath,
			dc,
			fibn,
			restoreFolderID,
			copyBuffer,
			itemData)
@ -401,11 +393,9 @@ func restoreItem(
	if backupVersion < version.OneDrive6NameInMeta {
		itemInfo, err := restoreV1File(
			ctx,
			source,
			creds,
			service,
			rh,
			drivePath,
			dc,
			fibn,
			restoreFolderID,
			copyBuffer,
			restorePerms,
@ -423,11 +413,9 @@ func restoreItem(

	itemInfo, err := restoreV6File(
		ctx,
		source,
		creds,
		service,
		rh,
		drivePath,
		dc,
		fibn,
		restoreFolderID,
		copyBuffer,
		restorePerms,
@ -443,24 +431,22 @@ func restoreItem(

func restoreV0File(
	ctx context.Context,
	source driveSource,
	service graph.Servicer,
	rh RestoreHandler,
	drivePath *path.DrivePath,
	fetcher fileFetcher,
	fibn data.FetchItemByNamer,
	restoreFolderID string,
	copyBuffer []byte,
	itemData data.Stream,
) (details.ItemInfo, error) {
	_, itemInfo, err := restoreData(
		ctx,
		service,
		fetcher,
		rh,
		fibn,
		itemData.UUID(),
		itemData,
		drivePath.DriveID,
		restoreFolderID,
		copyBuffer,
		source)
		copyBuffer)
	if err != nil {
		return itemInfo, clues.Wrap(err, "restoring file")
	}
@ -468,17 +454,11 @@ func restoreV0File(
	return itemInfo, nil
}

type fileFetcher interface {
	Fetch(ctx context.Context, name string) (data.Stream, error)
}

func restoreV1File(
	ctx context.Context,
	source driveSource,
	creds account.M365Config,
	service graph.Servicer,
	rh RestoreHandler,
	drivePath *path.DrivePath,
	fetcher fileFetcher,
	fibn data.FetchItemByNamer,
	restoreFolderID string,
	copyBuffer []byte,
	restorePerms bool,
@ -490,14 +470,13 @@ func restoreV1File(

	itemID, itemInfo, err := restoreData(
		ctx,
		service,
		fetcher,
		rh,
		fibn,
		trimmedName,
		itemData,
		drivePath.DriveID,
		restoreFolderID,
		copyBuffer,
		source)
		copyBuffer)
	if err != nil {
		return details.ItemInfo{}, err
	}
@ -511,15 +490,14 @@ func restoreV1File(
	// Fetch item permissions from the collection and restore them.
	metaName := trimmedName + metadata.MetaFileSuffix

	meta, err := fetchAndReadMetadata(ctx, fetcher, metaName)
	meta, err := fetchAndReadMetadata(ctx, fibn, metaName)
	if err != nil {
		return details.ItemInfo{}, clues.Wrap(err, "restoring file")
	}

	err = RestorePermissions(
		ctx,
		creds,
		service,
		rh,
		drivePath.DriveID,
		itemID,
		itemPath,
@ -534,11 +512,9 @@ func restoreV1File(

func restoreV6File(
	ctx context.Context,
	source driveSource,
	creds account.M365Config,
	service graph.Servicer,
	rh RestoreHandler,
	drivePath *path.DrivePath,
	fetcher fileFetcher,
	fibn data.FetchItemByNamer,
	restoreFolderID string,
	copyBuffer []byte,
	restorePerms bool,
@ -551,7 +527,7 @@ func restoreV6File(
	// Get metadata file so we can determine the file name.
	metaName := trimmedName + metadata.MetaFileSuffix

	meta, err := fetchAndReadMetadata(ctx, fetcher, metaName)
	meta, err := fetchAndReadMetadata(ctx, fibn, metaName)
	if err != nil {
		return details.ItemInfo{}, clues.Wrap(err, "restoring file")
	}
@ -574,14 +550,13 @@ func restoreV6File(

	itemID, itemInfo, err := restoreData(
		ctx,
		service,
		fetcher,
		rh,
		fibn,
		meta.FileName,
		itemData,
		drivePath.DriveID,
		restoreFolderID,
		copyBuffer,
		source)
		copyBuffer)
	if err != nil {
		return details.ItemInfo{}, err
	}
@ -594,8 +569,7 @@ func restoreV6File(

	err = RestorePermissions(
		ctx,
		creds,
		service,
		rh,
		drivePath.DriveID,
		itemID,
		itemPath,
@ -615,8 +589,7 @@ func restoreV6File(
// folderCache is mutated, as a side effect of populating the items.
func CreateRestoreFolders(
	ctx context.Context,
	creds account.M365Config,
	service graph.Servicer,
	rh RestoreHandler,
	drivePath *path.DrivePath,
	restoreDir *path.Builder,
	folderPath path.Path,
@ -626,7 +599,7 @@ func CreateRestoreFolders(
) (string, error) {
	id, err := createRestoreFolders(
		ctx,
		service,
		rh,
		drivePath,
		restoreDir,
		caches)
@ -645,8 +618,7 @@ func CreateRestoreFolders(

	err = RestorePermissions(
		ctx,
		creds,
		service,
		rh,
		drivePath.DriveID,
		id,
		folderPath,
@ -656,12 +628,17 @@ func CreateRestoreFolders(
	return id, err
}

type folderRestorer interface {
	GetFolderByNamer
	PostItemInContainerer
}

// createRestoreFolders creates the restore folder hierarchy in the specified
// drive and returns the folder ID of the last folder entry in the hierarchy.
// folderCache is mutated, as a side effect of populating the items.
func createRestoreFolders(
	ctx context.Context,
	service graph.Servicer,
	fr folderRestorer,
	drivePath *path.DrivePath,
	restoreDir *path.Builder,
	caches *restoreCaches,
@ -692,7 +669,7 @@ func createRestoreFolders(
			continue
		}

		folderItem, err := api.GetFolderByName(ictx, service, driveID, parentFolderID, folder)
		folderItem, err := fr.GetFolderByName(ictx, driveID, parentFolderID, folder)
		if err != nil && !errors.Is(err, api.ErrFolderNotFound) {
			return "", clues.Wrap(err, "getting folder by display name")
		}
@ -706,7 +683,7 @@ func createRestoreFolders(
		}

		// create the folder if not found
		folderItem, err = CreateItem(ictx, service, driveID, parentFolderID, newItem(folder, true))
		folderItem, err = fr.PostItemInContainer(ictx, driveID, parentFolderID, newItem(folder, true))
		if err != nil {
			return "", clues.Wrap(err, "creating folder")
		}
@ -720,16 +697,21 @@ func createRestoreFolders(
	return parentFolderID, nil
}

type itemRestorer interface {
	ItemInfoAugmenter
	NewItemContentUploader
	PostItemInContainerer
}

// restoreData will create a new item in the specified `parentFolderID` and upload the data.Stream
func restoreData(
	ctx context.Context,
	service graph.Servicer,
	fetcher fileFetcher,
	ir itemRestorer,
	fibn data.FetchItemByNamer,
	name string,
	itemData data.Stream,
	driveID, parentFolderID string,
	copyBuffer []byte,
	source driveSource,
) (string, details.ItemInfo, error) {
	ctx, end := diagnostics.Span(ctx, "gc:oneDrive:restoreItem", diagnostics.Label("item_uuid", itemData.UUID()))
	defer end()
@ -743,17 +725,15 @@ func restoreData(
	}

	// Create Item
	newItem, err := CreateItem(ctx, service, driveID, parentFolderID, newItem(name, false))
	newItem, err := ir.PostItemInContainer(ctx, driveID, parentFolderID, newItem(name, false))
	if err != nil {
		return "", details.ItemInfo{}, err
	}

	itemID := ptr.Val(newItem.GetId())
	ctx = clues.Add(ctx, "upload_item_id", itemID)

	r, err := api.PostDriveItem(ctx, service, driveID, itemID)
	// Get a drive item writer
	w, uploadURL, err := driveItemWriter(ctx, ir, driveID, ptr.Val(newItem.GetId()), ss.Size())
	if err != nil {
		return "", details.ItemInfo{}, clues.Wrap(err, "get upload session")
		return "", details.ItemInfo{}, clues.Wrap(err, "get item upload session")
	}

	var written int64
@ -765,12 +745,6 @@ func restoreData(
	// show "register" any partial file uploads and so if we fail an
	// upload the file size will be 0.
	for i := 0; i <= maxUploadRetries; i++ {
		// Initialize and return an io.Writer to upload data for the
		// specified item. It does so by creating an upload session and
		// using that URL to initialize an `itemWriter`
		// TODO: @vkamra verify if var session is the desired input
		w := graph.NewLargeItemWriter(itemID, ptr.Val(r.GetUploadUrl()), ss.Size())

		pname := name
		iReader := itemData.ToReader()

@ -780,7 +754,7 @@ func restoreData(
		// If it is not the first try, we have to pull the file
		// again from kopia. Ideally we could just seek the stream
		// but we don't have a Seeker available here.
		itemData, err := fetcher.Fetch(ctx, itemData.UUID())
		itemData, err := fibn.FetchItemByName(ctx, itemData.UUID())
		if err != nil {
			return "", details.ItemInfo{}, clues.Wrap(err, "get data file")
		}
@ -803,32 +777,29 @@ func restoreData(

		// clear out the bar if err
		abort()

		// refresh the io.Writer to restart the upload
		// TODO: @vkamra verify if var session is the desired input
		w = graph.NewLargeItemWriter(ptr.Val(newItem.GetId()), uploadURL, ss.Size())
	}

	if err != nil {
		return "", details.ItemInfo{}, clues.Wrap(err, "uploading file")
	}

	dii := details.ItemInfo{}

	switch source {
	case SharePointSource:
		dii.SharePoint = sharePointItemInfo(newItem, written)
	default:
		dii.OneDrive = oneDriveItemInfo(newItem, written)
	}
	dii := ir.AugmentItemInfo(details.ItemInfo{}, newItem, written, nil)

	return ptr.Val(newItem.GetId()), dii, nil
}
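
Condensed, the upload path in restoreData now looks like this (a sketch, not the verbatim code; progress reporting and error bookkeeping are elided):

// Sketch of the retry shape above: the writer is created once from the
// upload session, and on each failed attempt the source data is
// re-fetched from kopia and a fresh writer is built against the same
// uploadURL before retrying.
w, uploadURL, err := driveItemWriter(ctx, ir, driveID, itemID, ss.Size())

for i := 0; i <= maxUploadRetries; i++ {
	written, err = io.CopyBuffer(w, itemData.ToReader(), copyBuffer)
	if err == nil {
		break
	}

	itemData, err = fibn.FetchItemByName(ctx, itemData.UUID()) // re-pull from kopia
	if err != nil {
		break
	}

	w = graph.NewLargeItemWriter(itemID, uploadURL, ss.Size())
}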
|
||||
|
||||
func fetchAndReadMetadata(
|
||||
ctx context.Context,
|
||||
fetcher fileFetcher,
|
||||
fibn data.FetchItemByNamer,
|
||||
metaName string,
|
||||
) (metadata.Metadata, error) {
|
||||
ctx = clues.Add(ctx, "meta_file_name", metaName)
|
||||
|
||||
metaFile, err := fetcher.Fetch(ctx, metaName)
|
||||
metaFile, err := fibn.FetchItemByName(ctx, metaName)
|
||||
if err != nil {
|
||||
return metadata.Metadata{}, clues.Wrap(err, "getting item metadata")
|
||||
}
|
||||
|
||||
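Note: the reworked loop above re-fetches the source stream and rebuilds the writer from the cached upload URL on every failed attempt. A minimal sketch of that retry shape, with hypothetical fetch/newWriter stand-ins for fibn.FetchItemByName and graph.NewLargeItemWriter (not the actual Corso signatures):

package sketch

import (
    "context"
    "io"
)

func uploadWithRetry(
    ctx context.Context,
    maxRetries int,
    fetch func(context.Context) (io.Reader, error),
    newWriter func() io.Writer,
    copyBuffer []byte,
) (int64, error) {
    var (
        written int64
        err     error
    )

    for i := 0; i <= maxRetries; i++ {
        // a fresh writer restarts the upload session from zero
        w := newWriter()

        // re-pull the source; the stream is not seekable
        r, ferr := fetch(ctx)
        if ferr != nil {
            return 0, ferr
        }

        written, err = io.CopyBuffer(w, r, copyBuffer)
        if err == nil {
            break
        }
    }

    return written, err
}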
@ -4,55 +4,29 @@ import (
    "testing"

    "github.com/alcionai/clues"
    msgraphsdk "github.com/microsoftgraph/msgraph-sdk-go"
    "github.com/stretchr/testify/require"

    "github.com/alcionai/corso/src/internal/connector/graph"
    "github.com/alcionai/corso/src/internal/connector/support"
    "github.com/alcionai/corso/src/internal/tester"
    "github.com/alcionai/corso/src/pkg/account"
    "github.com/alcionai/corso/src/pkg/services/m365/api"
)

type MockGraphService struct{}

func (ms *MockGraphService) Client() *msgraphsdk.GraphServiceClient {
    return nil
}

func (ms *MockGraphService) Adapter() *msgraphsdk.GraphRequestAdapter {
    return nil
}

var _ graph.Servicer = &oneDriveService{}

// TODO(ashmrtn): Merge with similar structs in graph and exchange packages.
type oneDriveService struct {
    client      msgraphsdk.GraphServiceClient
    adapter     msgraphsdk.GraphRequestAdapter
    credentials account.M365Config
    status      support.ConnectorOperationStatus
}

func (ods *oneDriveService) Client() *msgraphsdk.GraphServiceClient {
    return &ods.client
}

func (ods *oneDriveService) Adapter() *msgraphsdk.GraphRequestAdapter {
    return &ods.adapter
    ac api.Client
}

func NewOneDriveService(credentials account.M365Config) (*oneDriveService, error) {
    adapter, err := graph.CreateAdapter(
        credentials.AzureTenantID,
        credentials.AzureClientID,
        credentials.AzureClientSecret)
    ac, err := api.NewClient(credentials)
    if err != nil {
        return nil, err
    }

    service := oneDriveService{
        adapter:     *adapter,
        client:      *msgraphsdk.NewGraphServiceClient(adapter),
        ac:          ac,
        credentials: credentials,
    }

@ -70,10 +44,10 @@ func (ods *oneDriveService) updateStatus(status *support.ConnectorOperationStatu
func loadTestService(t *testing.T) *oneDriveService {
    a := tester.NewM365Account(t)

    m365, err := a.M365Config()
    creds, err := a.M365Config()
    require.NoError(t, err, clues.ToCore(err))

    service, err := NewOneDriveService(m365)
    service, err := NewOneDriveService(creds)
    require.NoError(t, err, clues.ToCore(err))

    return service

src/internal/connector/onedrive/testdata/item.go (vendored, new file, +32)
@ -0,0 +1,32 @@
package testdata

import (
    "time"

    "github.com/microsoftgraph/msgraph-sdk-go/models"
)

func NewStubDriveItem(
    id, name string,
    size int64,
    created, modified time.Time,
    isFile, isShared bool,
) models.DriveItemable {
    stubItem := models.NewDriveItem()
    stubItem.SetId(&id)
    stubItem.SetName(&name)
    stubItem.SetSize(&size)
    stubItem.SetCreatedDateTime(&created)
    stubItem.SetLastModifiedDateTime(&modified)
    stubItem.SetAdditionalData(map[string]any{"@microsoft.graph.downloadUrl": "https://corsobackup.io"})

    if isFile {
        stubItem.SetFile(models.NewFile())
    }

    if isShared {
        stubItem.SetShared(&models.Shared{})
    }

    return stubItem
}

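A short usage sketch for the stub builder above; the values and test name are arbitrary illustrations, not part of the commit:

package testdata_test

import (
    "testing"
    "time"

    "github.com/alcionai/corso/src/internal/connector/onedrive/testdata"
)

func TestNewStubDriveItem_Sketch(t *testing.T) {
    now := time.Now()

    // build a non-shared file item with fixed metadata
    item := testdata.NewStubDriveItem(
        "item-id", "file.txt",
        42,       // size in bytes
        now, now, // created, modified
        true,     // isFile
        false)    // isShared

    if got := item.GetName(); got == nil || *got != "file.txt" {
        t.Errorf("unexpected name: %v", got)
    }
}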
@ -9,9 +9,9 @@ import (
    "github.com/microsoftgraph/msgraph-sdk-go/models"

    "github.com/alcionai/corso/src/internal/common/ptr"
    "github.com/alcionai/corso/src/internal/connector/graph"
    "github.com/alcionai/corso/src/pkg/fault"
    "github.com/alcionai/corso/src/pkg/logger"
    "github.com/alcionai/corso/src/pkg/services/m365/api"
)

type itemProps struct {
@ -31,8 +31,7 @@ type urlCache struct {
    refreshMu       sync.Mutex
    deltaQueryCount int

    svc           graph.Servicer
    itemPagerFunc driveItemPagerFunc
    itemPager api.DriveItemEnumerator

    errors *fault.Bus
}
@ -41,15 +40,13 @@ type urlCache struct {
func newURLCache(
    driveID string,
    refreshInterval time.Duration,
    svc graph.Servicer,
    itemPager api.DriveItemEnumerator,
    errors *fault.Bus,
    itemPagerFunc driveItemPagerFunc,
) (*urlCache, error) {
    err := validateCacheParams(
        driveID,
        refreshInterval,
        svc,
        itemPagerFunc)
        itemPager)
    if err != nil {
        return nil, clues.Wrap(err, "cache params")
    }
@ -59,8 +56,7 @@ func newURLCache(
            lastRefreshTime: time.Time{},
            driveID:         driveID,
            refreshInterval: refreshInterval,
            svc:             svc,
            itemPagerFunc:   itemPagerFunc,
            itemPager:       itemPager,
            errors:          errors,
        },
        nil
@ -70,8 +66,7 @@ func newURLCache(
func validateCacheParams(
    driveID string,
    refreshInterval time.Duration,
    svc graph.Servicer,
    itemPagerFunc driveItemPagerFunc,
    itemPager api.DriveItemEnumerator,
) error {
    if len(driveID) == 0 {
        return clues.New("drive id is empty")
@ -81,11 +76,7 @@ func validateCacheParams(
        return clues.New("invalid refresh interval")
    }

    if svc == nil {
        return clues.New("nil graph servicer")
    }

    if itemPagerFunc == nil {
    if itemPager == nil {
        return clues.New("nil item pager")
    }

@ -174,7 +165,7 @@ func (uc *urlCache) deltaQuery(

    _, _, _, err := collectItems(
        ctx,
        uc.itemPagerFunc(uc.svc, uc.driveID, ""),
        uc.itemPager,
        uc.driveID,
        "",
        uc.updateCache,

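For reference, a hypothetical wiring of the reworked constructor: the caller now builds the concrete item pager once and injects it, with no graph.Servicer involved (this mirrors the integration test below; the helper name is illustrative):

func buildURLCache(ac api.Client, driveID string) (*urlCache, error) {
    // pager construction moves to the caller
    pager := ac.Drives().NewItemPager(driveID, "", api.DriveItemSelectDefault())

    return newURLCache(
        driveID,
        1*time.Hour, // refresh interval
        pager,
        fault.New(true))
}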
@ -16,13 +16,12 @@ import (
    "github.com/alcionai/corso/src/internal/connector/graph"
    "github.com/alcionai/corso/src/internal/tester"
    "github.com/alcionai/corso/src/pkg/fault"
    "github.com/alcionai/corso/src/pkg/logger"
    "github.com/alcionai/corso/src/pkg/services/m365/api"
)

type URLCacheIntegrationSuite struct {
    tester.Suite
    service graph.Servicer
    ac      api.Client
    user    string
    driveID string
}
@ -41,69 +40,60 @@ func (suite *URLCacheIntegrationSuite) SetupSuite() {
    ctx, flush := tester.NewContext(t)
    defer flush()

    suite.service = loadTestService(t)
    suite.user = tester.SecondaryM365UserID(t)

    pager, err := PagerForSource(OneDriveSource, suite.service, suite.user, nil)
    acct := tester.NewM365Account(t)

    creds, err := acct.M365Config()
    require.NoError(t, err, clues.ToCore(err))

    odDrives, err := api.GetAllDrives(ctx, pager, true, maxDrivesRetries)
    suite.ac, err = api.NewClient(creds)
    require.NoError(t, err, clues.ToCore(err))
    require.Greaterf(t, len(odDrives), 0, "user %s does not have a drive", suite.user)
    suite.driveID = ptr.Val(odDrives[0].GetId())

    drive, err := suite.ac.Users().GetDefaultDrive(ctx, suite.user)
    require.NoError(t, err, clues.ToCore(err))

    suite.driveID = ptr.Val(drive.GetId())
}

// Basic test for urlCache. Create some files in onedrive, then access them via
// url cache
func (suite *URLCacheIntegrationSuite) TestURLCacheBasic() {
    t := suite.T()
    var (
        t              = suite.T()
        ac             = suite.ac.Drives()
        driveID        = suite.driveID
        newFolderName  = tester.DefaultTestRestoreDestination("folder").ContainerName
        driveItemPager = suite.ac.Drives().NewItemPager(driveID, "", api.DriveItemSelectDefault())
    )

    ctx, flush := tester.NewContext(t)
    defer flush()

    svc := suite.service
    driveID := suite.driveID

    // Create a new test folder
    root, err := svc.Client().Drives().ByDriveId(driveID).Root().Get(ctx, nil)
    root, err := ac.GetRootFolder(ctx, driveID)
    require.NoError(t, err, clues.ToCore(err))

    newFolderName := tester.DefaultTestRestoreDestination("folder").ContainerName

    newFolder, err := CreateItem(
    newFolder, err := ac.Drives().PostItemInContainer(
        ctx,
        svc,
        driveID,
        ptr.Val(root.GetId()),
        newItem(newFolderName, true))
    require.NoError(t, err, clues.ToCore(err))
    require.NotNil(t, newFolder.GetId())

    // Delete folder on exit
    defer func() {
        ictx := clues.Add(ctx, "folder_id", ptr.Val(newFolder.GetId()))

        err := api.DeleteDriveItem(
            ictx,
            loadTestService(t),
            driveID,
            ptr.Val(newFolder.GetId()))
        if err != nil {
            logger.CtxErr(ictx, err).Errorw("deleting folder")
        }
    }()
    nfid := ptr.Val(newFolder.GetId())

    // Create a bunch of files in the new folder
    var items []models.DriveItemable

    for i := 0; i < 10; i++ {
        newItemName := "testItem_" + dttm.FormatNow(dttm.SafeForTesting)
        newItemName := "test_url_cache_basic_" + dttm.FormatNow(dttm.SafeForTesting)

        item, err := CreateItem(
        item, err := ac.Drives().PostItemInContainer(
            ctx,
            svc,
            driveID,
            ptr.Val(newFolder.GetId()),
            nfid,
            newItem(newItemName, false))
        if err != nil {
            // Something bad happened, skip this item
@ -117,12 +107,14 @@ func (suite *URLCacheIntegrationSuite) TestURLCacheBasic() {
    cache, err := newURLCache(
        suite.driveID,
        1*time.Hour,
        svc,
        fault.New(true),
        defaultItemPager)
        driveItemPager,
        fault.New(true))

    require.NoError(t, err, clues.ToCore(err))

    err = cache.refreshCache(ctx)
    require.NoError(t, err, clues.ToCore(err))

    // Launch parallel requests to the cache, one per item
    var wg sync.WaitGroup
    for i := 0; i < len(items); i++ {

@ -54,7 +54,7 @@ type Collection struct {
    jobs []string
    // M365 IDs of the items of this collection
    category      DataCategory
    service       graph.Servicer
    client        api.Sites
    ctrl          control.Options
    betaService   *betaAPI.BetaService
    statusUpdater support.StatusUpdater
@ -63,7 +63,7 @@ type Collection struct {
// NewCollection helper function for creating a Collection
func NewCollection(
    folderPath path.Path,
    service graph.Servicer,
    ac api.Client,
    category DataCategory,
    statusUpdater support.StatusUpdater,
    ctrlOpts control.Options,
@ -72,7 +72,7 @@ func NewCollection(
        fullPath:      folderPath,
        jobs:          make([]string, 0),
        data:          make(chan data.Stream, collectionChannelBufferSize),
        service:       service,
        client:        ac.Sites(),
        statusUpdater: statusUpdater,
        category:      category,
        ctrl:          ctrlOpts,
@ -175,7 +175,10 @@ func (sc *Collection) populate(ctx context.Context, errs *fault.Bus) {
    sc.finishPopulation(ctx, metrics)
}

func (sc *Collection) runPopulate(ctx context.Context, errs *fault.Bus) (support.CollectionMetrics, error) {
func (sc *Collection) runPopulate(
    ctx context.Context,
    errs *fault.Bus,
) (support.CollectionMetrics, error) {
    var (
        err     error
        metrics support.CollectionMetrics
@ -197,7 +200,7 @@ func (sc *Collection) runPopulate(ctx context.Context, errs *fault.Bus) (support
    case List:
        metrics, err = sc.retrieveLists(ctx, writer, colProgress, errs)
    case Pages:
        metrics, err = sc.retrievePages(ctx, writer, colProgress, errs)
        metrics, err = sc.retrievePages(ctx, sc.client, writer, colProgress, errs)
    }

    return metrics, err
@ -216,7 +219,12 @@ func (sc *Collection) retrieveLists(
        el = errs.Local()
    )

    lists, err := loadSiteLists(ctx, sc.service, sc.fullPath.ResourceOwner(), sc.jobs, errs)
    lists, err := loadSiteLists(
        ctx,
        sc.client.Stable,
        sc.fullPath.ResourceOwner(),
        sc.jobs,
        errs)
    if err != nil {
        return metrics, err
    }
@ -262,6 +270,7 @@ func (sc *Collection) retrieveLists(

func (sc *Collection) retrievePages(
    ctx context.Context,
    as api.Sites,
    wtr *kjson.JsonSerializationWriter,
    progress chan<- struct{},
    errs *fault.Bus,
@ -276,7 +285,7 @@ func (sc *Collection) retrievePages(
        return metrics, clues.New("beta service required").WithClues(ctx)
    }

    parent, err := api.GetSite(ctx, sc.service, sc.fullPath.ResourceOwner())
    parent, err := as.GetByID(ctx, sc.fullPath.ResourceOwner())
    if err != nil {
        return metrics, err
    }

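A sketch of constructing a pages collection against the new signature; this assumes a ready api.Client and status updater, and that NewCollection returns *Collection as the surrounding hunks suggest (names are illustrative):

func newPagesCollection(
    dir path.Path,
    ac api.Client,
    su support.StatusUpdater,
    beta *betaAPI.BetaService,
) *Collection {
    // NewCollection now takes the api.Client and keeps ac.Sites() internally
    col := NewCollection(dir, ac, Pages, su, control.Defaults())
    col.betaService = beta

    return col
}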
@ -13,7 +13,7 @@ import (
    "github.com/stretchr/testify/suite"

    "github.com/alcionai/corso/src/internal/common/ptr"
    "github.com/alcionai/corso/src/internal/connector/sharepoint/api"
    betaAPI "github.com/alcionai/corso/src/internal/connector/sharepoint/api"
    spMock "github.com/alcionai/corso/src/internal/connector/sharepoint/mock"
    "github.com/alcionai/corso/src/internal/data"
    "github.com/alcionai/corso/src/internal/tester"
@ -21,12 +21,14 @@ import (
    "github.com/alcionai/corso/src/pkg/control"
    "github.com/alcionai/corso/src/pkg/fault"
    "github.com/alcionai/corso/src/pkg/path"
    "github.com/alcionai/corso/src/pkg/services/m365/api"
)

type SharePointCollectionSuite struct {
    tester.Suite
    siteID string
    creds  account.M365Config
    ac     api.Client
}

func (suite *SharePointCollectionSuite) SetupSuite() {
@ -38,6 +40,11 @@ func (suite *SharePointCollectionSuite) SetupSuite() {
    require.NoError(t, err, clues.ToCore(err))

    suite.creds = m365

    ac, err := api.NewClient(m365)
    require.NoError(t, err, clues.ToCore(err))

    suite.ac = ac
}

func TestSharePointCollectionSuite(t *testing.T) {
@ -67,9 +74,12 @@ func (suite *SharePointCollectionSuite) TestCollection_Item_Read() {
// TestListCollection tests basic functionality to create
// SharePoint collection and to use the data stream channel.
func (suite *SharePointCollectionSuite) TestCollection_Items() {
    tenant := "some"
    user := "user"
    dirRoot := "directory"
    var (
        tenant  = "some"
        user    = "user"
        dirRoot = "directory"
    )

    tables := []struct {
        name, itemName string
        category       DataCategory
@ -130,13 +140,13 @@ func (suite *SharePointCollectionSuite) TestCollection_Items() {
        },
        getItem: func(t *testing.T, itemName string) *Item {
            byteArray := spMock.Page(itemName)
            page, err := api.CreatePageFromBytes(byteArray)
            page, err := betaAPI.CreatePageFromBytes(byteArray)
            require.NoError(t, err, clues.ToCore(err))

            data := &Item{
                id:   itemName,
                data: io.NopCloser(bytes.NewReader(byteArray)),
                info: api.PageInfo(page, int64(len(byteArray))),
                info: betaAPI.PageInfo(page, int64(len(byteArray))),
            }

            return data
@ -151,7 +161,12 @@ func (suite *SharePointCollectionSuite) TestCollection_Items() {
            ctx, flush := tester.NewContext(t)
            defer flush()

            col := NewCollection(test.getDir(t), nil, test.category, nil, control.Defaults())
            col := NewCollection(
                test.getDir(t),
                suite.ac,
                test.category,
                nil,
                control.Defaults())
            col.data <- test.getItem(t, test.itemName)

            readItems := []data.Stream{}

@ -19,6 +19,7 @@ import (
    "github.com/alcionai/corso/src/pkg/logger"
    "github.com/alcionai/corso/src/pkg/path"
    "github.com/alcionai/corso/src/pkg/selectors"
    "github.com/alcionai/corso/src/pkg/services/m365/api"
)

type statusUpdater interface {
@ -29,19 +30,18 @@ type statusUpdater interface {
// for the specified user
func DataCollections(
    ctx context.Context,
    itemClient graph.Requester,
    ac api.Client,
    selector selectors.Selector,
    site idname.Provider,
    metadata []data.RestoreCollection,
    creds account.M365Config,
    serv graph.Servicer,
    su statusUpdater,
    ctrlOpts control.Options,
    errs *fault.Bus,
) ([]data.BackupCollection, *prefixmatcher.StringSetMatcher, error) {
) ([]data.BackupCollection, *prefixmatcher.StringSetMatcher, bool, error) {
    b, err := selector.ToSharePointBackup()
    if err != nil {
        return nil, nil, clues.Wrap(err, "sharePointDataCollection: parsing selector")
        return nil, nil, false, clues.Wrap(err, "sharePointDataCollection: parsing selector")
    }

    ctx = clues.Add(
@ -50,10 +50,11 @@ func DataCollections(
        "site_url", clues.Hide(site.Name()))

    var (
        el          = errs.Local()
        collections = []data.BackupCollection{}
        categories  = map[path.CategoryType]struct{}{}
        ssmb        = prefixmatcher.NewStringSetBuilder()
        el                   = errs.Local()
        collections          = []data.BackupCollection{}
        categories           = map[path.CategoryType]struct{}{}
        ssmb                 = prefixmatcher.NewStringSetBuilder()
        canUsePreviousBackup bool
    )

    for _, scope := range b.Scopes() {
@ -72,7 +73,7 @@ func DataCollections(
        case path.ListsCategory:
            spcs, err = collectLists(
                ctx,
                serv,
                ac,
                creds.AzureTenantID,
                site,
                su,
@ -83,11 +84,14 @@ func DataCollections(
                continue
            }

            // Lists don't make use of previous metadata
            // TODO: Revisit when we add support of lists
            canUsePreviousBackup = true

        case path.LibrariesCategory:
            spcs, err = collectLibraries(
            spcs, canUsePreviousBackup, err = collectLibraries(
                ctx,
                itemClient,
                serv,
                ac.Drives(),
                creds.AzureTenantID,
                site,
                metadata,
@ -105,7 +109,7 @@ func DataCollections(
            spcs, err = collectPages(
                ctx,
                creds,
                serv,
                ac,
                site,
                su,
                ctrlOpts,
@ -114,6 +118,10 @@ func DataCollections(
                el.AddRecoverable(err)
                continue
            }

            // Pages don't make use of previous metadata
            // TODO: Revisit when we add support of pages
            canUsePreviousBackup = true
        }

        collections = append(collections, spcs...)
@ -133,18 +141,18 @@ func DataCollections(
            su.UpdateStatus,
            errs)
        if err != nil {
            return nil, nil, err
            return nil, nil, false, err
        }

        collections = append(collections, baseCols...)
    }

    return collections, ssmb.ToReader(), el.Failure()
    return collections, ssmb.ToReader(), canUsePreviousBackup, el.Failure()
}

func collectLists(
    ctx context.Context,
    serv graph.Servicer,
    ac api.Client,
    tenantID string,
    site idname.Provider,
    updater statusUpdater,
@ -158,7 +166,7 @@ func collectLists(
        spcs = make([]data.BackupCollection, 0)
    )

    lists, err := preFetchLists(ctx, serv, site.ID())
    lists, err := preFetchLists(ctx, ac.Stable, site.ID())
    if err != nil {
        return nil, err
    }
@ -179,7 +187,12 @@ func collectLists(
        el.AddRecoverable(clues.Wrap(err, "creating list collection path").WithClues(ctx))
    }

    collection := NewCollection(dir, serv, List, updater.UpdateStatus, ctrlOpts)
    collection := NewCollection(
        dir,
        ac,
        List,
        updater.UpdateStatus,
        ctrlOpts)
    collection.AddJob(tuple.id)

    spcs = append(spcs, collection)
@ -192,8 +205,7 @@ func collectLists(
// all the drives associated with the site.
func collectLibraries(
    ctx context.Context,
    itemClient graph.Requester,
    serv graph.Servicer,
    ad api.Drives,
    tenantID string,
    site idname.Provider,
    metadata []data.RestoreCollection,
@ -202,28 +214,26 @@ func collectLibraries(
    updater statusUpdater,
    ctrlOpts control.Options,
    errs *fault.Bus,
) ([]data.BackupCollection, error) {
) ([]data.BackupCollection, bool, error) {
    logger.Ctx(ctx).Debug("creating SharePoint Library collections")

    var (
        collections = []data.BackupCollection{}
        colls       = onedrive.NewCollections(
            itemClient,
            &libraryBackupHandler{ad},
            tenantID,
            site.ID(),
            onedrive.SharePointSource,
            folderMatcher{scope},
            serv,
            updater.UpdateStatus,
            ctrlOpts)
    )

    odcs, err := colls.Get(ctx, metadata, ssmb, errs)
    odcs, canUsePreviousBackup, err := colls.Get(ctx, metadata, ssmb, errs)
    if err != nil {
        return nil, graph.Wrap(ctx, err, "getting library")
        return nil, false, graph.Wrap(ctx, err, "getting library")
    }

    return append(collections, odcs...), nil
    return append(collections, odcs...), canUsePreviousBackup, nil
}

// collectPages constructs a sharepoint Collections struct and Get()s the associated
@ -231,7 +241,7 @@ func collectLibraries(
func collectPages(
    ctx context.Context,
    creds account.M365Config,
    serv graph.Servicer,
    ac api.Client,
    site idname.Provider,
    updater statusUpdater,
    ctrlOpts control.Options,
@ -277,7 +287,12 @@ func collectPages(
    el.AddRecoverable(clues.Wrap(err, "creating page collection path").WithClues(ctx))
    }

    collection := NewCollection(dir, serv, Pages, updater.UpdateStatus, ctrlOpts)
    collection := NewCollection(
        dir,
        ac,
        Pages,
        updater.UpdateStatus,
        ctrlOpts)
    collection.betaService = betaService
    collection.AddJob(tuple.ID)


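Call-site sketch for the widened return: the new boolean gates whether incremental state from the previous backup may be reused. The wrapper below is hypothetical; parameter names are taken from the signature in the hunks above:

func runSharePointBackup(
    ctx context.Context,
    ac api.Client,
    selector selectors.Selector,
    site idname.Provider,
    metadata []data.RestoreCollection,
    creds account.M365Config,
    su statusUpdater,
    ctrlOpts control.Options,
    errs *fault.Bus,
) ([]data.BackupCollection, error) {
    cols, ssmb, canUsePreviousBackup, err := DataCollections(
        ctx, ac, selector, site, metadata, creds, su, ctrlOpts, errs)
    if err != nil {
        return nil, err
    }

    _ = ssmb // the prefix matcher is consumed elsewhere

    if !canUsePreviousBackup {
        // downstream persistence treats this as a full, non-incremental run
        logger.Ctx(ctx).Info("previous backup unusable; forcing full backup")
    }

    return cols, nil
}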
@ -10,21 +10,24 @@ import (
    "github.com/stretchr/testify/suite"

    "github.com/alcionai/corso/src/internal/common/idname/mock"
    "github.com/alcionai/corso/src/internal/connector/graph"
    "github.com/alcionai/corso/src/internal/connector/onedrive"
    odConsts "github.com/alcionai/corso/src/internal/connector/onedrive/consts"
    "github.com/alcionai/corso/src/internal/tester"
    "github.com/alcionai/corso/src/pkg/control"
    "github.com/alcionai/corso/src/pkg/fault"
    "github.com/alcionai/corso/src/pkg/path"
    "github.com/alcionai/corso/src/pkg/selectors"
    "github.com/alcionai/corso/src/pkg/services/m365/api"
)

// ---------------------------------------------------------------------------
// consts
// ---------------------------------------------------------------------------

const (
    testBaseDrivePath = "drives/driveID1/root:"
)
var testBaseDrivePath = path.Builder{}.Append(
    odConsts.DrivesPathDir,
    "driveID1",
    odConsts.RootPathDir)

type testFolderMatcher struct {
    scope selectors.SharePointScope
@ -34,8 +37,8 @@ func (fm testFolderMatcher) IsAny() bool {
    return fm.scope.IsAny(selectors.SharePointLibraryFolder)
}

func (fm testFolderMatcher) Matches(path string) bool {
    return fm.scope.Matches(selectors.SharePointLibraryFolder, path)
func (fm testFolderMatcher) Matches(p string) bool {
    return fm.scope.Matches(selectors.SharePointLibraryFolder, p)
}

// ---------------------------------------------------------------------------
@ -54,11 +57,15 @@ func (suite *SharePointLibrariesUnitSuite) TestUpdateCollections() {
    anyFolder := (&selectors.SharePointBackup{}).LibraryFolders(selectors.Any())[0]

    const (
        tenant  = "tenant"
        site    = "site"
        driveID = "driveID1"
        tenantID = "tenant"
        site     = "site"
        driveID  = "driveID1"
    )

    pb := path.Builder{}.Append(testBaseDrivePath.Elements()...)
    ep, err := libraryBackupHandler{}.CanonicalPath(pb, tenantID, site)
    require.NoError(suite.T(), err, clues.ToCore(err))

    tests := []struct {
        testCase string
        items    []models.DriveItemable
@ -73,21 +80,16 @@ func (suite *SharePointLibrariesUnitSuite) TestUpdateCollections() {
    {
        testCase: "Single File",
        items: []models.DriveItemable{
            driveRootItem("root"),
            driveItem("file", testBaseDrivePath, "root", true),
            driveRootItem(odConsts.RootID),
            driveItem("file", testBaseDrivePath.String(), odConsts.RootID, true),
        },
        scope:                 anyFolder,
        expect:                assert.NoError,
        expectedCollectionIDs: []string{"root"},
        expectedCollectionPaths: expectedPathAsSlice(
            suite.T(),
            tenant,
            site,
            testBaseDrivePath,
        ),
        expectedItemCount:      1,
        expectedFileCount:      1,
        expectedContainerCount: 1,
        scope:                   anyFolder,
        expect:                  assert.NoError,
        expectedCollectionIDs:   []string{odConsts.RootID},
        expectedCollectionPaths: []string{ep.String()},
        expectedItemCount:       1,
        expectedFileCount:       1,
        expectedContainerCount:  1,
    },
}

@ -111,12 +113,10 @@ func (suite *SharePointLibrariesUnitSuite) TestUpdateCollections() {
    )

    c := onedrive.NewCollections(
        graph.NewNoTimeoutHTTPWrapper(),
        tenant,
        &libraryBackupHandler{api.Drives{}},
        tenantID,
        site,
        onedrive.SharePointSource,
        testFolderMatcher{test.scope},
        &MockGraphService{},
        nil,
        control.Defaults())

@ -203,13 +203,16 @@ func (suite *SharePointPagesSuite) TestCollectPages() {
        a = tester.NewM365Account(t)
    )

    account, err := a.M365Config()
    creds, err := a.M365Config()
    require.NoError(t, err, clues.ToCore(err))

    ac, err := api.NewClient(creds)
    require.NoError(t, err, clues.ToCore(err))

    col, err := collectPages(
        ctx,
        account,
        nil,
        creds,
        ac,
        mock.NewProvider(siteID, siteID),
        &MockGraphService{},
        control.Defaults(),

@ -8,7 +8,6 @@ import (
    "github.com/stretchr/testify/require"

    "github.com/alcionai/corso/src/internal/connector/graph"
    "github.com/alcionai/corso/src/internal/connector/onedrive"
    "github.com/alcionai/corso/src/internal/connector/support"
    "github.com/alcionai/corso/src/pkg/account"
)
@ -56,16 +55,3 @@ func createTestService(t *testing.T, credentials account.M365Config) *graph.Serv

    return graph.NewService(adapter)
}

func expectedPathAsSlice(t *testing.T, tenant, user string, rest ...string) []string {
    res := make([]string, 0, len(rest))

    for _, r := range rest {
        p, err := onedrive.GetCanonicalPath(r, tenant, user, onedrive.SharePointSource)
        require.NoError(t, err, clues.ToCore(err))

        res = append(res, p.String())
    }

    return res
}

src/internal/connector/sharepoint/library_handler.go (new file, +275)
@ -0,0 +1,275 @@
package sharepoint

import (
    "context"
    "net/http"
    "strings"

    "github.com/microsoftgraph/msgraph-sdk-go/drives"
    "github.com/microsoftgraph/msgraph-sdk-go/models"

    "github.com/alcionai/corso/src/internal/common/ptr"
    "github.com/alcionai/corso/src/internal/connector/onedrive"
    odConsts "github.com/alcionai/corso/src/internal/connector/onedrive/consts"
    "github.com/alcionai/corso/src/pkg/backup/details"
    "github.com/alcionai/corso/src/pkg/path"
    "github.com/alcionai/corso/src/pkg/services/m365/api"
)

var _ onedrive.BackupHandler = &libraryBackupHandler{}

type libraryBackupHandler struct {
    ac api.Drives
}

func (h libraryBackupHandler) Get(
    ctx context.Context,
    url string,
    headers map[string]string,
) (*http.Response, error) {
    return h.ac.Get(ctx, url, headers)
}

func (h libraryBackupHandler) PathPrefix(
    tenantID, resourceOwner, driveID string,
) (path.Path, error) {
    return path.Build(
        tenantID,
        resourceOwner,
        path.SharePointService,
        path.LibrariesCategory,
        false,
        odConsts.DrivesPathDir,
        driveID,
        odConsts.RootPathDir)
}

func (h libraryBackupHandler) CanonicalPath(
    folders *path.Builder,
    tenantID, resourceOwner string,
) (path.Path, error) {
    return folders.ToDataLayerSharePointPath(tenantID, resourceOwner, path.LibrariesCategory, false)
}

func (h libraryBackupHandler) ServiceCat() (path.ServiceType, path.CategoryType) {
    return path.SharePointService, path.LibrariesCategory
}

func (h libraryBackupHandler) NewDrivePager(
    resourceOwner string,
    fields []string,
) api.DrivePager {
    return h.ac.NewSiteDrivePager(resourceOwner, fields)
}

func (h libraryBackupHandler) NewItemPager(
    driveID, link string,
    fields []string,
) api.DriveItemEnumerator {
    return h.ac.NewItemPager(driveID, link, fields)
}

func (h libraryBackupHandler) AugmentItemInfo(
    dii details.ItemInfo,
    item models.DriveItemable,
    size int64,
    parentPath *path.Builder,
) details.ItemInfo {
    return augmentItemInfo(dii, item, size, parentPath)
}

// constructWebURL is a helper function for recreating the webURL
// for the originating SharePoint site. Uses the additionalData map
// from a models.DriveItemable that possesses a downloadURL within the map.
// Returns "" if the map is nil or key is not present.
func constructWebURL(adtl map[string]any) string {
    var (
        desiredKey = "@microsoft.graph.downloadUrl"
        sep        = `/_layouts`
        url        string
    )

    if adtl == nil {
        return url
    }

    r := adtl[desiredKey]
    point, ok := r.(*string)

    if !ok {
        return url
    }

    value := ptr.Val(point)
    if len(value) == 0 {
        return url
    }

    temp := strings.Split(value, sep)
    url = temp[0]

    return url
}

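Behavior sketch for constructWebURL: everything from the /_layouts separator onward is dropped, leaving the site URL. The values below are made up, and the example assumes a fmt import in a test file of the same package:

func Example_constructWebURL() {
    u := "https://contoso.sharepoint.com/sites/marketing/_layouts/15/download.aspx?guid=abc"
    adtl := map[string]any{"@microsoft.graph.downloadUrl": &u}

    fmt.Println(constructWebURL(adtl))
    fmt.Println(constructWebURL(nil) == "") // nil map yields ""
    // Output:
    // https://contoso.sharepoint.com/sites/marketing
    // true
}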
func (h libraryBackupHandler) FormatDisplayPath(
    driveName string,
    pb *path.Builder,
) string {
    return "/" + driveName + "/" + pb.String()
}

func (h libraryBackupHandler) NewLocationIDer(
    driveID string,
    elems ...string,
) details.LocationIDer {
    return details.NewSharePointLocationIDer(driveID, elems...)
}

func (h libraryBackupHandler) GetItemPermission(
    ctx context.Context,
    driveID, itemID string,
) (models.PermissionCollectionResponseable, error) {
    return h.ac.GetItemPermission(ctx, driveID, itemID)
}

func (h libraryBackupHandler) GetItem(
    ctx context.Context,
    driveID, itemID string,
) (models.DriveItemable, error) {
    return h.ac.GetItem(ctx, driveID, itemID)
}

// ---------------------------------------------------------------------------
// Restore
// ---------------------------------------------------------------------------

var _ onedrive.RestoreHandler = &libraryRestoreHandler{}

type libraryRestoreHandler struct {
    ac api.Drives
}

func NewRestoreHandler(ac api.Client) *libraryRestoreHandler {
    return &libraryRestoreHandler{ac.Drives()}
}

func (h libraryRestoreHandler) AugmentItemInfo(
    dii details.ItemInfo,
    item models.DriveItemable,
    size int64,
    parentPath *path.Builder,
) details.ItemInfo {
    return augmentItemInfo(dii, item, size, parentPath)
}

func (h libraryRestoreHandler) NewItemContentUpload(
    ctx context.Context,
    driveID, itemID string,
) (models.UploadSessionable, error) {
    return h.ac.NewItemContentUpload(ctx, driveID, itemID)
}

func (h libraryRestoreHandler) DeleteItemPermission(
    ctx context.Context,
    driveID, itemID, permissionID string,
) error {
    return h.ac.DeleteItemPermission(ctx, driveID, itemID, permissionID)
}

func (h libraryRestoreHandler) PostItemPermissionUpdate(
    ctx context.Context,
    driveID, itemID string,
    body *drives.ItemItemsItemInvitePostRequestBody,
) (drives.ItemItemsItemInviteResponseable, error) {
    return h.ac.PostItemPermissionUpdate(ctx, driveID, itemID, body)
}

func (h libraryRestoreHandler) PostItemInContainer(
    ctx context.Context,
    driveID, parentFolderID string,
    newItem models.DriveItemable,
) (models.DriveItemable, error) {
    return h.ac.PostItemInContainer(ctx, driveID, parentFolderID, newItem)
}

func (h libraryRestoreHandler) GetFolderByName(
    ctx context.Context,
    driveID, parentFolderID, folderName string,
) (models.DriveItemable, error) {
    return h.ac.GetFolderByName(ctx, driveID, parentFolderID, folderName)
}

func (h libraryRestoreHandler) GetRootFolder(
    ctx context.Context,
    driveID string,
) (models.DriveItemable, error) {
    return h.ac.GetRootFolder(ctx, driveID)
}

// ---------------------------------------------------------------------------
// Common
// ---------------------------------------------------------------------------

func augmentItemInfo(
    dii details.ItemInfo,
    item models.DriveItemable,
    size int64,
    parentPath *path.Builder,
) details.ItemInfo {
    var driveName, siteID, driveID, weburl, creatorEmail string

    // TODO: we rely on this info for details/restore lookups,
    // so if it's nil we have an issue, and will need an alternative
    // way to source the data.

    if item.GetCreatedBy() != nil && item.GetCreatedBy().GetUser() != nil {
        // User is sometimes not available when created via some
        // external applications (like backup/restore solutions)
        additionalData := item.GetCreatedBy().GetUser().GetAdditionalData()

        ed, ok := additionalData["email"]
        if !ok {
            ed = additionalData["displayName"]
        }

        if ed != nil {
            creatorEmail = *ed.(*string)
        }
    }

    gsi := item.GetSharepointIds()
    if gsi != nil {
        siteID = ptr.Val(gsi.GetSiteId())
        weburl = ptr.Val(gsi.GetSiteUrl())

        if len(weburl) == 0 {
            weburl = constructWebURL(item.GetAdditionalData())
        }
    }

    if item.GetParentReference() != nil {
        driveID = ptr.Val(item.GetParentReference().GetDriveId())
        driveName = strings.TrimSpace(ptr.Val(item.GetParentReference().GetName()))
    }

    var pps string
    if parentPath != nil {
        pps = parentPath.String()
    }

    dii.SharePoint = &details.SharePointInfo{
        Created:    ptr.Val(item.GetCreatedDateTime()),
        DriveID:    driveID,
        DriveName:  driveName,
        ItemName:   ptr.Val(item.GetName()),
        ItemType:   details.SharePointLibrary,
        Modified:   ptr.Val(item.GetLastModifiedDateTime()),
        Owner:      creatorEmail,
        ParentPath: pps,
        SiteID:     siteID,
        Size:       size,
        WebURL:     weburl,
    }

    return dii
}
src/internal/connector/sharepoint/library_handler_test.go (new file, +58)
@ -0,0 +1,58 @@
package sharepoint

import (
    "testing"

    "github.com/alcionai/clues"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/suite"

    "github.com/alcionai/corso/src/internal/tester"
    "github.com/alcionai/corso/src/pkg/path"
)

type LibraryBackupHandlerUnitSuite struct {
    tester.Suite
}

func TestLibraryBackupHandlerUnitSuite(t *testing.T) {
    suite.Run(t, &LibraryBackupHandlerUnitSuite{Suite: tester.NewUnitSuite(t)})
}

func (suite *LibraryBackupHandlerUnitSuite) TestCanonicalPath() {
    tenantID, resourceOwner := "tenant", "resourceOwner"

    table := []struct {
        name      string
        expect    string
        expectErr assert.ErrorAssertionFunc
    }{
        {
            name:      "sharepoint",
            expect:    "tenant/sharepoint/resourceOwner/libraries/prefix",
            expectErr: assert.NoError,
        },
    }
    for _, test := range table {
        suite.Run(test.name, func() {
            t := suite.T()
            h := libraryBackupHandler{}
            p := path.Builder{}.Append("prefix")

            result, err := h.CanonicalPath(p, tenantID, resourceOwner)
            test.expectErr(t, err, clues.ToCore(err))

            if result != nil {
                assert.Equal(t, test.expect, result.String())
            }
        })
    }
}

func (suite *LibraryBackupHandlerUnitSuite) TestServiceCat() {
    t := suite.T()

    s, c := libraryBackupHandler{}.ServiceCat()
    assert.Equal(t, path.SharePointService, s)
    assert.Equal(t, path.LibrariesCategory, c)
}
@ -6,7 +6,6 @@ import (
    "fmt"
    "io"
    "runtime/trace"
    "sync"

    "github.com/alcionai/clues"
    "github.com/microsoftgraph/msgraph-sdk-go/models"
@ -18,12 +17,12 @@ import (
    "github.com/alcionai/corso/src/internal/connector/support"
    "github.com/alcionai/corso/src/internal/data"
    "github.com/alcionai/corso/src/internal/diagnostics"
    "github.com/alcionai/corso/src/pkg/account"
    "github.com/alcionai/corso/src/pkg/backup/details"
    "github.com/alcionai/corso/src/pkg/control"
    "github.com/alcionai/corso/src/pkg/fault"
    "github.com/alcionai/corso/src/pkg/logger"
    "github.com/alcionai/corso/src/pkg/path"
    "github.com/alcionai/corso/src/pkg/services/m365/api"
)

//----------------------------------------------------------------------------
@ -43,13 +42,11 @@ import (
func RestoreCollections(
    ctx context.Context,
    backupVersion int,
    creds account.M365Config,
    service graph.Servicer,
    ac api.Client,
    dest control.RestoreDestination,
    opts control.Options,
    dcs []data.RestoreCollection,
    deets *details.Builder,
    pool *sync.Pool,
    errs *fault.Bus,
) (*support.ConnectorOperationStatus, error) {
    var (
@ -83,22 +80,19 @@ func RestoreCollections(
    case path.LibrariesCategory:
        metrics, err = onedrive.RestoreCollection(
            ictx,
            creds,
            libraryRestoreHandler{ac.Drives()},
            backupVersion,
            service,
            dc,
            caches,
            onedrive.SharePointSource,
            dest.ContainerName,
            deets,
            opts.RestorePermissions,
            pool,
            errs)

    case path.ListsCategory:
        metrics, err = RestoreListCollection(
            ictx,
            service,
            ac.Stable,
            dc,
            dest.ContainerName,
            deets,
@ -107,7 +101,7 @@ func RestoreCollections(
    case path.PagesCategory:
        metrics, err = RestorePageCollection(
            ictx,
            creds,
            ac.Stable,
            dc,
            dest.ContainerName,
            deets,
@ -292,7 +286,7 @@ func RestoreListCollection(
// - the context cancellation station. True iff context is canceled.
func RestorePageCollection(
    ctx context.Context,
    creds account.M365Config,
    gs graph.Servicer,
    dc data.RestoreCollection,
    restoreContainerName string,
    deets *details.Builder,
@ -309,17 +303,9 @@ func RestorePageCollection(

    defer end()

    adpt, err := graph.CreateAdapter(
        creds.AzureTenantID,
        creds.AzureClientID,
        creds.AzureClientSecret)
    if err != nil {
        return metrics, clues.Wrap(err, "constructing graph client")
    }

    var (
        el      = errs.Local()
        service = betaAPI.NewBetaService(adpt)
        service = betaAPI.NewBetaService(gs.Adapter())
        items   = dc.Items(ctx, errs)
    )


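The net effect of the RestorePageCollection hunk: the beta service is derived from the caller's servicer instead of a freshly built adapter, so no extra credential exchange happens per restore. A one-line sketch of the new wiring (helper name is illustrative):

func pageRestoreService(gs graph.Servicer) *betaAPI.BetaService {
    // reuse the existing adapter; no new graph client construction
    return betaAPI.NewBetaService(gs.Adapter())
}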
@ -70,19 +70,24 @@ type BackupCollection interface {
// RestoreCollection is an extension of Collection that is used during restores.
type RestoreCollection interface {
    Collection
    FetchItemByNamer
}

type FetchItemByNamer interface {
    // Fetch retrieves an item with the given name from the Collection if it
    // exists. Items retrieved with Fetch may still appear in the channel returned
    // by Items().
    Fetch(ctx context.Context, name string) (Stream, error)
    FetchItemByName(ctx context.Context, name string) (Stream, error)
}

// NotFoundRestoreCollection is a wrapper for a Collection that returns
// NoFetchRestoreCollection is a wrapper for a Collection that returns
// ErrNotFound for all Fetch calls.
type NotFoundRestoreCollection struct {
type NoFetchRestoreCollection struct {
    Collection
    FetchItemByNamer
}

func (c NotFoundRestoreCollection) Fetch(context.Context, string) (Stream, error) {
func (c NoFetchRestoreCollection) FetchItemByName(context.Context, string) (Stream, error) {
    return nil, ErrNotFound
}


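Usage sketch for the renamed wrapper, written as if inside the data package (the helper name is illustrative, and an errors import is assumed): any per-item lookup deliberately reports ErrNotFound.

func wrapNoFetch(ctx context.Context, c Collection) RestoreCollection {
    rc := NoFetchRestoreCollection{Collection: c}

    // the wrapper never serves individual items
    if _, err := rc.FetchItemByName(ctx, "any-name"); errors.Is(err, ErrNotFound) {
        // expected
    }

    return rc
}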
@ -20,38 +20,48 @@ var (

type kopiaDataCollection struct {
    path            path.Path
    streams         []data.Stream
    dir             fs.Directory
    items           []string
    counter         ByteCounter
    expectedVersion uint32
}

func (kdc *kopiaDataCollection) addStream(
    ctx context.Context,
    name string,
) error {
    s, err := kdc.Fetch(ctx, name)
    if err != nil {
        return err
    }

    kdc.streams = append(kdc.streams, s)

    return nil
}

func (kdc *kopiaDataCollection) Items(
    ctx context.Context,
    _ *fault.Bus, // unused, just matching the interface
    errs *fault.Bus,
) <-chan data.Stream {
    res := make(chan data.Stream)
    var (
        res       = make(chan data.Stream)
        el        = errs.Local()
        loadCount = 0
    )

    go func() {
        defer close(res)

        for _, s := range kdc.streams {
        for _, item := range kdc.items {
            s, err := kdc.FetchItemByName(ctx, item)
            if err != nil {
                el.AddRecoverable(clues.Wrap(err, "fetching item").
                    WithClues(ctx).
                    Label(fault.LabelForceNoBackupCreation))

                continue
            }

            loadCount++
            if loadCount%1000 == 0 {
                logger.Ctx(ctx).Infow(
                    "loading items from kopia",
                    "loaded_items", loadCount)
            }

            res <- s
        }

        logger.Ctx(ctx).Infow(
            "done loading items from kopia",
            "loaded_items", loadCount)
    }()

    return res
@ -64,7 +74,7 @@ func (kdc kopiaDataCollection) FullPath() path.Path {
// Fetch returns the file with the given name from the collection as a
// data.Stream. Returns a data.ErrNotFound error if the file isn't in the
// collection.
func (kdc kopiaDataCollection) Fetch(
func (kdc kopiaDataCollection) FetchItemByName(
    ctx context.Context,
    name string,
) (data.Stream, error) {

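Consumption sketch for the now-lazy Items: per-item fetch failures land on the fault bus as recoverable entries instead of aborting collection construction. The helper name is illustrative, and the logger call assumes the sugared Infow form used elsewhere in this diff:

func drainCollection(ctx context.Context, kdc *kopiaDataCollection) error {
    errs := fault.New(false)

    // items are fetched on demand as the channel is drained
    for s := range kdc.Items(ctx, errs) {
        _ = s // a real consumer would read s.ToReader() here
    }

    for _, recovered := range errs.Recovered() {
        logger.Ctx(ctx).Infow("item skipped", "err", recovered)
    }

    return errs.Failure()
}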
@ -165,15 +165,15 @@ func (suite *KopiaDataCollectionUnitSuite) TestReturnsStreams() {
    {
        name: "SingleStream",
        uuidsAndErrors: map[string]assert.ErrorAssertionFunc{
            uuids[0]: assert.NoError,
            uuids[0]: nil,
        },
        expectedLoaded: []loadedData{files[0]},
    },
    {
        name: "MultipleStreams",
        uuidsAndErrors: map[string]assert.ErrorAssertionFunc{
            uuids[0]: assert.NoError,
            uuids[1]: assert.NoError,
            uuids[0]: nil,
            uuids[1]: nil,
        },
        expectedLoaded: files,
    },
@ -181,7 +181,7 @@ func (suite *KopiaDataCollectionUnitSuite) TestReturnsStreams() {
        name: "Some Not Found Errors",
        uuidsAndErrors: map[string]assert.ErrorAssertionFunc{
            fileLookupErrName: assert.Error,
            uuids[0]:          assert.NoError,
            uuids[0]:          nil,
        },
        expectedLoaded: []loadedData{files[0]},
    },
@ -189,7 +189,7 @@ func (suite *KopiaDataCollectionUnitSuite) TestReturnsStreams() {
        name: "Some Not A File Errors",
        uuidsAndErrors: map[string]assert.ErrorAssertionFunc{
            notFileErrName: assert.Error,
            uuids[0]:       assert.NoError,
            uuids[0]:       nil,
        },
        expectedLoaded: []loadedData{files[0]},
    },
@ -197,7 +197,7 @@ func (suite *KopiaDataCollectionUnitSuite) TestReturnsStreams() {
        name: "Some Open Errors",
        uuidsAndErrors: map[string]assert.ErrorAssertionFunc{
            fileOpenErrName: assert.Error,
            uuids[0]:        assert.NoError,
            uuids[0]:        nil,
        },
        expectedLoaded: []loadedData{files[0]},
    },
@ -217,20 +217,27 @@ func (suite *KopiaDataCollectionUnitSuite) TestReturnsStreams() {
    ctx, flush := tester.NewContext(t)
    defer flush()

    items := []string{}
    errs := []assert.ErrorAssertionFunc{}

    for uuid, err := range test.uuidsAndErrors {
        if err != nil {
            errs = append(errs, err)
        }

        items = append(items, uuid)
    }

    c := kopiaDataCollection{
        dir:             getLayout(),
        path:            nil,
        items:           items,
        expectedVersion: serializationVersion,
    }

    for uuid, expectErr := range test.uuidsAndErrors {
        err := c.addStream(ctx, uuid)
        expectErr(t, err, "adding stream to collection", clues.ToCore(err))
    }

    var (
        found []loadedData
        bus   = fault.New(true)
        bus   = fault.New(false)
    )

    for returnedStream := range c.Items(ctx, bus) {

@ -256,7 +263,12 @@ func (suite *KopiaDataCollectionUnitSuite) TestReturnsStreams() {
        f.size = ss.Size()
    }

    assert.Empty(t, bus.Recovered(), "expected no recoverable errors")
    // We expect the items to be fetched in the order they are
    // in the struct or the errors will not line up
    for i, err := range bus.Recovered() {
        assert.True(t, errs[i](t, err), "expected error", clues.ToCore(err))
    }

    assert.NoError(t, bus.Failure(), "expected no hard failures")

    assert.ElementsMatch(t, test.expectedLoaded, found, "loaded items")
@ -264,7 +276,7 @@ func (suite *KopiaDataCollectionUnitSuite) TestReturnsStreams() {
    }
}

func (suite *KopiaDataCollectionUnitSuite) TestFetch() {
func (suite *KopiaDataCollectionUnitSuite) TestFetchItemByName() {
    var (
        tenant = "a-tenant"
        user   = "a-user"
@ -381,7 +393,7 @@ func (suite *KopiaDataCollectionUnitSuite) TestFetch() {
        expectedVersion: serializationVersion,
    }

    s, err := col.Fetch(ctx, test.inputName)
    s, err := col.FetchItemByName(ctx, test.inputName)

    test.lookupErr(t, err)


@ -86,7 +86,7 @@ func (mc *mergeCollection) Items(
// match found or the first error that is not data.ErrNotFound. If multiple
// collections have the requested item, the instance in the collection with the
// lexicographically smallest storage path is returned.
func (mc *mergeCollection) Fetch(
func (mc *mergeCollection) FetchItemByName(
    ctx context.Context,
    name string,
) (data.Stream, error) {
@ -99,7 +99,7 @@ func (mc *mergeCollection) Fetch(

    logger.Ctx(ictx).Debug("looking for item in merged collection")

    s, err := c.Fetch(ictx, name)
    s, err := c.FetchItemByName(ictx, name)
    if err == nil {
        return s, nil
    } else if err != nil && !errors.Is(err, data.ErrNotFound) {

@ -76,8 +76,8 @@ func (suite *MergeCollectionUnitSuite) TestItems() {

    // Not testing fetch here so safe to use this wrapper.
    cols := []data.RestoreCollection{
        data.NotFoundRestoreCollection{Collection: c1},
        data.NotFoundRestoreCollection{Collection: c2},
        data.NoFetchRestoreCollection{Collection: c1},
        data.NoFetchRestoreCollection{Collection: c2},
    }

    dc := &mergeCollection{fullPath: pth}
@ -123,7 +123,7 @@ func (suite *MergeCollectionUnitSuite) TestAddCollection_DifferentPathFails() {
    assert.Error(t, err, clues.ToCore(err))
}

func (suite *MergeCollectionUnitSuite) TestFetch() {
func (suite *MergeCollectionUnitSuite) TestFetchItemByName() {
    var (
        fileData1 = []byte("abcdefghijklmnopqrstuvwxyz")
        fileData2 = []byte("zyxwvutsrqponmlkjihgfedcba")
@ -275,7 +275,7 @@ func (suite *MergeCollectionUnitSuite) TestFetch() {
        require.NoError(t, err, "adding collection", clues.ToCore(err))
    }

    s, err := dc.Fetch(ctx, test.fileName)
    s, err := dc.FetchItemByName(ctx, test.fileName)
    test.expectError(t, err, clues.ToCore(err))

    if err != nil {

@ -392,9 +392,8 @@ func loadDirsAndItems(
    bus *fault.Bus,
) ([]data.RestoreCollection, error) {
    var (
        el        = bus.Local()
        res       = make([]data.RestoreCollection, 0, len(toLoad))
        loadCount = 0
        el  = bus.Local()
        res = make([]data.RestoreCollection, 0, len(toLoad))
    )

    for _, col := range toLoad {
@ -426,6 +425,7 @@ func loadDirsAndItems(
        dc := &kopiaDataCollection{
            path:            col.restorePath,
            dir:             dir,
            items:           dirItems.items,
            counter:         bcounter,
            expectedVersion: serializationVersion,
        }
@ -437,35 +437,9 @@ func loadDirsAndItems(

            continue
        }

        for _, item := range dirItems.items {
            if el.Failure() != nil {
                return nil, el.Failure()
            }

            err := dc.addStream(ictx, item)
            if err != nil {
                el.AddRecoverable(clues.Wrap(err, "loading item").
                    WithClues(ictx).
                    Label(fault.LabelForceNoBackupCreation))

                continue
            }

            loadCount++
            if loadCount%1000 == 0 {
                logger.Ctx(ctx).Infow(
                    "loading items from kopia",
                    "loaded_items", loadCount)
            }
        }
    }
    }

    logger.Ctx(ctx).Infow(
        "done loading items from kopia",
        "loaded_items", loadCount)

    return res, el.Failure()
}


@ -843,16 +843,28 @@ func (suite *KopiaIntegrationSuite) TestBackupCollections_ReaderError() {

    ic := i64counter{}

    _, err = suite.w.ProduceRestoreCollections(
    dcs, err := suite.w.ProduceRestoreCollections(
        suite.ctx,
        string(stats.SnapshotID),
        toRestorePaths(t, failedPath),
        &ic,
        fault.New(true))
    assert.NoError(t, err, "error producing restore collections")

    require.Len(t, dcs, 1, "number of restore collections")

    errs := fault.New(true)
    items := dcs[0].Items(suite.ctx, errs)

    // Get all the items from channel
    //nolint:revive
    for range items {
    }

    // Files that had an error shouldn't make a dir entry in kopia. If they do we
    // may run into kopia-assisted incrementals issues because only mod time and
    // not file size is checked for StreamingFiles.
    assert.ErrorIs(t, err, data.ErrNotFound, "errored file is restorable", clues.ToCore(err))
    assert.ErrorIs(t, errs.Failure(), data.ErrNotFound, "errored file is restorable", clues.ToCore(err))
}

type backedupFile struct {
@ -1223,13 +1235,25 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestBackupExcludeItem() {

    ic := i64counter{}

    _, err = suite.w.ProduceRestoreCollections(
    dcs, err := suite.w.ProduceRestoreCollections(
        suite.ctx,
        string(stats.SnapshotID),
        toRestorePaths(t, suite.files[suite.testPath1.String()][0].itemPath),
        &ic,
        fault.New(true))
    test.restoreCheck(t, err, clues.ToCore(err))

    assert.NoError(t, err, "errors producing collection", clues.ToCore(err))
    require.Len(t, dcs, 1, "unexpected number of restore collections")

    errs := fault.New(true)
    items := dcs[0].Items(suite.ctx, errs)

    // Get all the items from channel
    //nolint:revive
    for range items {
    }

    test.restoreCheck(t, errs.Failure(), errs)
    })
    }
}
@ -1248,18 +1272,20 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestProduceRestoreCollections() {
// suite's map of files. Files that are not in the suite's map are assumed to
// generate errors and not be in the output.
table := []struct {
    name                string
    inputPaths          []path.Path
    expectedCollections int
    expectedErr         assert.ErrorAssertionFunc
    name                  string
    inputPaths            []path.Path
    expectedCollections   int
    expectedErr           assert.ErrorAssertionFunc
    expectedCollectionErr assert.ErrorAssertionFunc
}{
    {
        name: "SingleItem",
        inputPaths: []path.Path{
            suite.files[suite.testPath1.String()][0].itemPath,
        },
        expectedCollections: 1,
        expectedErr:         assert.NoError,
        expectedCollections:   1,
        expectedErr:           assert.NoError,
        expectedCollectionErr: assert.NoError,
    },
    {
        name: "MultipleItemsSameCollection",
@ -1267,8 +1293,9 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestProduceRestoreCollections() {
            suite.files[suite.testPath1.String()][0].itemPath,
            suite.files[suite.testPath1.String()][1].itemPath,
        },
        expectedCollections: 1,
        expectedErr:         assert.NoError,
        expectedCollections:   1,
        expectedErr:           assert.NoError,
        expectedCollectionErr: assert.NoError,
    },
    {
        name: "MultipleItemsDifferentCollections",
@ -1276,8 +1303,9 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestProduceRestoreCollections() {
            suite.files[suite.testPath1.String()][0].itemPath,
            suite.files[suite.testPath2.String()][0].itemPath,
        },
        expectedCollections: 2,
        expectedErr:         assert.NoError,
        expectedCollections:   2,
        expectedErr:           assert.NoError,
        expectedCollectionErr: assert.NoError,
    },
    {
        name: "TargetNotAFile",
@ -1286,8 +1314,9 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestProduceRestoreCollections() {
            suite.testPath1,
            suite.files[suite.testPath2.String()][0].itemPath,
        },
        expectedCollections: 0,
        expectedErr:         assert.Error,
        expectedCollections:   0,
        expectedErr:           assert.Error,
        expectedCollectionErr: assert.NoError,
    },
    {
        name: "NonExistentFile",
@ -1296,8 +1325,9 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestProduceRestoreCollections() {
            doesntExist,
            suite.files[suite.testPath2.String()][0].itemPath,
        },
        expectedCollections: 0,
        expectedErr:         assert.Error,
        expectedCollections:   0,
        expectedErr:           assert.NoError,
        expectedCollectionErr: assert.Error, // folder for doesntExist does not exist
    },
}

@ -1330,12 +1360,28 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestProduceRestoreCollections() {
        toRestorePaths(t, test.inputPaths...),
        &ic,
        fault.New(true))
    test.expectedErr(t, err, clues.ToCore(err))
    test.expectedCollectionErr(t, err, clues.ToCore(err), "producing collections")

    if err != nil {
        return
    }

    errs := fault.New(true)

    for _, dc := range result {
        // Get all the items from channel
        items := dc.Items(suite.ctx, errs)
        //nolint:revive
        for range items {
        }
    }

    test.expectedErr(t, errs.Failure(), errs.Failure(), "getting items")

    if errs.Failure() != nil {
        return
    }

    assert.Len(t, result, test.expectedCollections)
    assert.Less(t, int64(0), ic.i)
    testForFiles(t, ctx, expected, result)
@ -1456,7 +1502,6 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestProduceRestoreCollections_Path
    require.NoError(t, err, clues.ToCore(err))

    assert.Len(t, result, test.expectedCollections)
    assert.Less(t, int64(0), ic.i)
    testForFiles(t, ctx, expected, result)
    })
}
@ -1465,7 +1510,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestProduceRestoreCollections_Path
// TestProduceRestoreCollections_Fetch tests that the Fetch function still works
// properly even with different Restore and Storage paths and items from
// different kopia directories.
func (suite *KopiaSimpleRepoIntegrationSuite) TestProduceRestoreCollections_Fetch() {
func (suite *KopiaSimpleRepoIntegrationSuite) TestProduceRestoreCollections_FetchItemByName() {
    t := suite.T()

    ctx, flush := tester.NewContext(t)
@ -1507,7 +1552,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestProduceRestoreCollections_Fetc
    // Item from first kopia directory.
    f := suite.files[suite.testPath1.String()][0]

    item, err := result[0].Fetch(ctx, f.itemPath.Item())
    item, err := result[0].FetchItemByName(ctx, f.itemPath.Item())
    require.NoError(t, err, "fetching file", clues.ToCore(err))

    r := item.ToReader()
@ -1520,7 +1565,7 @@ func (suite *KopiaSimpleRepoIntegrationSuite) TestProduceRestoreCollections_Fetc
    // Item from second kopia directory.
    f = suite.files[suite.testPath2.String()][0]

    item, err = result[0].Fetch(ctx, f.itemPath.Item())
    item, err = result[0].FetchItemByName(ctx, f.itemPath.Item())
    require.NoError(t, err, "fetching file", clues.ToCore(err))

    r = item.ToReader()

@ -324,7 +324,7 @@ func (op *BackupOperation) do(
|
||||
}
|
||||
}
|
||||
|
||||
cs, ssmb, err := produceBackupDataCollections(
|
||||
cs, ssmb, canUsePreviousBackup, err := produceBackupDataCollections(
|
||||
ctx,
|
||||
op.bp,
|
||||
op.ResourceOwner,
|
||||
@ -348,7 +348,7 @@ func (op *BackupOperation) do(
|
||||
cs,
|
||||
ssmb,
|
||||
backupID,
|
||||
op.incremental && canUseMetaData,
|
||||
op.incremental && canUseMetaData && canUsePreviousBackup,
|
||||
op.Errors)
|
||||
if err != nil {
|
||||
return nil, clues.Wrap(err, "persisting collection backups")
|
||||
@ -406,7 +406,7 @@ func produceBackupDataCollections(
|
||||
lastBackupVersion int,
|
||||
ctrlOpts control.Options,
|
||||
errs *fault.Bus,
|
||||
) ([]data.BackupCollection, prefixmatcher.StringSetReader, error) {
|
||||
) ([]data.BackupCollection, prefixmatcher.StringSetReader, bool, error) {
|
||||
complete := observe.MessageWithCompletion(ctx, "Discovering items to backup")
|
||||
defer func() {
|
||||
complete <- struct{}{}
|
||||
|
||||
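For context on the signature change above: produceBackupDataCollections now also reports, via the added boolean, whether the producer found a usable previous backup, and the caller folds that into the merge decision. A minimal sketch of the gating, using the names from the diff rather than the full implementation:

```go
// Sketch only: mirrors the condition in BackupOperation.do above.
// Incremental merging now requires all three signals; a backup can
// have readable metadata yet still lack a usable previous base.
func shouldMergeWithBase(incremental, canUseMetaData, canUsePreviousBackup bool) bool {
	return incremental && canUseMetaData && canUsePreviousBackup
}
```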
@ -30,6 +30,7 @@ import (
"github.com/alcionai/corso/src/internal/connector/onedrive"
odConsts "github.com/alcionai/corso/src/internal/connector/onedrive/consts"
"github.com/alcionai/corso/src/internal/connector/onedrive/metadata"
"github.com/alcionai/corso/src/internal/connector/sharepoint"
"github.com/alcionai/corso/src/internal/data"
"github.com/alcionai/corso/src/internal/events"
evmock "github.com/alcionai/corso/src/internal/events/mock"
@ -347,7 +348,6 @@ func generateContainerOfItems(
ctx context.Context, //revive:disable-line:context-as-argument
gc *connector.GraphConnector,
service path.ServiceType,
acct account.Account,
cat path.CategoryType,
sel selectors.Selector,
tenantID, resourceOwner, driveID, destFldr string,
@ -397,7 +397,6 @@ func generateContainerOfItems(
deets, err := gc.ConsumeRestoreCollections(
ctx,
backupVersion,
acct,
sel,
dest,
opts,
@ -468,7 +467,7 @@ func buildCollections(
mc.Data[i] = c.items[i].data
}

collections = append(collections, data.NotFoundRestoreCollection{Collection: mc})
collections = append(collections, data.NoFetchRestoreCollection{Collection: mc})
}

return collections
@ -513,6 +512,7 @@ func toDataLayerPath(
type BackupOpIntegrationSuite struct {
tester.Suite
user, site string
ac api.Client
}

func TestBackupOpIntegrationSuite(t *testing.T) {
@ -524,8 +524,18 @@ func TestBackupOpIntegrationSuite(t *testing.T) {
}

func (suite *BackupOpIntegrationSuite) SetupSuite() {
suite.user = tester.M365UserID(suite.T())
suite.site = tester.M365SiteID(suite.T())
t := suite.T()

suite.user = tester.M365UserID(t)
suite.site = tester.M365SiteID(t)

a := tester.NewM365Account(t)

creds, err := a.M365Config()
require.NoError(t, err, clues.ToCore(err))

suite.ac, err = api.NewClient(creds)
require.NoError(t, err, clues.ToCore(err))
}

func (suite *BackupOpIntegrationSuite) TestNewBackupOperation() {
@ -847,7 +857,6 @@ func testExchangeContinuousBackups(suite *BackupOpIntegrationSuite, toggles cont
ctx,
gc,
service,
acct,
category,
selectors.NewExchangeRestore([]string{uidn.ID()}).Selector,
m365.AzureTenantID, uidn.ID(), "", destName,
@ -1029,7 +1038,6 @@ func testExchangeContinuousBackups(suite *BackupOpIntegrationSuite, toggles cont
ctx,
gc,
service,
acct,
category,
selectors.NewExchangeRestore([]string{uidn.ID()}).Selector,
m365.AzureTenantID, suite.user, "", container3,
@ -1316,9 +1324,8 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_incrementalOneDrive() {
gtdi := func(
t *testing.T,
ctx context.Context,
gs graph.Servicer,
) string {
d, err := api.GetUsersDrive(ctx, gs, suite.user)
d, err := suite.ac.Users().GetDefaultDrive(ctx, suite.user)
if err != nil {
err = graph.Wrap(ctx, err, "retrieving default user drive").
With("user", suite.user)
@ -1332,6 +1339,10 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_incrementalOneDrive() {
return id
}

grh := func(ac api.Client) onedrive.RestoreHandler {
return onedrive.NewRestoreHandler(ac)
}

runDriveIncrementalTest(
suite,
suite.user,
@ -1341,6 +1352,7 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_incrementalOneDrive() {
path.FilesCategory,
ic,
gtdi,
grh,
false)
}

@ -1355,9 +1367,8 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_incrementalSharePoint() {
gtdi := func(
t *testing.T,
ctx context.Context,
gs graph.Servicer,
) string {
d, err := api.GetSitesDefaultDrive(ctx, gs, suite.site)
d, err := suite.ac.Sites().GetDefaultDrive(ctx, suite.site)
if err != nil {
err = graph.Wrap(ctx, err, "retrieving default site drive").
With("site", suite.site)
@ -1371,6 +1382,10 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_incrementalSharePoint() {
return id
}

grh := func(ac api.Client) onedrive.RestoreHandler {
return sharepoint.NewRestoreHandler(ac)
}

runDriveIncrementalTest(
suite,
suite.site,
@ -1380,6 +1395,7 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_incrementalSharePoint() {
path.LibrariesCategory,
ic,
gtdi,
grh,
true)
}

@ -1390,7 +1406,8 @@ func runDriveIncrementalTest(
service path.ServiceType,
category path.CategoryType,
includeContainers func([]string) selectors.Selector,
getTestDriveID func(*testing.T, context.Context, graph.Servicer) string,
getTestDriveID func(*testing.T, context.Context) string,
getRestoreHandler func(api.Client) onedrive.RestoreHandler,
skipPermissionsTests bool,
) {
t := suite.T()
@ -1429,12 +1446,14 @@ func runDriveIncrementalTest(
require.NoError(t, err, clues.ToCore(err))

gc, sel := GCWithSelector(t, ctx, acct, resource, sel, nil, nil)
ac := gc.AC.Drives()
rh := getRestoreHandler(gc.AC)

roidn := inMock.NewProvider(sel.ID(), sel.Name())

var (
atid = creds.AzureTenantID
driveID = getTestDriveID(t, ctx, gc.Service)
driveID = getTestDriveID(t, ctx)
fileDBF = func(id, timeStamp, subject, body string) []byte {
return []byte(id + subject)
}
@ -1462,7 +1481,6 @@ func runDriveIncrementalTest(
ctx,
gc,
service,
acct,
category,
sel,
atid, roidn.ID(), driveID, destName,
@ -1488,7 +1506,7 @@ func runDriveIncrementalTest(
// onedrive package `getFolder` function.
itemURL := fmt.Sprintf("https://graph.microsoft.com/v1.0/drives/%s/root:/%s", driveID, destName)
resp, err := drives.
NewItemItemsDriveItemItemRequestBuilder(itemURL, gc.Service.Adapter()).
NewItemItemsDriveItemItemRequestBuilder(itemURL, gc.AC.Stable.Adapter()).
Get(ctx, nil)
require.NoError(t, err, "getting drive folder ID", "folder name", destName, clues.ToCore(err))

@ -1543,9 +1561,8 @@ func runDriveIncrementalTest(
driveItem := models.NewDriveItem()
driveItem.SetName(&newFileName)
driveItem.SetFile(models.NewFile())
newFile, err = onedrive.CreateItem(
newFile, err = ac.PostItemInContainer(
ctx,
gc.Service,
driveID,
targetContainer,
driveItem)
@ -1562,19 +1579,14 @@ func runDriveIncrementalTest(
{
name: "add permission to new file",
updateFiles: func(t *testing.T) {
driveItem := models.NewDriveItem()
driveItem.SetName(&newFileName)
driveItem.SetFile(models.NewFile())
err = onedrive.UpdatePermissions(
ctx,
creds,
gc.Service,
rh,
driveID,
*newFile.GetId(),
ptr.Val(newFile.GetId()),
[]metadata.Permission{writePerm},
[]metadata.Permission{},
permissionIDMappings,
)
permissionIDMappings)
require.NoErrorf(t, err, "adding permission to file %v", clues.ToCore(err))
// no expectedDeets: metadata isn't tracked
},
@ -1585,13 +1597,9 @@ func runDriveIncrementalTest(
{
name: "remove permission from new file",
updateFiles: func(t *testing.T) {
driveItem := models.NewDriveItem()
driveItem.SetName(&newFileName)
driveItem.SetFile(models.NewFile())
err = onedrive.UpdatePermissions(
ctx,
creds,
gc.Service,
rh,
driveID,
*newFile.GetId(),
[]metadata.Permission{},
@ -1608,13 +1616,9 @@ func runDriveIncrementalTest(
name: "add permission to container",
updateFiles: func(t *testing.T) {
targetContainer := containerIDs[container1]
driveItem := models.NewDriveItem()
driveItem.SetName(&newFileName)
driveItem.SetFile(models.NewFile())
err = onedrive.UpdatePermissions(
ctx,
creds,
gc.Service,
rh,
driveID,
targetContainer,
[]metadata.Permission{writePerm},
@ -1631,13 +1635,9 @@ func runDriveIncrementalTest(
name: "remove permission from container",
updateFiles: func(t *testing.T) {
targetContainer := containerIDs[container1]
driveItem := models.NewDriveItem()
driveItem.SetName(&newFileName)
driveItem.SetFile(models.NewFile())
err = onedrive.UpdatePermissions(
ctx,
creds,
gc.Service,
rh,
driveID,
targetContainer,
[]metadata.Permission{},
@ -1653,9 +1653,8 @@ func runDriveIncrementalTest(
{
name: "update contents of a file",
updateFiles: func(t *testing.T) {
err := api.PutDriveItemContent(
err := suite.ac.Drives().PutItemContent(
ctx,
gc.Service,
driveID,
ptr.Val(newFile.GetId()),
[]byte("new content"))
@ -1678,9 +1677,8 @@ func runDriveIncrementalTest(
parentRef.SetId(&container)
driveItem.SetParentReference(parentRef)

err := api.PatchDriveItem(
err := suite.ac.Drives().PatchItem(
ctx,
gc.Service,
driveID,
ptr.Val(newFile.GetId()),
driveItem)
@ -1702,9 +1700,8 @@ func runDriveIncrementalTest(
parentRef.SetId(&dest)
driveItem.SetParentReference(parentRef)

err := api.PatchDriveItem(
err := suite.ac.Drives().PatchItem(
ctx,
gc.Service,
driveID,
ptr.Val(newFile.GetId()),
driveItem)
@ -1723,9 +1720,8 @@ func runDriveIncrementalTest(
{
name: "delete file",
updateFiles: func(t *testing.T) {
err := api.DeleteDriveItem(
err := suite.ac.Drives().DeleteItem(
ctx,
newDeleteServicer(t),
driveID,
ptr.Val(newFile.GetId()))
require.NoErrorf(t, err, "deleting file %v", clues.ToCore(err))
@ -1748,9 +1744,8 @@ func runDriveIncrementalTest(
parentRef.SetId(&parent)
driveItem.SetParentReference(parentRef)

err := api.PatchDriveItem(
err := suite.ac.Drives().PatchItem(
ctx,
gc.Service,
driveID,
child,
driveItem)
@ -1777,9 +1772,8 @@ func runDriveIncrementalTest(
parentRef.SetId(&parent)
driveItem.SetParentReference(parentRef)

err := api.PatchDriveItem(
err := suite.ac.Drives().PatchItem(
ctx,
gc.Service,
driveID,
child,
driveItem)
@ -1800,9 +1794,8 @@ func runDriveIncrementalTest(
name: "delete a folder",
updateFiles: func(t *testing.T) {
container := containerIDs[containerRename]
err := api.DeleteDriveItem(
err := suite.ac.Drives().DeleteItem(
ctx,
newDeleteServicer(t),
driveID,
container)
require.NoError(t, err, "deleting folder", clues.ToCore(err))
@ -1821,7 +1814,6 @@ func runDriveIncrementalTest(
ctx,
gc,
service,
acct,
category,
sel,
atid, roidn.ID(), driveID, container3,
@ -1834,7 +1826,7 @@ func runDriveIncrementalTest(
"https://graph.microsoft.com/v1.0/drives/%s/root:/%s",
driveID,
container3)
resp, err := drives.NewItemItemsDriveItemItemRequestBuilder(itemURL, gc.Service.Adapter()).
resp, err := drives.NewItemItemsDriveItemItemRequestBuilder(itemURL, gc.AC.Stable.Adapter()).
Get(ctx, nil)
require.NoError(t, err, "getting drive folder ID", "folder name", container3, clues.ToCore(err))

@ -1928,7 +1920,7 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_oneDriveOwnerMigration() {
connector.Users)
require.NoError(t, err, clues.ToCore(err))

userable, err := gc.Discovery.Users().GetByID(ctx, suite.user)
userable, err := gc.AC.Users().GetByID(ctx, suite.user)
require.NoError(t, err, clues.ToCore(err))

uid := ptr.Val(userable.GetId())
@ -2046,19 +2038,3 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_sharePoint() {
runAndCheckBackup(t, ctx, &bo, mb, false)
checkBackupIsInManifests(t, ctx, kw, &bo, sels, suite.site, path.LibrariesCategory)
}

// ---------------------------------------------------------------------------
// helpers
// ---------------------------------------------------------------------------

func newDeleteServicer(t *testing.T) graph.Servicer {
acct := tester.NewM365Account(t)

m365, err := acct.M365Config()
require.NoError(t, err, clues.ToCore(err))

a, err := graph.CreateAdapter(acct.ID(), m365.AzureClientID, m365.AzureClientSecret)
require.NoError(t, err, clues.ToCore(err))

return graph.NewService(a)
}

@ -727,6 +727,8 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
itemParents1, err := path.GetDriveFolderPath(itemPath1)
require.NoError(suite.T(), err, clues.ToCore(err))

itemParents1String := itemParents1.String()

table := []struct {
name string
populatedModels map[model.StableID]backup.Backup
@ -899,7 +901,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
ItemInfo: details.ItemInfo{
OneDrive: &details.OneDriveInfo{
ItemType: details.OneDriveItem,
ParentPath: itemParents1,
ParentPath: itemParents1String,
Size: 42,
},
},

@ -7,7 +7,6 @@ import (
"github.com/alcionai/corso/src/internal/common/prefixmatcher"
"github.com/alcionai/corso/src/internal/data"
"github.com/alcionai/corso/src/internal/model"
"github.com/alcionai/corso/src/pkg/account"
"github.com/alcionai/corso/src/pkg/backup"
"github.com/alcionai/corso/src/pkg/backup/details"
"github.com/alcionai/corso/src/pkg/control"
@ -27,7 +26,7 @@ type (
lastBackupVersion int,
ctrlOpts control.Options,
errs *fault.Bus,
) ([]data.BackupCollection, prefixmatcher.StringSetReader, error)
) ([]data.BackupCollection, prefixmatcher.StringSetReader, bool, error)
IsBackupRunnable(ctx context.Context, service path.ServiceType, resourceOwner string) (bool, error)

Wait() *data.CollectionStats
@ -37,7 +36,6 @@ type (
ConsumeRestoreCollections(
ctx context.Context,
backupVersion int,
acct account.Account,
selector selectors.Selector,
dest control.RestoreDestination,
opts control.Options,

@ -553,7 +553,7 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() {
mr: mockManifestRestorer{
mockRestoreProducer: mockRestoreProducer{
collsByID: map[string][]data.RestoreCollection{
"id": {data.NotFoundRestoreCollection{Collection: mockColl{id: "id_coll"}}},
"id": {data.NoFetchRestoreCollection{Collection: mockColl{id: "id_coll"}}},
},
},
mans: []kopia.ManifestEntry{makeMan(path.EmailCategory, "id", "", "")},
@ -580,8 +580,8 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() {
mr: mockManifestRestorer{
mockRestoreProducer: mockRestoreProducer{
collsByID: map[string][]data.RestoreCollection{
"id": {data.NotFoundRestoreCollection{Collection: mockColl{id: "id_coll"}}},
"incmpl_id": {data.NotFoundRestoreCollection{Collection: mockColl{id: "incmpl_id_coll"}}},
"id": {data.NoFetchRestoreCollection{Collection: mockColl{id: "id_coll"}}},
"incmpl_id": {data.NoFetchRestoreCollection{Collection: mockColl{id: "incmpl_id_coll"}}},
},
},
mans: []kopia.ManifestEntry{
@ -600,7 +600,7 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() {
mr: mockManifestRestorer{
mockRestoreProducer: mockRestoreProducer{
collsByID: map[string][]data.RestoreCollection{
"id": {data.NotFoundRestoreCollection{Collection: mockColl{id: "id_coll"}}},
"id": {data.NoFetchRestoreCollection{Collection: mockColl{id: "id_coll"}}},
},
},
mans: []kopia.ManifestEntry{makeMan(path.EmailCategory, "id", "", "bid")},
@ -616,8 +616,8 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() {
mr: mockManifestRestorer{
mockRestoreProducer: mockRestoreProducer{
collsByID: map[string][]data.RestoreCollection{
"mail": {data.NotFoundRestoreCollection{Collection: mockColl{id: "mail_coll"}}},
"contact": {data.NotFoundRestoreCollection{Collection: mockColl{id: "contact_coll"}}},
"mail": {data.NoFetchRestoreCollection{Collection: mockColl{id: "mail_coll"}}},
"contact": {data.NoFetchRestoreCollection{Collection: mockColl{id: "contact_coll"}}},
},
},
mans: []kopia.ManifestEntry{
@ -681,7 +681,7 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() {
for _, dc := range dcs {
if !assert.IsTypef(
t,
data.NotFoundRestoreCollection{},
data.NoFetchRestoreCollection{},
dc,
"unexpected type returned [%T]",
dc,
@ -689,7 +689,7 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() {
continue
}

tmp := dc.(data.NotFoundRestoreCollection)
tmp := dc.(data.NoFetchRestoreCollection)

if !assert.IsTypef(
t,

@ -36,13 +36,13 @@ type RestoreOperation struct {
operation

BackupID model.StableID `json:"backupID"`
Destination control.RestoreDestination `json:"destination"`
Results RestoreResults `json:"results"`
Selectors selectors.Selector `json:"selectors"`
Destination control.RestoreDestination `json:"destination"`
Version string `json:"version"`

account account.Account
rc inject.RestoreConsumer
acct account.Account
rc inject.RestoreConsumer
}

// RestoreResults aggregate the details of the results of the operation.
@ -66,11 +66,11 @@ func NewRestoreOperation(
) (RestoreOperation, error) {
op := RestoreOperation{
operation: newOperation(opts, bus, kw, sw),
acct: acct,
BackupID: backupID,
Selectors: sel,
Destination: dest,
Selectors: sel,
Version: "v0",
account: acct,
rc: rc,
}
if err := op.validate(); err != nil {
@ -116,7 +116,7 @@ func (op *RestoreOperation) Run(ctx context.Context) (restoreDetails *details.De
restoreID: uuid.NewString(),
}
start = time.Now()
sstore = streamstore.NewStreamer(op.kopia, op.account.ID(), op.Selectors.PathService())
sstore = streamstore.NewStreamer(op.kopia, op.acct.ID(), op.Selectors.PathService())
)

// -----
@ -135,7 +135,7 @@ func (op *RestoreOperation) Run(ctx context.Context) (restoreDetails *details.De

ctx = clues.Add(
ctx,
"tenant_id", clues.Hide(op.account.ID()),
"tenant_id", clues.Hide(op.acct.ID()),
"backup_id", op.BackupID,
"service", op.Selectors.Service,
"destination_container", clues.Hide(op.Destination.ContainerName))
@ -256,7 +256,6 @@ func (op *RestoreOperation) do(
ctx,
op.rc,
bup.Version,
op.account,
op.Selectors,
op.Destination,
op.Options,
@ -314,7 +313,6 @@ func consumeRestoreCollections(
ctx context.Context,
rc inject.RestoreConsumer,
backupVersion int,
acct account.Account,
sel selectors.Selector,
dest control.RestoreDestination,
opts control.Options,
@ -330,7 +328,6 @@ func consumeRestoreCollections(
deets, err := rc.ConsumeRestoreCollections(
ctx,
backupVersion,
acct,
sel,
dest,
opts,

@ -15,7 +15,6 @@ import (
"github.com/alcionai/corso/src/internal/connector"
"github.com/alcionai/corso/src/internal/connector/exchange"
exchMock "github.com/alcionai/corso/src/internal/connector/exchange/mock"
"github.com/alcionai/corso/src/internal/connector/graph"
"github.com/alcionai/corso/src/internal/connector/mock"
"github.com/alcionai/corso/src/internal/data"
"github.com/alcionai/corso/src/internal/events"
@ -50,7 +49,6 @@ func (suite *RestoreOpSuite) TestRestoreOperation_PersistResults() {
kw = &kopia.Wrapper{}
sw = &store.Wrapper{}
gc = &mock.GraphConnector{}
acct = account.Account{}
now = time.Now()
dest = tester.DefaultTestRestoreDestination("")
)
@ -70,7 +68,7 @@ func (suite *RestoreOpSuite) TestRestoreOperation_PersistResults() {
NumBytes: 42,
},
cs: []data.RestoreCollection{
data.NotFoundRestoreCollection{
data.NoFetchRestoreCollection{
Collection: &exchMock.DataCollection{},
},
},
@ -112,7 +110,7 @@ func (suite *RestoreOpSuite) TestRestoreOperation_PersistResults() {
kw,
sw,
gc,
acct,
account.Account{},
"foo",
selectors.Selector{DiscreteOwner: "test"},
dest,
@ -220,7 +218,6 @@ func (suite *RestoreOpIntegrationSuite) TestNewRestoreOperation() {
kw = &kopia.Wrapper{}
sw = &store.Wrapper{}
gc = &mock.GraphConnector{}
acct = tester.NewM365Account(suite.T())
dest = tester.DefaultTestRestoreDestination("")
opts = control.Defaults()
)
@ -230,18 +227,19 @@ func (suite *RestoreOpIntegrationSuite) TestNewRestoreOperation() {
kw *kopia.Wrapper
sw *store.Wrapper
rc inject.RestoreConsumer
acct account.Account
targets []string
errCheck assert.ErrorAssertionFunc
}{
{"good", kw, sw, gc, acct, nil, assert.NoError},
{"missing kopia", nil, sw, gc, acct, nil, assert.Error},
{"missing modelstore", kw, nil, gc, acct, nil, assert.Error},
{"missing restore consumer", kw, sw, nil, acct, nil, assert.Error},
{"good", kw, sw, gc, nil, assert.NoError},
{"missing kopia", nil, sw, gc, nil, assert.Error},
{"missing modelstore", kw, nil, gc, nil, assert.Error},
{"missing restore consumer", kw, sw, nil, nil, assert.Error},
}
for _, test := range table {
suite.Run(test.name, func() {
ctx, flush := tester.NewContext(suite.T())
t := suite.T()

ctx, flush := tester.NewContext(t)
defer flush()

_, err := NewRestoreOperation(
@ -250,12 +248,12 @@ func (suite *RestoreOpIntegrationSuite) TestNewRestoreOperation() {
test.kw,
test.sw,
test.rc,
test.acct,
tester.NewM365Account(t),
"backup-id",
selectors.Selector{DiscreteOwner: "test"},
dest,
evmock.NewBus())
test.errCheck(suite.T(), err, clues.ToCore(err))
test.errCheck(t, err, clues.ToCore(err))
})
}
}
@ -346,18 +344,7 @@ func setupSharePointBackup(
evmock.NewBus())
require.NoError(t, err, clues.ToCore(err))

// get the count of drives
m365, err := acct.M365Config()
require.NoError(t, err, clues.ToCore(err))

adpt, err := graph.CreateAdapter(
m365.AzureTenantID,
m365.AzureClientID,
m365.AzureClientSecret)
require.NoError(t, err, clues.ToCore(err))

service := graph.NewService(adpt)
spPgr := api.NewSiteDrivePager(service, owner, []string{"id", "name"})
spPgr := gc.AC.Drives().NewSiteDrivePager(owner, []string{"id", "name"})

drives, err := api.GetAllDrives(ctx, spPgr, true, 3)
require.NoError(t, err, clues.ToCore(err))

@ -30,13 +30,13 @@ func ToDrivePath(p Path) (*DrivePath, error) {
}

// Returns the path to the folder within the drive (i.e. under `root:`)
func GetDriveFolderPath(p Path) (string, error) {
func GetDriveFolderPath(p Path) (*Builder, error) {
drivePath, err := ToDrivePath(p)
if err != nil {
return "", err
return nil, err
}

return Builder{}.Append(drivePath.Folders...).String(), nil
return Builder{}.Append(drivePath.Folders...), nil
}

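Since GetDriveFolderPath now returns a *Builder instead of a string, callers that want the joined path call String() themselves, exactly as the MergeBackupDetails test above was updated to do. A short migration sketch:

```go
// before this change, GetDriveFolderPath returned the joined string directly
itemParents, err := path.GetDriveFolderPath(itemPath)
if err != nil {
	return err
}

parentPath := itemParents.String() // reproduces the old string return value
```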
// BuildDriveLocation takes a driveID and a set of unescaped element names,

@ -450,8 +450,7 @@ func (pb Builder) ToDataLayerPath(
tenant,
service.String(),
user,
category.String(),
),
category.String()),
service: service,
category: category,
hasItem: isItem,

@ -1,6 +1,9 @@
package api

import (
"context"
"net/http"

"github.com/alcionai/clues"

"github.com/alcionai/corso/src/internal/connector/graph"
@ -27,6 +30,11 @@ type Client struct {
// downloading large items such as drive item content or outlook
// mail and event attachments.
LargeItem graph.Servicer

// The Requester provides a client specifically for calling
// arbitrary urls instead of constructing queries using the
// graph api client.
Requester graph.Requester
}

// NewClient produces a new exchange api client. Must be used in
@ -42,7 +50,9 @@ func NewClient(creds account.M365Config) (Client, error) {
return Client{}, err
}

return Client{creds, s, li}, nil
rqr := graph.NewNoTimeoutHTTPWrapper()

return Client{creds, s, li, rqr}, nil
}

// Service generates a new graph servicer. New servicers are used for paged
@ -75,3 +85,20 @@ func newLargeItemService(creds account.M365Config) (*graph.Service, error) {

return a, nil
}

type Getter interface {
Get(
ctx context.Context,
url string,
headers map[string]string,
) (*http.Response, error)
}

// Get performs an ad-hoc get request using its graph.Requester
func (c Client) Get(
ctx context.Context,
url string,
headers map[string]string,
) (*http.Response, error) {
return c.Requester.Request(ctx, http.MethodGet, url, nil, headers)
}

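The new Requester field and the Get method above give api.Client a way to call raw URLs (for example, stored delta or next links) without building a typed SDK query. A rough usage sketch; the URL and header values are illustrative, not taken from the codebase:

```go
// hypothetical caller-side use of Client.Get; the URL would normally
// come from a previously persisted delta or next link
resp, err := client.Get(
	ctx,
	"https://graph.microsoft.com/v1.0/me/mailFolders/delta", // illustrative URL
	map[string]string{"Prefer": "odata.maxpagesize=200"})    // illustrative header
if err != nil {
	return err
}
defer resp.Body.Close()
```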
@ -10,14 +10,12 @@ import (
"github.com/stretchr/testify/suite"

exchMock "github.com/alcionai/corso/src/internal/connector/exchange/mock"
"github.com/alcionai/corso/src/internal/connector/graph"
"github.com/alcionai/corso/src/internal/tester"
"github.com/alcionai/corso/src/pkg/account"
)

type ExchangeServiceSuite struct {
tester.Suite
gs graph.Servicer
credentials account.M365Config
}

@ -38,14 +36,6 @@ func (suite *ExchangeServiceSuite) SetupSuite() {
require.NoError(t, err, clues.ToCore(err))

suite.credentials = m365

adpt, err := graph.CreateAdapter(
m365.AzureTenantID,
m365.AzureClientID,
m365.AzureClientSecret)
require.NoError(t, err, clues.ToCore(err))

suite.gs = graph.NewService(adpt)
}

//nolint:lll

@ -296,9 +296,8 @@ type contactPager struct {
options *users.ItemContactFoldersItemContactsRequestBuilderGetRequestConfiguration
}

func NewContactPager(
func (c Contacts) NewContactPager(
ctx context.Context,
gs graph.Servicer,
userID, containerID string,
immutableIDs bool,
) itemPager {
@ -309,7 +308,7 @@ func NewContactPager(
Headers: newPreferHeaders(preferPageSize(maxNonDeltaPageSize), preferImmutableIDs(immutableIDs)),
}

builder := gs.
builder := c.Stable.
Client().
Users().
ByUserId(userID).
@ -317,7 +316,7 @@ func NewContactPager(
ByContactFolderId(containerID).
Contacts()

return &contactPager{gs, builder, config}
return &contactPager{c.Stable, builder, config}
}

func (p *contactPager) getPage(ctx context.Context) (DeltaPageLinker, error) {
@ -364,9 +363,8 @@ func getContactDeltaBuilder(
return builder
}

func NewContactDeltaPager(
func (c Contacts) NewContactDeltaPager(
ctx context.Context,
gs graph.Servicer,
userID, containerID, oldDelta string,
immutableIDs bool,
) itemPager {
@ -379,12 +377,12 @@ func NewContactDeltaPager(

var builder *users.ItemContactFoldersItemContactsDeltaRequestBuilder
if oldDelta != "" {
builder = users.NewItemContactFoldersItemContactsDeltaRequestBuilder(oldDelta, gs.Adapter())
builder = users.NewItemContactFoldersItemContactsDeltaRequestBuilder(oldDelta, c.Stable.Adapter())
} else {
builder = getContactDeltaBuilder(ctx, gs, userID, containerID, options)
builder = getContactDeltaBuilder(ctx, c.Stable, userID, containerID, options)
}

return &contactDeltaPager{gs, userID, containerID, builder, options}
return &contactDeltaPager{c.Stable, userID, containerID, builder, options}
}

func (p *contactDeltaPager) getPage(ctx context.Context) (DeltaPageLinker, error) {
@ -419,8 +417,8 @@ func (c Contacts) GetAddedAndRemovedItemIDs(
"category", selectors.ExchangeContact,
"container_id", containerID)

pager := NewContactPager(ctx, c.Stable, userID, containerID, immutableIDs)
deltaPager := NewContactDeltaPager(ctx, c.Stable, userID, containerID, oldDelta, immutableIDs)
pager := c.NewContactPager(ctx, userID, containerID, immutableIDs)
deltaPager := c.NewContactDeltaPager(ctx, userID, containerID, oldDelta, immutableIDs)

return getAddedAndRemovedItemIDs(ctx, c.Stable, pager, deltaPager, oldDelta, canMakeDeltaQueries)
}

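As the Contacts diff above shows, both a plain pager and a delta pager are always constructed and handed to the shared getAddedAndRemovedItemIDs helper, which chooses between them. A rough sketch of that selection, under the assumption that the helper simply prefers delta queries when they are permitted (the real helper also handles expired delta tokens):

```go
// a sketch, not the actual helper: pick the delta pager when delta
// queries are allowed, otherwise fall back to full enumeration
func pickPager(pager, deltaPager itemPager, canMakeDeltaQueries bool) itemPager {
	if canMakeDeltaQueries {
		return deltaPager // resumes from oldDelta when one was stored
	}

	return pager
}
```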
@ -9,184 +9,41 @@ import (
"github.com/microsoftgraph/msgraph-sdk-go/models"

"github.com/alcionai/corso/src/internal/connector/graph"
"github.com/alcionai/corso/src/pkg/account"
)

// ---------------------------------------------------------------------------
// Drives
// controller
// ---------------------------------------------------------------------------

func GetUsersDrive(
ctx context.Context,
srv graph.Servicer,
user string,
) (models.Driveable, error) {
d, err := srv.Client().
Users().
ByUserId(user).
Drive().
Get(ctx, nil)
if err != nil {
return nil, graph.Wrap(ctx, err, "getting user's drive")
}

return d, nil
func (c Client) Drives() Drives {
return Drives{c}
}

func GetSitesDefaultDrive(
ctx context.Context,
srv graph.Servicer,
site string,
) (models.Driveable, error) {
d, err := srv.Client().
Sites().
BySiteId(site).
Drive().
Get(ctx, nil)
if err != nil {
return nil, graph.Wrap(ctx, err, "getting site's drive")
}

return d, nil
}

func GetDriveRoot(
ctx context.Context,
srv graph.Servicer,
driveID string,
) (models.DriveItemable, error) {
root, err := srv.Client().
Drives().
ByDriveId(driveID).
Root().
Get(ctx, nil)
if err != nil {
return nil, graph.Wrap(ctx, err, "getting drive root")
}

return root, nil
// Drives is an interface-compliant provider of the client.
type Drives struct {
Client
}

// ---------------------------------------------------------------------------
// Drive Items
// Folders
// ---------------------------------------------------------------------------

// generic drive item getter
func GetDriveItem(
ctx context.Context,
srv graph.Servicer,
driveID, itemID string,
) (models.DriveItemable, error) {
di, err := srv.Client().
Drives().
ByDriveId(driveID).
Items().
ByDriveItemId(itemID).
Get(ctx, nil)
if err != nil {
return nil, graph.Wrap(ctx, err, "getting item")
}

return di, nil
}

func PostDriveItem(
ctx context.Context,
srv graph.Servicer,
driveID, itemID string,
) (models.UploadSessionable, error) {
session := drives.NewItemItemsItemCreateUploadSessionPostRequestBody()

r, err := srv.Client().
Drives().
ByDriveId(driveID).
Items().
ByDriveItemId(itemID).
CreateUploadSession().
Post(ctx, session, nil)
if err != nil {
return nil, graph.Wrap(ctx, err, "uploading drive item")
}

return r, nil
}

func PatchDriveItem(
ctx context.Context,
srv graph.Servicer,
driveID, itemID string,
item models.DriveItemable,
) error {
_, err := srv.Client().
Drives().
ByDriveId(driveID).
Items().
ByDriveItemId(itemID).
Patch(ctx, item, nil)
if err != nil {
return graph.Wrap(ctx, err, "patching drive item")
}

return nil
}

func PutDriveItemContent(
ctx context.Context,
srv graph.Servicer,
driveID, itemID string,
content []byte,
) error {
_, err := srv.Client().
Drives().
ByDriveId(driveID).
Items().
ByDriveItemId(itemID).
Content().
Put(ctx, content, nil)
if err != nil {
return graph.Wrap(ctx, err, "uploading drive item content")
}

return nil
}

// deletes require unique http clients
// https://github.com/alcionai/corso/issues/2707
func DeleteDriveItem(
ctx context.Context,
gs graph.Servicer,
driveID, itemID string,
) error {
err := gs.Client().
Drives().
ByDriveId(driveID).
Items().
ByDriveItemId(itemID).
Delete(ctx, nil)
if err != nil {
return graph.Wrap(ctx, err, "deleting item").With("item_id", itemID)
}

return nil
}

const itemByPathRawURLFmt = "https://graph.microsoft.com/v1.0/drives/%s/items/%s:/%s"

var ErrFolderNotFound = clues.New("folder not found")

// GetFolderByName will lookup the specified folder by name within the parentFolderID folder.
func GetFolderByName(
func (c Drives) GetFolderByName(
ctx context.Context,
srv graph.Servicer,
driveID, parentFolderID, folder string,
driveID, parentFolderID, folderID string,
) (models.DriveItemable, error) {
// The `Children().Get()` API doesn't yet support $filter, so using that to find a folder
// will be sub-optimal.
// Instead, we leverage OneDrive path-based addressing -
// https://learn.microsoft.com/en-us/graph/onedrive-addressing-driveitems#path-based-addressing
// - which allows us to lookup an item by its path relative to the parent ID
rawURL := fmt.Sprintf(itemByPathRawURLFmt, driveID, parentFolderID, folder)
builder := drives.NewItemItemsDriveItemItemRequestBuilder(rawURL, srv.Adapter())
rawURL := fmt.Sprintf(itemByPathRawURLFmt, driveID, parentFolderID, folderID)
builder := drives.NewItemItemsDriveItemItemRequestBuilder(rawURL, c.Stable.Adapter())

foundItem, err := builder.Get(ctx, nil)
if err != nil {
@ -205,16 +62,163 @@ func GetFolderByName(
return foundItem, nil
}

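GetFolderByName resolves the child via OneDrive path-based addressing relative to the parent ID, and the same file declares the ErrFolderNotFound sentinel. A hypothetical call site, assuming the sentinel is returned wrapped so errors.Is matches; the folder name is illustrative:

```go
// a sketch, assuming ErrFolderNotFound is surfaced in a form errors.Is can match
folder, err := ac.Drives().GetFolderByName(ctx, driveID, parentFolderID, "Reports")
if errors.Is(err, api.ErrFolderNotFound) {
	// the parent exists but the child folder does not: create it or bail
	return nil, err
}

if err != nil {
	return nil, err
}
```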
func (c Drives) GetRootFolder(
ctx context.Context,
driveID string,
) (models.DriveItemable, error) {
root, err := c.Stable.
Client().
Drives().
ByDriveId(driveID).
Root().
Get(ctx, nil)
if err != nil {
return nil, graph.Wrap(ctx, err, "getting drive root")
}

return root, nil
}

// ---------------------------------------------------------------------------
// Items
// ---------------------------------------------------------------------------

// generic drive item getter
func (c Drives) GetItem(
ctx context.Context,
driveID, itemID string,
) (models.DriveItemable, error) {
di, err := c.Stable.
Client().
Drives().
ByDriveId(driveID).
Items().
ByDriveItemId(itemID).
Get(ctx, nil)
if err != nil {
return nil, graph.Wrap(ctx, err, "getting item")
}

return di, nil
}

func (c Drives) NewItemContentUpload(
ctx context.Context,
driveID, itemID string,
) (models.UploadSessionable, error) {
session := drives.NewItemItemsItemCreateUploadSessionPostRequestBody()

r, err := c.Stable.
Client().
Drives().
ByDriveId(driveID).
Items().
ByDriveItemId(itemID).
CreateUploadSession().
Post(ctx, session, nil)
if err != nil {
return nil, graph.Wrap(ctx, err, "uploading drive item")
}

return r, nil
}

const itemChildrenRawURLFmt = "https://graph.microsoft.com/v1.0/drives/%s/items/%s/children"

// PostItemInContainer creates a new item in the specified folder
func (c Drives) PostItemInContainer(
ctx context.Context,
driveID, parentFolderID string,
newItem models.DriveItemable,
) (models.DriveItemable, error) {
// Graph SDK doesn't yet provide a POST method for `/children` so we set the `rawUrl` ourselves as recommended
// here: https://github.com/microsoftgraph/msgraph-sdk-go/issues/155#issuecomment-1136254310
rawURL := fmt.Sprintf(itemChildrenRawURLFmt, driveID, parentFolderID)
builder := drives.NewItemItemsRequestBuilder(rawURL, c.Stable.Adapter())

newItem, err := builder.Post(ctx, newItem, nil)
if err != nil {
return nil, graph.Wrap(ctx, err, "creating item in folder")
}

return newItem, nil
}

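A usage sketch for PostItemInContainer, mirroring how the incremental test earlier in this commit creates files, but building a folder instead; the name is illustrative:

```go
name := "archive" // illustrative folder name

item := models.NewDriveItem()
item.SetName(&name)
item.SetFolder(models.NewFolder()) // use SetFile(models.NewFile()) for a file, as the test does

created, err := ac.Drives().PostItemInContainer(ctx, driveID, parentFolderID, item)
if err != nil {
	return err
}
```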
func (c Drives) PatchItem(
ctx context.Context,
driveID, itemID string,
item models.DriveItemable,
) error {
_, err := c.Stable.
Client().
Drives().
ByDriveId(driveID).
Items().
ByDriveItemId(itemID).
Patch(ctx, item, nil)
if err != nil {
return graph.Wrap(ctx, err, "patching drive item")
}

return nil
}

func (c Drives) PutItemContent(
ctx context.Context,
driveID, itemID string,
content []byte,
) error {
_, err := c.Stable.
Client().
Drives().
ByDriveId(driveID).
Items().
ByDriveItemId(itemID).
Content().
Put(ctx, content, nil)
if err != nil {
return graph.Wrap(ctx, err, "uploading drive item content")
}

return nil
}

// deletes require unique http clients
// https://github.com/alcionai/corso/issues/2707
func (c Drives) DeleteItem(
ctx context.Context,
driveID, itemID string,
) error {
// deletes require unique http clients
// https://github.com/alcionai/corso/issues/2707
srv, err := c.Service()
if err != nil {
return graph.Wrap(ctx, err, "creating adapter to delete item permission")
}

err = srv.
Client().
Drives().
ByDriveId(driveID).
Items().
ByDriveItemId(itemID).
Delete(ctx, nil)
if err != nil {
return graph.Wrap(ctx, err, "deleting item").With("item_id", itemID)
}

return nil
}

// ---------------------------------------------------------------------------
// Permissions
// ---------------------------------------------------------------------------

func GetItemPermission(
func (c Drives) GetItemPermission(
ctx context.Context,
service graph.Servicer,
driveID, itemID string,
) (models.PermissionCollectionResponseable, error) {
perm, err := service.
perm, err := c.Stable.
Client().
Drives().
ByDriveId(driveID).
@ -229,15 +233,15 @@ func GetItemPermission(
return perm, nil
}

func PostItemPermissionUpdate(
func (c Drives) PostItemPermissionUpdate(
ctx context.Context,
service graph.Servicer,
driveID, itemID string,
body *drives.ItemItemsItemInvitePostRequestBody,
) (drives.ItemItemsItemInviteResponseable, error) {
ctx = graph.ConsumeNTokens(ctx, graph.PermissionsLC)

itm, err := service.Client().
itm, err := c.Stable.
Client().
Drives().
ByDriveId(driveID).
Items().
@ -251,17 +255,18 @@ func PostItemPermissionUpdate(
return itm, nil
}

func DeleteDriveItemPermission(
func (c Drives) DeleteItemPermission(
ctx context.Context,
creds account.M365Config,
driveID, itemID, permissionID string,
) error {
a, err := graph.CreateAdapter(creds.AzureTenantID, creds.AzureClientID, creds.AzureClientSecret)
// deletes require unique http clients
// https://github.com/alcionai/corso/issues/2707
srv, err := c.Service()
if err != nil {
return graph.Wrap(ctx, err, "creating adapter to delete item permission")
}

err = graph.NewService(a).
err = srv.
Client().
Drives().
ByDriveId(driveID).

@ -21,18 +21,26 @@ import (
// item pager
// ---------------------------------------------------------------------------

type driveItemPager struct {
type DriveItemEnumerator interface {
GetPage(context.Context) (DeltaPageLinker, error)
SetNext(nextLink string)
Reset()
ValuesIn(DeltaPageLinker) ([]models.DriveItemable, error)
}

var _ DriveItemEnumerator = &DriveItemPager{}

type DriveItemPager struct {
gs graph.Servicer
driveID string
builder *drives.ItemItemsItemDeltaRequestBuilder
options *drives.ItemItemsItemDeltaRequestBuilderGetRequestConfiguration
}

func NewItemPager(
gs graph.Servicer,
func (c Drives) NewItemPager(
driveID, link string,
selectFields []string,
) *driveItemPager {
) *DriveItemPager {
preferHeaderItems := []string{
"deltashowremovedasdeleted",
"deltatraversepermissiongaps",
@ -48,24 +56,25 @@ func NewItemPager(
},
}

res := &driveItemPager{
gs: gs,
res := &DriveItemPager{
gs: c.Stable,
driveID: driveID,
options: requestConfig,
builder: gs.Client().
builder: c.Stable.
Client().
Drives().
ByDriveId(driveID).
Items().ByDriveItemId(onedrive.RootID).Delta(),
}

if len(link) > 0 {
res.builder = drives.NewItemItemsItemDeltaRequestBuilder(link, gs.Adapter())
res.builder = drives.NewItemItemsItemDeltaRequestBuilder(link, c.Stable.Adapter())
}

return res
}

func (p *driveItemPager) GetPage(ctx context.Context) (DeltaPageLinker, error) {
func (p *DriveItemPager) GetPage(ctx context.Context) (DeltaPageLinker, error) {
var (
resp DeltaPageLinker
err error
@ -79,11 +88,11 @@ func (p *driveItemPager) GetPage(ctx context.Context) (DeltaPageLinker, error) {
return resp, nil
}

func (p *driveItemPager) SetNext(link string) {
func (p *DriveItemPager) SetNext(link string) {
p.builder = drives.NewItemItemsItemDeltaRequestBuilder(link, p.gs.Adapter())
}

func (p *driveItemPager) Reset() {
func (p *DriveItemPager) Reset() {
p.builder = p.gs.Client().
Drives().
ByDriveId(p.driveID).
@ -92,7 +101,7 @@ func (p *driveItemPager) Reset() {
Delta()
}

func (p *driveItemPager) ValuesIn(l DeltaPageLinker) ([]models.DriveItemable, error) {
func (p *DriveItemPager) ValuesIn(l DeltaPageLinker) ([]models.DriveItemable, error) {
return getValues[models.DriveItemable](l)
}

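The exported DriveItemEnumerator interface above makes the delta-paging loop mockable. A sketch of how a consumer might drive it; extractLinks, process, and persistDelta are hypothetical stand-ins, while the pager methods are exactly those of the interface:

```go
// a sketch under stated assumptions, not the actual collection code
for {
	page, err := pager.GetPage(ctx)
	if err != nil {
		return err
	}

	items, err := pager.ValuesIn(page)
	if err != nil {
		return err
	}

	for _, item := range items {
		process(item) // hypothetical per-item callback
	}

	nextLink, deltaLink := extractLinks(page) // hypothetical link reader
	if len(nextLink) == 0 {
		persistDelta(deltaLink) // hypothetical: saved for the next incremental run
		break
	}

	pager.SetNext(nextLink)
}
```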
@ -100,6 +109,8 @@ func (p *driveItemPager) ValuesIn(l DeltaPageLinker) ([]models.DriveItemable, er
// user pager
// ---------------------------------------------------------------------------

var _ DrivePager = &userDrivePager{}

type userDrivePager struct {
userID string
gs graph.Servicer
@ -107,8 +118,7 @@ type userDrivePager struct {
options *users.ItemDrivesRequestBuilderGetRequestConfiguration
}

func NewUserDrivePager(
gs graph.Servicer,
func (c Drives) NewUserDrivePager(
userID string,
fields []string,
) *userDrivePager {
@ -120,9 +130,13 @@ func NewUserDrivePager(

res := &userDrivePager{
userID: userID,
gs: gs,
gs: c.Stable,
options: requestConfig,
builder: gs.Client().Users().ByUserId(userID).Drives(),
builder: c.Stable.
Client().
Users().
ByUserId(userID).
Drives(),
}

return res
@ -140,7 +154,12 @@ func (p *userDrivePager) GetPage(ctx context.Context) (PageLinker, error) {
err error
)

d, err := p.gs.Client().Users().ByUserId(p.userID).Drive().Get(ctx, nil)
d, err := p.gs.
Client().
Users().
ByUserId(p.userID).
Drive().
Get(ctx, nil)
if err != nil {
return nil, graph.Stack(ctx, err)
}
@ -180,6 +199,8 @@ func (p *userDrivePager) ValuesIn(l PageLinker) ([]models.Driveable, error) {
// site pager
// ---------------------------------------------------------------------------

var _ DrivePager = &siteDrivePager{}

type siteDrivePager struct {
gs graph.Servicer
builder *sites.ItemDrivesRequestBuilder
@ -191,8 +212,7 @@ type siteDrivePager struct {
// in a query. NOTE: Fields are case-sensitive. Incorrect field settings will
// cause errors during later paging.
// Available fields: https://learn.microsoft.com/en-us/graph/api/resources/drive?view=graph-rest-1.0
func NewSiteDrivePager(
gs graph.Servicer,
func (c Drives) NewSiteDrivePager(
siteID string,
fields []string,
) *siteDrivePager {
@ -203,9 +223,13 @@ func NewSiteDrivePager(
}

res := &siteDrivePager{
gs: gs,
gs: c.Stable,
options: requestConfig,
builder: gs.Client().Sites().BySiteId(siteID).Drives(),
builder: c.Stable.
Client().
Sites().
BySiteId(siteID).
Drives(),
}

return res
@ -313,7 +337,8 @@ GetAllDrives(
func getValues[T any](l PageLinker) ([]T, error) {
page, ok := l.(interface{ GetValue() []T })
if !ok {
return nil, clues.New("page does not comply with GetValue() interface").With("page_item_type", fmt.Sprintf("%T", l))
return nil, clues.New("page does not comply with GetValue() interface").
With("page_item_type", fmt.Sprintf("%T", l))
}

return page.GetValue(), nil

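getValues works because every kiota-generated collection response exposes a concrete GetValue() slice; the anonymous-interface assertion recovers it generically without naming each response type. An in-package usage sketch:

```go
// a sketch: any page whose concrete type has GetValue() []models.Driveable
// satisfies the anonymous interface inside getValues
driveables, err := getValues[models.Driveable](page)
if err != nil {
	return err
}

for _, d := range driveables {
	fmt.Println(ptr.Val(d.GetName()))
}
```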
@ -8,7 +8,6 @@ import (
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/stretchr/testify/suite"
|
||||
|
||||
"github.com/alcionai/corso/src/internal/connector/graph"
|
||||
"github.com/alcionai/corso/src/internal/tester"
|
||||
"github.com/alcionai/corso/src/pkg/account"
|
||||
"github.com/alcionai/corso/src/pkg/services/m365/api"
|
||||
@ -16,24 +15,19 @@ import (
|
||||
|
||||
type OneDriveAPISuite struct {
|
||||
tester.Suite
|
||||
creds account.M365Config
|
||||
service graph.Servicer
|
||||
creds account.M365Config
|
||||
ac api.Client
|
||||
}
|
||||
|
||||
func (suite *OneDriveAPISuite) SetupSuite() {
|
||||
t := suite.T()
|
||||
a := tester.NewM365Account(t)
|
||||
m365, err := a.M365Config()
|
||||
creds, err := a.M365Config()
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
suite.creds = m365
|
||||
adpt, err := graph.CreateAdapter(
|
||||
m365.AzureTenantID,
|
||||
m365.AzureClientID,
|
||||
m365.AzureClientSecret)
|
||||
suite.creds = creds
|
||||
suite.ac, err = api.NewClient(creds)
|
||||
require.NoError(t, err, clues.ToCore(err))
|
||||
|
||||
suite.service = graph.NewService(adpt)
|
||||
}
|
||||
|
||||
func TestOneDriveAPIs(t *testing.T) {
|
||||
@ -51,7 +45,8 @@ func (suite *OneDriveAPISuite) TestCreatePagerAndGetPage() {
|
||||
defer flush()
|
||||
|
||||
siteID := tester.M365SiteID(t)
|
||||
pager := api.NewSiteDrivePager(suite.service, siteID, []string{"name"})
|
||||
pager := suite.ac.Drives().NewSiteDrivePager(siteID, []string{"name"})
|
||||
|
||||
a, err := pager.GetPage(ctx)
|
||||
assert.NoError(t, err, clues.ToCore(err))
|
||||
assert.NotNil(t, a)
|
||||
|
||||
@ -446,9 +446,8 @@ type eventPager struct {
	options *users.ItemCalendarsItemEventsRequestBuilderGetRequestConfiguration
}

-func NewEventPager(
+func (c Events) NewEventPager(
	ctx context.Context,
-	gs graph.Servicer,
	userID, containerID string,
	immutableIDs bool,
) (itemPager, error) {

@ -456,7 +455,7 @@ func NewEventPager(
		Headers: newPreferHeaders(preferPageSize(maxNonDeltaPageSize), preferImmutableIDs(immutableIDs)),
	}

-	builder := gs.
+	builder := c.Stable.
		Client().
		Users().
		ByUserId(userID).

@ -464,7 +463,7 @@ func NewEventPager(
		ByCalendarId(containerID).
		Events()

-	return &eventPager{gs, builder, options}, nil
+	return &eventPager{c.Stable, builder, options}, nil
}

func (p *eventPager) getPage(ctx context.Context) (DeltaPageLinker, error) {

@ -501,9 +500,8 @@ type eventDeltaPager struct {
	options *users.ItemCalendarsItemEventsDeltaRequestBuilderGetRequestConfiguration
}

-func NewEventDeltaPager(
+func (c Events) NewEventDeltaPager(
	ctx context.Context,
-	gs graph.Servicer,
	userID, containerID, oldDelta string,
	immutableIDs bool,
) (itemPager, error) {

@ -514,12 +512,12 @@ func NewEventDeltaPager(
	var builder *users.ItemCalendarsItemEventsDeltaRequestBuilder

	if oldDelta == "" {
-		builder = getEventDeltaBuilder(ctx, gs, userID, containerID, options)
+		builder = getEventDeltaBuilder(ctx, c.Stable, userID, containerID, options)
	} else {
-		builder = users.NewItemCalendarsItemEventsDeltaRequestBuilder(oldDelta, gs.Adapter())
+		builder = users.NewItemCalendarsItemEventsDeltaRequestBuilder(oldDelta, c.Stable.Adapter())
	}

-	return &eventDeltaPager{gs, userID, containerID, builder, options}, nil
+	return &eventDeltaPager{c.Stable, userID, containerID, builder, options}, nil
}

func getEventDeltaBuilder(

@ -571,12 +569,12 @@ func (c Events) GetAddedAndRemovedItemIDs(
) ([]string, []string, DeltaUpdate, error) {
	ctx = clues.Add(ctx, "container_id", containerID)

-	pager, err := NewEventPager(ctx, c.Stable, userID, containerID, immutableIDs)
+	pager, err := c.NewEventPager(ctx, userID, containerID, immutableIDs)
	if err != nil {
		return nil, nil, DeltaUpdate{}, graph.Wrap(ctx, err, "creating non-delta pager")
	}

-	deltaPager, err := NewEventDeltaPager(ctx, c.Stable, userID, containerID, oldDelta, immutableIDs)
+	deltaPager, err := c.NewEventDeltaPager(ctx, userID, containerID, oldDelta, immutableIDs)
	if err != nil {
		return nil, nil, DeltaUpdate{}, graph.Wrap(ctx, err, "creating delta pager")
	}
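NewEventPager and NewEventDeltaPager split on oldDelta: an empty token builds a fresh enumeration, while a saved token resumes from the previous checkpoint via the SDK's delta request builder. A self-contained toy sketch of that control flow; every name below is a hypothetical stand-in, not a corso or Graph SDK API:

package main

import "fmt"

// page mimics a delta-query response: items plus either a nextLink
// (more pages to fetch now) or a deltaLink (checkpoint for the next run).
type page struct {
	items     []string
	nextLink  string
	deltaLink string
}

// fetch is a toy stand-in for the Graph delta endpoint.
func fetch(link string) page {
	switch link {
	case "":
		return page{items: []string{"a", "b"}, nextLink: "page-2"}
	case "page-2":
		return page{items: []string{"c"}, deltaLink: "delta-1"}
	default:
		return page{deltaLink: link} // resumed: nothing changed
	}
}

func enumerate(oldDelta string) (items []string, newDelta string) {
	link := oldDelta // empty string means "start fresh"

	for {
		p := fetch(link)
		items = append(items, p.items...)

		if p.nextLink == "" {
			return items, p.deltaLink
		}

		link = p.nextLink // follow nextLink until the delta checkpoint
	}
}

func main() {
	items, delta := enumerate("")
	fmt.Println(items, delta) // [a b c] delta-1

	items, _ = enumerate(delta)
	fmt.Println(items) // []
}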
@ -197,12 +197,12 @@ type mailFolderPager struct {
	builder *users.ItemMailFoldersRequestBuilder
}

-func NewMailFolderPager(service graph.Servicer, userID string) mailFolderPager {
+func (c Mail) NewMailFolderPager(userID string) mailFolderPager {
	// v1.0 non delta /mailFolders endpoint does not return any of the nested folders
	rawURL := fmt.Sprintf(mailFoldersBetaURLTemplate, userID)
-	builder := users.NewItemMailFoldersRequestBuilder(rawURL, service.Adapter())
+	builder := users.NewItemMailFoldersRequestBuilder(rawURL, c.Stable.Adapter())

-	return mailFolderPager{service, builder}
+	return mailFolderPager{c.Stable, builder}
}

func (p *mailFolderPager) getPage(ctx context.Context) (PageLinker, error) {

@ -241,7 +241,7 @@ func (c Mail) EnumerateContainers(
	errs *fault.Bus,
) error {
	el := errs.Local()
-	pgr := NewMailFolderPager(c.Stable, userID)
+	pgr := c.NewMailFolderPager(userID)

	for {
		if el.Failure() != nil {

@ -544,9 +544,8 @@ type mailPager struct {
	options *users.ItemMailFoldersItemMessagesRequestBuilderGetRequestConfiguration
}

-func NewMailPager(
+func (c Mail) NewMailPager(
	ctx context.Context,
-	gs graph.Servicer,
	userID, containerID string,
	immutableIDs bool,
) itemPager {

@ -557,7 +556,7 @@ func NewMailPager(
		Headers: newPreferHeaders(preferPageSize(maxNonDeltaPageSize), preferImmutableIDs(immutableIDs)),
	}

-	builder := gs.
+	builder := c.Stable.
		Client().
		Users().
		ByUserId(userID).

@ -565,7 +564,7 @@ func NewMailPager(
		ByMailFolderId(containerID).
		Messages()

-	return &mailPager{gs, builder, config}
+	return &mailPager{c.Stable, builder, config}
}

func (p *mailPager) getPage(ctx context.Context) (DeltaPageLinker, error) {

@ -620,9 +619,8 @@ func getMailDeltaBuilder(
	return builder
}

-func NewMailDeltaPager(
+func (c Mail) NewMailDeltaPager(
	ctx context.Context,
-	gs graph.Servicer,
	userID, containerID, oldDelta string,
	immutableIDs bool,
) itemPager {

@ -636,12 +634,12 @@ func NewMailDeltaPager(
	var builder *users.ItemMailFoldersItemMessagesDeltaRequestBuilder

	if len(oldDelta) > 0 {
-		builder = users.NewItemMailFoldersItemMessagesDeltaRequestBuilder(oldDelta, gs.Adapter())
+		builder = users.NewItemMailFoldersItemMessagesDeltaRequestBuilder(oldDelta, c.Stable.Adapter())
	} else {
-		builder = getMailDeltaBuilder(ctx, gs, userID, containerID, config)
+		builder = getMailDeltaBuilder(ctx, c.Stable, userID, containerID, config)
	}

-	return &mailDeltaPager{gs, userID, containerID, builder, config}
+	return &mailDeltaPager{c.Stable, userID, containerID, builder, config}
}

func (p *mailDeltaPager) getPage(ctx context.Context) (DeltaPageLinker, error) {

@ -683,8 +681,8 @@ func (c Mail) GetAddedAndRemovedItemIDs(
		"category", selectors.ExchangeMail,
		"container_id", containerID)

-	pager := NewMailPager(ctx, c.Stable, userID, containerID, immutableIDs)
-	deltaPager := NewMailDeltaPager(ctx, c.Stable, userID, containerID, oldDelta, immutableIDs)
+	pager := c.NewMailPager(ctx, userID, containerID, immutableIDs)
+	deltaPager := c.NewMailDeltaPager(ctx, userID, containerID, oldDelta, immutableIDs)

	return getAddedAndRemovedItemIDs(ctx, c.Stable, pager, deltaPager, oldDelta, canMakeDeltaQueries)
}
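GetAddedAndRemovedItemIDs hands getAddedAndRemovedItemIDs both a plain pager and a delta pager plus a canMakeDeltaQueries flag, so callers with full mailboxes (which reject delta queries) still get results. A standalone sketch of that fallback choice; the function and variable names here are hypothetical:

package main

import "fmt"

func enumerateFull() []string  { return []string{"a", "b", "c"} }
func enumerateDelta() []string { return []string{"c"} } // only changes since the last token

// addedItemIDs prefers the cheap incremental path, falling back to a
// full enumeration when delta queries aren't available (for example,
// when the mailbox quota is exceeded).
func addedItemIDs(canMakeDeltaQueries bool) []string {
	if canMakeDeltaQueries {
		return enumerateDelta()
	}

	return enumerateFull()
}

func main() {
	fmt.Println(addedItemIDs(true))  // [c]
	fmt.Println(addedItemIDs(false)) // [a b c]
}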
@ -32,23 +32,9 @@ type Sites struct {
}

// ---------------------------------------------------------------------------
-// methods
+// api calls
// ---------------------------------------------------------------------------

-// GetSite returns a minimal Site with the SiteID and the WebURL
-// TODO: delete in favor of sites.GetByID()
-func GetSite(ctx context.Context, gs graph.Servicer, siteID string) (models.Siteable, error) {
-	resp, err := gs.Client().
-		Sites().
-		BySiteId(siteID).
-		Get(ctx, nil)
-	if err != nil {
-		return nil, graph.Stack(ctx, err)
-	}
-
-	return resp, nil
-}
-
// GetAll retrieves all sites.
func (c Sites) GetAll(ctx context.Context, errs *fault.Bus) ([]models.Siteable, error) {
	service, err := c.Service()

@ -171,6 +157,27 @@ func (c Sites) GetIDAndName(ctx context.Context, siteID string) (string, string,
	return ptr.Val(s.GetId()), ptr.Val(s.GetWebUrl()), nil
}

+// ---------------------------------------------------------------------------
+// Info
+// ---------------------------------------------------------------------------
+
+func (c Sites) GetDefaultDrive(
+	ctx context.Context,
+	site string,
+) (models.Driveable, error) {
+	d, err := c.Stable.
+		Client().
+		Sites().
+		BySiteId(site).
+		Drive().
+		Get(ctx, nil)
+	if err != nil {
+		return nil, graph.Wrap(ctx, err, "getting site's default drive")
+	}
+
+	return d, nil
+}
+
// ---------------------------------------------------------------------------
// helpers
// ---------------------------------------------------------------------------
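GetDefaultDrive corresponds to the Graph endpoint GET /sites/{site-id}/drive, which returns the site's default document library. A hedged usage sketch; GetDefaultDrive and ptr.Val come from the diff, while the ac.Sites() accessor on api.Client is an assumption:

func defaultDriveID(ctx context.Context, ac api.Client, siteID string) (string, error) {
	d, err := ac.Sites().GetDefaultDrive(ctx, siteID) // ac.Sites() is assumed
	if err != nil {
		return "", err
	}

	return ptr.Val(d.GetId()), nil
}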
187
src/pkg/services/m365/api/user_info.go
Normal file
@ -0,0 +1,187 @@
package api

import (
	"github.com/microsoftgraph/msgraph-sdk-go/models"

	"github.com/alcionai/corso/src/internal/common/str"
	"github.com/alcionai/corso/src/internal/common/tform"
	"github.com/alcionai/corso/src/pkg/path"
)

// ---------------------------------------------------------------------------
// User Info
// ---------------------------------------------------------------------------

type UserInfo struct {
	ServicesEnabled map[path.ServiceType]struct{}
	Mailbox         MailboxInfo
}

type MailboxInfo struct {
	Purpose                    string
	ArchiveFolder              string
	DateFormat                 string
	TimeFormat                 string
	DelegateMeetMsgDeliveryOpt string
	Timezone                   string
	AutomaticRepliesSetting    AutomaticRepliesSettings
	Language                   Language
	WorkingHours               WorkingHours
	ErrGetMailBoxSetting       []error
	QuotaExceeded              bool
}

type AutomaticRepliesSettings struct {
	ExternalAudience       string
	ExternalReplyMessage   string
	InternalReplyMessage   string
	ScheduledEndDateTime   timeInfo
	ScheduledStartDateTime timeInfo
	Status                 string
}

type timeInfo struct {
	DateTime string
	Timezone string
}

type Language struct {
	Locale      string
	DisplayName string
}

type WorkingHours struct {
	DaysOfWeek []string
	StartTime  string
	EndTime    string
	TimeZone   struct {
		Name string
	}
}

func newUserInfo() *UserInfo {
	return &UserInfo{
		ServicesEnabled: map[path.ServiceType]struct{}{
			path.ExchangeService: {},
			path.OneDriveService: {},
		},
	}
}

// ServiceEnabled returns true if the UserInfo has an entry for the
// service. If no entry exists, the service is assumed to not be enabled.
func (ui *UserInfo) ServiceEnabled(service path.ServiceType) bool {
	if ui == nil || len(ui.ServicesEnabled) == 0 {
		return false
	}

	_, ok := ui.ServicesEnabled[service]

	return ok
}

// CanMakeDeltaQueries reports whether delta queries can be run against a
// mailbox. They cannot be run when the mailbox is full, which is indicated
// by QuotaExceeded.
func (ui *UserInfo) CanMakeDeltaQueries() bool {
	return !ui.Mailbox.QuotaExceeded
}

func parseMailboxSettings(
	settings models.Userable,
	mi MailboxInfo,
) MailboxInfo {
	var (
		additionalData = settings.GetAdditionalData()
		err            error
	)

	mi.ArchiveFolder, err = str.AnyValueToString("archiveFolder", additionalData)
	mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)

	mi.Timezone, err = str.AnyValueToString("timeZone", additionalData)
	mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)

	mi.DateFormat, err = str.AnyValueToString("dateFormat", additionalData)
	mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)

	mi.TimeFormat, err = str.AnyValueToString("timeFormat", additionalData)
	mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)

	mi.Purpose, err = str.AnyValueToString("userPurpose", additionalData)
	mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)

	mi.DelegateMeetMsgDeliveryOpt, err = str.AnyValueToString("delegateMeetingMessageDeliveryOptions", additionalData)
	mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)

	// decode automatic replies settings
	replySetting, err := tform.AnyValueToT[map[string]any]("automaticRepliesSetting", additionalData)
	mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)

	mi.AutomaticRepliesSetting.Status, err = str.AnyValueToString("status", replySetting)
	mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)

	mi.AutomaticRepliesSetting.ExternalAudience, err = str.AnyValueToString("externalAudience", replySetting)
	mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)

	mi.AutomaticRepliesSetting.ExternalReplyMessage, err = str.AnyValueToString("externalReplyMessage", replySetting)
	mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)

	mi.AutomaticRepliesSetting.InternalReplyMessage, err = str.AnyValueToString("internalReplyMessage", replySetting)
	mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)

	// decode scheduledStartDateTime
	startDateTime, err := tform.AnyValueToT[map[string]any]("scheduledStartDateTime", replySetting)
	mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)

	mi.AutomaticRepliesSetting.ScheduledStartDateTime.DateTime, err = str.AnyValueToString("dateTime", startDateTime)
	mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)

	mi.AutomaticRepliesSetting.ScheduledStartDateTime.Timezone, err = str.AnyValueToString("timeZone", startDateTime)
	mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)

	// decode scheduledEndDateTime
	endDateTime, err := tform.AnyValueToT[map[string]any]("scheduledEndDateTime", replySetting)
	mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)

	mi.AutomaticRepliesSetting.ScheduledEndDateTime.DateTime, err = str.AnyValueToString("dateTime", endDateTime)
	mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)

	mi.AutomaticRepliesSetting.ScheduledEndDateTime.Timezone, err = str.AnyValueToString("timeZone", endDateTime)
	mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)

	// decode language settings
	language, err := tform.AnyValueToT[map[string]any]("language", additionalData)
	mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)

	mi.Language.DisplayName, err = str.AnyValueToString("displayName", language)
	mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)

	mi.Language.Locale, err = str.AnyValueToString("locale", language)
	mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)

	// decode working hours
	workingHours, err := tform.AnyValueToT[map[string]any]("workingHours", additionalData)
	mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)

	mi.WorkingHours.StartTime, err = str.AnyValueToString("startTime", workingHours)
	mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)

	mi.WorkingHours.EndTime, err = str.AnyValueToString("endTime", workingHours)
	mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)

	timeZone, err := tform.AnyValueToT[map[string]any]("timeZone", workingHours)
	mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)

	mi.WorkingHours.TimeZone.Name, err = str.AnyValueToString("name", timeZone)
	mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)

	days, err := tform.AnyValueToT[[]any]("daysOfWeek", workingHours)
	mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)

	for _, day := range days {
		s, err := str.AnyToString(day)
		mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)
		mi.WorkingHours.DaysOfWeek = append(mi.WorkingHours.DaysOfWeek, s)
	}

	return mi
}
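parseMailboxSettings accumulates per-field errors in ErrGetMailBoxSetting rather than failing on the first missing key, so one absent setting doesn't discard the rest of the mailbox info. A self-contained sketch of that pattern; lookup and the map are toy stand-ins, while appendIfErr mirrors the helper defined in users.go:

package main

import (
	"errors"
	"fmt"
)

// appendIfErr mirrors the corso helper: nil errors are dropped,
// real ones are collected for later inspection.
func appendIfErr(errs []error, err error) []error {
	if err == nil {
		return errs
	}

	return append(errs, err)
}

func lookup(m map[string]string, key string) (string, error) {
	v, ok := m[key]
	if !ok {
		return "", errors.New("missing setting: " + key)
	}

	return v, nil
}

func main() {
	settings := map[string]string{"timeZone": "UTC"}

	var errs []error

	tz, err := lookup(settings, "timeZone")
	errs = appendIfErr(errs, err)

	_, err = lookup(settings, "dateFormat") // absent: recorded, not fatal
	errs = appendIfErr(errs, err)

	fmt.Println(tz, errs) // UTC [missing setting: dateFormat]
}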
@ -12,8 +12,6 @@ import (

	"github.com/alcionai/corso/src/internal/common/idname"
	"github.com/alcionai/corso/src/internal/common/ptr"
-	"github.com/alcionai/corso/src/internal/common/str"
-	"github.com/alcionai/corso/src/internal/common/tform"
	"github.com/alcionai/corso/src/internal/connector/graph"
	"github.com/alcionai/corso/src/pkg/fault"
	"github.com/alcionai/corso/src/pkg/logger"

@ -39,85 +37,7 @@ type Users struct {
}

-// ---------------------------------------------------------------------------
-// structs
-// ---------------------------------------------------------------------------
-
-type UserInfo struct {
-	ServicesEnabled map[path.ServiceType]struct{}
-	Mailbox         MailboxInfo
-}
-
-type MailboxInfo struct {
-	Purpose                    string
-	ArchiveFolder              string
-	DateFormat                 string
-	TimeFormat                 string
-	DelegateMeetMsgDeliveryOpt string
-	Timezone                   string
-	AutomaticRepliesSetting    AutomaticRepliesSettings
-	Language                   Language
-	WorkingHours               WorkingHours
-	ErrGetMailBoxSetting       []error
-	QuotaExceeded              bool
-}
-
-type AutomaticRepliesSettings struct {
-	ExternalAudience       string
-	ExternalReplyMessage   string
-	InternalReplyMessage   string
-	ScheduledEndDateTime   timeInfo
-	ScheduledStartDateTime timeInfo
-	Status                 string
-}
-
-type timeInfo struct {
-	DateTime string
-	Timezone string
-}
-
-type Language struct {
-	Locale      string
-	DisplayName string
-}
-
-type WorkingHours struct {
-	DaysOfWeek []string
-	StartTime  string
-	EndTime    string
-	TimeZone   struct {
-		Name string
-	}
-}
-
-func newUserInfo() *UserInfo {
-	return &UserInfo{
-		ServicesEnabled: map[path.ServiceType]struct{}{
-			path.ExchangeService: {},
-			path.OneDriveService: {},
-		},
-	}
-}
-
-// ServiceEnabled returns true if the UserInfo has an entry for the
-// service. If no entry exists, the service is assumed to not be enabled.
-func (ui *UserInfo) ServiceEnabled(service path.ServiceType) bool {
-	if ui == nil || len(ui.ServicesEnabled) == 0 {
-		return false
-	}
-
-	_, ok := ui.ServicesEnabled[service]
-
-	return ok
-}
-
-// Returns if we can run delta queries on a mailbox. We cannot run
-// them if the mailbox is full which is indicated by QuotaExceeded.
-func (ui *UserInfo) CanMakeDeltaQueries() bool {
-	return !ui.Mailbox.QuotaExceeded
-}
-
// ---------------------------------------------------------------------------
-// methods
+// User CRUD
// ---------------------------------------------------------------------------

// Filter out both guest users, and (for on-prem installations) non-synced users.
@ -133,28 +53,26 @@ func (ui *UserInfo) CanMakeDeltaQueries() bool {
//nolint:lll
var userFilterNoGuests = "onPremisesSyncEnabled eq true OR userType ne 'Guest'"

-func userOptions(fs *string) *users.UsersRequestBuilderGetRequestConfiguration {
-	return &users.UsersRequestBuilderGetRequestConfiguration{
-		Headers: newEventualConsistencyHeaders(),
-		QueryParameters: &users.UsersRequestBuilderGetQueryParameters{
-			Select: idAnd(userPrincipalName, displayName),
-			Filter: fs,
-			Count:  ptr.To(true),
-		},
-	}
-}
-
// GetAll retrieves all users.
-func (c Users) GetAll(ctx context.Context, errs *fault.Bus) ([]models.Userable, error) {
+func (c Users) GetAll(
+	ctx context.Context,
+	errs *fault.Bus,
+) ([]models.Userable, error) {
	service, err := c.Service()
	if err != nil {
		return nil, err
	}

-	var resp models.UserCollectionResponseable
-
-	resp, err = service.Client().Users().Get(ctx, userOptions(&userFilterNoGuests))
+	config := &users.UsersRequestBuilderGetRequestConfiguration{
+		Headers: newEventualConsistencyHeaders(),
+		QueryParameters: &users.UsersRequestBuilderGetQueryParameters{
+			Select: idAnd(userPrincipalName, displayName),
+			Filter: &userFilterNoGuests,
+			Count:  ptr.To(true),
+		},
+	}
+
+	resp, err := service.Client().Users().Get(ctx, config)
	if err != nil {
		return nil, graph.Wrap(ctx, err, "getting all users")
	}
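The query built above pairs a complex $filter with Count because Microsoft Graph treats this as an advanced user query: it is only honored when the request carries the ConsistencyLevel: eventual header, which is what newEventualConsistencyHeaders supplies. A rough standalone sketch of the wire-level request the SDK ends up making; the exact encoding is an assumption:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	q := url.Values{}
	q.Set("$select", "id,userPrincipalName,displayName")
	q.Set("$filter", "onPremisesSyncEnabled eq true OR userType ne 'Guest'")
	q.Set("$count", "true")

	// advanced queries require the eventual-consistency header
	fmt.Println("GET https://graph.microsoft.com/v1.0/users?" + q.Encode())
	fmt.Println("ConsistencyLevel: eventual")
}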
@ -241,238 +159,6 @@ func (c Users) GetAllIDsAndNames(ctx context.Context, errs *fault.Bus) (idname.C
	return idname.NewCache(idToName), nil
}

-func (c Users) GetInfo(ctx context.Context, userID string) (*UserInfo, error) {
-	// Assume all services are enabled
-	// then filter down to only services the user has enabled
-	userInfo := newUserInfo()
-
-	requestParameters := users.ItemMailFoldersRequestBuilderGetQueryParameters{
-		Select: idAnd(),
-		Top:    ptr.To[int32](1), // if we get any folders, then we have access.
-	}
-
-	options := users.ItemMailFoldersRequestBuilderGetRequestConfiguration{
-		QueryParameters: &requestParameters,
-	}
-
-	mfs, err := c.GetMailFolders(ctx, userID, options)
-	if err != nil {
-		logger.CtxErr(ctx, err).Error("getting user's mail folders")
-
-		if graph.IsErrUserNotFound(err) {
-			return nil, clues.Stack(graph.ErrResourceOwnerNotFound, err)
-		}
-
-		if !graph.IsErrExchangeMailFolderNotFound(err) {
-			return nil, clues.Stack(err)
-		}
-
-		delete(userInfo.ServicesEnabled, path.ExchangeService)
-	}
-
-	if _, err := c.GetDrives(ctx, userID); err != nil {
-		logger.CtxErr(ctx, err).Error("getting user's drives")
-
-		if graph.IsErrUserNotFound(err) {
-			return nil, clues.Stack(graph.ErrResourceOwnerNotFound, err)
-		}
-
-		if !clues.HasLabel(err, graph.LabelsMysiteNotFound) {
-			return nil, clues.Stack(err)
-		}
-
-		delete(userInfo.ServicesEnabled, path.OneDriveService)
-	}
-
-	mbxInfo, err := c.getMailboxSettings(ctx, userID)
-	if err != nil {
-		return nil, err
-	}
-
-	userInfo.Mailbox = mbxInfo
-
-	// TODO: This tries to determine if the user has hit their mailbox
-	// limit by trying to fetch an item and seeing if we get the quota
-	// exceeded error. Ideally(if available) we should convert this to
-	// pull the user's usage via an api and compare if they have used
-	// up their quota.
-	if mfs != nil {
-		mf := mfs.GetValue()[0] // we will always have one
-		options := &users.ItemMailFoldersItemMessagesDeltaRequestBuilderGetRequestConfiguration{
-			QueryParameters: &users.ItemMailFoldersItemMessagesDeltaRequestBuilderGetQueryParameters{
-				Top: ptr.To[int32](1), // just one item is enough
-			},
-		}
-		_, err = c.Stable.Client().
-			Users().
-			ByUserId(userID).
-			MailFolders().
-			ByMailFolderId(ptr.Val(mf.GetId())).
-			Messages().
-			Delta().
-			Get(ctx, options)
-
-		if err != nil && !graph.IsErrQuotaExceeded(err) {
-			return nil, err
-		}
-
-		userInfo.Mailbox.QuotaExceeded = graph.IsErrQuotaExceeded(err)
-	}
-
-	return userInfo, nil
-}
-
-// TODO: remove when exchange api goes into this package
-func (c Users) GetMailFolders(
-	ctx context.Context,
-	userID string,
-	options users.ItemMailFoldersRequestBuilderGetRequestConfiguration,
-) (models.MailFolderCollectionResponseable, error) {
-	mailFolders, err := c.Stable.Client().Users().ByUserId(userID).MailFolders().Get(ctx, &options)
-	if err != nil {
-		return nil, graph.Wrap(ctx, err, "getting MailFolders")
-	}
-
-	return mailFolders, nil
-}
-
-// TODO: remove when drive api goes into this package
-func (c Users) GetDrives(ctx context.Context, userID string) (models.DriveCollectionResponseable, error) {
-	drives, err := c.Stable.Client().Users().ByUserId(userID).Drives().Get(ctx, nil)
-	if err != nil {
-		return nil, graph.Wrap(ctx, err, "getting drives")
-	}
-
-	return drives, nil
-}
-
-func (c Users) getMailboxSettings(
-	ctx context.Context,
-	userID string,
-) (MailboxInfo, error) {
-	var (
-		rawURL  = fmt.Sprintf("https://graph.microsoft.com/v1.0/users/%s/mailboxSettings", userID)
-		adapter = c.Stable.Adapter()
-		mi      = MailboxInfo{
-			ErrGetMailBoxSetting: []error{},
-		}
-	)
-
-	settings, err := users.NewUserItemRequestBuilder(rawURL, adapter).Get(ctx, nil)
-	if err != nil && !(graph.IsErrAccessDenied(err) || graph.IsErrExchangeMailFolderNotFound(err)) {
-		logger.CtxErr(ctx, err).Error("getting mailbox settings")
-		return mi, graph.Wrap(ctx, err, "getting additional data")
-	}
-
-	if graph.IsErrAccessDenied(err) {
-		logger.Ctx(ctx).Info("err getting additional data: access denied")
-
-		mi.ErrGetMailBoxSetting = append(mi.ErrGetMailBoxSetting, clues.New("access denied"))
-
-		return mi, nil
-	}
-
-	if graph.IsErrExchangeMailFolderNotFound(err) {
-		logger.Ctx(ctx).Info("mailfolders not found")
-
-		mi.ErrGetMailBoxSetting = append(mi.ErrGetMailBoxSetting, ErrMailBoxSettingsNotFound)
-
-		return mi, nil
-	}
-
-	additionalData := settings.GetAdditionalData()
-
-	mi.ArchiveFolder, err = str.AnyValueToString("archiveFolder", additionalData)
-	mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)
-
-	mi.Timezone, err = str.AnyValueToString("timeZone", additionalData)
-	mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)
-
-	mi.DateFormat, err = str.AnyValueToString("dateFormat", additionalData)
-	mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)
-
-	mi.TimeFormat, err = str.AnyValueToString("timeFormat", additionalData)
-	mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)
-
-	mi.Purpose, err = str.AnyValueToString("userPurpose", additionalData)
-	mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)
-
-	mi.DelegateMeetMsgDeliveryOpt, err = str.AnyValueToString("delegateMeetingMessageDeliveryOptions", additionalData)
-	mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)
-
-	// decode automatic replies settings
-	replySetting, err := tform.AnyValueToT[map[string]any]("automaticRepliesSetting", additionalData)
-	mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)
-
-	mi.AutomaticRepliesSetting.Status, err = str.AnyValueToString("status", replySetting)
-	mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)
-
-	mi.AutomaticRepliesSetting.ExternalAudience, err = str.AnyValueToString("externalAudience", replySetting)
-	mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)
-
-	mi.AutomaticRepliesSetting.ExternalReplyMessage, err = str.AnyValueToString("externalReplyMessage", replySetting)
-	mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)
-
-	mi.AutomaticRepliesSetting.InternalReplyMessage, err = str.AnyValueToString("internalReplyMessage", replySetting)
-	mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)
-
-	// decode scheduledStartDateTime
-	startDateTime, err := tform.AnyValueToT[map[string]any]("scheduledStartDateTime", replySetting)
-	mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)
-
-	mi.AutomaticRepliesSetting.ScheduledStartDateTime.DateTime, err = str.AnyValueToString("dateTime", startDateTime)
-	mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)
-
-	mi.AutomaticRepliesSetting.ScheduledStartDateTime.Timezone, err = str.AnyValueToString("timeZone", startDateTime)
-	mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)
-
-	endDateTime, err := tform.AnyValueToT[map[string]any]("scheduledEndDateTime", replySetting)
-	mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)
-
-	mi.AutomaticRepliesSetting.ScheduledEndDateTime.DateTime, err = str.AnyValueToString("dateTime", endDateTime)
-	mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)
-
-	mi.AutomaticRepliesSetting.ScheduledEndDateTime.Timezone, err = str.AnyValueToString("timeZone", endDateTime)
-	mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)
-
-	// Language decode
-	language, err := tform.AnyValueToT[map[string]any]("language", additionalData)
-	mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)
-
-	mi.Language.DisplayName, err = str.AnyValueToString("displayName", language)
-	mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)
-
-	mi.Language.Locale, err = str.AnyValueToString("locale", language)
-	mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)
-
-	// working hours
-	workingHours, err := tform.AnyValueToT[map[string]any]("workingHours", additionalData)
-	mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)
-
-	mi.WorkingHours.StartTime, err = str.AnyValueToString("startTime", workingHours)
-	mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)
-
-	mi.WorkingHours.EndTime, err = str.AnyValueToString("endTime", workingHours)
-	mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)
-
-	timeZone, err := tform.AnyValueToT[map[string]any]("timeZone", workingHours)
-	mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)
-
-	mi.WorkingHours.TimeZone.Name, err = str.AnyValueToString("name", timeZone)
-	mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)
-
-	days, err := tform.AnyValueToT[[]any]("daysOfWeek", workingHours)
-	mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)
-
-	for _, day := range days {
-		s, err := str.AnyToString(day)
-		mi.ErrGetMailBoxSetting = appendIfErr(mi.ErrGetMailBoxSetting, err)
-		mi.WorkingHours.DaysOfWeek = append(mi.WorkingHours.DaysOfWeek, s)
-	}
-
-	return mi, nil
-}

func appendIfErr(errs []error, err error) []error {
	if err == nil {
		return errs

@ -481,6 +167,177 @@ func appendIfErr(errs []error, err error) []error {
	return append(errs, err)
}
+// ---------------------------------------------------------------------------
+// Info
+// ---------------------------------------------------------------------------
+
+func (c Users) GetInfo(ctx context.Context, userID string) (*UserInfo, error) {
+	var (
+		// assume all services are enabled, then filter down
+		// to only the services the user has enabled
+		userInfo = newUserInfo()
+
+		mailFolderFound = true
+	)
+
+	// check whether the user is able to access their onedrive drive.
+	// if they cannot, we can assume they are ineligible for onedrive backups.
+	if _, err := c.GetDefaultDrive(ctx, userID); err != nil {
+		if !clues.HasLabel(err, graph.LabelsMysiteNotFound) {
+			logger.CtxErr(ctx, err).Error("getting user's drive")
+			return nil, graph.Wrap(ctx, err, "getting user's drive")
+		}
+
+		logger.Ctx(ctx).Info("resource owner does not have a drive")
+		delete(userInfo.ServicesEnabled, path.OneDriveService)
+	}
+
+	// check whether the user is able to access their inbox.
+	// if they cannot, we can assume they are ineligible for exchange backups.
+	inbx, err := c.GetMailInbox(ctx, userID)
+	if err != nil {
+		err = graph.Stack(ctx, err)
+
+		if graph.IsErrUserNotFound(err) {
+			logger.CtxErr(ctx, err).Error("user not found")
+			return nil, clues.Stack(graph.ErrResourceOwnerNotFound, err)
+		}
+
+		if !graph.IsErrExchangeMailFolderNotFound(err) {
+			logger.CtxErr(ctx, err).Error("getting user's mail folder")
+			return nil, err
+		}
+
+		logger.Ctx(ctx).Info("resource owner does not have a mailbox enabled")
+		delete(userInfo.ServicesEnabled, path.ExchangeService)
+
+		mailFolderFound = false
+	}
+
+	// check whether the user has accessible mailbox settings.
+	// if they do, aggregate them in the MailboxInfo
+	mi := MailboxInfo{
+		ErrGetMailBoxSetting: []error{},
+	}
+
+	if !mailFolderFound {
+		mi.ErrGetMailBoxSetting = append(mi.ErrGetMailBoxSetting, ErrMailBoxSettingsNotFound)
+		userInfo.Mailbox = mi
+
+		return userInfo, nil
+	}
+
+	mboxSettings, err := c.getMailboxSettings(ctx, userID)
+	if err != nil {
+		logger.CtxErr(ctx, err).Info("err getting user's mailbox settings")
+
+		if !graph.IsErrAccessDenied(err) {
+			return nil, graph.Wrap(ctx, err, "getting user's mailbox settings")
+		}
+
+		mi.ErrGetMailBoxSetting = append(mi.ErrGetMailBoxSetting, clues.New("access denied"))
+	} else {
+		mi = parseMailboxSettings(mboxSettings, mi)
+	}
+
+	err = c.getFirstInboxMessage(ctx, userID, ptr.Val(inbx.GetId()))
+	if err != nil && !graph.IsErrQuotaExceeded(err) {
+		return nil, err
+	}
+
+	// record the quota state on mi before it is copied into userInfo,
+	// so that the flag isn't lost by the assignment below.
+	mi.QuotaExceeded = graph.IsErrQuotaExceeded(err)
+
+	userInfo.Mailbox = mi
+
+	return userInfo, nil
+}
+
+func (c Users) getMailboxSettings(
+	ctx context.Context,
+	userID string,
+) (models.Userable, error) {
+	settings, err := users.
+		NewUserItemRequestBuilder(
+			fmt.Sprintf("https://graph.microsoft.com/v1.0/users/%s/mailboxSettings", userID),
+			c.Stable.Adapter(),
+		).
+		Get(ctx, nil)
+	if err != nil {
+		return nil, graph.Stack(ctx, err)
+	}
+
+	return settings, nil
+}
+
+func (c Users) GetMailInbox(
+	ctx context.Context,
+	userID string,
+) (models.MailFolderable, error) {
+	inbox, err := c.Stable.
+		Client().
+		Users().
+		ByUserId(userID).
+		MailFolders().
+		ByMailFolderId("inbox").
+		Get(ctx, nil)
+	if err != nil {
+		return nil, graph.Wrap(ctx, err, "getting mail inbox")
+	}
+
+	return inbox, nil
+}
+
+func (c Users) GetDefaultDrive(
+	ctx context.Context,
+	userID string,
+) (models.Driveable, error) {
+	d, err := c.Stable.
+		Client().
+		Users().
+		ByUserId(userID).
+		Drive().
+		Get(ctx, nil)
+	if err != nil {
+		return nil, graph.Wrap(ctx, err, "getting user's drive")
+	}
+
+	return d, nil
+}
+
+// TODO: This tries to determine if the user has hit their mailbox
+// limit by trying to fetch an item and seeing if we get the quota
+// exceeded error. Ideally (if available) we should convert this to
+// pull the user's usage via an api and compare if they have used
+// up their quota.
+func (c Users) getFirstInboxMessage(
+	ctx context.Context,
+	userID, inboxID string,
+) error {
+	config := &users.ItemMailFoldersItemMessagesDeltaRequestBuilderGetRequestConfiguration{
+		QueryParameters: &users.ItemMailFoldersItemMessagesDeltaRequestBuilderGetQueryParameters{
+			Select: idAnd(),
+		},
+		Headers: newPreferHeaders(preferPageSize(1)),
+	}
+
+	_, err := c.Stable.
+		Client().
+		Users().
+		ByUserId(userID).
+		MailFolders().
+		ByMailFolderId(inboxID).
+		Messages().
+		Delta().
+		Get(ctx, config)
+	if err != nil {
+		return graph.Stack(ctx, err)
+	}
+
+	return nil
+}

// ---------------------------------------------------------------------------
// helpers
// ---------------------------------------------------------------------------
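GetInfo now derives per-service eligibility from two cheap probes (the default drive and the inbox) plus the mailbox-settings fetch. A hedged caller-side sketch; GetInfo, ServiceEnabled, and CanMakeDeltaQueries come from the diff, while the ac.Users() accessor is an assumption:

func exchangeBackupPlan(ctx context.Context, ac api.Client, userID string) (backup, useDelta bool, err error) {
	info, err := ac.Users().GetInfo(ctx, userID) // ac.Users() is assumed
	if err != nil {
		return false, false, err
	}

	backup = info.ServiceEnabled(path.ExchangeService)
	// full mailboxes reject delta queries; plan a full enumeration instead
	useDelta = backup && info.CanMakeDeltaQueries()

	return backup, useDelta, nil
}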
@ -5,7 +5,6 @@ import (

	"github.com/alcionai/clues"
	"github.com/microsoftgraph/msgraph-sdk-go/models"
-	"github.com/microsoftgraph/msgraph-sdk-go/users"

	"github.com/alcionai/corso/src/internal/common/idname"
	"github.com/alcionai/corso/src/internal/common/ptr"

@ -79,16 +78,7 @@ func UserHasMailbox(ctx context.Context, acct account.Account, userID string) (b
		return false, clues.Wrap(err, "getting mailbox").WithClues(ctx)
	}

-	requestParameters := users.ItemMailFoldersRequestBuilderGetQueryParameters{
-		Select: []string{"id"},
-		Top:    ptr.To[int32](1), // if we get any folders, then we have access.
-	}
-
-	options := users.ItemMailFoldersRequestBuilderGetRequestConfiguration{
-		QueryParameters: &requestParameters,
-	}
-
-	_, err = uapi.GetMailFolders(ctx, userID, options)
+	_, err = uapi.GetMailInbox(ctx, userID)
	if err != nil {
		// we consider this a non-error case, since it
		// answers the question the caller is asking.

@ -100,6 +90,10 @@ func UserHasMailbox(ctx context.Context, acct account.Account, userID string) (b
		return false, clues.Stack(graph.ErrResourceOwnerNotFound, err)
	}

+	if graph.IsErrExchangeMailFolderNotFound(err) {
+		return false, nil
+	}
+
	return false, clues.Stack(err)
}

@ -114,7 +108,7 @@ func UserHasDrives(ctx context.Context, acct account.Account, userID string) (bo
		return false, clues.Wrap(err, "getting drives").WithClues(ctx)
	}

-	_, err = uapi.GetDrives(ctx, userID)
+	_, err = uapi.GetDefaultDrive(ctx, userID)
	if err != nil {
		// we consider this a non-error case, since it
		// answers the question the caller is asking.
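UserHasMailbox and UserHasDrives turn an access probe into a yes/no answer by classifying the error instead of propagating it: "not found" means false, anything else is a genuine failure. A self-contained sketch of that shape with toy error values, not the corso or graph helpers:

package main

import (
	"errors"
	"fmt"
)

// errNotFound stands in for graph.IsErrExchangeMailFolderNotFound.
var errNotFound = errors.New("mailbox not found")

// hasMailbox answers the caller's question directly: "not found" is a
// valid "no", while any other error is surfaced as a real failure.
func hasMailbox(probe func() error) (bool, error) {
	err := probe()

	switch {
	case err == nil:
		return true, nil
	case errors.Is(err, errNotFound):
		return false, nil
	default:
		return false, err
	}
}

func main() {
	ok, err := hasMailbox(func() error { return errNotFound })
	fmt.Println(ok, err) // false <nil>
}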
14
website/package-lock.json
generated
@ -20,7 +20,7 @@
	"feather-icons": "^4.29.0",
	"jarallax": "^2.1.3",
	"mdx-mermaid": "^1.3.2",
-	"mermaid": "^10.2.0",
+	"mermaid": "^10.2.2",
	"prism-react-renderer": "^1.3.5",
	"react": "^17.0.2",
	"react-dom": "^17.0.2",

@ -9259,9 +9259,9 @@
	}
},
"node_modules/mermaid": {
-	"version": "10.2.0",
-	"resolved": "https://registry.npmjs.org/mermaid/-/mermaid-10.2.0.tgz",
-	"integrity": "sha512-mYKXlH9ngKdMsJ87VYMdlDZXS+MXDAGKPf3XzDf2vvAPnmRoFm7GFebemOAOWYI1bWSECDyoWTGwesWe6mW1Cw==",
+	"version": "10.2.2",
+	"resolved": "https://registry.npmjs.org/mermaid/-/mermaid-10.2.2.tgz",
+	"integrity": "sha512-ifYKlCcZKYq48hxC1poJXnvk/PbCdgqqbg5B4qsybb8nIItPM1ATKqVEDkyde6BBJxVFhVJr9hoUjipzniQJZg==",
	"dependencies": {
		"@braintree/sanitize-url": "^6.0.2",
		"cytoscape": "^3.23.0",

@ -21647,9 +21647,9 @@
	"integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg=="
},
"mermaid": {
-	"version": "10.2.0",
-	"resolved": "https://registry.npmjs.org/mermaid/-/mermaid-10.2.0.tgz",
-	"integrity": "sha512-mYKXlH9ngKdMsJ87VYMdlDZXS+MXDAGKPf3XzDf2vvAPnmRoFm7GFebemOAOWYI1bWSECDyoWTGwesWe6mW1Cw==",
+	"version": "10.2.2",
+	"resolved": "https://registry.npmjs.org/mermaid/-/mermaid-10.2.2.tgz",
+	"integrity": "sha512-ifYKlCcZKYq48hxC1poJXnvk/PbCdgqqbg5B4qsybb8nIItPM1ATKqVEDkyde6BBJxVFhVJr9hoUjipzniQJZg==",
	"requires": {
		"@braintree/sanitize-url": "^6.0.2",
		"cytoscape": "^3.23.0",

@ -26,7 +26,7 @@
	"feather-icons": "^4.29.0",
	"jarallax": "^2.1.3",
	"mdx-mermaid": "^1.3.2",
-	"mermaid": "^10.2.0",
+	"mermaid": "^10.2.2",
	"prism-react-renderer": "^1.3.5",
	"react": "^17.0.2",
	"react-dom": "^17.0.2",